-rw-r--r-- .gitignore | 2
-rw-r--r-- .mailmap | 5
-rw-r--r-- CODE_OF_CONDUCT.md | 2
-rw-r--r-- Documentation/CodingGuidelines | 15
-rw-r--r-- Documentation/Makefile | 65
-rw-r--r-- Documentation/MyFirstObjectWalk.txt | 44
-rw-r--r-- Documentation/RelNotes/2.36.0.txt | 430
-rw-r--r-- Documentation/SubmittingPatches | 36
-rw-r--r-- Documentation/config.txt | 39
-rw-r--r-- Documentation/config/advice.txt | 18
-rw-r--r-- Documentation/config/clone.txt | 5
-rw-r--r-- Documentation/config/core.txt | 119
-rw-r--r-- Documentation/config/extensions.txt | 31
-rw-r--r-- Documentation/config/fetch.txt | 25
-rw-r--r-- Documentation/config/gpg.txt | 4
-rw-r--r-- Documentation/config/remote.txt | 6
-rw-r--r-- Documentation/config/repack.txt | 5
-rw-r--r-- Documentation/config/reset.txt | 2
-rw-r--r-- Documentation/config/sparse.txt | 27
-rw-r--r-- Documentation/config/stash.txt | 7
-rw-r--r-- Documentation/config/submodule.txt | 37
-rw-r--r-- Documentation/diff-options.txt | 21
-rw-r--r-- Documentation/fetch-options.txt | 37
-rw-r--r-- Documentation/git-branch.txt | 19
-rw-r--r-- Documentation/git-bundle.txt | 7
-rw-r--r-- Documentation/git-cat-file.txt | 52
-rw-r--r-- Documentation/git-check-ignore.txt | 4
-rw-r--r-- Documentation/git-checkout-index.txt | 10
-rw-r--r-- Documentation/git-clone.txt | 7
-rw-r--r-- Documentation/git-config.txt | 8
-rw-r--r-- Documentation/git-credential-cache--daemon.txt | 6
-rw-r--r-- Documentation/git-fetch-pack.txt | 4
-rw-r--r-- Documentation/git-fetch.txt | 10
-rw-r--r-- Documentation/git-fsmonitor--daemon.txt | 75
-rw-r--r-- Documentation/git-help.txt | 15
-rw-r--r-- Documentation/git-hook.txt | 45
-rw-r--r-- Documentation/git-index-pack.txt | 8
-rw-r--r-- Documentation/git-ls-files.txt | 2
-rw-r--r-- Documentation/git-ls-tree.txt | 68
-rw-r--r-- Documentation/git-maintenance.txt | 38
-rw-r--r-- Documentation/git-mktree.txt | 2
-rw-r--r-- Documentation/git-name-rev.txt | 30
-rw-r--r-- Documentation/git-read-tree.txt | 16
-rw-r--r-- Documentation/git-remote.txt | 2
-rw-r--r-- Documentation/git-reset.txt | 9
-rw-r--r-- Documentation/git-sparse-checkout.txt | 92
-rw-r--r-- Documentation/git-submodule.txt | 6
-rw-r--r-- Documentation/git-update-index.txt | 65
-rw-r--r-- Documentation/git-worktree.txt | 291
-rw-r--r-- Documentation/gitattributes.txt | 14
-rw-r--r-- Documentation/gitcli.txt | 19
-rw-r--r-- Documentation/githooks.txt | 4
-rw-r--r-- Documentation/glossary-content.txt | 13
-rw-r--r-- Documentation/rev-list-options.txt | 22
-rw-r--r-- Documentation/technical/bundle-format.txt | 11
-rw-r--r-- Documentation/technical/commit-graph-format.txt | 12
-rw-r--r-- Documentation/technical/multi-pack-index.txt | 1
-rw-r--r-- Documentation/technical/pack-format.txt | 13
-rw-r--r-- Documentation/technical/partial-clone.txt | 3
-rw-r--r-- Documentation/technical/reftable.txt | 2
-rwxr-xr-x GIT-VERSION-GEN | 2
-rw-r--r-- Makefile | 189
-rw-r--r-- README.md | 14
l--------- RelNotes | 2
-rw-r--r-- add-interactive.c | 8
-rw-r--r-- add-patch.c | 20
-rw-r--r-- advice.c | 5
-rw-r--r-- advice.h | 5
-rw-r--r-- apply.c | 66
-rw-r--r-- apply.h | 28
-rw-r--r-- archive-tar.c | 6
-rw-r--r-- archive-zip.c | 7
-rw-r--r-- archive.c | 2
-rw-r--r-- attr.c | 3
-rw-r--r-- attr.h | 1
-rw-r--r-- banned.h | 5
-rw-r--r-- bisect.c | 3
-rw-r--r-- bisect.h | 4
-rw-r--r-- blame.c | 5
-rw-r--r-- block-sha1/sha1.c | 17
-rw-r--r-- branch.c | 377
-rw-r--r-- branch.h | 60
-rw-r--r-- builtin.h | 2
-rw-r--r-- builtin/add.c | 14
-rw-r--r-- builtin/am.c | 15
-rw-r--r-- builtin/bisect--helper.c | 95
-rw-r--r-- builtin/blame.c | 8
-rw-r--r-- builtin/branch.c | 70
-rw-r--r-- builtin/bundle.c | 1
-rw-r--r-- builtin/cat-file.c | 396
-rw-r--r-- builtin/checkout-index.c | 41
-rw-r--r-- builtin/checkout.c | 70
-rw-r--r-- builtin/clean.c | 3
-rw-r--r-- builtin/clone.c | 79
-rw-r--r-- builtin/commit-graph.c | 6
-rw-r--r-- builtin/commit.c | 54
-rw-r--r-- builtin/config.c | 4
-rw-r--r-- builtin/count-objects.c | 2
-rw-r--r-- builtin/diff.c | 6
-rw-r--r-- builtin/difftool.c | 5
-rw-r--r-- builtin/fast-export.c | 4
-rw-r--r-- builtin/fast-import.c | 29
-rw-r--r-- builtin/fetch-pack.c | 8
-rw-r--r-- builtin/fetch.c | 347
-rw-r--r-- builtin/fsmonitor--daemon.c | 1479
-rw-r--r-- builtin/gc.c | 4
-rw-r--r-- builtin/grep.c | 39
-rw-r--r-- builtin/hash-object.c | 13
-rw-r--r-- builtin/help.c | 67
-rw-r--r-- builtin/hook.c | 84
-rw-r--r-- builtin/index-pack.c | 29
-rw-r--r-- builtin/log.c | 33
-rw-r--r-- builtin/ls-files.c | 4
-rw-r--r-- builtin/ls-remote.c | 3
-rw-r--r-- builtin/ls-tree.c | 368
-rw-r--r-- builtin/mailsplit.c | 3
-rw-r--r-- builtin/merge-base.c | 11
-rw-r--r-- builtin/merge-recursive.c | 2
-rw-r--r-- builtin/merge.c | 42
-rw-r--r-- builtin/mktag.c | 11
-rw-r--r-- builtin/mktree.c | 4
-rw-r--r-- builtin/name-rev.c | 103
-rw-r--r-- builtin/notes.c | 8
-rw-r--r-- builtin/pack-objects.c | 96
-rw-r--r-- builtin/patch-id.c | 9
-rw-r--r-- builtin/prune-packed.c | 2
-rw-r--r-- builtin/pull.c | 19
-rw-r--r-- builtin/push.c | 2
-rw-r--r-- builtin/read-tree.c | 14
-rw-r--r-- builtin/rebase.c | 108
-rw-r--r-- builtin/receive-pack.c | 68
-rw-r--r-- builtin/reflog.c | 743
-rw-r--r-- builtin/remote.c | 49
-rw-r--r-- builtin/repack.c | 12
-rw-r--r-- builtin/replace.c | 6
-rw-r--r-- builtin/reset.c | 26
-rw-r--r-- builtin/rev-list.c | 31
-rw-r--r-- builtin/send-pack.c | 2
-rw-r--r-- builtin/shortlog.c | 11
-rw-r--r-- builtin/show-branch.c | 1
-rw-r--r-- builtin/sparse-checkout.c | 123
-rw-r--r-- builtin/stash.c | 40
-rw-r--r-- builtin/stripspace.c | 4
-rw-r--r-- builtin/submodule--helper.c | 847
-rw-r--r-- builtin/tag.c | 3
-rw-r--r-- builtin/unpack-objects.c | 8
-rw-r--r-- builtin/update-index.c | 35
-rw-r--r-- builtin/update-server-info.c | 2
-rw-r--r-- builtin/worktree.c | 164
-rw-r--r-- bulk-checkin.c | 9
-rw-r--r-- bundle.c | 83
-rw-r--r-- bundle.h | 2
-rw-r--r-- cache-tree.c | 8
-rw-r--r-- cache.h | 126
-rwxr-xr-x ci/lib.sh | 1
-rw-r--r-- command-list.txt | 1
-rw-r--r-- commit-graph.c | 46
-rw-r--r-- commit-graph.h | 2
-rw-r--r-- commit.c | 46
-rw-r--r-- commit.h | 9
-rw-r--r-- compat/fsmonitor/fsm-darwin-gcc.h | 92
-rw-r--r-- compat/fsmonitor/fsm-listen-darwin.c | 427
-rw-r--r-- compat/fsmonitor/fsm-listen-win32.c | 586
-rw-r--r-- compat/fsmonitor/fsm-listen.h | 49
-rw-r--r-- compat/mingw.c | 23
-rw-r--r-- compat/mingw.h | 3
-rw-r--r-- compat/qsort_s.c | 14
-rw-r--r-- compat/terminal.c | 265
-rw-r--r-- compat/terminal.h | 17
-rw-r--r-- compat/win32/flush.c | 28
-rw-r--r-- compat/winansi.c | 1
-rw-r--r-- compat/zlib-uncompress2.c | 11
-rw-r--r-- config.c | 295
-rw-r--r-- config.h | 56
-rw-r--r-- config.mak.uname | 39
-rw-r--r-- configure.ac | 21
-rw-r--r-- connect.c | 4
-rw-r--r-- contrib/buildsystems/CMakeLists.txt | 28
-rw-r--r-- contrib/coccinelle/strbuf.cocci | 6
-rw-r--r-- contrib/completion/git-completion.bash | 61
-rw-r--r-- contrib/completion/git-prompt.sh | 59
-rwxr-xr-x contrib/rerere-train.sh | 2
-rw-r--r-- contrib/scalar/Makefile | 22
-rw-r--r-- contrib/scalar/scalar.c | 23
-rw-r--r-- contrib/scalar/scalar.txt | 10
-rw-r--r-- contrib/scalar/t/Makefile | 3
-rwxr-xr-x contrib/scalar/t/t9099-scalar.sh | 8
-rwxr-xr-x contrib/subtree/git-subtree.sh | 4
-rw-r--r-- convert.c | 20
-rw-r--r-- credential.c | 1
-rw-r--r-- csum-file.c | 5
-rw-r--r-- csum-file.h | 3
-rw-r--r-- date.c | 9
-rw-r--r-- date.h | 74
-rw-r--r-- diff-merges.c | 16
-rw-r--r-- diff.c | 235
-rw-r--r-- diff.h | 5
-rw-r--r-- diffcore-rename.c | 2
-rw-r--r-- dir.c | 23
-rw-r--r-- environment.c | 6
-rw-r--r-- fetch-negotiator.c | 7
-rw-r--r-- fetch-negotiator.h | 8
-rw-r--r-- fetch-pack.c | 86
-rw-r--r-- fetch-pack.h | 1
-rw-r--r-- fsmonitor--daemon.h | 166
-rw-r--r-- fsmonitor-ipc.c | 171
-rw-r--r-- fsmonitor-ipc.h | 48
-rw-r--r-- fsmonitor-settings.c | 114
-rw-r--r-- fsmonitor-settings.h | 21
-rw-r--r-- fsmonitor.c | 216
-rw-r--r-- fsmonitor.h | 15
-rw-r--r-- git-compat-util.h | 70
-rwxr-xr-x git-p4.py | 254
-rwxr-xr-x git-send-email.perl | 22
-rw-r--r-- git-sh-setup.sh | 1
-rwxr-xr-x git-submodule.sh | 160
-rw-r--r-- git.c | 3
-rwxr-xr-x gitweb/gitweb.perl | 4
-rw-r--r-- gpg-interface.c | 47
-rw-r--r-- graph.c | 12
-rw-r--r-- graph.h | 5
-rw-r--r-- grep.c | 185
-rw-r--r-- grep.h | 31
-rw-r--r-- help.c | 41
-rw-r--r-- help.h | 2
-rw-r--r-- hook.c | 138
-rw-r--r-- hook.h | 69
-rw-r--r-- http-backend.c | 1
-rw-r--r-- http-push.c | 2
-rw-r--r-- ident.c | 1
-rw-r--r-- imap-send.c | 14
-rw-r--r-- list-objects-filter-options.c | 40
-rw-r--r-- list-objects-filter-options.h | 49
-rw-r--r-- list-objects.c | 61
-rw-r--r-- list-objects.h | 12
-rw-r--r-- ll-merge.c | 40
-rw-r--r-- ll-merge.h | 9
-rw-r--r-- log-tree.c | 120
-rw-r--r-- ls-refs.c | 3
-rw-r--r-- mailmap.c | 7
-rw-r--r-- match-trees.c | 2
-rw-r--r-- mem-pool.c | 26
-rw-r--r-- merge-blobs.c | 5
-rw-r--r-- merge-ort.c | 119
-rw-r--r-- merge-ort.h | 10
-rw-r--r-- merge-recursive.c | 11
-rw-r--r-- merge-recursive.h | 2
-rw-r--r-- midx.c | 41
-rw-r--r-- midx.h | 1
-rw-r--r-- notes-cache.c | 2
-rw-r--r-- notes-merge.c | 8
-rw-r--r-- notes.c | 8
-rw-r--r-- object-file.c | 249
-rw-r--r-- object-name.c | 130
-rw-r--r-- object-store.h | 24
-rw-r--r-- object.c | 5
-rw-r--r-- object.h | 2
-rw-r--r-- pack-bitmap-write.c | 9
-rw-r--r-- pack-bitmap.c | 28
-rw-r--r-- pack-bitmap.h | 2
-rw-r--r-- pack-check.c | 9
-rw-r--r-- pack-revindex.c | 20
-rw-r--r-- pack-write.c | 13
-rw-r--r-- parallel-checkout.c | 4
-rw-r--r-- parse-options.c | 47
-rw-r--r-- parse-options.h | 31
-rw-r--r-- path.h | 14
-rw-r--r-- perl/Git.pm | 21
-rw-r--r-- pretty.h | 10
-rw-r--r-- progress.c | 66
-rw-r--r-- progress.h | 9
-rw-r--r-- range-diff.c | 30
-rw-r--r-- reachable.c | 2
-rw-r--r-- read-cache.c | 56
-rw-r--r-- ref-filter.c | 3
-rw-r--r-- reflog-walk.h | 1
-rw-r--r-- reflog.c | 434
-rw-r--r-- reflog.h | 43
-rw-r--r-- refs.c | 85
-rw-r--r-- refs.h | 32
-rw-r--r-- refs/debug.c | 81
-rw-r--r-- refs/files-backend.c | 153
-rw-r--r-- refs/iterator.c | 18
-rw-r--r-- refs/packed-backend.c | 158
-rw-r--r-- refs/packed-backend.h | 7
-rw-r--r-- refs/ref-cache.c | 6
-rw-r--r-- refs/refs-internal.h | 17
-rw-r--r-- refspec.c | 14
-rw-r--r-- reftable/block.c | 55
-rw-r--r-- reftable/block_test.c | 27
-rw-r--r-- reftable/blocksource.c | 6
-rw-r--r-- reftable/generic.c | 45
-rw-r--r-- reftable/iter.c | 4
-rw-r--r-- reftable/merged.c | 33
-rw-r--r-- reftable/pq.c | 3
-rw-r--r-- reftable/pq_test.c | 27
-rw-r--r-- reftable/reader.c | 117
-rw-r--r-- reftable/readwrite_test.c | 170
-rw-r--r-- reftable/record.c | 366
-rw-r--r-- reftable/record.h | 49
-rw-r--r-- reftable/record_test.c | 199
-rw-r--r-- reftable/reftable-record.h | 14
-rw-r--r-- reftable/reftable-writer.h | 2
-rw-r--r-- reftable/reftable.c | 115
-rw-r--r-- reftable/stack.c | 10
-rw-r--r-- reftable/stack_test.c | 3
-rw-r--r-- reftable/system.h | 11
-rw-r--r-- reftable/writer.c | 58
-rw-r--r-- remote-curl.c | 18
-rw-r--r-- remote.c | 17
-rw-r--r-- repo-settings.c | 10
-rw-r--r-- repository.c | 23
-rw-r--r-- repository.h | 19
-rw-r--r-- rerere.c | 9
-rw-r--r-- reset.c | 150
-rw-r--r-- reset.h | 48
-rw-r--r-- revision.c | 61
-rw-r--r-- revision.h | 17
-rw-r--r-- run-command.c | 33
-rw-r--r-- run-command.h | 17
-rw-r--r-- sequencer.c | 80
-rw-r--r-- sequencer.h | 3
-rw-r--r-- setup.c | 3
-rw-r--r-- shallow.c | 3
-rw-r--r-- shared.mak | 103
-rw-r--r-- sparse-index.c | 86
-rw-r--r-- sparse-index.h | 1
-rw-r--r-- split-index.c | 3
-rw-r--r-- stable-qsort.c | 16
-rw-r--r-- strbuf.c | 5
-rw-r--r-- string-list.h | 3
-rw-r--r-- submodule-config.c | 63
-rw-r--r-- submodule-config.h | 34
-rw-r--r-- submodule.c | 469
-rw-r--r-- submodule.h | 24
-rw-r--r-- t/Makefile | 3
-rw-r--r-- t/README | 4
-rw-r--r-- t/helper/test-chmtime.c | 15
-rw-r--r-- t/helper/test-csprng.c | 29
-rw-r--r-- t/helper/test-date.c | 5
-rw-r--r-- t/helper/test-fsmonitor-client.c | 116
-rw-r--r-- t/helper/test-progress.c | 50
-rw-r--r-- t/helper/test-read-graph.c | 13
-rw-r--r-- t/helper/test-ref-store.c | 3
-rw-r--r-- t/helper/test-reftable.c | 9
-rw-r--r-- t/helper/test-run-command.c | 14
-rw-r--r-- t/helper/test-tool.c | 2
-rw-r--r-- t/helper/test-tool.h | 2
-rw-r--r-- t/interop/Makefile | 3
-rw-r--r-- t/lib-bitmap.sh | 185
-rwxr-xr-x t/lib-commit-graph.sh | 58
-rw-r--r-- t/lib-gpg.sh | 11
-rw-r--r-- t/lib-read-tree-m-3way.sh | 168
-rw-r--r-- t/perf/Makefile | 3
-rwxr-xr-x t/perf/p1006-cat-file.sh | 12
-rwxr-xr-x t/perf/p2000-sparse-operations.sh | 3
-rwxr-xr-x t/perf/p7519-fsmonitor.sh | 68
-rw-r--r-- t/perf/perf-lib.sh | 2
-rwxr-xr-x t/t0000-basic.sh | 23
-rwxr-xr-x t/t0001-init.sh | 3
-rwxr-xr-x t/t0002-gitfile.sh | 6
-rwxr-xr-x t/t0003-attributes.sh | 9
-rwxr-xr-x t/t0006-date.sh | 2
-rwxr-xr-x t/t0012-help.sh | 101
-rwxr-xr-x t/t0015-hash.sh | 6
-rwxr-xr-x t/t0022-crlf-rename.sh | 4
-rwxr-xr-x t/t0025-crlf-renormalize.sh | 4
-rwxr-xr-x t/t0027-auto-crlf.sh | 18
-rwxr-xr-x t/t0029-core-unsetenvvars.sh | 3
-rwxr-xr-x t/t0030-stripspace.sh | 75
-rwxr-xr-x t/t0050-filesystem.sh | 3
-rwxr-xr-x t/t0051-windows-named-pipe.sh | 7
-rwxr-xr-x t/t0091-bugreport.sh | 26
-rw-r--r-- t/t0211/scrub_perf.perl | 4
-rwxr-xr-x t/t0410-partial-clone.sh | 19
-rwxr-xr-x t/t0500-progress-display.sh | 109
-rwxr-xr-x t/t1001-read-tree-m-2way.sh | 6
-rwxr-xr-x t/t1002-read-tree-m-u-2way.sh | 6
-rwxr-xr-x t/t1003-read-tree-prefix.sh | 10
-rwxr-xr-x t/t1006-cat-file.sh | 227
-rwxr-xr-x t/t1007-hash-object.sh | 1
-rwxr-xr-x t/t1011-read-tree-sparse-checkout.sh | 23
-rwxr-xr-x t/t1090-sparse-checkout-scope.sh | 19
-rwxr-xr-x t/t1091-sparse-checkout-builtin.sh | 183
-rwxr-xr-x t/t1092-sparse-checkout-compatibility.sh | 453
-rwxr-xr-x t/t1300-config.sh | 118
-rwxr-xr-x t/t1350-config-hooks-path.sh | 4
-rwxr-xr-x t/t1405-main-ref-store.sh | 8
-rwxr-xr-x t/t1410-reflog.sh | 36
-rwxr-xr-x t/t1411-reflog-show.sh | 5
-rwxr-xr-x t/t1416-ref-transaction-hooks.sh | 73
-rwxr-xr-x t/t1418-reflog-exists.sh | 37
-rwxr-xr-x t/t1503-rev-parse-verify.sh | 5
-rwxr-xr-x t/t1512-rev-parse-disambiguation.sh | 81
-rwxr-xr-x t/t1800-hook.sh | 123
-rwxr-xr-x t/t2012-checkout-last.sh | 51
-rwxr-xr-x t/t2060-switch.sh | 11
-rwxr-xr-x t/t2108-update-index-refresh-racy.sh | 64
-rwxr-xr-x t/t2200-add-update.sh | 33
-rwxr-xr-x t/t2400-worktree-add.sh | 63
-rwxr-xr-x t/t2402-worktree-list.sh | 19
-rwxr-xr-x t/t3007-ls-files-recurse-submodules.sh | 18
-rwxr-xr-x t/t3101-ls-tree-dirname.sh | 55
-rwxr-xr-x t/t3103-ls-tree-misc.sh | 15
-rwxr-xr-x t/t3104-ls-tree-format.sh | 76
-rwxr-xr-x t/t3200-branch.sh | 35
-rwxr-xr-x t/t3207-branch-submodule.sh | 328
-rwxr-xr-x t/t3302-notes-index-expensive.sh | 6
-rwxr-xr-x t/t3303-notes-subtrees.sh | 9
-rwxr-xr-x t/t3305-notes-fanout.sh | 14
-rwxr-xr-x t/t3400-rebase.sh | 18
-rwxr-xr-x t/t3404-rebase-interactive.sh | 10
-rwxr-xr-x t/t3406-rebase-message.sh | 23
-rwxr-xr-x t/t3412-rebase-root.sh | 20
-rwxr-xr-x t/t3413-rebase-hook.sh | 18
-rwxr-xr-x t/t3418-rebase-continue.sh | 26
-rwxr-xr-x t/t3430-rebase-merges.sh | 6
-rwxr-xr-x t/t3701-add-interactive.sh | 48
-rwxr-xr-x t/t3705-add-sparse-checkout.sh | 2
-rwxr-xr-x t/t3903-stash.sh | 123
-rwxr-xr-x t/t4015-diff-whitespace.sh | 12
-rw-r--r-- t/t4018/kotlin-class | 5
-rw-r--r-- t/t4018/kotlin-enum-class | 5
-rw-r--r-- t/t4018/kotlin-fun | 5
-rw-r--r-- t/t4018/kotlin-inheritace-class | 5
-rw-r--r-- t/t4018/kotlin-inline-class | 5
-rw-r--r-- t/t4018/kotlin-interface | 5
-rw-r--r-- t/t4018/kotlin-nested-fun | 9
-rw-r--r-- t/t4018/kotlin-public-class | 5
-rw-r--r-- t/t4018/kotlin-sealed-class | 5
-rwxr-xr-x t/t4020-diff-external.sh | 153
-rwxr-xr-x t/t4027-diff-submodule.sh | 7
-rwxr-xr-x t/t4034-diff-words.sh | 1
-rw-r--r-- t/t4034/kotlin/expect | 43
-rw-r--r-- t/t4034/kotlin/post | 30
-rw-r--r-- t/t4034/kotlin/pre | 30
-rwxr-xr-x t/t4069-remerge-diff.sh | 291
-rwxr-xr-x t/t4123-apply-shrink.sh | 18
-rwxr-xr-x t/t4128-apply-root.sh | 36
-rwxr-xr-x t/t4150-am.sh | 26
-rwxr-xr-x t/t4202-log.sh | 110
-rwxr-xr-x t/t4204-patch-id.sh | 95
-rwxr-xr-x t/t4216-log-bloom.sh | 1
-rwxr-xr-x t/t5300-pack-object.sh | 4
-rwxr-xr-x t/t5302-pack-index.sh | 8
-rwxr-xr-x t/t5310-pack-bitmaps.sh | 28
-rwxr-xr-x t/t5312-prune-corruption.sh | 10
-rwxr-xr-x t/t5316-pack-delta-depth.sh | 6
-rwxr-xr-x t/t5318-commit-graph.sh | 55
-rwxr-xr-x t/t5324-split-commit-graph.sh | 10
-rwxr-xr-x t/t5326-multi-pack-bitmaps.sh | 186
-rwxr-xr-x t/t5327-multi-pack-bitmaps-rev.sh | 23
-rwxr-xr-x t/t5328-commit-graph-64bit-time.sh | 66
-rwxr-xr-x t/t5401-update-hooks.sh | 64
-rwxr-xr-x t/t5402-post-merge-hook.sh | 16
-rwxr-xr-x t/t5403-post-checkout-hook.sh | 70
-rwxr-xr-x t/t5406-remote-rejects.sh | 2
-rwxr-xr-x t/t5407-post-rewrite-hook.sh | 14
-rwxr-xr-x t/t5409-colorize-remote-messages.sh | 2
-rwxr-xr-x t/t5411-proc-receive-hook.sh | 4
-rw-r--r-- t/t5411/once-0010-report-status-v1.sh | 2
-rw-r--r-- t/t5411/test-0002-pre-receive-declined.sh | 4
-rw-r--r-- t/t5411/test-0003-pre-receive-declined--porcelain.sh | 2
-rw-r--r-- t/t5411/test-0013-bad-protocol.sh | 20
-rw-r--r-- t/t5411/test-0014-bad-protocol--porcelain.sh | 18
-rw-r--r-- t/t5411/test-0020-report-ng.sh | 4
-rw-r--r-- t/t5411/test-0021-report-ng--porcelain.sh | 4
-rw-r--r-- t/t5411/test-0022-report-unexpect-ref.sh | 2
-rw-r--r-- t/t5411/test-0023-report-unexpect-ref--porcelain.sh | 2
-rw-r--r-- t/t5411/test-0024-report-unknown-ref.sh | 2
-rw-r--r-- t/t5411/test-0025-report-unknown-ref--porcelain.sh | 2
-rw-r--r-- t/t5411/test-0026-push-options.sh | 6
-rw-r--r-- t/t5411/test-0027-push-options--porcelain.sh | 6
-rw-r--r-- t/t5411/test-0030-report-ok.sh | 2
-rw-r--r-- t/t5411/test-0031-report-ok--porcelain.sh | 2
-rw-r--r-- t/t5411/test-0032-report-with-options.sh | 14
-rw-r--r-- t/t5411/test-0033-report-with-options--porcelain.sh | 14
-rw-r--r-- t/t5411/test-0034-report-ft.sh | 2
-rw-r--r-- t/t5411/test-0035-report-ft--porcelain.sh | 2
-rw-r--r-- t/t5411/test-0036-report-multi-rewrite-for-one-ref.sh | 6
-rw-r--r-- t/t5411/test-0037-report-multi-rewrite-for-one-ref--porcelain.sh | 6
-rw-r--r-- t/t5411/test-0038-report-mixed-refs.sh | 2
-rw-r--r-- t/t5411/test-0039-report-mixed-refs--porcelain.sh | 2
-rw-r--r-- t/t5411/test-0040-process-all-refs.sh | 2
-rw-r--r-- t/t5411/test-0041-process-all-refs--porcelain.sh | 2
-rw-r--r-- t/t5411/test-0050-proc-receive-refs-with-modifiers.sh | 4
-rwxr-xr-x t/t5500-fetch-pack.sh | 24
-rwxr-xr-x t/t5503-tagfollow.sh | 64
-rwxr-xr-x t/t5505-remote.sh | 4
-rwxr-xr-x t/t5510-fetch.sh | 46
-rwxr-xr-x t/t5511-refspec.sh | 1
-rwxr-xr-x t/t5516-fetch-push.sh | 212
-rwxr-xr-x t/t5520-pull.sh | 13
-rwxr-xr-x t/t5521-pull-options.sh | 4
-rwxr-xr-x t/t5526-fetch-submodules.sh | 545
-rwxr-xr-x t/t5534-push-signed.sh | 26
-rwxr-xr-x t/t5537-fetch-shallow.sh | 9
-rwxr-xr-x t/t5540-http-push-webdav.sh | 4
-rwxr-xr-x t/t5541-http-push-smart.sh | 32
-rwxr-xr-x t/t5543-atomic-push.sh | 14
-rwxr-xr-x t/t5547-push-quarantine.sh | 4
-rwxr-xr-x t/t5548-push-porcelain.sh | 2
-rwxr-xr-x t/t5550-http-fetch-dumb.sh | 25
-rwxr-xr-x t/t5571-pre-push-hook.sh | 128
-rwxr-xr-x t/t5601-clone.sh | 4
-rwxr-xr-x t/t5616-partial-clone.sh | 81
-rwxr-xr-x t/t5617-clone-submodules-remote.sh | 41
-rwxr-xr-x t/t5700-protocol-v1.sh | 15
-rwxr-xr-x t/t5702-protocol-v2.sh | 14
-rwxr-xr-x t/t6005-rev-list-count.sh | 43
-rwxr-xr-x t/t6007-rev-list-cherry-pick-file.sh | 26
-rwxr-xr-x t/t6012-rev-list-simplify.sh | 30
-rwxr-xr-x t/t6020-bundle-misc.sh | 98
-rwxr-xr-x t/t6030-bisect-porcelain.sh | 45
-rwxr-xr-x t/t6102-rev-list-unexpected-objects.sh | 13
-rwxr-xr-x t/t6111-rev-list-treesame.sh | 3
-rwxr-xr-x t/t6120-describe.sh | 127
-rwxr-xr-x t/t6404-recursive-merge.sh | 9
-rwxr-xr-x t/t6406-merge-attr.sh | 9
-rwxr-xr-x t/t6407-merge-binary.sh | 22
-rwxr-xr-x t/t6423-merge-rename-directories.sh | 10
-rwxr-xr-x t/t6428-merge-conflicts-sparse.sh | 23
-rwxr-xr-x t/t6429-merge-sequence-rename-caching.sh | 67
-rwxr-xr-x t/t6500-gc.sh | 22
-rwxr-xr-x t/t7001-mv.sh | 19
-rwxr-xr-x t/t7012-skip-worktree-writing.sh | 44
-rwxr-xr-x t/t7063-status-untracked-cache.sh | 21
-rwxr-xr-x t/t7102-reset.sh | 40
-rwxr-xr-x t/t7103-reset-bare.sh | 7
-rwxr-xr-x t/t7113-post-index-change-hook.sh | 7
-rwxr-xr-x t/t7406-submodule-update.sh | 26
-rwxr-xr-x t/t7408-submodule-reference.sh | 14
-rwxr-xr-x t/t7500-commit-template-squash-signoff.sh | 2
-rwxr-xr-x t/t7503-pre-commit-and-pre-merge-commit-hooks.sh | 150
-rwxr-xr-x t/t7504-commit-msg-hook.sh | 43
-rwxr-xr-x t/t7505-prepare-commit-msg-hook.sh | 43
-rwxr-xr-x t/t7508-status.sh | 30
-rwxr-xr-x t/t7519-status-fsmonitor.sh | 27
-rwxr-xr-x t/t7520-ignored-hook-warning.sh | 11
-rwxr-xr-x t/t7527-builtin-fsmonitor.sh | 609
-rwxr-xr-x t/t7700-repack.sh | 111
-rwxr-xr-x t/t7810-grep.sh | 186
-rwxr-xr-x t/t7812-grep-icase-non-ascii.sh | 16
-rwxr-xr-x t/t7814-grep-recurse-submodules.sh | 41
-rwxr-xr-x t/t7817-grep-sparse-checkout.sh | 11
-rwxr-xr-x t/t8007-cat-file-textconv.sh | 42
-rwxr-xr-x t/t9001-send-email.sh | 8
-rwxr-xr-x t/t9102-git-svn-deep-rmdir.sh | 1
-rwxr-xr-x t/t9123-git-svn-rebuild-with-rewriteroot.sh | 1
-rwxr-xr-x t/t9128-git-svn-cmd-branch.sh | 1
-rwxr-xr-x t/t9167-git-svn-cmd-branch-subproject.sh | 1
-rwxr-xr-x t/t9502-gitweb-standalone-parse-output.sh | 15
-rwxr-xr-x t/t9800-git-p4-basic.sh | 23
-rwxr-xr-x t/t9902-completion.sh | 236
-rw-r--r-- t/test-lib-functions.sh | 174
-rw-r--r-- t/test-lib.sh | 83
-rw-r--r-- templates/Makefile | 8
-rw-r--r-- tmp-objdir.c | 5
-rw-r--r-- tmp-objdir.h | 6
-rw-r--r-- trace.c | 80
-rw-r--r-- trace.h | 128
-rw-r--r-- trace2.c | 40
-rw-r--r-- trace2.h | 25
-rw-r--r-- trace2/tr2_tgt_event.c | 64
-rw-r--r-- trace2/tr2_tgt_normal.c | 64
-rw-r--r-- trace2/tr2_tgt_perf.c | 64
-rw-r--r-- transport-helper.c | 3
-rw-r--r-- transport.c | 37
-rw-r--r-- transport.h | 14
-rw-r--r-- tree-walk.c | 6
-rw-r--r-- unpack-trees.c | 190
-rw-r--r-- upload-pack.c | 20
-rw-r--r-- urlmatch.c | 5
-rw-r--r-- urlmatch.h | 1
-rw-r--r-- usage.c | 15
-rw-r--r-- userdiff.c | 48
-rw-r--r-- worktree.c | 84
-rw-r--r-- worktree.h | 21
-rw-r--r-- wrapper.c | 164
-rw-r--r-- write-or-die.c | 33
-rw-r--r-- wt-status.c | 21
-rw-r--r-- xdiff/xdiffi.c | 33
-rw-r--r-- xdiff/xhistogram.c | 3
-rw-r--r-- xdiff/xmerge.c | 42
-rw-r--r-- xdiff/xpatience.c | 21
585 files changed, 21749 insertions, 7363 deletions
diff --git a/.gitignore b/.gitignore
index 054249b..e81de10 100644
--- a/.gitignore
+++ b/.gitignore
@@ -72,11 +72,13 @@
/git-format-patch
/git-fsck
/git-fsck-objects
+/git-fsmonitor--daemon
/git-gc
/git-get-tar-commit-id
/git-grep
/git-hash-object
/git-help
+/git-hook
/git-http-backend
/git-http-fetch
/git-http-push
diff --git a/.mailmap b/.mailmap
index 9c6a446..07db36a 100644
--- a/.mailmap
+++ b/.mailmap
@@ -59,8 +59,9 @@ David Reiss <dreiss@facebook.com> <dreiss@dreiss-vmware.(none)>
David S. Miller <davem@davemloft.net>
David Turner <novalis@novalis.org> <dturner@twopensource.com>
David Turner <novalis@novalis.org> <dturner@twosigma.com>
-Derrick Stolee <dstolee@microsoft.com> <stolee@gmail.com>
-Derrick Stolee <dstolee@microsoft.com> Derrick Stolee via GitGitGadget <gitgitgadget@gmail.com>
+Derrick Stolee <derrickstolee@github.com> <stolee@gmail.com>
+Derrick Stolee <derrickstolee@github.com> Derrick Stolee via GitGitGadget <gitgitgadget@gmail.com>
+Derrick Stolee <derrickstolee@github.com> <dstolee@microsoft.com>
Deskin Miller <deskinm@umich.edu>
Đoàn Trần Công Danh <congdanhqx@gmail.com> Doan Tran Cong Danh
Dirk Süsserott <newsletter@dirk.my1.cc>
diff --git a/CODE_OF_CONDUCT.md b/CODE_OF_CONDUCT.md
index 65651be..0215b1f 100644
--- a/CODE_OF_CONDUCT.md
+++ b/CODE_OF_CONDUCT.md
@@ -70,8 +70,8 @@ git@sfconservancy.org, or individually:
- Ævar Arnfjörð Bjarmason <avarab@gmail.com>
- Christian Couder <christian.couder@gmail.com>
- - Jeff King <peff@peff.net>
- Junio C Hamano <gitster@pobox.com>
+ - Taylor Blau <me@ttaylorr.com>
All complaints will be reviewed and investigated promptly and fairly.
diff --git a/Documentation/CodingGuidelines b/Documentation/CodingGuidelines
index 0e27b53..b20b2f9 100644
--- a/Documentation/CodingGuidelines
+++ b/Documentation/CodingGuidelines
@@ -26,6 +26,13 @@ code. For Git in general, a few rough rules are:
go and fix it up."
Cf. http://lkml.iu.edu/hypermail/linux/kernel/1001.3/01069.html
+ - Log messages to explain your changes are as important as the
+ changes themselves. Clearly written code and in-code comments
+ explain how the code works and what is assumed from the surrounding
+ context. The log messages explain what the changes wanted to
+ achieve and why the changes were necessary (more on this in the
+ accompanying SubmittingPatches document).
+
Make your code readable and sensible, and don't try to be clever.
As for more concrete guidelines, just imitate the existing code
@@ -210,6 +217,9 @@ For C programs:
. since mid 2017 with 512f41cf, we have been using designated
initializers for array (e.g. "int array[10] = { [5] = 2 }").
+ . since early 2021 with 765dc168882, we have been using variadic
+ macros, mostly for printf-like trace and debug macros.
+
These used to be forbidden, but we have not heard any breakage
report, and they are assumed to be safe.
@@ -217,7 +227,10 @@ For C programs:
the first statement (i.e. -Wdeclaration-after-statement).
- Declaring a variable in the for loop "for (int i = 0; i < 10; i++)"
- is still not allowed in this codebase.
+ is still not allowed in this codebase. We are in the process of
+ allowing it by waiting to see that 44ba10d6 (revision: use C99
+ declaration of variable in for() loop, 2021-11-14) does not get
+ complaints. Let's revisit this around November 2022.
- NULL pointers shall be written as NULL, not as 0.
diff --git a/Documentation/Makefile b/Documentation/Makefile
index ed656db..44c080e 100644
--- a/Documentation/Makefile
+++ b/Documentation/Makefile
@@ -1,3 +1,6 @@
+# Import tree-wide shared Makefile behavior and libraries
+include ../shared.mak
+
# Guard against environment variables
MAN1_TXT =
MAN5_TXT =
@@ -215,38 +218,6 @@ DEFAULT_EDITOR_SQ = $(subst ','\'',$(DEFAULT_EDITOR))
ASCIIDOC_EXTRA += -a 'git-default-editor=$(DEFAULT_EDITOR_SQ)'
endif
-QUIET_SUBDIR0 = +$(MAKE) -C # space to separate -C and subdir
-QUIET_SUBDIR1 =
-
-ifneq ($(findstring $(MAKEFLAGS),w),w)
-PRINT_DIR = --no-print-directory
-else # "make -w"
-NO_SUBDIR = :
-endif
-
-ifneq ($(findstring $(MAKEFLAGS),s),s)
-ifndef V
- QUIET = @
- QUIET_ASCIIDOC = @echo ' ' ASCIIDOC $@;
- QUIET_XMLTO = @echo ' ' XMLTO $@;
- QUIET_DB2TEXI = @echo ' ' DB2TEXI $@;
- QUIET_MAKEINFO = @echo ' ' MAKEINFO $@;
- QUIET_DBLATEX = @echo ' ' DBLATEX $@;
- QUIET_XSLTPROC = @echo ' ' XSLTPROC $@;
- QUIET_GEN = @echo ' ' GEN $@;
- QUIET_STDERR = 2> /dev/null
- QUIET_SUBDIR0 = +@subdir=
- QUIET_SUBDIR1 = ;$(NO_SUBDIR) echo ' ' SUBDIR $$subdir; \
- $(MAKE) $(PRINT_DIR) -C $$subdir
-
- QUIET_LINT_GITLINK = @echo ' ' LINT GITLINK $<;
- QUIET_LINT_MANSEC = @echo ' ' LINT MAN SEC $<;
- QUIET_LINT_MANEND = @echo ' ' LINT MAN END $<;
-
- export V
-endif
-endif
-
all: html man
html: $(DOC_HTML)
@@ -419,7 +390,7 @@ gitman.texi: $(MAN_XML) cat-texi.perl texi.xsl
$(RM) $@+
gitman.info: gitman.texi
- $(QUIET_MAKEINFO)$(MAKEINFO) --no-split --no-validate $*.texi
+ $(QUIET_MAKEINFO)$(MAKEINFO) --no-split --no-validate $<
$(patsubst %.txt,%.texi,$(MAN_TXT)): %.texi : %.xml
$(QUIET_DB2TEXI)$(DOCBOOK2X_TEXI) --to-stdout $*.xml >$@
@@ -463,25 +434,11 @@ quick-install-html: require-htmlrepo
print-man1:
@for i in $(MAN1_TXT); do echo $$i; done
-## Lint: Common
-.build:
- $(QUIET)mkdir $@
-.build/lint-docs: | .build
- $(QUIET)mkdir $@
-
## Lint: gitlink
-.build/lint-docs/gitlink: | .build/lint-docs
- $(QUIET)mkdir $@
-.build/lint-docs/gitlink/howto: | .build/lint-docs/gitlink
- $(QUIET)mkdir $@
-.build/lint-docs/gitlink/config: | .build/lint-docs/gitlink
- $(QUIET)mkdir $@
LINT_DOCS_GITLINK = $(patsubst %.txt,.build/lint-docs/gitlink/%.ok,$(HOWTO_TXT) $(DOC_DEP_TXT))
-$(LINT_DOCS_GITLINK): | .build/lint-docs/gitlink
-$(LINT_DOCS_GITLINK): | .build/lint-docs/gitlink/howto
-$(LINT_DOCS_GITLINK): | .build/lint-docs/gitlink/config
$(LINT_DOCS_GITLINK): lint-gitlink.perl
$(LINT_DOCS_GITLINK): .build/lint-docs/gitlink/%.ok: %.txt
+ $(call mkdir_p_parent_template)
$(QUIET_LINT_GITLINK)$(PERL_PATH) lint-gitlink.perl \
$< \
$(HOWTO_TXT) $(DOC_DEP_TXT) \
@@ -492,23 +449,18 @@ $(LINT_DOCS_GITLINK): .build/lint-docs/gitlink/%.ok: %.txt
lint-docs-gitlink: $(LINT_DOCS_GITLINK)
## Lint: man-end-blurb
-.build/lint-docs/man-end-blurb: | .build/lint-docs
- $(QUIET)mkdir $@
LINT_DOCS_MAN_END_BLURB = $(patsubst %.txt,.build/lint-docs/man-end-blurb/%.ok,$(MAN_TXT))
-$(LINT_DOCS_MAN_END_BLURB): | .build/lint-docs/man-end-blurb
$(LINT_DOCS_MAN_END_BLURB): lint-man-end-blurb.perl
$(LINT_DOCS_MAN_END_BLURB): .build/lint-docs/man-end-blurb/%.ok: %.txt
+ $(call mkdir_p_parent_template)
$(QUIET_LINT_MANEND)$(PERL_PATH) lint-man-end-blurb.perl $< >$@
.PHONY: lint-docs-man-end-blurb
-lint-docs-man-end-blurb: $(LINT_DOCS_MAN_END_BLURB)
## Lint: man-section-order
-.build/lint-docs/man-section-order: | .build/lint-docs
- $(QUIET)mkdir $@
LINT_DOCS_MAN_SECTION_ORDER = $(patsubst %.txt,.build/lint-docs/man-section-order/%.ok,$(MAN_TXT))
-$(LINT_DOCS_MAN_SECTION_ORDER): | .build/lint-docs/man-section-order
$(LINT_DOCS_MAN_SECTION_ORDER): lint-man-section-order.perl
$(LINT_DOCS_MAN_SECTION_ORDER): .build/lint-docs/man-section-order/%.ok: %.txt
+ $(call mkdir_p_parent_template)
$(QUIET_LINT_MANSEC)$(PERL_PATH) lint-man-section-order.perl $< >$@
.PHONY: lint-docs-man-section-order
lint-docs-man-section-order: $(LINT_DOCS_MAN_SECTION_ORDER)
@@ -524,7 +476,4 @@ doc-l10n install-l10n::
$(MAKE) -C po $@
endif
-# Delete the target file on error
-.DELETE_ON_ERROR:
-
.PHONY: FORCE
diff --git a/Documentation/MyFirstObjectWalk.txt b/Documentation/MyFirstObjectWalk.txt
index ca26794..8d9e855 100644
--- a/Documentation/MyFirstObjectWalk.txt
+++ b/Documentation/MyFirstObjectWalk.txt
@@ -522,24 +522,25 @@ function shows that the all-object walk is being performed by
`traverse_commit_list()` or `traverse_commit_list_filtered()`. Those two
functions reside in `list-objects.c`; examining the source shows that, despite
the name, these functions traverse all kinds of objects. Let's have a look at
-the arguments to `traverse_commit_list_filtered()`, which are a superset of the
-arguments to the unfiltered version.
+the arguments to `traverse_commit_list()`.
-- `struct list_objects_filter_options *filter_options`: This is a struct which
- stores a filter-spec as outlined in `Documentation/rev-list-options.txt`.
-- `struct rev_info *revs`: This is the `rev_info` used for the walk.
+- `struct rev_info *revs`: This is the `rev_info` used for the walk. If
+ its `filter` member is not `NULL`, then `filter` contains information for
+ how to filter the object list.
- `show_commit_fn show_commit`: A callback which will be used to handle each
individual commit object.
- `show_object_fn show_object`: A callback which will be used to handle each
non-commit object (so each blob, tree, or tag).
- `void *show_data`: A context buffer which is passed in turn to `show_commit`
and `show_object`.
+
+In addition, `traverse_commit_list_filtered()` has an additional parameter:
+
- `struct oidset *omitted`: A linked-list of object IDs which the provided
filter caused to be omitted.
-It looks like this `traverse_commit_list_filtered()` uses callbacks we provide
-instead of needing us to call it repeatedly ourselves. Cool! Let's add the
-callbacks first.
+It looks like these methods use callbacks we provide instead of needing us
+to call it repeatedly ourselves. Cool! Let's add the callbacks first.
For the sake of this tutorial, we'll simply keep track of how many of each kind
of object we find. At file scope in `builtin/walken.c` add the following
@@ -712,20 +713,9 @@ help understand. In our case, that means we omit trees and blobs not directly
referenced by `HEAD` or `HEAD`'s history, because we begin the walk with only
`HEAD` in the `pending` list.)
-First, we'll need to `#include "list-objects-filter-options.h"` and set up the
-`struct list_objects_filter_options` at the top of the function.
-
-----
-static void walken_object_walk(struct rev_info *rev)
-{
- struct list_objects_filter_options filter_options = { 0 };
-
- ...
-----
-
For now, we are not going to track the omitted objects, so we'll replace those
parameters with `NULL`. For the sake of simplicity, we'll add a simple
-build-time branch to use our filter or not. Replace the line calling
+build-time branch to use our filter or not. Preface the line calling
`traverse_commit_list()` with the following, which will remind us which kind of
walk we've just performed:
@@ -733,19 +723,17 @@ walk we've just performed:
if (0) {
/* Unfiltered: */
trace_printf(_("Unfiltered object walk.\n"));
- traverse_commit_list(rev, walken_show_commit,
- walken_show_object, NULL);
} else {
trace_printf(
_("Filtered object walk with filterspec 'tree:1'.\n"));
- parse_list_objects_filter(&filter_options, "tree:1");
-
- traverse_commit_list_filtered(&filter_options, rev,
- walken_show_commit, walken_show_object, NULL, NULL);
+ CALLOC_ARRAY(rev->filter, 1);
+ parse_list_objects_filter(rev->filter, "tree:1");
}
+ traverse_commit_list(rev, walken_show_commit,
+ walken_show_object, NULL);
----
-`struct list_objects_filter_options` is usually built directly from a command
+The `rev->filter` member is usually built directly from a command
line argument, so the module provides an easy way to build one from a string.
Even though we aren't taking user input right now, we can still build one with
a hardcoded string using `parse_list_objects_filter()`.
@@ -784,7 +772,7 @@ object:
----
...
- traverse_commit_list_filtered(&filter_options, rev,
+ traverse_commit_list_filtered(rev,
walken_show_commit, walken_show_object, NULL, &omitted);
...
diff --git a/Documentation/RelNotes/2.36.0.txt b/Documentation/RelNotes/2.36.0.txt
new file mode 100644
index 0000000..8da5b2e
--- /dev/null
+++ b/Documentation/RelNotes/2.36.0.txt
@@ -0,0 +1,430 @@
+Git 2.36 Release Notes
+======================
+
+Updates since Git 2.35
+----------------------
+
+Backward compatibility warts
+
+ * "git name-rev --stdin" has been deprecated and issues a warning
+ when used; use "git name-rev --annotate-stdin" instead.
+
+ * "git clone --filter=... --recurse-submodules" only makes the
+ top-level a partial clone, while submodules are fully cloned. This
+ behaviour is changed to pass the same filter down to the submodules.
+
+
+Note to those who build from the source
+
+ * Since Git 2.31, our source assumed that the compiler you use to
+ build Git supports variadic macros, with an easy-to-use escape
+ hatch to allow compilation without variadic macros with a request
+ to report that you had to use the escape hatch to the list.
+ Because we haven't heard from anybody who actually needed to use
+ the escape hatch, it has been removed, making support of variadic
+ macros a hard requirement.
+
+
+UI, Workflows & Features
+
+ * Assorted updates to "git cat-file", especially "-h".
+
+ * The command line completion (in contrib/) learns to complete
+ arguments to give to "git sparse-checkout" command.
+
+ * "git log --remerge-diff" shows the difference from mechanical merge
+ result and the result that is actually recorded in a merge commit.
+
+ * "git log" and friends learned an option --exclude-first-parent-only
+ to propagate UNINTERESTING bit down only along the first-parent
+ chain, just like --first-parent option shows commits that lack the
+ UNINTERESTING bit only along the first-parent chain.
+
+ * The command line completion script (in contrib/) learned to
+ complete all Git subcommands, including the ones that are normally
+ hidden, when GIT_COMPLETION_SHOW_ALL_COMMANDS is used.
+
+ * "git branch" learned the "--recurse-submodules" option.
+
+ * A not-so-common mistake is to write a script to feed "git bisect
+ run" without making it executable, in which case all tests will
+ exit with 126 or 127 error codes, even on revisions that are marked
+ as good. Try to recognize this situation and stop iteration early.
+
+ * When "index-pack" dies due to incoming data exceeding the maximum
+ allowed input size, include the value of the limit in the error
+ message.
+
+ * The error message given by "git switch HEAD~4" has been clarified
+ to suggest the "--detach" option that is required.
+
+ * In sparse-checkouts, files mis-marked as missing from the working tree
+ could lead to later problems. Such files were hard to discover, and
+ harder to correct. Automatically detecting and correcting the marking
+ of such files has been added to avoid these problems.
+
+ * "git cat-file" learns "--batch-command" mode, which is a more
+ flexible interface than the existing "--batch" or "--batch-check"
+ modes, to allow different kinds of inquiries to be made.
+
+ * The level of verbose output from the ort backend during inner merge
+ has been aligned to that of the recursive backend.
+
+ * "git remote rename A B", depending on the number of remote-tracking
+ refs involved, takes a long time renaming them. The command has been
+ taught to show a progress bar while making the user wait.
+
+ * Bundle file format gets extended to allow a partial bundle,
+ filtered by similar criteria you would give when making a
+ partial/lazy clone.
+
+ * A new built-in userdiff driver for kotlin has been added.
+
+ * "git repack" learned a new configuration to disable triggering of
+ age-old "update-server-info" command, which is rarely useful these
+ days.
+
+ * "git stash" does not allow subcommands it internally runs as its
+ implementation detail, except for "git reset", to emit messages;
+ now "git reset" part has also been squelched.
+
+ * "git ls-tree" learns "--oid-only" option, similar to "--name-only",
+ and more generalized "--format" option.
+
+ * "git fetch --refetch" learned to fetch everything without telling
+ the other side what we already have, which is useful when you
+ cannot trust what you have in the local object store.
+
+ * "git branch" gives hint when branch tracking cannot be established
+ because fetch refspecs from multiple remote repositories overlap.
+
+ * "git worktree list --porcelain" did not c-quote pathnames and lock
+ reasons with unsafe bytes correctly, which is worked around by
+ introducing NUL terminated output format with "-z".
+
+
+Performance, Internal Implementation, Development Support etc.
+
+ * "git apply" (ab)used the util pointer of the string-list to keep
+ track of how each symbolic link needs to be handled, which has been
+ simplified by using strset.
+
+ * Fix a hand-rolled alloca() imitation that may have violated
+ alignment requirement of data being sorted in compatibility
+ implementation of qsort_s() and stable qsort().
+
+ * Use the parse-options API in "git reflog" command.
+
+ * The conditional inclusion mechanism of configuration files using
+ "[includeIf <condition>]" learns to base its decision on the
+ URL of the remote repository the repository interacts with.
+ (merge 399b198489 jt/conditional-config-on-remote-url later to maint).
+
+ * "git name-rev --stdin" does not behave like usual "--stdin" at
+ all. Start the process of renaming it to "--annotate-stdin".
+ (merge a2585719b3 jc/name-rev-stdin later to maint).
+
+ * "git update-index", "git checkout-index", and "git clean" are
+ taught to work better with the sparse checkout feature.
+
+ * Use an internal call to reset_head() helper function instead of
+ spawning "git checkout" in "rebase", and update code paths that are
+ involved in the change.
+
+ * Messages "ort" merge backend prepares while dealing with conflicted
+ paths were unnecessarily confusing since it did not differentiate
+ inner merges and outer merges.
+
+ * Small modernization of the rerere-train script (in contrib/).
+
+ * Use designated initializers we started using in mid 2017 in more
+ parts of the codebase that are relatively quiescent.
+
+ * Improve failure case behaviour of xdiff library when memory
+ allocation fails.
+
+ * General clean-up in reftable implementation, including
+ clarification of the API documentation, tightening the code to
+ honor documented length limit, etc.
+
+ * Remove the escape hatch we added when we introduced the weather
+ balloon to use variadic macros unconditionally, to make it official
+ that we now have a hard dependency on the feature.
+
+ * Makefile refactoring with a bit of suffixes rule stripping to
+ optimize the runtime overhead.
+
+ * "git stash drop" is reimplemented as an internal call to
+ reflog_delete() function, instead of invoking "git reflog delete"
+ via run_command() API.
+
+ * Count string_list items in size_t, not "unsigned int".
+
+ * The single-key interactive operation used by "git add -p" has been
+ made more robust.
+
+ * Remove unneeded <meta http-equiv=content-type...> from gitweb
+ output.
+
+ * "git name-rev" learned to use the generation numbers when setting
+ the lower bound of searching commits used to explain the revision,
+ when available, instead of committer time.
+
+ * Replace core.fsyncObjectFiles with two new configuration variables,
+ core.fsync and core.fsyncMethod.
+
+ * Updates to refs traditionally weren't fsync'ed, but we can
+ configure using core.fsync variable to do so.
+
+ * "git reflog" command now uses parse-options API to parse its
+ command line options.
+
+
+Fixes since v2.35
+-----------------
+
+ * "rebase" and "stash" in secondary worktrees are broken in
+ Git 2.35.0, which has been corrected.
+
+ * "git pull --rebase" ignored the rebase.autostash configuration
+ variable when the remote history is a descendant of our history,
+ which has been corrected.
+ (merge 3013d98d7a pb/pull-rebase-autostash-fix later to maint).
+
+ * "git update-index --refresh" has been taught to deal better with
+ racy timestamps (just like "git status" already does).
+ (merge 2ede073fd2 ms/update-index-racy later to maint).
+
+ * Avoid tests that are run under GIT_TRACE2 set from failing
+ unnecessarily.
+ (merge 944d808e42 js/test-unset-trace2-parents later to maint).
+
+ * The merge-ort misbehaved when merge.renameLimit configuration is
+ set too low and failed to find all renames.
+ (merge 9ae39fef7f en/merge-ort-restart-optim-fix later to maint).
+
+ * We explain that revs come first before the pathspec among command
+ line arguments, but did not spell out that dashed options come
+ before other args, which has been corrected.
+ (merge c11f95010c tl/doc-cli-options-first later to maint).
+
+ * "git add -p" rewritten in C regressed hunk splitting in some cases,
+ which has been corrected.
+ (merge 7008ddc645 pw/add-p-hunk-split-fix later to maint).
+
+ * "git fetch --negotiate-only" is an internal command used by "git
+ push" to figure out which part of our history is missing from the
+ other side. It should never recurse into submodules even when
+ fetch.recursesubmodules configuration variable is set, nor should
+ it trigger "gc". The code has been tightened up to ensure it
+ only does common ancestry discovery and nothing else.
+ (merge de4eaae63a gc/fetch-negotiate-only-early-return later to maint).
+
+ * The code path that verifies signatures made with ssh were made to
+ work better on a system with CRLF line endings.
+ (merge caeef01ea7 fs/ssh-signing-crlf later to maint).
+
+ * "git sparse-checkout init" failed to write into $GIT_DIR/info
+ directory when the repository was created without one, which has
+ been corrected to auto-create it.
+ (merge 7f44842ac1 jt/sparse-checkout-leading-dir-fix later to maint).
+
+ * Cloning from a repository that does not yet have any branches or
+ tags but has other refs resulted in a "remote transport reported
+ error", which has been corrected.
+ (merge dccea605b6 jt/clone-not-quite-empty later to maint).
+
+ * Mark in various places in the code that the sparse index and the
+ split index features are mutually incompatible.
+ (merge 451b66c533 js/sparse-vs-split-index later to maint).
+
+ * Update the logic to compute alignment requirement for our mem-pool.
+ (merge e38bcc66d8 jc/mem-pool-alignment later to maint).
+
+ * Pick a better random number generator and use it when we prepare
+ temporary filenames.
+ (merge 47efda967c bc/csprng-mktemps later to maint).
+
+ * Update the contributor-facing documents on proposed log messages.
+ (merge cdba0295b0 jc/doc-log-messages later to maint).
+
+ * When "git fetch --prune" failed to prune the refs it wanted to
+ prune, the command issued error messages but exited with exit
+ status 0, which has been corrected.
+ (merge c9e04d905e tg/fetch-prune-exit-code-fix later to maint).
+
+ * Problems identified by Coverity in the reftable code have been
+ corrected.
+ (merge 01033de49f hn/reftable-coverity-fixes later to maint).
+
+ * A bug that made multi-pack bitmap and the object order out-of-sync,
+ making the .midx data corrupt, has been fixed.
+ (merge f8b60cf99b tb/midx-bitmap-corruption-fix later to maint).
+
+ * The build procedure has been taught to notice older version of zlib
+ and enable our replacement uncompress2() automatically.
+ (merge 07564773c2 ab/auto-detect-zlib-compress2 later to maint).
+
+ * Interaction between fetch.negotiationAlgorithm and
+ feature.experimental configuration variables has been corrected.
+ (merge 714edc620c en/fetch-negotiation-default-fix later to maint).
+
+ * "git diff --diff-filter=aR" is now parsed correctly.
+ (merge 75408ca949 js/diff-filter-negation-fix later to maint).
+
+ * When "git subtree" wants to create a merge, it used "git merge" and
+ let it be affected by end-user's "merge.ff" configuration, which
+ has been corrected.
+ (merge 9158a3564a tk/subtree-merge-not-ff-only later to maint).
+
+ * Unlike "git apply", "git patch-id" did not handle patches with
+ hunks that have only 1 line in either preimage or postimage, which
+ has been corrected.
+ (merge 757e75c81e jz/patch-id-hunk-header-parsing-fix later to maint).
+
+ * "receive-pack" checks if it will do any ref updates (various
+ conditions could reject a push) before received objects are taken
+ out of the temporary directory used for quarantine purposes, so
+ that a push that is known-to-fail will not leave crufts that a
+ future "gc" needs to clean up.
+ (merge 5407764069 cb/clear-quarantine-early-on-all-ref-update-errors later to maint).
+
+ * Because a deletion of ref would need to remove it from both the
+ loose ref store and the packed ref store, a delete-ref operation
+ that logically removes one ref may end up invoking ref-transaction
+ hook twice, which has been corrected.
+ (merge 2ed1b64ebd ps/avoid-unnecessary-hook-invocation-with-packed-refs later to maint).
+
+ * When there is no object to write .bitmap file for, "git
+ multi-pack-index" triggered an error, instead of just skipping,
+ which has been corrected.
+ (merge eb57277ba3 tb/midx-no-bitmap-for-no-objects later to maint).
+
+ * "git cmd -h" outside a repository should error out cleanly for many
+ commands, but instead it hit a BUG(), which has been corrected.
+ (merge 87ad07d735 js/short-help-outside-repo-fix later to maint).
+
+ * "working tree" and "per-worktree ref" were in glossary, but
+ "worktree" itself wasn't, which has been corrected.
+ (merge 2df5387ed0 jc/glossary-worktree later to maint).
+
+ * L10n support for a few error messages.
+ (merge 3d3c23b3a7 bs/forbid-i18n-of-protocol-token-in-fetch-pack later to maint).
+
+ * Test modernization.
+ (merge d4fe066e4b sy/t0001-use-path-is-helper later to maint).
+
+ * "git log --graph --graph" used to leak a graph structure, and there
+ was no way to countermand "--graph" that appears earlier on the
+ command line. A "--no-graph" option has been added and resource
+ leakage has been plugged.
+
+ * Error output given in response to an ambiguous object name has been
+ improved.
+ (merge 3a73c1dfaf ab/ambiguous-object-name later to maint).
+
+ * "git sparse-checkout" wants to work with per-worktree configuration,
+ but did not work well in a worktree attached to a bare repository.
+ (merge 3ce1138272 ds/sparse-checkout-requires-per-worktree-config later to maint).
+
+ * Setting core.untrackedCache to true failed to add the untracked
+ cache extension to the index.
+
+ * The workaround we have for versions of PCRE2 before their version 10.36
+ was in effect only for their versions newer than 10.36 by mistake,
+ which has been corrected.
+ (merge 97169fc361 rs/pcre-invalid-utf8-fix-fix later to maint).
+
+ * Document Taylor as a new member of Git PLC at SFC. Welcome.
+ (merge e8d56ca863 tb/coc-plc-update later to maint).
+
+ * "git checkout -b branch/with/multi/level/name && git stash" only
+ recorded the last level component of the branch name, which has
+ been corrected.
+
+ * "git fetch" can make two separate fetches, but ref updates coming
+ from them were in two separate ref transactions under "--atomic",
+ which has been corrected.
+
+ * Check the return value from parse_tree_indirect() to turn segfaults
+ into calls to die().
+ (merge 8d2eaf649a gc/parse-tree-indirect-errors later to maint).
+
+ * Newer version of GPGSM changed its output in a backward
+ incompatible way to break our code that parses its output. It also
+ added more processes our tests need to kill when cleaning up.
+ Adjustments have been made to accommodate these changes.
+ (merge b0b70d54c4 fs/gpgsm-update later to maint).
+
+ * The newly computed untracked cache wasn't written back to the
+ on-disk index file when there is no other change to the index,
+ which has been corrected.
+
+ * "git config -h" did not describe the "--type" option correctly.
+ (merge 5445124fad mf/fix-type-in-config-h later to maint).
+
+ * The way generation number v2 in the commit-graph files are
+ (not) handled has been corrected.
+ (merge 6dbf4b8172 ds/commit-graph-gen-v2-fixes later to maint).
+
+ * The method to trigger malloc check used in our tests no longer works
+ with newer versions of glibc.
+ (merge baedc59543 ep/test-malloc-check-with-glibc-2.34 later to maint).
+
+ * When "git fetch --recurse-submodules" grabbed submodule commits
+ that would be needed to recursively check out newly fetched commits
+ in the superproject, it only paid attention to submodules that are
+ in the current checkout of the superproject. We now do so for all
+ submodules on which "git submodule init" has been run.
+
+ * "git rebase $base $non_branch_commit", when $base is an ancestor or
+ the $non_branch_commit, modified the current branch, which has been
+ corrected.
+
+ * When "shallow" information is updated, we forgot to update the
+ in-core equivalent, which has been corrected.
+
+ * When creating a loose object file, we didn't report the exact
+ filename of the file we failed to fsync, even though the
+ information was readily available, which has been corrected.
+
+ * "git am" can read from the standard input when no mailbox is given
+ on the command line, but the end-user gets no indication when it
+ happens, making Git appear stuck.
+ (merge 7b20af6a06 jc/mailsplit-warn-on-tty later to maint).
+
+ * "git mv" failed to refresh the cached stat information for the
+ entry it moved.
+ (merge b7f9130a06 vd/mv-refresh-stat later to maint).
+
+ * Other code cleanup, docfix, build fix, etc.
+ (merge cfc5cf428b jc/find-header later to maint).
+ (merge 40e7cfdd46 jh/p4-fix-use-of-process-error-exception later to maint).
+ (merge 727e6ea350 jh/p4-spawning-external-commands-cleanup later to maint).
+ (merge 0a6adc26e2 rs/grep-expr-cleanup later to maint).
+ (merge 4ed7dfa713 po/readme-mention-contributor-hints later to maint).
+ (merge 6046f7a91c en/plug-leaks-in-merge later to maint).
+ (merge 8c591dbfce bc/clarify-eol-attr later to maint).
+ (merge 518e15db74 rs/parse-options-lithelp-help later to maint).
+ (merge cbac0076ef gh/doc-typos later to maint).
+ (merge ce14de03db ab/no-errno-from-resolve-ref-unsafe later to maint).
+ (merge 2826ffad8c rc/negotiate-only-typofix later to maint).
+ (merge 0f03f04c5c en/sparse-checkout-leakfix later to maint).
+ (merge 74f3390dde sy/diff-usage-typofix later to maint).
+ (merge 45d0212a71 ll/doc-mktree-typofix later to maint).
+ (merge e9b272e4c1 js/no-more-legacy-stash later to maint).
+ (merge 6798b08e84 ab/do-not-hide-failures-in-git-dot-pm later to maint).
+ (merge 9325285df4 po/doc-check-ignore-markup-fix later to maint).
+ (merge cd26cd6c7c sy/modernize-t-lib-read-tree-m-3way later to maint).
+ (merge d17294a05e ab/hash-object-leakfix later to maint).
+ (merge b8403129d3 jd/t0015-modernize later to maint).
+ (merge 332acc248d ds/mailmap later to maint).
+ (merge 04bf052eef ab/grep-patterntype later to maint).
+ (merge 6ee36364eb ab/diff-free-more later to maint).
+ (merge 63a36017fe nj/read-tree-doc-reffix later to maint).
+ (merge eed36fce38 sm/no-git-in-upstream-of-pipe-in-tests later to maint).
+ (merge c614beb933 ep/t6423-modernize later to maint).
+ (merge 57be9c6dee ab/reflog-prep-fix later to maint).
+ (merge 5327d8982a js/in-place-reverse-in-sequencer later to maint).
+ (merge 2e2c0be51e dp/worktree-repair-in-usage later to maint).
+ (merge 6563706568 jc/coding-guidelines-decl-in-for-loop later to maint).
diff --git a/Documentation/SubmittingPatches b/Documentation/SubmittingPatches
index 92b80d9..a6121d1 100644
--- a/Documentation/SubmittingPatches
+++ b/Documentation/SubmittingPatches
@@ -110,6 +110,35 @@ run `git diff --check` on your changes before you commit.
[[describe-changes]]
=== Describe your changes well.
+The log message that explains your changes is just as important as the
+changes themselves. Your code may be clearly written with in-code
+comments to sufficiently explain how it works with the surrounding
+code, but those who need to fix or enhance your code in the future
+will need to know _why_ your code does what it does, for a few
+reasons:
+
+. Your code may be doing something differently from what you wanted it
+ to do. Writing down what you actually wanted to achieve will help
+ them fix your code and make it do what it should have been doing
+ (also, you often discover your own bugs yourself, while writing the
+ log message to summarize the thought behind it).
+
+. Your code may be doing things that were only necessary for your
+ immediate needs (e.g. "do X to directories" without implementing or
+ even designing what is to be done on files). Writing down why you
+ excluded what the code does not do will help guide future developers.
+ Writing down "we do X to directories, because directories have
+ characteristic Y" would help them infer "oh, files also have the same
+ characteristic Y, so perhaps doing X to them would also make sense?".
+ Saying "we don't do the same X to files, because ..." will help them
+ decide if the reasoning is sound (in which case they do not waste
+ time extending your code to cover files), or reason differently (in
+ which case, they can explain why they extend your code to cover
+ files, too).
+
+The goal of your log message is to convey the _why_ behind your
+change to help future developers.
+
The first line of the commit message should be a short description (50
characters is the soft limit, see DISCUSSION in linkgit:git-commit[1]),
and should skip the full stop. It is also conventional in most cases to
@@ -142,6 +171,13 @@ The body should provide a meaningful commit message, which:
. alternate solutions considered but discarded, if any.
+[[present-tense]]
+The problem statement that describes the status quo is written in the
+present tense. Write "The code does X when it is given input Y",
+instead of "The code used to do Y when given input X". You do not
+have to say "Currently"---the status quo in the problem statement is
+about the code _without_ your change, by project convention.
+
[[imperative-mood]]
Describe your changes in imperative mood, e.g. "make xyzzy do frotz"
instead of "[This patch] makes xyzzy do frotz" or "[I] changed xyzzy
diff --git a/Documentation/config.txt b/Documentation/config.txt
index 8d3f518..e284b04 100644
--- a/Documentation/config.txt
+++ b/Documentation/config.txt
@@ -159,6 +159,33 @@ all branches that begin with `foo/`. This is useful if your branches are
organized hierarchically and you would like to apply a configuration to
all the branches in that hierarchy.
+`hasconfig:remote.*.url:`::
+ The data that follows this keyword is taken to
+ be a pattern with standard globbing wildcards and two
+ additional ones, `**/` and `/**`, that can match multiple
+ components. The first time this keyword is seen, the rest of
+ the config files will be scanned for remote URLs (without
+ applying any values). If there exists at least one remote URL
+ that matches this pattern, the include condition is met.
++
+Files included by this option (directly or indirectly) are not allowed
+to contain remote URLs.
++
+Note that unlike other includeIf conditions, resolving this condition
+relies on information that is not yet known at the point of reading the
+condition. A typical use case is this option being present as a
+system-level or global-level config, and the remote URL being in a
+local-level config; hence the need to scan ahead when resolving this
+condition. In order to avoid the chicken-and-egg problem in which
+potentially-included files can affect whether such files are potentially
+included, Git breaks the cycle by prohibiting these files from affecting
+the resolution of these conditions (thus, prohibiting them from
+declaring remote URLs).
++
+As for the naming of this keyword, it is for forwards compatibility with
+a naming scheme that supports more variable-based include conditions,
+but currently Git only supports the exact keyword described above.
+
A few more notes on matching via `gitdir` and `gitdir/i`:
* Symlinks in `$GIT_DIR` are not resolved before matching.
@@ -226,6 +253,14 @@ Example
; currently checked out
[includeIf "onbranch:foo-branch"]
path = foo.inc
+
+; include only if a remote with the given URL exists (note
+; that such a URL may be provided later in a file or in a
+; file read after this file is read, as seen in this example)
+[includeIf "hasconfig:remote.*.url:https://example.com/**"]
+ path = foo.inc
+[remote "origin"]
+ url = https://example.com/git
----
Values
@@ -460,8 +495,6 @@ include::config/repack.txt[]
include::config/rerere.txt[]
-include::config/reset.txt[]
-
include::config/safe.txt[]
include::config/sendemail.txt[]
@@ -470,6 +503,8 @@ include::config/sequencer.txt[]
include::config/showbranch.txt[]
+include::config/sparse.txt[]
+
include::config/splitindex.txt[]
include::config/ssh.txt[]
diff --git a/Documentation/config/advice.txt b/Documentation/config/advice.txt
index 063eec2..a00d010 100644
--- a/Documentation/config/advice.txt
+++ b/Documentation/config/advice.txt
@@ -4,6 +4,10 @@ advice.*::
can tell Git that you do not need help by setting these to 'false':
+
--
+ ambiguousFetchRefspec::
+ Advice shown when the fetch refspecs for multiple remotes map to
+ the same remote-tracking branch namespace and cause branch
+ tracking set-up to fail.
fetchShowForcedUpdates::
Advice shown when linkgit:git-fetch[1] takes a long time
to calculate forced updates after ref updates, or to warn
@@ -67,10 +71,10 @@ advice.*::
commitBeforeMerge::
Advice shown when linkgit:git-merge[1] refuses to
merge to avoid overwriting local changes.
- resetQuiet::
- Advice to consider using the `--quiet` option to linkgit:git-reset[1]
- when the command takes more than 2 seconds to enumerate unstaged
- changes after reset.
+ resetNoRefresh::
+ Advice to consider using the `--no-refresh` option to
+ linkgit:git-reset[1] when the command takes more than 2 seconds
+ to refresh the index after reset.
resolveConflict::
Advice shown by various commands when conflicts
prevent the operation from being performed.
@@ -85,6 +89,9 @@ advice.*::
linkgit:git-switch[1] or linkgit:git-checkout[1]
to move to the detach HEAD state, to instruct how to
create a local branch after the fact.
+ suggestDetachingHead::
+ Advice shown when linkgit:git-switch[1] refuses to detach HEAD
+ without the explicit `--detach` option.
checkoutAmbiguousRemoteBranchName::
Advice shown when the argument to
linkgit:git-checkout[1] and linkgit:git-switch[1]
@@ -116,6 +123,9 @@ advice.*::
submoduleAlternateErrorStrategyDie::
Advice shown when a submodule.alternateErrorStrategy option
configured to "die" causes a fatal error.
+ submodulesNotUpdated::
+ Advice shown when a user runs a submodule command that fails
+ because `git submodule update --init` was not run.
addIgnoredFile::
Advice shown if a user attempts to add an ignored file to
the index.
diff --git a/Documentation/config/clone.txt b/Documentation/config/clone.txt
index 7bcfbd1..26f4fb1 100644
--- a/Documentation/config/clone.txt
+++ b/Documentation/config/clone.txt
@@ -6,3 +6,8 @@ clone.defaultRemoteName::
clone.rejectShallow::
Reject to clone a repository if it is a shallow one, can be overridden by
passing option `--reject-shallow` in command line. See linkgit:git-clone[1]
+
+clone.filterSubmodules::
+ If a partial clone filter is provided (see `--filter` in
+ linkgit:git-rev-list[1]) and `--recurse-submodules` is used, also apply
+ the filter to submodules.
diff --git a/Documentation/config/core.txt b/Documentation/config/core.txt
index c04f62a..e67392c 100644
--- a/Documentation/config/core.txt
+++ b/Documentation/config/core.txt
@@ -62,22 +62,54 @@ core.protectNTFS::
Defaults to `true` on Windows, and `false` elsewhere.
core.fsmonitor::
- If set, the value of this variable is used as a command which
- will identify all files that may have changed since the
- requested date/time. This information is used to speed up git by
- avoiding unnecessary processing of files that have not changed.
- See the "fsmonitor-watchman" section of linkgit:githooks[5].
+ If set to true, enable the built-in file system monitor
+ daemon for this working directory (linkgit:git-fsmonitor{litdd}daemon[1]).
++
+Like hook-based file system monitors, the built-in file system monitor
+can speed up Git commands that need to refresh the Git index
+(e.g. `git status`) in a working directory with many files. The
+built-in monitor eliminates the need to install and maintain an
+external third-party tool.
++
+The built-in file system monitor is currently available only on a
+limited set of supported platforms. At present, this includes Windows
+and macOS.
++
+ Otherwise, this variable contains the pathname of the "fsmonitor"
+ hook command.
++
+This hook command is used to identify all files that may have changed
+since the requested date/time. This information is used to speed up
+git by avoiding unnecessary scanning of files that have not changed.
++
+See the "fsmonitor-watchman" section of linkgit:githooks[5].
++
+Note that the definition of `core.fsmonitor` was extended to allow
+boolean values in addition to hook pathnames; this matters if you
+concurrently use multiple versions of Git, such as one version on
+the command line and another version in an IDE tool. Git versions
+2.35.1 and prior will not understand the boolean values and will
+consider the "true" or "false" values as hook pathnames to be
+invoked. Git versions 2.26 through 2.35.1 default to hook protocol
+V2 and will fall back to no fsmonitor (full scan). Git versions
+prior to 2.26 default to hook protocol V1 and will silently
+assume there were no changes to report (no scan), so status
+commands may report incomplete results. For this reason, it is
+best to upgrade all of your Git versions before using the built-in
+file system monitor.
core.fsmonitorHookVersion::
- Sets the version of hook that is to be used when calling fsmonitor.
- There are currently versions 1 and 2. When this is not set,
- version 2 will be tried first and if it fails then version 1
- will be tried. Version 1 uses a timestamp as input to determine
- which files have changes since that time but some monitors
- like watchman have race conditions when used with a timestamp.
- Version 2 uses an opaque string so that the monitor can return
- something that can be used to determine what files have changed
- without race conditions.
+ Sets the protocol version to be used when invoking the
+ "fsmonitor" hook.
++
+There are currently versions 1 and 2. When this is not set,
+version 2 will be tried first and if it fails then version 1
+will be tried. Version 1 uses a timestamp as input to determine
+which files have changes since that time but some monitors
+like Watchman have race conditions when used with a timestamp.
+Version 2 uses an opaque string so that the monitor can return
+something that can be used to determine what files have changed
+without race conditions.
core.trustctime::
If false, the ctime differences between the index and the
@@ -547,13 +579,64 @@ core.whitespace::
is relevant for `indent-with-non-tab` and when Git fixes `tab-in-indent`
errors. The default tab width is 8. Allowed values are 1 to 63.
+core.fsync::
+ A comma-separated list of components of the repository that
+ should be hardened via the core.fsyncMethod when created or
+ modified. You can disable hardening of any component by
+ prefixing it with a '-'. Items that are not hardened may be
+ lost in the event of an unclean system shutdown. Unless you
+ have special requirements, it is recommended that you leave
+ this option empty or pick one of `committed`, `added`,
+ or `all`.
++
+When this configuration is encountered, the set of components starts with
+the platform default value, disabled components are removed, and additional
+components are added. `none` resets the state so that the platform default
+is ignored.
++
+The empty string resets the fsync configuration to the platform
+default. The default on most platforms is equivalent to
+`core.fsync=committed,-loose-object`, which has good performance,
+but risks losing recent work in the event of an unclean system shutdown.
++
+* `none` clears the set of fsynced components.
+* `loose-object` hardens objects added to the repo in loose-object form.
+* `pack` hardens objects added to the repo in packfile form.
+* `pack-metadata` hardens packfile bitmaps and indexes.
+* `commit-graph` hardens the commit graph file.
+* `index` hardens the index when it is modified.
+* `objects` is an aggregate option that is equivalent to
+ `loose-object,pack`.
+* `reference` hardens references modified in the repo.
+* `derived-metadata` is an aggregate option that is equivalent to
+ `pack-metadata,commit-graph`.
+* `committed` is an aggregate option that is currently equivalent to
+ `objects`. This mode sacrifices some performance to ensure that work
+ that is committed to the repository with `git commit` or similar commands
+ is hardened.
+* `added` is an aggregate option that is currently equivalent to
+ `committed,index`. This mode sacrifices additional performance to
+ ensure that the results of commands like `git add` and similar operations
+ are hardened.
+* `all` is an aggregate option that syncs all individual components above.
+
+core.fsyncMethod::
+ A value indicating the strategy Git will use to harden repository data
+ using fsync and related primitives.
++
+* `fsync` uses the fsync() system call or platform equivalents.
+* `writeout-only` issues pagecache writeback requests, but depending on the
+ filesystem and storage hardware, data added to the repository may not be
+ durable in the event of a system crash. This is the default mode on macOS.
+
core.fsyncObjectFiles::
This boolean will enable 'fsync()' when writing object files.
+ This setting is deprecated. Use core.fsync instead.
+
-This is a total waste of time and effort on a filesystem that orders
-data writes properly, but can be useful for filesystems that do not use
-journalling (traditional UNIX filesystems) or that only journal metadata
-and not file contents (OS X's HFS+, or Linux ext3 with "data=writeback").
+This setting affects data added to the Git repository in loose-object
+form. When set to true, Git will issue an fsync or similar system call
+to flush caches so that loose objects remain consistent in the face
+of an unclean system shutdown.
core.preloadIndex::
Enable parallel index preload for operations like 'git diff'
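
As a rough illustration of the new fsync settings (assuming Git 2.36 or
later), one might harden everything written by `git add` and `git commit`
while keeping the default sync method:

    $ git config core.fsync added
    $ git config core.fsyncMethod fsync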
diff --git a/Documentation/config/extensions.txt b/Documentation/config/extensions.txt
index 4e23d73..bccaec7 100644
--- a/Documentation/config/extensions.txt
+++ b/Documentation/config/extensions.txt
@@ -6,3 +6,34 @@ extensions.objectFormat::
Note that this setting should only be set by linkgit:git-init[1] or
linkgit:git-clone[1]. Trying to change it after initialization will not
work and will produce hard-to-diagnose issues.
+
+extensions.worktreeConfig::
+ If enabled, then worktrees will load config settings from the
+ `$GIT_DIR/config.worktree` file in addition to the
+ `$GIT_COMMON_DIR/config` file. Note that `$GIT_COMMON_DIR` and
+ `$GIT_DIR` are the same for the main working tree, while other
+ working trees have `$GIT_DIR` equal to
+ `$GIT_COMMON_DIR/worktrees/<id>/`. The settings in the
+ `config.worktree` file will override settings from any other
+ config files.
++
+When enabling `extensions.worktreeConfig`, you must be careful to move
+certain values from the common config file to the main working tree's
+`config.worktree` file, if present:
++
+* `core.worktree` must be moved from `$GIT_COMMON_DIR/config` to
+ `$GIT_COMMON_DIR/config.worktree`.
+* If `core.bare` is true, then it must be moved from `$GIT_COMMON_DIR/config`
+ to `$GIT_COMMON_DIR/config.worktree`.
++
+It may also be beneficial to adjust the locations of `core.sparseCheckout`
+and `core.sparseCheckoutCone` depending on your desire for customizable
+sparse-checkout settings for each worktree. By default, the `git
+sparse-checkout` builtin enables `extensions.worktreeConfig`, assigns
+these config values on a per-worktree basis, and uses the
+`$GIT_DIR/info/sparse-checkout` file to specify the sparsity for each
+worktree independently. See linkgit:git-sparse-checkout[1] for more
+details.
++
+For historical reasons, `extensions.worktreeConfig` is respected
+regardless of the `core.repositoryFormatVersion` setting.
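
For illustration only, a sketch of enabling the extension by hand and
moving a `core.worktree` value into the main working tree's
`config.worktree` (the path is hypothetical, and the `git sparse-checkout`
builtin normally performs this upgrade for you):

    $ git config extensions.worktreeConfig true
    $ git config --worktree core.worktree /path/to/main-worktree
    $ git config --unset core.worktree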
diff --git a/Documentation/config/fetch.txt b/Documentation/config/fetch.txt
index 63748c0..cd65d23 100644
--- a/Documentation/config/fetch.txt
+++ b/Documentation/config/fetch.txt
@@ -56,18 +56,19 @@ fetch.output::
OUTPUT in linkgit:git-fetch[1] for detail.
fetch.negotiationAlgorithm::
- Control how information about the commits in the local repository is
- sent when negotiating the contents of the packfile to be sent by the
- server. Set to "skipping" to use an algorithm that skips commits in an
- effort to converge faster, but may result in a larger-than-necessary
- packfile; or set to "noop" to not send any information at all, which
- will almost certainly result in a larger-than-necessary packfile, but
- will skip the negotiation step.
- The default is "default" which instructs Git to use the default algorithm
- that never skips commits (unless the server has acknowledged it or one
- of its descendants). If `feature.experimental` is enabled, then this
- setting defaults to "skipping".
- Unknown values will cause 'git fetch' to error out.
+ Control how information about the commits in the local repository
+ is sent when negotiating the contents of the packfile to be sent by
+ the server. Set to "consecutive" to use an algorithm that walks
+ over consecutive commits checking each one. Set to "skipping" to
+ use an algorithm that skips commits in an effort to converge
+ faster, but may result in a larger-than-necessary packfile; or set
+ to "noop" to not send any information at all, which will almost
+ certainly result in a larger-than-necessary packfile, but will skip
+ the negotiation step. Set to "default" to override settings made
+ previously and use the default behaviour. The default is normally
+ "consecutive", but if `feature.experimental` is true, then the
+ default is "skipping". Unknown values will cause 'git fetch' to
+ error out.
+
See also the `--negotiate-only` and `--negotiation-tip` options to
linkgit:git-fetch[1].
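
For example, the "skipping" algorithm could be tried for a single fetch
without changing any permanent configuration:

    $ git -c fetch.negotiationAlgorithm=skipping fetch origin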
diff --git a/Documentation/config/gpg.txt b/Documentation/config/gpg.txt
index 0cb189a..86892ad 100644
--- a/Documentation/config/gpg.txt
+++ b/Documentation/config/gpg.txt
@@ -37,7 +37,7 @@ gpg.minTrustLevel::
gpg.ssh.defaultKeyCommand::
This command that will be run when user.signingkey is not set and a ssh
signature is requested. On successful exit a valid ssh public key is
- expected in the first line of its output. To automatically use the first
+ expected in the first line of its output. To automatically use the first
available key from your ssh-agent set this to "ssh-add -L".
gpg.ssh.allowedSignersFile::
@@ -66,7 +66,7 @@ This way only committers with an already valid key can add or change keys in the
+
Since OpensSSH 8.8 this file allows specifying a key lifetime using valid-after &
valid-before options. Git will mark signatures as valid if the signing key was
-valid at the time of the signatures creation. This allows users to change a
+valid at the time of the signature's creation. This allows users to change a
signing key without invalidating all previously made signatures.
+
Using a SSH CA key with the cert-authority option
diff --git a/Documentation/config/remote.txt b/Documentation/config/remote.txt
index a8e6437..0678b4b 100644
--- a/Documentation/config/remote.txt
+++ b/Documentation/config/remote.txt
@@ -82,5 +82,7 @@ remote.<name>.promisor::
objects.
remote.<name>.partialclonefilter::
- The filter that will be applied when fetching from this
- promisor remote.
+ The filter that will be applied when fetching from this promisor remote.
+ Changing or clearing this value will only affect fetches for new commits.
+ To fetch associated objects for commits already present in the local object
+ database, use the `--refetch` option of linkgit:git-fetch[1].
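
For example, after changing the filter of a hypothetical `origin`
promisor remote, a `--refetch` picks up the objects matching the new
filter (assuming Git 2.36 or later):

    $ git config remote.origin.partialclonefilter blob:limit=100k
    $ git fetch --refetch origin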
diff --git a/Documentation/config/repack.txt b/Documentation/config/repack.txt
index 9c413e1..41ac695 100644
--- a/Documentation/config/repack.txt
+++ b/Documentation/config/repack.txt
@@ -25,3 +25,8 @@ repack.writeBitmaps::
space and extra time spent on the initial repack. This has
no effect if multiple packfiles are created.
Defaults to true on bare repos, false otherwise.
+
+repack.updateServerInfo::
+ If set to false, linkgit:git-repack[1] will not run
+ linkgit:git-update-server-info[1]. Defaults to true. Can be overridden
+ when true by the `-n` option of linkgit:git-repack[1].
diff --git a/Documentation/config/reset.txt b/Documentation/config/reset.txt
deleted file mode 100644
index 63b7c45..0000000
--- a/Documentation/config/reset.txt
+++ /dev/null
@@ -1,2 +0,0 @@
-reset.quiet::
- When set to true, 'git reset' will default to the '--quiet' option.
diff --git a/Documentation/config/sparse.txt b/Documentation/config/sparse.txt
new file mode 100644
index 0000000..aff49a8
--- /dev/null
+++ b/Documentation/config/sparse.txt
@@ -0,0 +1,27 @@
+sparse.expectFilesOutsideOfPatterns::
+ Typically with sparse checkouts, files not matching any
+ sparsity patterns are marked with a SKIP_WORKTREE bit in the
+ index and are missing from the working tree. Accordingly, Git
+ will ordinarily check whether files with the SKIP_WORKTREE bit
+ are in fact present in the working tree contrary to
+ expectations. If Git finds any, it marks those paths as
+ present by clearing the relevant SKIP_WORKTREE bits. This
+ option can be used to tell Git that such
+ present-despite-skipped files are expected and to stop
+ checking for them.
++
+The default is `false`, which allows Git to automatically recover
+from the list of files in the index and working tree falling out of
+sync.
++
+Set this to `true` if you are in a setup where some external factor
+relieves Git of the responsibility for maintaining the consistency
+between the presence of working tree files and sparsity patterns. For
+example, you might have a Git-aware virtual file system with a robust
+mechanism for keeping the working tree and the sparsity patterns up to
+date based on access patterns.
++
+Regardless of this setting, Git does not check for
+present-despite-skipped files unless sparse checkout is enabled, so
+this config option has no effect unless `core.sparseCheckout` is
+`true`.
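
A minimal sketch, assuming a cone-mode sparse checkout is in use and some
external mechanism (such as a virtual file system) keeps the working tree
and the sparsity patterns consistent:

    $ git sparse-checkout set --cone src
    $ git config sparse.expectFilesOutsideOfPatterns true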
diff --git a/Documentation/config/stash.txt b/Documentation/config/stash.txt
index 9ed7752..b9f609e 100644
--- a/Documentation/config/stash.txt
+++ b/Documentation/config/stash.txt
@@ -1,10 +1,3 @@
-stash.useBuiltin::
- Unused configuration variable. Used in Git versions 2.22 to
- 2.26 as an escape hatch to enable the legacy shellscript
- implementation of stash. Now the built-in rewrite of it in C
- is always used. Setting this will emit a warning, to alert any
- remaining users that setting this now does nothing.
-
stash.showIncludeUntracked::
If this is set to true, the `git stash show` command will show
the untracked files of a stash entry. Defaults to false. See
diff --git a/Documentation/config/submodule.txt b/Documentation/config/submodule.txt
index ee454f8..6490527 100644
--- a/Documentation/config/submodule.txt
+++ b/Documentation/config/submodule.txt
@@ -59,18 +59,33 @@ submodule.active::
submodule.recurse::
A boolean indicating if commands should enable the `--recurse-submodules`
- option by default.
- Applies to all commands that support this option
- (`checkout`, `fetch`, `grep`, `pull`, `push`, `read-tree`, `reset`,
- `restore` and `switch`) except `clone` and `ls-files`.
+ option by default. Defaults to false.
++
+When set to true, it can be deactivated via the
+`--no-recurse-submodules` option. Note that some Git commands
+lacking this option may call some of the above commands affected by
+`submodule.recurse`; for instance `git remote update` will call
+`git fetch` but does not have a `--no-recurse-submodules` option.
+For these commands a workaround is to temporarily change the
+configuration value by using `git -c submodule.recurse=0`.
++
+The following list shows the commands that accept
+`--recurse-submodules` and whether they are supported by this
+setting.
+
+* `checkout`, `fetch`, `grep`, `pull`, `push`, `read-tree`,
+`reset`, `restore` and `switch` are always supported.
+* `clone` and `ls-files` are not supported.
+* `branch` is supported only if `submodule.propagateBranches` is
+enabled.
+
+submodule.propagateBranches::
+ [EXPERIMENTAL] A boolean that enables branching support when
+ using `--recurse-submodules` or `submodule.recurse=true`.
+ Enabling this will allow certain commands to accept
+ `--recurse-submodules` and certain commands that already accept
+ `--recurse-submodules` will now consider branches.
Defaults to false.
- When set to true, it can be deactivated via the
- `--no-recurse-submodules` option. Note that some Git commands
- lacking this option may call some of the above commands affected by
- `submodule.recurse`; for instance `git remote update` will call
- `git fetch` but does not have a `--no-recurse-submodules` option.
- For these commands a workaround is to temporarily change the
- configuration value by using `git -c submodule.recurse=0`.
submodule.fetchJobs::
Specifies how many submodules are fetched/cloned at the same time.
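
For instance, the workaround described above for commands that lack a
`--no-recurse-submodules` option might look like:

    $ git config submodule.recurse true
    $ git -c submodule.recurse=0 remote update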
diff --git a/Documentation/diff-options.txt b/Documentation/diff-options.txt
index c89d530..3674ac4 100644
--- a/Documentation/diff-options.txt
+++ b/Documentation/diff-options.txt
@@ -34,7 +34,7 @@ endif::git-diff[]
endif::git-format-patch[]
ifdef::git-log[]
---diff-merges=(off|none|on|first-parent|1|separate|m|combined|c|dense-combined|cc)::
+--diff-merges=(off|none|on|first-parent|1|separate|m|combined|c|dense-combined|cc|remerge|r)::
--no-diff-merges::
Specify diff format to be used for merge commits. Default is
{diff-merges-default} unless `--first-parent` is in use, in which case
@@ -64,6 +64,18 @@ ifdef::git-log[]
each of the parents. Separate log entry and diff is generated
for each parent.
+
+--diff-merges=remerge:::
+--diff-merges=r:::
+--remerge-diff:::
+ With this option, two-parent merge commits are remerged to
+ create a temporary tree object -- potentially containing files
+ with conflict markers and such. A diff is then shown between
+ that temporary tree and the actual merge commit.
++
+The output emitted when this option is used is subject to change, and
+so is its interaction with other options (unless explicitly
+documented).
++
--diff-merges=combined:::
--diff-merges=c:::
-c:::
@@ -616,11 +628,8 @@ ifndef::git-format-patch[]
Also, these upper-case letters can be downcased to exclude. E.g.
`--diff-filter=ad` excludes added and deleted paths.
+
-Note that not all diffs can feature all types. For instance, diffs
-from the index to the working tree can never have Added entries
-(because the set of paths included in the diff is limited by what is in
-the index). Similarly, copied and renamed entries cannot appear if
-detection for those types is disabled.
+Note that not all diffs can feature all types. For instance, copied and
+renamed entries cannot appear if detection for those types is disabled.
-S<string>::
Look for differences that change the number of occurrences of
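
As a quick illustration of the `--remerge-diff` option added above
(assuming Git 2.36 or later; output is only produced for two-parent
merge commits in the history being shown):

    $ git log -p --remerge-diff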
diff --git a/Documentation/fetch-options.txt b/Documentation/fetch-options.txt
index e967ff1..622bd84 100644
--- a/Documentation/fetch-options.txt
+++ b/Documentation/fetch-options.txt
@@ -71,6 +71,7 @@ configuration variables documented in linkgit:git-config[1], and the
ancestors of the provided `--negotiation-tip=*` arguments,
which we have in common with the server.
+
+This is incompatible with `--recurse-submodules=[yes|on-demand]`.
Internally this is used to implement the `push.negotiate` option, see
linkgit:git-config[1].
@@ -162,6 +163,16 @@ endif::git-pull[]
behavior for a remote may be specified with the remote.<name>.tagOpt
setting. See linkgit:git-config[1].
+ifndef::git-pull[]
+--refetch::
+ Instead of negotiating with the server to avoid transferring commits and
+ associated objects that are already present locally, this option fetches
+ all objects as a fresh clone would. Use this to reapply a partial clone
+ filter from configuration or using `--filter=` when the filter
+ definition has changed. Automatic post-fetch maintenance will perform
+ object database pack consolidation to remove any duplicate objects.
+endif::git-pull[]
+
--refmap=<refspec>::
When fetching refs listed on the command line, use the
specified refspec (can be given more than once) to map the
@@ -185,15 +196,23 @@ endif::git-pull[]
ifndef::git-pull[]
--recurse-submodules[=yes|on-demand|no]::
This option controls if and under what conditions new commits of
- populated submodules should be fetched too. It can be used as a
- boolean option to completely disable recursion when set to 'no' or to
- unconditionally recurse into all populated submodules when set to
- 'yes', which is the default when this option is used without any
- value. Use 'on-demand' to only recurse into a populated submodule
- when the superproject retrieves a commit that updates the submodule's
- reference to a commit that isn't already in the local submodule
- clone. By default, 'on-demand' is used, unless
- `fetch.recurseSubmodules` is set (see linkgit:git-config[1]).
+ submodules should be fetched too. When recursing through submodules,
+ `git fetch` always attempts to fetch "changed" submodules, that is, a
+ submodule that has commits that are referenced by a newly fetched
+ superproject commit but are missing in the local submodule clone. A
+ changed submodule can be fetched as long as it is present locally e.g.
+ in `$GIT_DIR/modules/` (see linkgit:gitsubmodules[7]); if the upstream
+ adds a new submodule, that submodule cannot be fetched until it is
+ cloned e.g. by `git submodule update`.
++
+When set to 'on-demand', only changed submodules are fetched. When set
+to 'yes', all populated submodules are fetched and submodules that are
+both unpopulated and changed are fetched. When set to 'no', submodules
+are never fetched.
++
+When unspecified, this uses the value of `fetch.recurseSubmodules` if it
+is set (see linkgit:git-config[1]), defaulting to 'on-demand' if unset.
+When this option is used without any value, it defaults to 'yes'.
endif::git-pull[]
-j::
diff --git a/Documentation/git-branch.txt b/Documentation/git-branch.txt
index 731e340..c8b4f9c 100644
--- a/Documentation/git-branch.txt
+++ b/Documentation/git-branch.txt
@@ -16,7 +16,8 @@ SYNOPSIS
[--points-at <object>] [--format=<format>]
[(-r | --remotes) | (-a | --all)]
[--list] [<pattern>...]
-'git branch' [--track[=(direct|inherit)] | --no-track] [-f] <branchname> [<start-point>]
+'git branch' [--track[=(direct|inherit)] | --no-track] [-f]
+ [--recurse-submodules] <branchname> [<start-point>]
'git branch' (--set-upstream-to=<upstream> | -u <upstream>) [<branchname>]
'git branch' --unset-upstream [<branchname>]
'git branch' (-m | -M) [<oldbranch>] <newbranch>
@@ -235,6 +236,22 @@ how the `branch.<name>.remote` and `branch.<name>.merge` options are used.
Do not set up "upstream" configuration, even if the
branch.autoSetupMerge configuration variable is set.
+--recurse-submodules::
+ THIS OPTION IS EXPERIMENTAL! Causes the current command to
+ recurse into submodules if `submodule.propagateBranches` is
+ enabled. See `submodule.propagateBranches` in
+ linkgit:git-config[1]. Currently, only branch creation is
+ supported.
++
+When used in branch creation, a new branch <branchname> will be created
+in the superproject and all of the submodules in the superproject's
+<start-point>. In submodules, the branch will point to the submodule
+commit in the superproject's <start-point> but the branch's tracking
+information will be set up based on the submodule's branches and remotes
+e.g. `git branch --recurse-submodules topic origin/main` will create the
+submodule branch "topic" that points to the submodule commit in the
+superproject's "origin/main", but tracks the submodule's "origin/main".
+
--set-upstream::
As this option had confusing syntax, it is no longer supported.
Please use `--track` or `--set-upstream-to` instead.
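
A rough sketch of the experimental flow described above (the branch name
and start point are illustrative):

    $ git config submodule.propagateBranches true
    $ git branch --recurse-submodules topic origin/main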
diff --git a/Documentation/git-bundle.txt b/Documentation/git-bundle.txt
index 72ab813..7685b57 100644
--- a/Documentation/git-bundle.txt
+++ b/Documentation/git-bundle.txt
@@ -75,8 +75,11 @@ verify <file>::
cleanly to the current repository. This includes checks on the
bundle format itself as well as checking that the prerequisite
commits exist and are fully linked in the current repository.
- 'git bundle' prints a list of missing commits, if any, and exits
- with a non-zero status.
+ Then, 'git bundle' prints a list of missing commits, if any.
+ Finally, information about additional capabilities, such as "object
+ filter", is printed. See "Capabilities" in link:technical/bundle-format.html
+ for more information. The exit code is zero for success, but will
+ be nonzero if the bundle file is invalid.
list-heads <file>::
Lists the references defined in the bundle. If followed by a
diff --git a/Documentation/git-cat-file.txt b/Documentation/git-cat-file.txt
index 27b27e2..24a811f 100644
--- a/Documentation/git-cat-file.txt
+++ b/Documentation/git-cat-file.txt
@@ -9,8 +9,14 @@ git-cat-file - Provide content or type and size information for repository objec
SYNOPSIS
--------
[verse]
-'git cat-file' (-t [--allow-unknown-type]| -s [--allow-unknown-type]| -e | -p | <type> | --textconv | --filters ) [--path=<path>] <object>
-'git cat-file' (--batch[=<format>] | --batch-check[=<format>]) [ --textconv | --filters ] [--follow-symlinks]
+'git cat-file' <type> <object>
+'git cat-file' (-e | -p) <object>
+'git cat-file' (-t | -s) [--allow-unknown-type] <object>
+'git cat-file' (--batch | --batch-check | --batch-command) [--batch-all-objects]
+ [--buffer] [--follow-symlinks] [--unordered]
+ [--textconv | --filters]
+'git cat-file' (--textconv | --filters)
+ [<rev>:<path|tree-ish> | --path=<path|tree-ish> <rev>]
DESCRIPTION
-----------
@@ -90,6 +96,33 @@ OPTIONS
need to specify the path, separated by whitespace. See the
section `BATCH OUTPUT` below for details.
+--batch-command::
+--batch-command=<format>::
+ Enter a command mode that reads commands and arguments from stdin. May
+ only be combined with `--buffer`, `--textconv` or `--filters`. In the
+ case of `--textconv` or `--filters`, the input lines also need to specify
+ the path, separated by whitespace. See the section `BATCH OUTPUT` below
+ for details.
++
+`--batch-command` recognizes the following commands:
++
+--
+contents <object>::
+ Print object contents for object reference `<object>`. This corresponds to
+ the output of `--batch`.
+
+info <object>::
+ Print object info for object reference `<object>`. This corresponds to the
+ output of `--batch-check`.
+
+flush::
+ Used with `--buffer` to execute all preceding commands that were issued
+ since the beginning or since the last flush was issued. When `--buffer`
+ is used, no output will come until a `flush` is issued. When `--buffer`
+ is not used, commands are flushed each time without issuing `flush`.
+--
++
+
--batch-all-objects::
Instead of reading a list of objects on stdin, perform the
requested batch operation on all objects in the repository and
@@ -104,7 +137,7 @@ OPTIONS
that a process can interactively read and write from
`cat-file`. With this option, the output uses normal stdio
buffering; this is much more efficient when invoking
- `--batch-check` on a large number of objects.
+ `--batch-check` or `--batch-command` on a large number of objects.
--unordered::
When `--batch-all-objects` is in use, visit objects in an
@@ -196,6 +229,13 @@ from stdin, one per line, and print information about them. By default,
the whole line is considered as an object, as if it were fed to
linkgit:git-rev-parse[1].
+When `--batch-command` is given, `cat-file` will read commands from stdin,
+one per line, and print information based on the command given. With
+`--batch-command`, the `info` command followed by an object will print
+information about the object the same way `--batch-check` would, and the
+`contents` command followed by an object prints contents in the same way
+`--batch` would.
+
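
For instance, a rough sketch of driving `--batch-command` from a shell
(the object name `HEAD` is only illustrative):

    $ printf 'info HEAD\ncontents HEAD\n' | git cat-file --batch-command
    $ printf 'info HEAD\nflush\n' | git cat-file --batch-command --buffer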
You can specify the information shown for each object by using a custom
`<format>`. The `<format>` is copied literally to stdout for each
object, with placeholders of the form `%(atom)` expanded, followed by a
@@ -231,9 +271,9 @@ newline. The available atoms are:
If no format is specified, the default format is `%(objectname)
%(objecttype) %(objectsize)`.
-If `--batch` is specified, the object information is followed by the
-object contents (consisting of `%(objectsize)` bytes), followed by a
-newline.
+If `--batch` is specified, or if `--batch-command` is used with the `contents`
+command, the object information is followed by the object contents (consisting
+of `%(objectsize)` bytes), followed by a newline.
For example, `--batch` without a custom format would produce:
diff --git a/Documentation/git-check-ignore.txt b/Documentation/git-check-ignore.txt
index 0c3924a..2892799 100644
--- a/Documentation/git-check-ignore.txt
+++ b/Documentation/git-check-ignore.txt
@@ -33,7 +33,7 @@ OPTIONS
Instead of printing the paths that are excluded, for each path
that matches an exclude pattern, print the exclude pattern
together with the path. (Matching an exclude pattern usually
- means the path is excluded, but if the pattern begins with '!'
+ means the path is excluded, but if the pattern begins with "`!`"
then it is a negated pattern and matching it means the path is
NOT excluded.)
+
@@ -77,7 +77,7 @@ If `--verbose` is specified, the output is a series of lines of the form:
<pathname> is the path of a file being queried, <pattern> is the
matching pattern, <source> is the pattern's source file, and <linenum>
is the line number of the pattern within that source. If the pattern
-contained a `!` prefix or `/` suffix, it will be preserved in the
+contained a "`!`" prefix or "`/`" suffix, it will be preserved in the
output. <source> will be an absolute path when referring to the file
configured by `core.excludesFile`, or relative to the repository root
when referring to `.git/info/exclude` or a per-directory exclude file.
diff --git a/Documentation/git-checkout-index.txt b/Documentation/git-checkout-index.txt
index 4d33e7b..01dbd5c 100644
--- a/Documentation/git-checkout-index.txt
+++ b/Documentation/git-checkout-index.txt
@@ -12,6 +12,7 @@ SYNOPSIS
'git checkout-index' [-u] [-q] [-a] [-f] [-n] [--prefix=<string>]
[--stage=<number>|all]
[--temp]
+ [--ignore-skip-worktree-bits]
[-z] [--stdin]
[--] [<file>...]
@@ -37,8 +38,9 @@ OPTIONS
-a::
--all::
- checks out all files in the index. Cannot be used
- together with explicit filenames.
+ checks out all files in the index except for those with the
+ skip-worktree bit set (see `--ignore-skip-worktree-bits`).
+ Cannot be used together with explicit filenames.
-n::
--no-create::
@@ -59,6 +61,10 @@ OPTIONS
write the content to temporary files. The temporary name
associations will be written to stdout.
+--ignore-skip-worktree-bits::
+ Check out all files, including those with the skip-worktree bit
+ set.
+
--stdin::
Instead of taking list of paths from the command line,
read list of paths from the standard input. Paths are
diff --git a/Documentation/git-clone.txt b/Documentation/git-clone.txt
index 984d194..632bd13 100644
--- a/Documentation/git-clone.txt
+++ b/Documentation/git-clone.txt
@@ -16,7 +16,7 @@ SYNOPSIS
[--depth <depth>] [--[no-]single-branch] [--no-tags]
[--recurse-submodules[=<pathspec>]] [--[no-]shallow-submodules]
[--[no-]remote-submodules] [--jobs <n>] [--sparse] [--[no-]reject-shallow]
- [--filter=<filter>] [--] <repository>
+ [--filter=<filter> [--also-filter-submodules]] [--] <repository>
[<directory>]
DESCRIPTION
@@ -182,6 +182,11 @@ objects from the source repository into a pack in the cloned repository.
at least `<size>`. For more details on filter specifications, see
the `--filter` option in linkgit:git-rev-list[1].
+--also-filter-submodules::
+ Also apply the partial clone filter to any submodules in the repository.
+ Requires `--filter` and `--recurse-submodules`. This can be turned on by
+ default by setting the `clone.filterSubmodules` config option.
+
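
A sketch of a partial clone that also filters its submodules (the URL is
hypothetical and the server must support partial clone):

    $ git clone --filter=blob:none --recurse-submodules \
          --also-filter-submodules https://example.com/repo.git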
--mirror::
Set up a mirror of the source repository. This implies `--bare`.
Compared to `--bare`, `--mirror` not only maps local branches of the
diff --git a/Documentation/git-config.txt b/Documentation/git-config.txt
index 2285eff..bdcfd94 100644
--- a/Documentation/git-config.txt
+++ b/Documentation/git-config.txt
@@ -141,9 +141,13 @@ from all available files.
See also <<FILES>>.
--worktree::
- Similar to `--local` except that `.git/config.worktree` is
+ Similar to `--local` except that `$GIT_DIR/config.worktree` is
read from or written to if `extensions.worktreeConfig` is
- present. If not it's the same as `--local`.
+ enabled. If not it's the same as `--local`. Note that `$GIT_DIR`
+ is equal to `$GIT_COMMON_DIR` for the main working tree, but is of
+ the form `$GIT_DIR/worktrees/<id>/` for other working trees. See
+ linkgit:git-worktree[1] to learn how to enable
+ `extensions.worktreeConfig`.
-f <config-file>::
--file <config-file>::
diff --git a/Documentation/git-credential-cache--daemon.txt b/Documentation/git-credential-cache--daemon.txt
index 7051c6b..01e1c21 100644
--- a/Documentation/git-credential-cache--daemon.txt
+++ b/Documentation/git-credential-cache--daemon.txt
@@ -1,5 +1,5 @@
-git-credential-cache--daemon(1)
-===============================
+git-credential-cache{litdd}daemon(1)
+====================================
NAME
----
@@ -8,7 +8,7 @@ git-credential-cache--daemon - Temporarily store user credentials in memory
SYNOPSIS
--------
[verse]
-git credential-cache--daemon [--debug] <socket>
+'git credential-cache{litdd}daemon' [--debug] <socket>
DESCRIPTION
-----------
diff --git a/Documentation/git-fetch-pack.txt b/Documentation/git-fetch-pack.txt
index c975884..46747d5 100644
--- a/Documentation/git-fetch-pack.txt
+++ b/Documentation/git-fetch-pack.txt
@@ -101,6 +101,10 @@ be in a separate packet, and the list must end with a flush packet.
current shallow boundary instead of from the tip of each
remote branch history.
+--refetch::
+ Skips negotiating commits with the server in order to fetch all matching
+ objects. Use to reapply a new partial clone blob/tree filter.
+
--no-progress::
Do not show the progress.
diff --git a/Documentation/git-fetch.txt b/Documentation/git-fetch.txt
index 550c16c..e9d3646 100644
--- a/Documentation/git-fetch.txt
+++ b/Documentation/git-fetch.txt
@@ -287,12 +287,10 @@ include::transfer-data-leaks.txt[]
BUGS
----
-Using --recurse-submodules can only fetch new commits in already checked
-out submodules right now. When e.g. upstream added a new submodule in the
-just fetched commits of the superproject the submodule itself cannot be
-fetched, making it impossible to check out that submodule later without
-having to do a fetch again. This is expected to be fixed in a future Git
-version.
+Using --recurse-submodules can only fetch new commits in submodules that are
+present locally e.g. in `$GIT_DIR/modules/`. If the upstream adds a new
+submodule, that submodule cannot be fetched until it is cloned e.g. by `git
+submodule update`. This is expected to be fixed in a future Git version.
SEE ALSO
--------
diff --git a/Documentation/git-fsmonitor--daemon.txt b/Documentation/git-fsmonitor--daemon.txt
new file mode 100644
index 0000000..cc142fb
--- /dev/null
+++ b/Documentation/git-fsmonitor--daemon.txt
@@ -0,0 +1,75 @@
+git-fsmonitor{litdd}daemon(1)
+=============================
+
+NAME
+----
+git-fsmonitor--daemon - A Built-in File System Monitor
+
+SYNOPSIS
+--------
+[verse]
+'git fsmonitor{litdd}daemon' start
+'git fsmonitor{litdd}daemon' run
+'git fsmonitor{litdd}daemon' stop
+'git fsmonitor{litdd}daemon' status
+
+DESCRIPTION
+-----------
+
+A daemon to watch the working directory for file and directory
+changes using platform-specific file system notification facilities.
+
+This daemon communicates directly with commands like `git status`
+using the link:technical/api-simple-ipc.html[simple IPC] interface
+instead of the slower linkgit:githooks[5] interface.
+
+This daemon is built into Git so that no third-party tools are
+required.
+
+OPTIONS
+-------
+
+start::
+ Starts a daemon in the background.
+
+run::
+ Runs a daemon in the foreground.
+
+stop::
+ Stops the daemon running in the current working
+ directory, if present.
+
+status::
+ Exits with zero status if a daemon is watching the
+ current working directory.
+
+REMARKS
+-------
+
+This daemon is a long running process used to watch a single working
+directory and maintain a list of the recently changed files and
+directories. Performance of commands such as `git status` can be
+increased if they just ask for a summary of changes to the working
+directory and can avoid scanning the disk.
+
+When `core.fsmonitor` is set to `true` (see linkgit:git-config[1])
+commands, such as `git status`, will ask the daemon for changes and
+automatically start it (if necessary).
+
+For more information see the "File System Monitor" section in
+linkgit:git-update-index[1].
+
+CAVEATS
+-------
+
+The fsmonitor daemon does not currently know about submodules and does
+not know to filter out file system events that happen within a
+submodule. If the fsmonitor daemon is watching a superproject and a file
+is modified within the working directory of a submodule, it will report
+the change (as happening against the superproject). However, the client
+will properly ignore these extra events, so performance may be affected
+but it will not cause an incorrect result.
+
+GIT
+---
+Part of the linkgit:git[1] suite
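
A typical interaction with the daemon might look like the following
(assuming a supported platform):

    $ git config core.fsmonitor true
    $ git status                      # implicitly starts the daemon
    $ git fsmonitor--daemon status
    $ git fsmonitor--daemon stop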
diff --git a/Documentation/git-help.txt b/Documentation/git-help.txt
index 44ea63c..239c68d 100644
--- a/Documentation/git-help.txt
+++ b/Documentation/git-help.txt
@@ -8,8 +8,8 @@ git-help - Display help information about Git
SYNOPSIS
--------
[verse]
-'git help' [-a|--all [--[no-]verbose]]
- [[-i|--info] [-m|--man] [-w|--web]] [<command>|<guide>]
+'git help' [-a|--all] [--[no-]verbose] [--[no-]external-commands] [--[no-]aliases]
+'git help' [[-i|--info] [-m|--man] [-w|--web]] [<command>|<guide>]
'git help' [-g|--guides]
'git help' [-c|--config]
@@ -46,8 +46,15 @@ OPTIONS
-------
-a::
--all::
- Prints all the available commands on the standard output. This
- option overrides any given command or guide name.
+ Prints all the available commands on the standard output.
+
+--no-external-commands::
+ When used with `--all`, exclude the listing of external "git-*"
+ commands found in the `$PATH`.
+
+--no-aliases::
+ When used with `--all`, exclude the listing of configured
+ aliases.
--verbose::
When used with `--all` print description for all recognized
diff --git a/Documentation/git-hook.txt b/Documentation/git-hook.txt
new file mode 100644
index 0000000..77c3a8a
--- /dev/null
+++ b/Documentation/git-hook.txt
@@ -0,0 +1,45 @@
+git-hook(1)
+===========
+
+NAME
+----
+git-hook - Run git hooks
+
+SYNOPSIS
+--------
+[verse]
+'git hook' run [--ignore-missing] <hook-name> [-- <hook-args>]
+
+DESCRIPTION
+-----------
+
+A command interface to running git hooks (see linkgit:githooks[5]),
+for use by other scripted git commands.
+
+SUBCOMMANDS
+-----------
+
+run::
+ Run the `<hook-name>` hook. See linkgit:githooks[5] for
+ supported hook names.
++
+
+Any positional arguments to the hook should be passed after a
+mandatory `--` (or `--end-of-options`, see linkgit:gitcli[7]). See
+linkgit:githooks[5] for arguments hooks might expect (if any).
+
+OPTIONS
+-------
+
+--ignore-missing::
+ Ignore any missing hook by quietly returning zero. Used for
+ tools that want to do a blind one-shot run of a hook that may
+ or may not be present.
+
+SEE ALSO
+--------
+linkgit:githooks[5]
+
+GIT
+---
+Part of the linkgit:git[1] suite
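
For example, a script might invoke the `pre-commit` hook directly while
tolerating its absence:

    $ git hook run --ignore-missing pre-commit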
diff --git a/Documentation/git-index-pack.txt b/Documentation/git-index-pack.txt
index 1f1e359..4e71c25 100644
--- a/Documentation/git-index-pack.txt
+++ b/Documentation/git-index-pack.txt
@@ -122,6 +122,14 @@ This option cannot be used with --stdin.
+
include::object-format-disclaimer.txt[]
+--promisor[=<message>]::
+ Before committing the pack-index, create a .promisor file for this
+ pack. Particularly helpful when writing a promisor pack with --fix-thin
+ since the name of the pack is not final until the pack has been fully
+ written. If a `<message>` is provided, then that content will be
+ written to the .promisor file for future reference. See
+ link:technical/partial-clone.html[partial clone] for more information.
+
NOTES
-----
diff --git a/Documentation/git-ls-files.txt b/Documentation/git-ls-files.txt
index 48cc7c0..0dabf3f 100644
--- a/Documentation/git-ls-files.txt
+++ b/Documentation/git-ls-files.txt
@@ -156,7 +156,7 @@ a space) at the start of each line:
--recurse-submodules::
Recursively calls ls-files on each active submodule in the repository.
- Currently there is only support for the --cached mode.
+ Currently there is only support for the --cached and --stage modes.
--abbrev[=<n>]::
Instead of showing the full 40-byte hexadecimal object
diff --git a/Documentation/git-ls-tree.txt b/Documentation/git-ls-tree.txt
index db02d6d..0240adb 100644
--- a/Documentation/git-ls-tree.txt
+++ b/Documentation/git-ls-tree.txt
@@ -10,7 +10,7 @@ SYNOPSIS
--------
[verse]
'git ls-tree' [-d] [-r] [-t] [-l] [-z]
- [--name-only] [--name-status] [--full-name] [--full-tree] [--abbrev[=<n>]]
+ [--name-only] [--name-status] [--object-only] [--full-name] [--full-tree] [--abbrev[=<n>]] [--format=<format>]
<tree-ish> [<path>...]
DESCRIPTION
@@ -59,6 +59,15 @@ OPTIONS
--name-only::
--name-status::
List only filenames (instead of the "long" output), one per line.
+ Cannot be combined with `--object-only`.
+
+--object-only::
+ List only names of the objects, one per line. Cannot be combined
+ with `--name-only` or `--name-status`.
+ This is equivalent to specifying `--format='%(objectname)'`, but
+ for both this option and that exact format the command takes a
+ hand-optimized codepath instead of going through the generic
+ formatting mechanism.
--abbrev[=<n>]::
Instead of showing the full 40-byte hexadecimal object
@@ -74,6 +83,16 @@ OPTIONS
Do not limit the listing to the current working directory.
Implies --full-name.
+--format=<format>::
+ A string that interpolates `%(fieldname)` from the result
+ being shown. It also interpolates `%%` to `%`, and
+ `%xx` where `xx` are hex digits interpolates to character
+ with hex code `xx`; for example `%00` interpolates to
+ `\0` (NUL), `%09` to `\t` (TAB) and `%0a` to `\n` (LF).
+ When specified, `--format` cannot be combined with other
+ format-altering options, including `--long`, `--name-only`
+ and `--object-only`.
+
[<path>...]::
When paths are given, show them (note that this isn't really raw
pathnames, but rather a list of patterns to match). Otherwise
@@ -82,16 +101,29 @@ OPTIONS
Output Format
-------------
- <mode> SP <type> SP <object> TAB <file>
+
+The output format of `ls-tree` is determined by either the `--format`
+option, or other format-altering options such as `--name-only` etc.
+(see `--format` above).
+
+The use of certain `--format` directives is equivalent to using those
+options, but invoking the full formatting machinery can be slower than
+using an appropriate formatting option.
+
+In cases where the `--format` would exactly map to an existing option,
+`ls-tree` will use the appropriate faster path. Thus the default format
+is equivalent to:
+
+ %(objectmode) %(objecttype) %(objectname)%x09%(path)
This output format is compatible with what `--index-info --stdin` of
'git update-index' expects.
When the `-l` option is used, format changes to
- <mode> SP <type> SP <object> SP <object size> TAB <file>
+ %(objectmode) %(objecttype) %(objectname) %(objectsize:padded)%x09%(path)
-Object size identified by <object> is given in bytes, and right-justified
+Object size identified by <objectname> is given in bytes, and right-justified
with minimum width of 7 characters. Object size is given only for blobs
(file) entries; for other entries `-` character is used in place of size.
@@ -100,6 +132,34 @@ quoted as explained for the configuration variable `core.quotePath`
(see linkgit:git-config[1]). Using `-z` the filename is output
verbatim and the line is terminated by a NUL byte.
+Customized format:
+
+It is possible to print in a custom format by using the `--format` option,
+which is able to interpolate different fields using a `%(fieldname)` notation.
+For example, if you only care about the "objectname" and "path" fields, you
+can execute with a specific "--format" like
+
+ git ls-tree --format='%(objectname) %(path)' <tree-ish>
+
+FIELD NAMES
+-----------
+
+Various values from structured fields can be used to interpolate
+into the resulting output. For each output line, the following
+names can be used:
+
+objectmode::
+ The mode of the object.
+objecttype::
+ The type of the object (`commit`, `blob` or `tree`).
+objectname::
+ The name of the object.
+objectsize[:padded]::
+ The size of a `blob` object ("-" if it's a `commit` or `tree`).
+ It also supports a padded format of size with "%(objectsize:padded)".
+path::
+ The pathname of the object.
+
GIT
---
Part of the linkgit:git[1] suite
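
Two further illustrative invocations (the tree-ish `HEAD` is arbitrary):
the fast `--object-only` path, and a custom format using the padded size
field:

    $ git ls-tree --object-only HEAD
    $ git ls-tree --format='%(objectname) %(objectsize:padded)%x09%(path)' HEAD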
diff --git a/Documentation/git-maintenance.txt b/Documentation/git-maintenance.txt
index e2cfb68..e56bad2 100644
--- a/Documentation/git-maintenance.txt
+++ b/Documentation/git-maintenance.txt
@@ -10,6 +10,8 @@ SYNOPSIS
--------
[verse]
'git maintenance' run [<options>]
+'git maintenance' start [--scheduler=<scheduler>]
+'git maintenance' (stop|register|unregister)
DESCRIPTION
@@ -29,6 +31,24 @@ Git repository.
SUBCOMMANDS
-----------
+run::
+ Run one or more maintenance tasks. If one or more `--task` options
+ are specified, then those tasks are run in that order. Otherwise,
+ the tasks are determined by which `maintenance.<task>.enabled`
+ config options are true. By default, only `maintenance.gc.enabled`
+ is true.
+
+start::
+ Start running maintenance on the current repository. This performs
+ the same config updates as the `register` subcommand, then updates
+ the background scheduler to run `git maintenance run --scheduled`
+ on an hourly basis.
+
+stop::
+ Halt the background maintenance schedule. The current repository
+ is not removed from the list of maintained repositories, in case
+ the background maintenance is restarted later.
+
register::
Initialize Git config values so any scheduled maintenance will
start running on this repository. This adds the repository to the
@@ -55,24 +75,6 @@ task:
setting `maintenance.auto = false` in the current repository. This config
setting will remain after a `git maintenance unregister` command.
-run::
- Run one or more maintenance tasks. If one or more `--task` options
- are specified, then those tasks are run in that order. Otherwise,
- the tasks are determined by which `maintenance.<task>.enabled`
- config options are true. By default, only `maintenance.gc.enabled`
- is true.
-
-start::
- Start running maintenance on the current repository. This performs
- the same config updates as the `register` subcommand, then updates
- the background scheduler to run `git maintenance run --scheduled`
- on an hourly basis.
-
-stop::
- Halt the background maintenance schedule. The current repository
- is not removed from the list of maintained repositories, in case
- the background maintenance is restarted later.
-
unregister::
Remove the current repository from background maintenance. This
only removes the repository from the configured list. It does not
diff --git a/Documentation/git-mktree.txt b/Documentation/git-mktree.txt
index 27fe2b3..76b44f4 100644
--- a/Documentation/git-mktree.txt
+++ b/Documentation/git-mktree.txt
@@ -31,7 +31,7 @@ OPTIONS
--batch::
Allow building of more than one tree object before exiting. Each
- tree is separated by as single blank line. The final new-line is
+ tree is separated by a single blank line. The final new-line is
optional. Note - if the `-z` option is used, lines are terminated
with NUL.
diff --git a/Documentation/git-name-rev.txt b/Documentation/git-name-rev.txt
index 5cb0eb0..ec8a27c 100644
--- a/Documentation/git-name-rev.txt
+++ b/Documentation/git-name-rev.txt
@@ -42,11 +42,37 @@ OPTIONS
--all::
List all commits reachable from all refs
---stdin::
+--annotate-stdin::
Transform stdin by substituting all the 40-character SHA-1
hexes (say $hex) with "$hex ($rev_name)". When used with
--name-only, substitute with "$rev_name", omitting $hex
- altogether. Intended for the scripter's use.
+ altogether.
++
+For example:
++
+-----------
+$ cat sample.txt
+
+An abbreviated revision 2ae0a9cb82 will not be substituted.
+The full name after substitution is 2ae0a9cb8298185a94e5998086f380a355dd8907,
+while its tree object is 70d105cc79e63b81cfdcb08a15297c23e60b07ad
+
+$ git name-rev --annotate-stdin <sample.txt
+
+An abbreviated revision 2ae0a9cb82 will not be substituted.
+The full name after substitution is 2ae0a9cb8298185a94e5998086f380a355dd8907 (master),
+while its tree object is 70d105cc79e63b81cfdcb08a15297c23e60b07ad
+
+$ git name-rev --name-only --annotate-stdin <sample.txt
+
+An abbreviated revision 2ae0a9cb82 will not be substituted.
+The full name after substitution is master,
+while its tree object is 70d105cc79e63b81cfdcb08a15297c23e60b07ad
+-----------
+
+--stdin::
+ This option is deprecated in favor of 'git name-rev --annotate-stdin'.
+ They are functionally equivalent.
--name-only::
Instead of printing both the SHA-1 and the name, print only
diff --git a/Documentation/git-read-tree.txt b/Documentation/git-read-tree.txt
index 8c3aceb..a5356a2 100644
--- a/Documentation/git-read-tree.txt
+++ b/Documentation/git-read-tree.txt
@@ -375,9 +375,14 @@ have finished your work-in-progress), attempt the merge again.
SPARSE CHECKOUT
---------------
+Note: The `update-index` and `read-tree` primitives for supporting the
+skip-worktree bit predated the introduction of
+linkgit:git-sparse-checkout[1]. Users are encouraged to use
+`sparse-checkout` in preference to these low-level primitives.
+
"Sparse checkout" allows populating the working directory sparsely.
-It uses the skip-worktree bit (see linkgit:git-update-index[1]) to tell
-Git whether a file in the working directory is worth looking at.
+It uses the skip-worktree bit (see linkgit:git-update-index[1]) to
+tell Git whether a file in the working directory is worth looking at.
'git read-tree' and other merge-based commands ('git merge', 'git
checkout'...) can help maintaining the skip-worktree bitmap and working
@@ -385,7 +390,8 @@ directory update. `$GIT_DIR/info/sparse-checkout` is used to
define the skip-worktree reference bitmap. When 'git read-tree' needs
to update the working directory, it resets the skip-worktree bit in the index
based on this file, which uses the same syntax as .gitignore files.
-If an entry matches a pattern in this file, skip-worktree will not be
+If an entry matches a pattern in this file, or the entry corresponds to
+a file present in the working tree, then skip-worktree will not be
set on that entry. Otherwise, skip-worktree will be set.
Then it compares the new skip-worktree value with the previous one. If
@@ -420,8 +426,8 @@ support.
SEE ALSO
--------
-linkgit:git-write-tree[1]; linkgit:git-ls-files[1];
-linkgit:gitignore[5]; linkgit:git-sparse-checkout[1];
+linkgit:git-write-tree[1], linkgit:git-ls-files[1],
+linkgit:gitignore[5], linkgit:git-sparse-checkout[1]
GIT
---
diff --git a/Documentation/git-remote.txt b/Documentation/git-remote.txt
index 2bebc32..cde9614 100644
--- a/Documentation/git-remote.txt
+++ b/Documentation/git-remote.txt
@@ -11,7 +11,7 @@ SYNOPSIS
[verse]
'git remote' [-v | --verbose]
'git remote add' [-t <branch>] [-m <master>] [-f] [--[no-]tags] [--mirror=(fetch|push)] <name> <URL>
-'git remote rename' <old> <new>
+'git remote rename' [--[no-]progress] <old> <new>
'git remote remove' <name>
'git remote set-head' <name> (-a | --auto | -d | --delete | <branch>)
'git remote set-branches' [--add] <name> <branch>...
diff --git a/Documentation/git-reset.txt b/Documentation/git-reset.txt
index 6f7685f..01cb4c9 100644
--- a/Documentation/git-reset.txt
+++ b/Documentation/git-reset.txt
@@ -105,10 +105,11 @@ OPTIONS
-q::
--quiet::
---no-quiet::
- Be quiet, only report errors. The default behavior is set by the
- `reset.quiet` config option. `--quiet` and `--no-quiet` will
- override the default behavior.
+ Be quiet, only report errors.
+
+--refresh::
+--no-refresh::
+ Refresh the index after a mixed reset. Enabled by default.
--pathspec-from-file=<file>::
Pathspec is passed in `<file>` instead of commandline args. If
diff --git a/Documentation/git-sparse-checkout.txt b/Documentation/git-sparse-checkout.txt
index b81dbe0..88e55f4 100644
--- a/Documentation/git-sparse-checkout.txt
+++ b/Documentation/git-sparse-checkout.txt
@@ -3,9 +3,7 @@ git-sparse-checkout(1)
NAME
----
-git-sparse-checkout - Initialize and modify the sparse-checkout
-configuration, which reduces the checkout to a set of paths
-given by a list of patterns.
+git-sparse-checkout - Reduce your working tree to a subset of tracked files
SYNOPSIS
@@ -17,8 +15,20 @@ SYNOPSIS
DESCRIPTION
-----------
-Initialize and modify the sparse-checkout configuration, which reduces
-the checkout to a set of paths given by a list of patterns.
+This command is used to create sparse checkouts, which means that it
+changes the working tree from having all tracked files present to
+having only a subset of them. It can also switch which subset of files
+is present, or undo and go back to having all tracked files present in
+the working copy.
+
+The subset of files is chosen by providing a list of directories in
+cone mode (which is recommended), or by providing a list of patterns
+in non-cone mode.
+
+When in a sparse-checkout, other Git commands behave a bit differently.
+For example, switching branches will not update paths outside the
+sparse-checkout directories/patterns, and `git commit -a` will not record
+paths outside the sparse-checkout directories/patterns as deleted.
THIS COMMAND IS EXPERIMENTAL. ITS BEHAVIOR, AND THE BEHAVIOR OF OTHER
COMMANDS IN THE PRESENCE OF SPARSE-CHECKOUTS, WILL LIKELY CHANGE IN
@@ -28,30 +38,44 @@ THE FUTURE.
COMMANDS
--------
'list'::
- Describe the patterns in the sparse-checkout file.
+ Describe the directories or patterns in the sparse-checkout file.
'set'::
- Enable the necessary config settings
- (extensions.worktreeConfig, core.sparseCheckout,
- core.sparseCheckoutCone) if they are not already enabled, and
- write a set of patterns to the sparse-checkout file from the
+ Enable the necessary sparse-checkout config settings
+ (`core.sparseCheckout`, `core.sparseCheckoutCone`, and
+ `index.sparse`) if they are not already set to the desired values,
+ and write a set of patterns to the sparse-checkout file from the
list of arguments following the 'set' subcommand. Update the
working directory to match the new patterns.
+
-When the `--stdin` option is provided, the patterns are read from
-standard in as a newline-delimited list instead of from the arguments.
+To ensure that adjusting the sparse-checkout settings within a worktree
+does not alter the sparse-checkout settings in other worktrees, the 'set'
+subcommand will upgrade your repository config to use worktree-specific
+config if not already present. The sparsity defined by the arguments to
+the 'set' subcommand is stored in the worktree-specific sparse-checkout
+file. See linkgit:git-worktree[1] and the documentation of
+`extensions.worktreeConfig` in linkgit:git-config[1] for more details.
++
+When the `--stdin` option is provided, the directories or patterns are
+read from standard in as a newline-delimited list instead of from the
+arguments.
+
When `--cone` is passed or `core.sparseCheckoutCone` is enabled, the
-input list is considered a list of directories instead of
-sparse-checkout patterns. This allows for better performance with a
-limited set of patterns (see 'CONE PATTERN SET' below). Note that the
-set command will write patterns to the sparse-checkout file to include
-all files contained in those directories (recursively) as well as
-files that are siblings of ancestor directories. The input format
-matches the output of `git ls-tree --name-only`. This includes
-interpreting pathnames that begin with a double quote (") as C-style
-quoted strings. This may become the default in the future; --no-cone
-can be passed to request non-cone mode.
+input list is considered a list of directories. This allows for
+better performance with a limited set of patterns (see 'CONE PATTERN
+SET' below). The input format matches the output of `git ls-tree
+--name-only`. This includes interpreting pathnames that begin with a
+double quote (") as C-style quoted strings. Note that the set command
+will write patterns to the sparse-checkout file to include all files
+contained in those directories (recursively) as well as files that are
+siblings of ancestor directories. This may become the default in the
+future; --no-cone can be passed to request non-cone mode.
++
+When `--no-cone` is passed or `core.sparseCheckoutCone` is not enabled,
+the input list is considered a list of patterns. This mode is harder
+to use and less performant, and is thus not recommended. See the
+"Sparse Checkout" section of linkgit:git-read-tree[1] and the "Pattern
+Set" sections below for more details.
+
Use the `--[no-]sparse-index` option to use a sparse index (the
default is to not use it). A sparse index reduces the size of the
@@ -69,11 +93,10 @@ understand the sparse directory entries index extension and may fail to
interact with your repository until it is disabled.
'add'::
- Update the sparse-checkout file to include additional patterns.
- By default, these patterns are read from the command-line arguments,
- but they can be read from stdin using the `--stdin` option. When
- `core.sparseCheckoutCone` is enabled, the given patterns are interpreted
- as directory names as in the 'set' subcommand.
+ Update the sparse-checkout file to include additional directories
+ (in cone mode) or patterns (in non-cone mode). By default, these
+ directories or patterns are read from the command-line arguments,
+ but they can be read from stdin using the `--stdin` option.
'reapply'::
Reapply the sparsity pattern rules to paths in the working tree.
@@ -117,13 +140,14 @@ decreased in utility.
SPARSE CHECKOUT
---------------
-"Sparse checkout" allows populating the working directory sparsely.
-It uses the skip-worktree bit (see linkgit:git-update-index[1]) to tell
-Git whether a file in the working directory is worth looking at. If
-the skip-worktree bit is set, then the file is ignored in the working
-directory. Git will avoid populating the contents of those files, which
-makes a sparse checkout helpful when working in a repository with many
-files, but only a few are important to the current user.
+"Sparse checkout" allows populating the working directory sparsely. It
+uses the skip-worktree bit (see linkgit:git-update-index[1]) to tell Git
+whether a file in the working directory is worth looking at. If the
+skip-worktree bit is set, and the file is not present in the working tree,
+then its absence is ignored. Git will avoid populating the contents of
+those files, which makes a sparse checkout helpful when working in a
+repository with many files, but only a few are important to the current
+user.
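To make the cone-mode workflow described above concrete, here is a small
hedged example; the directory names (`Documentation`, `builtin`, `t`) are
placeholders for whatever top-level directories exist in your repository:

------------
$ git sparse-checkout set --cone Documentation builtin
$ git sparse-checkout list
$ git sparse-checkout add t
$ git sparse-checkout disable
------------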
The `$GIT_DIR/info/sparse-checkout` file is used to define the
skip-worktree reference bitmap. When Git updates the working
diff --git a/Documentation/git-submodule.txt b/Documentation/git-submodule.txt
index 7e5f995..4d3ab6b 100644
--- a/Documentation/git-submodule.txt
+++ b/Documentation/git-submodule.txt
@@ -133,7 +133,7 @@ If you really want to remove a submodule from the repository and commit
that use linkgit:git-rm[1] instead. See linkgit:gitsubmodules[7] for removal
options.
-update [--init] [--remote] [-N|--no-fetch] [--[no-]recommend-shallow] [-f|--force] [--checkout|--rebase|--merge] [--reference <repository>] [--depth <depth>] [--recursive] [--jobs <n>] [--[no-]single-branch] [--] [<path>...]::
+update [--init] [--remote] [-N|--no-fetch] [--[no-]recommend-shallow] [-f|--force] [--checkout|--rebase|--merge] [--reference <repository>] [--depth <depth>] [--recursive] [--jobs <n>] [--[no-]single-branch] [--filter <filter spec>] [--] [<path>...]::
+
--
Update the registered submodules to match what the superproject
@@ -177,6 +177,10 @@ submodule with the `--init` option.
If `--recursive` is specified, this command will recurse into the
registered submodules, and update any nested submodules within.
+
+If `--filter <filter spec>` is specified, the given partial clone filter will be
+applied to the submodule. See linkgit:git-rev-list[1] for details on filter
+specifications.
--
set-branch (-b|--branch) <branch> [--] <path>::
set-branch (-d|--default) [--] <path>::
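As an illustrative sketch of the new `--filter` option (the filter spec shown
is just one common choice), a superproject's submodules can be populated as
blobless partial clones:

------------
$ git submodule update --init --recursive --filter blob:none
------------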
diff --git a/Documentation/git-update-index.txt b/Documentation/git-update-index.txt
index 2853f16..5ea2f2c 100644
--- a/Documentation/git-update-index.txt
+++ b/Documentation/git-update-index.txt
@@ -351,6 +351,10 @@ unchanged". Note that "assume unchanged" bit is *not* set if
the index (use `git update-index --really-refresh` if you want
to mark them as "assume unchanged").
+Sometimes users confuse the assume-unchanged bit with the
+skip-worktree bit. See the final paragraph in the "Skip-worktree bit"
+section below for an explanation of the differences.
+
EXAMPLES
--------
@@ -392,22 +396,47 @@ M foo.c
SKIP-WORKTREE BIT
-----------------
-Skip-worktree bit can be defined in one (long) sentence: When reading
-an entry, if it is marked as skip-worktree, then Git pretends its
-working directory version is up to date and read the index version
-instead.
-
-To elaborate, "reading" means checking for file existence, reading
-file attributes or file content. The working directory version may be
-present or absent. If present, its content may match against the index
-version or not. Writing is not affected by this bit, content safety
-is still first priority. Note that Git _can_ update working directory
-file, that is marked skip-worktree, if it is safe to do so (i.e.
-working directory version matches index version)
+The skip-worktree bit can be defined in one (long) sentence: Tell git
+to avoid writing the file to the working directory when reasonably
+possible, and treat the file as unchanged when it is not
+present in the working directory.
+
+Note that not all git commands will pay attention to this bit, and
+some only partially support it.
+
+The update-index flags and the read-tree capabilities relating to the
+skip-worktree bit predated the introduction of the
+linkgit:git-sparse-checkout[1] command, which provides a much easier
+way to configure and handle the skip-worktree bits. If you want to
+reduce your working tree to only deal with a subset of the files in
+the repository, we strongly encourage the use of
+linkgit:git-sparse-checkout[1] in preference to the low-level
+update-index and read-tree primitives.
+
+The primary purpose of the skip-worktree bit is to enable sparse
+checkouts, i.e. to have working directories with only a subset of
+paths present. When the skip-worktree bit is set, Git commands (such
+as `switch`, `pull`, `merge`) will avoid writing these files.
+However, these commands will sometimes write these files anyway in
+important cases such as conflicts during a merge or rebase. Git
+commands will also avoid treating the lack of such files as an
+intentional deletion; for example `git add -u` will not stage a
+deletion for these files and `git commit -a` will not make a commit
+deleting them either.
Although this bit looks similar to assume-unchanged bit, its goal is
-different from assume-unchanged bit's. Skip-worktree also takes
-precedence over assume-unchanged bit when both are set.
+different. The assume-unchanged bit is for leaving the file in the
+working tree but having Git omit checking it for changes and presuming
+that the file has not been changed (though if it can determine without
+stat'ing the file that it has changed, it is free to record the
+changes). skip-worktree tells Git to ignore the absence of the file,
+avoid updating it when possible with commands that normally update
+much of the working directory (e.g. `checkout`, `switch`, `pull`,
+etc.), and not have its absence be recorded in commits. Note that in
+sparse checkouts (set up by `git sparse-checkout` or by configuring
+core.sparseCheckout to true), if a file is marked as skip-worktree in
+the index but is found in the working tree, Git will clear the
+skip-worktree bit for that file.
SPLIT INDEX
-----------
@@ -498,7 +527,9 @@ FILE SYSTEM MONITOR
This feature is intended to speed up git operations for repos that have
large working directories.
-It enables git to work together with a file system monitor (see the
+It enables git to work together with a file system monitor (see
+linkgit:git-fsmonitor{litdd}daemon[1]
+and the
"fsmonitor-watchman" section of linkgit:githooks[5]) that can
inform it as to what files have been modified. This enables git to avoid
having to lstat() every file to find modified files.
@@ -509,8 +540,8 @@ looking for new files.
If you want to enable (or disable) this feature, it is easier to use
the `core.fsmonitor` configuration variable (see
-linkgit:git-config[1]) than using the `--fsmonitor` option to
-`git update-index` in each repository, especially if you want to do so
+linkgit:git-config[1]) than using the `--fsmonitor` option to `git
+update-index` in each repository, especially if you want to do so
across all repositories you use, because you can set the configuration
variable in your `$HOME/.gitconfig` just once and have it affect all
repositories you touch.
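Although the text above steers users toward linkgit:git-sparse-checkout[1], a
minimal sketch of toggling the skip-worktree bit by hand may help; the path is
purely illustrative, and `git ls-files -v` marks skip-worktree entries with an
`S` tag:

------------
$ git update-index --skip-worktree path/to/generated-file
$ git ls-files -v path/to/generated-file
S path/to/generated-file
$ git update-index --no-skip-worktree path/to/generated-file
------------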
diff --git a/Documentation/git-worktree.txt b/Documentation/git-worktree.txt
index 9e862fb..ada30c8 100644
--- a/Documentation/git-worktree.txt
+++ b/Documentation/git-worktree.txt
@@ -10,7 +10,7 @@ SYNOPSIS
--------
[verse]
'git worktree add' [-f] [--detach] [--checkout] [--lock [--reason <string>]] [-b <new-branch>] <path> [<commit-ish>]
-'git worktree list' [-v | --porcelain]
+'git worktree list' [-v | --porcelain [-z]]
'git worktree lock' [--reason <string>] <worktree>
'git worktree move' <worktree> <new-path>
'git worktree prune' [-n] [-v] [--expire <expire>]
@@ -25,45 +25,49 @@ Manage multiple working trees attached to the same repository.
A git repository can support multiple working trees, allowing you to check
out more than one branch at a time. With `git worktree add` a new working
-tree is associated with the repository. This new working tree is called a
-"linked working tree" as opposed to the "main working tree" prepared by
-linkgit:git-init[1] or linkgit:git-clone[1].
-A repository has one main working tree (if it's not a
-bare repository) and zero or more linked working trees. When you are done
-with a linked working tree, remove it with `git worktree remove`.
+tree is associated with the repository, along with additional metadata
+that differentiates that working tree from others in the same repository.
+The working tree, along with this metadata, is called a "worktree".
+
+This new worktree is called a "linked worktree" as opposed to the "main
+worktree" prepared by linkgit:git-init[1] or linkgit:git-clone[1].
+A repository has one main worktree (if it's not a bare repository) and
+zero or more linked worktrees. When you are done with a linked worktree,
+remove it with `git worktree remove`.
In its simplest form, `git worktree add <path>` automatically creates a
new branch whose name is the final component of `<path>`, which is
convenient if you plan to work on a new topic. For instance, `git
worktree add ../hotfix` creates new branch `hotfix` and checks it out at
-path `../hotfix`. To instead work on an existing branch in a new working
-tree, use `git worktree add <path> <branch>`. On the other hand, if you
-just plan to make some experimental changes or do testing without
-disturbing existing development, it is often convenient to create a
-'throwaway' working tree not associated with any branch. For instance,
-`git worktree add -d <path>` creates a new working tree with a detached
-`HEAD` at the same commit as the current branch.
+path `../hotfix`. To instead work on an existing branch in a new worktree,
+use `git worktree add <path> <branch>`. On the other hand, if you just
+plan to make some experimental changes or do testing without disturbing
+existing development, it is often convenient to create a 'throwaway'
+worktree not associated with any branch. For instance,
+`git worktree add -d <path>` creates a new worktree with a detached `HEAD`
+at the same commit as the current branch.
If a working tree is deleted without using `git worktree remove`, then
its associated administrative files, which reside in the repository
(see "DETAILS" below), will eventually be removed automatically (see
`gc.worktreePruneExpire` in linkgit:git-config[1]), or you can run
-`git worktree prune` in the main or any linked working tree to
-clean up any stale administrative files.
+`git worktree prune` in the main or any linked worktree to clean up any
+stale administrative files.
-If a linked working tree is stored on a portable device or network share
-which is not always mounted, you can prevent its administrative files from
-being pruned by issuing the `git worktree lock` command, optionally
-specifying `--reason` to explain why the working tree is locked.
+If the working tree for a linked worktree is stored on a portable device
+or network share which is not always mounted, you can prevent its
+administrative files from being pruned by issuing the `git worktree lock`
+command, optionally specifying `--reason` to explain why the worktree is
+locked.
COMMANDS
--------
add <path> [<commit-ish>]::
-Create `<path>` and checkout `<commit-ish>` into it. The new working directory
-is linked to the current repository, sharing everything except working
-directory specific files such as `HEAD`, `index`, etc. As a convenience,
-`<commit-ish>` may be a bare "`-`", which is synonymous with `@{-1}`.
+Create a worktree at `<path>` and checkout `<commit-ish>` into it. The new worktree
+is linked to the current repository, sharing everything except per-worktree
+files such as `HEAD`, `index`, etc. As a convenience, `<commit-ish>` may
+be a bare "`-`", which is synonymous with `@{-1}`.
+
If `<commit-ish>` is a branch name (call it `<branch>`) and is not found,
and neither `-b` nor `-B` nor `--detach` are used, but there does
@@ -84,100 +88,97 @@ branches from there if `<branch>` is ambiguous but exists on the
linkgit:git-config[1].
+
If `<commit-ish>` is omitted and neither `-b` nor `-B` nor `--detach` used,
-then, as a convenience, the new working tree is associated with a branch
-(call it `<branch>`) named after `$(basename <path>)`. If `<branch>`
-doesn't exist, a new branch based on `HEAD` is automatically created as
-if `-b <branch>` was given. If `<branch>` does exist, it will be
-checked out in the new working tree, if it's not checked out anywhere
-else, otherwise the command will refuse to create the working tree (unless
-`--force` is used).
+then, as a convenience, the new worktree is associated with a branch (call
+it `<branch>`) named after `$(basename <path>)`. If `<branch>` doesn't
+exist, a new branch based on `HEAD` is automatically created as if
+`-b <branch>` was given. If `<branch>` does exist, it will be checked out
+in the new worktree, if it's not checked out anywhere else, otherwise the
+command will refuse to create the worktree (unless `--force` is used).
list::
-List details of each working tree. The main working tree is listed first,
-followed by each of the linked working trees. The output details include
-whether the working tree is bare, the revision currently checked out, the
+List details of each worktree. The main worktree is listed first,
+followed by each of the linked worktrees. The output details include
+whether the worktree is bare, the revision currently checked out, the
branch currently checked out (or "detached HEAD" if none), "locked" if
-the worktree is locked, "prunable" if the worktree can be pruned by `prune`
-command.
+the worktree is locked, "prunable" if the worktree can be pruned by the
+`prune` command.
lock::
-If a working tree is on a portable device or network share which
-is not always mounted, lock it to prevent its administrative
-files from being pruned automatically. This also prevents it from
-being moved or deleted. Optionally, specify a reason for the lock
-with `--reason`.
+If a worktree is on a portable device or network share which is not always
+mounted, lock it to prevent its administrative files from being pruned
+automatically. This also prevents it from being moved or deleted.
+Optionally, specify a reason for the lock with `--reason`.
move::
-Move a working tree to a new location. Note that the main working tree
-or linked working trees containing submodules cannot be moved with this
-command. (The `git worktree repair` command, however, can reestablish
-the connection with linked working trees if you move the main working
-tree manually.)
+Move a worktree to a new location. Note that the main worktree or linked
+worktrees containing submodules cannot be moved with this command. (The
+`git worktree repair` command, however, can reestablish the connection
+with linked worktrees if you move the main worktree manually.)
prune::
-Prune working tree information in `$GIT_DIR/worktrees`.
+Prune worktree information in `$GIT_DIR/worktrees`.
remove::
-Remove a working tree. Only clean working trees (no untracked files
-and no modification in tracked files) can be removed. Unclean working
-trees or ones with submodules can be removed with `--force`. The main
-working tree cannot be removed.
+Remove a worktree. Only clean worktrees (no untracked files and no
+modification in tracked files) can be removed. Unclean worktrees or ones
+with submodules can be removed with `--force`. The main worktree cannot be
+removed.
repair [<path>...]::
-Repair working tree administrative files, if possible, if they have
-become corrupted or outdated due to external factors.
+Repair worktree administrative files, if possible, if they have become
+corrupted or outdated due to external factors.
+
-For instance, if the main working tree (or bare repository) is moved,
-linked working trees will be unable to locate it. Running `repair` in
-the main working tree will reestablish the connection from linked
-working trees back to the main working tree.
+For instance, if the main worktree (or bare repository) is moved, linked
+worktrees will be unable to locate it. Running `repair` in the main
+worktree will reestablish the connection from linked worktrees back to the
+main worktree.
+
-Similarly, if a linked working tree is moved without using `git worktree
-move`, the main working tree (or bare repository) will be unable to
-locate it. Running `repair` within the recently-moved working tree will
-reestablish the connection. If multiple linked working trees are moved,
-running `repair` from any working tree with each tree's new `<path>` as
-an argument, will reestablish the connection to all the specified paths.
+Similarly, if the working tree for a linked worktree is moved without
+using `git worktree move`, the main worktree (or bare repository) will be
+unable to locate it. Running `repair` within the recently-moved worktree
+will reestablish the connection. If multiple linked worktrees are moved,
+running `repair` from any worktree with each tree's new `<path>` as an
+argument, will reestablish the connection to all the specified paths.
+
-If both the main working tree and linked working trees have been moved
-manually, then running `repair` in the main working tree and specifying the
-new `<path>` of each linked working tree will reestablish all connections
-in both directions.
+If both the main worktree and linked worktrees have been moved manually,
+then running `repair` in the main worktree and specifying the new `<path>`
+of each linked worktree will reestablish all connections in both
+directions.
unlock::
-Unlock a working tree, allowing it to be pruned, moved or deleted.
+Unlock a worktree, allowing it to be pruned, moved or deleted.
OPTIONS
-------
-f::
--force::
- By default, `add` refuses to create a new working tree when
+ By default, `add` refuses to create a new worktree when
`<commit-ish>` is a branch name and is already checked out by
- another working tree, or if `<path>` is already assigned to some
- working tree but is missing (for instance, if `<path>` was deleted
+ another worktree, or if `<path>` is already assigned to some
+ worktree but is missing (for instance, if `<path>` was deleted
manually). This option overrides these safeguards. To add a missing but
- locked working tree path, specify `--force` twice.
+ locked worktree path, specify `--force` twice.
+
-`move` refuses to move a locked working tree unless `--force` is specified
-twice. If the destination is already assigned to some other working tree but is
+`move` refuses to move a locked worktree unless `--force` is specified
+twice. If the destination is already assigned to some other worktree but is
missing (for instance, if `<new-path>` was deleted manually), then `--force`
allows the move to proceed; use `--force` twice if the destination is locked.
+
-`remove` refuses to remove an unclean working tree unless `--force` is used.
-To remove a locked working tree, specify `--force` twice.
+`remove` refuses to remove an unclean worktree unless `--force` is used.
+To remove a locked worktree, specify `--force` twice.
-b <new-branch>::
-B <new-branch>::
With `add`, create a new branch named `<new-branch>` starting at
- `<commit-ish>`, and check out `<new-branch>` into the new working tree.
+ `<commit-ish>`, and check out `<new-branch>` into the new worktree.
If `<commit-ish>` is omitted, it defaults to `HEAD`.
By default, `-b` refuses to create a new branch if it already
exists. `-B` overrides this safeguard, resetting `<new-branch>` to
@@ -185,7 +186,7 @@ To remove a locked working tree, specify `--force` twice.
-d::
--detach::
- With `add`, detach `HEAD` in the new working tree. See "DETACHED HEAD"
+ With `add`, detach `HEAD` in the new worktree. See "DETACHED HEAD"
in linkgit:git-checkout[1].
--[no-]checkout::
@@ -211,7 +212,7 @@ This can also be set up as the default behaviour by using the
`--track` in linkgit:git-branch[1] for details.
--lock::
- Keep the working tree locked after creation. This is the
+ Keep the worktree locked after creation. This is the
equivalent of `git worktree lock` after `git worktree add`,
but without a race condition.
@@ -223,7 +224,14 @@ This can also be set up as the default behaviour by using the
--porcelain::
With `list`, output in an easy-to-parse format for scripts.
This format will remain stable across Git versions and regardless of user
- configuration. See below for details.
+ configuration. It is recommended to combine this with `-z`.
+ See below for details.
+
+-z::
+ Terminate each line with a NUL rather than a newline when
+ `--porcelain` is specified with `list`. This makes it possible
+ to parse the output when a worktree path contains a newline
+ character.
-q::
--quiet::
@@ -236,43 +244,42 @@ This can also be set up as the default behaviour by using the
With `list`, output additional information about worktrees (see below).
--expire <time>::
- With `prune`, only expire unused working trees older than `<time>`.
+ With `prune`, only expire unused worktrees older than `<time>`.
+
-With `list`, annotate missing working trees as prunable if they are
-older than `<time>`.
+With `list`, annotate missing worktrees as prunable if they are older than
+`<time>`.
--reason <string>::
- With `lock` or with `add --lock`, an explanation why the working tree is locked.
+ With `lock` or with `add --lock`, an explanation why the worktree
+ is locked.
<worktree>::
- Working trees can be identified by path, either relative or
- absolute.
+ Worktrees can be identified by path, either relative or absolute.
+
-If the last path components in the working tree's path is unique among
-working trees, it can be used to identify a working tree. For example if
-you only have two working trees, at `/abc/def/ghi` and `/abc/def/ggg`,
-then `ghi` or `def/ghi` is enough to point to the former working tree.
+If the last path component in the worktree's path is unique among
+worktrees, it can be used to identify a worktree. For example, if you only
+have two worktrees, at `/abc/def/ghi` and `/abc/def/ggg`, then `ghi` or
+`def/ghi` is enough to point to the former worktree.
REFS
----
-In multiple working trees, some refs may be shared between all working
-trees and some refs are local. One example is `HEAD` which is different for each
-working tree. This section is about the sharing rules and how to access
-refs of one working tree from another.
-
-In general, all pseudo refs are per working tree and all refs starting
-with `refs/` are shared. Pseudo refs are ones like `HEAD` which are
-directly under `$GIT_DIR` instead of inside `$GIT_DIR/refs`. There are
-exceptions, however: refs inside `refs/bisect` and `refs/worktree` are not
-shared.
-
-Refs that are per working tree can still be accessed from another
-working tree via two special paths, `main-worktree` and `worktrees`. The
-former gives access to per-working tree refs of the main working tree,
-while the latter to all linked working trees.
+When using multiple worktrees, some refs are shared between all worktrees,
+but others are specific to an individual worktree. One example is `HEAD`,
+which is different for each worktree. This section is about the sharing
+rules and how to access refs of one worktree from another.
+
+In general, all pseudo refs are per-worktree and all refs starting with
+`refs/` are shared. Pseudo refs are ones like `HEAD` which are directly
+under `$GIT_DIR` instead of inside `$GIT_DIR/refs`. There are exceptions,
+however: refs inside `refs/bisect` and `refs/worktree` are not shared.
+
+Refs that are per-worktree can still be accessed from another worktree via
+two special paths, `main-worktree` and `worktrees`. The former gives
+access to per-worktree refs of the main worktree, while the latter to all
+linked worktrees.
For example, `main-worktree/HEAD` or `main-worktree/refs/bisect/good`
-resolve to the same value as the main working tree's `HEAD` and
+resolve to the same value as the main worktree's `HEAD` and
`refs/bisect/good` respectively. Similarly, `worktrees/foo/HEAD` or
`worktrees/bar/refs/bisect/bad` are the same as
`$GIT_COMMON_DIR/worktrees/foo/HEAD` and
@@ -284,13 +291,13 @@ which will handle refs correctly.
CONFIGURATION FILE
------------------
-By default, the repository `config` file is shared across all working
-trees. If the config variables `core.bare` or `core.worktree` are
-already present in the config file, they will be applied to the main
-working trees only.
+By default, the repository `config` file is shared across all worktrees.
+If the config variables `core.bare` or `core.worktree` are present in the
+common config file and `extensions.worktreeConfig` is disabled, then they
+will be applied to the main worktree only.
-In order to have configuration specific to working trees, you can turn
-on the `worktreeConfig` extension, e.g.:
+In order to have worktree-specific configuration, you can turn on the
+`worktreeConfig` extension, e.g.:
------------
$ git config extensions.worktreeConfig true
@@ -303,40 +310,45 @@ versions will refuse to access repositories with this extension.
Note that in this file, the exception for `core.bare` and `core.worktree`
is gone. If they exist in `$GIT_DIR/config`, you must move
-them to the `config.worktree` of the main working tree. You may also
-take this opportunity to review and move other configuration that you
-do not want to share to all working trees:
+them to the `config.worktree` of the main worktree. You may also take this
+opportunity to review and move other configuration that you do not want to
+share to all worktrees:
+
+ - `core.worktree` should never be shared.
+
+ - `core.bare` should not be shared if the value is `core.bare=true`.
- - `core.worktree` and `core.bare` should never be shared
+ - `core.sparseCheckout` should not be shared, unless you are sure you
+ always use sparse checkout for all worktrees.
- - `core.sparseCheckout` is recommended per working tree, unless you
- are sure you always use sparse checkout for all working trees.
+See the documentation of `extensions.worktreeConfig` in
+linkgit:git-config[1] for more details.
DETAILS
-------
-Each linked working tree has a private sub-directory in the repository's
+Each linked worktree has a private sub-directory in the repository's
`$GIT_DIR/worktrees` directory. The private sub-directory's name is usually
-the base name of the linked working tree's path, possibly appended with a
+the base name of the linked worktree's path, possibly appended with a
number to make it unique. For example, when `$GIT_DIR=/path/main/.git` the
command `git worktree add /path/other/test-next next` creates the linked
-working tree in `/path/other/test-next` and also creates a
+worktree in `/path/other/test-next` and also creates a
`$GIT_DIR/worktrees/test-next` directory (or `$GIT_DIR/worktrees/test-next1`
if `test-next` is already taken).
-Within a linked working tree, `$GIT_DIR` is set to point to this private
+Within a linked worktree, `$GIT_DIR` is set to point to this private
directory (e.g. `/path/main/.git/worktrees/test-next` in the example) and
-`$GIT_COMMON_DIR` is set to point back to the main working tree's `$GIT_DIR`
+`$GIT_COMMON_DIR` is set to point back to the main worktree's `$GIT_DIR`
(e.g. `/path/main/.git`). These settings are made in a `.git` file located at
-the top directory of the linked working tree.
+the top directory of the linked worktree.
Path resolution via `git rev-parse --git-path` uses either
`$GIT_DIR` or `$GIT_COMMON_DIR` depending on the path. For example, in the
-linked working tree `git rev-parse --git-path HEAD` returns
+linked worktree `git rev-parse --git-path HEAD` returns
`/path/main/.git/worktrees/test-next/HEAD` (not
`/path/other/test-next/.git/HEAD` or `/path/main/.git/HEAD`) while `git
rev-parse --git-path refs/heads/master` uses
`$GIT_COMMON_DIR` and returns `/path/main/.git/refs/heads/master`,
-since refs are shared across all working trees, except `refs/bisect` and
+since refs are shared across all worktrees, except `refs/bisect` and
`refs/worktree`.
See linkgit:gitrepository-layout[5] for more information. The rule of
@@ -344,8 +356,8 @@ thumb is do not make any assumption about whether a path belongs to
`$GIT_DIR` or `$GIT_COMMON_DIR` when you need to directly access something
inside `$GIT_DIR`. Use `git rev-parse --git-path` to get the final path.
-If you manually move a linked working tree, you need to update the `gitdir` file
-in the entry's directory. For example, if a linked working tree is moved
+If you manually move a linked worktree, you need to update the `gitdir` file
+in the entry's directory. For example, if a linked worktree is moved
to `/newpath/test-next` and its `.git` file points to
`/path/main/.git/worktrees/test-next`, then update
`/path/main/.git/worktrees/test-next/gitdir` to reference `/newpath/test-next`
@@ -354,10 +366,10 @@ automatically.
To prevent a `$GIT_DIR/worktrees` entry from being pruned (which
can be useful in some situations, such as when the
-entry's working tree is stored on a portable device), use the
+entry's worktree is stored on a portable device), use the
`git worktree lock` command, which adds a file named
`locked` to the entry's directory. The file contains the reason in
-plain text. For example, if a linked working tree's `.git` file points
+plain text. For example, if a linked worktree's `.git` file points
to `/path/main/.git/worktrees/test-next` then a file named
`/path/main/.git/worktrees/test-next/locked` will prevent the
`test-next` entry from being pruned. See
@@ -378,11 +390,11 @@ $ git worktree list
/path/to/other-linked-worktree 1234abc (detached HEAD)
------------
-The command also shows annotations for each working tree, according to its state.
+The command also shows annotations for each worktree, according to its state.
These annotations are:
- * `locked`, if the working tree is locked.
- * `prunable`, if the working tree can be pruned via `git worktree prune`.
+ * `locked`, if the worktree is locked.
+ * `prunable`, if the worktree can be pruned via `git worktree prune`.
------------
$ git worktree list
@@ -400,23 +412,24 @@ $ git worktree list --verbose
/path/to/linked-worktree abcd1234 [master]
/path/to/locked-worktree-no-reason abcd5678 (detached HEAD) locked
/path/to/locked-worktree-with-reason 1234abcd (brancha)
- locked: working tree path is mounted on a portable device
+ locked: worktree path is mounted on a portable device
/path/to/prunable-worktree 5678abc1 (detached HEAD)
prunable: gitdir file points to non-existent location
------------
Note that the annotation is moved to the next line if the additional
information is available, otherwise it stays on the same line as the
-working tree itself.
+worktree itself.
Porcelain Format
~~~~~~~~~~~~~~~~
-The porcelain format has a line per attribute. Attributes are listed with a
+The porcelain format has a line per attribute. If `-z` is given then the lines
+are terminated with NUL rather than a newline. Attributes are listed with a
label and value separated by a single space. Boolean attributes (like `bare`
and `detached`) are listed as a label only, and are present only
if the value is true. Some attributes (like `locked`) can be listed as a label
only or with a value depending upon whether a reason is available. The first
-attribute of a working tree is always `worktree`, an empty line indicates the
+attribute of a worktree is always `worktree`, an empty line indicates the
end of the record. For example:
------------
@@ -449,7 +462,7 @@ prunable gitdir file points to non-existent location
------------
-If the lock reason contains "unusual" characters such as newline, they
+Unless `-z` is used, any "unusual" characters in the lock reason such as newlines
are escaped and the entire reason is quoted as explained for the
configuration variable `core.quotePath` (see linkgit:git-config[1]).
For Example:
@@ -468,7 +481,7 @@ demands that you fix something immediately. You might typically use
linkgit:git-stash[1] to store your changes away temporarily, however, your
working tree is in such a state of disarray (with new, moved, and removed
files, and other bits and pieces strewn around) that you don't want to risk
-disturbing any of it. Instead, you create a temporary linked working tree to
+disturbing any of it. Instead, you create a temporary linked worktree to
make the emergency fix, remove it when done, and then resume your earlier
refactoring session.
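A short sketch of the new `-z` mode described above (the path is an example);
a consuming script would split the porcelain output on NUL bytes rather than
newlines:

------------
$ git worktree add -d ../throwaway
$ git worktree list --porcelain -z
------------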
diff --git a/Documentation/gitattributes.txt b/Documentation/gitattributes.txt
index 83fd4e1..4b36d51 100644
--- a/Documentation/gitattributes.txt
+++ b/Documentation/gitattributes.txt
@@ -160,11 +160,13 @@ unspecified.
^^^^^
This attribute sets a specific line-ending style to be used in the
-working directory. It enables end-of-line conversion without any
-content checks, effectively setting the `text` attribute. Note that
-setting this attribute on paths which are in the index with CRLF line
-endings may make the paths to be considered dirty. Adding the path to
-the index again will normalize the line endings in the index.
+working directory. This attribute has effect only if the `text`
+attribute is set or unspecified, or if it is set to `auto`, the file is
+detected as text, and it is stored with LF endings in the index. Note
+that setting this attribute on paths which are in the index with CRLF
+line endings may cause the paths to be considered dirty unless
+`text=auto` is set. Adding the path to the index again will normalize
+the line endings in the index.
Set to string value "crlf"::
@@ -827,6 +829,8 @@ patterns are available:
- `java` suitable for source code in the Java language.
+- `kotlin` suitable for source code in the Kotlin language.
+
- `markdown` suitable for Markdown documents.
- `matlab` suitable for source code in the MATLAB and Octave languages.
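A small `.gitattributes` sketch tying the revised `eol` description and the
new `kotlin` diff driver together; the patterns are examples only:

------------
*.sh    text eol=lf
*.bat   text eol=crlf
*.kt    diff=kotlin
------------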
diff --git a/Documentation/gitcli.txt b/Documentation/gitcli.txt
index 92e4ba6..1819a5a 100644
--- a/Documentation/gitcli.txt
+++ b/Documentation/gitcli.txt
@@ -19,6 +19,15 @@ Many commands take revisions (most often "commits", but sometimes
"tree-ish", depending on the context and command) and paths as their
arguments. Here are the rules:
+ * Options come first and then args.
+   A subcommand may take dashed options (which may take their own
+   arguments, e.g. "--max-parents 2") and arguments.  You SHOULD
+   give dashed options first and then arguments.  Some commands may
+   accept dashed options after you have already given non-option
+   arguments (which may make the command ambiguous), but you should
+   not rely on it (because eventually we may find a way to fix
+   this ambiguity by enforcing the "options then args" rule); see
+   the example below.
+
* Revisions come first and then paths.
E.g. in `git diff v1.0 v2.0 arch/x86 include/asm-x86`,
`v1.0` and `v2.0` are revisions and `arch/x86` and `include/asm-x86`
@@ -72,24 +81,24 @@ you will.
Here are the rules regarding the "flags" that you should follow when you are
scripting Git:
- * it's preferred to use the non-dashed form of Git commands, which means that
+ * It's preferred to use the non-dashed form of Git commands, which means that
you should prefer `git foo` to `git-foo`.
- * splitting short options to separate words (prefer `git foo -a -b`
+ * Splitting short options to separate words (prefer `git foo -a -b`
to `git foo -ab`, the latter may not even work).
- * when a command-line option takes an argument, use the 'stuck' form. In
+ * When a command-line option takes an argument, use the 'stuck' form. In
other words, write `git foo -oArg` instead of `git foo -o Arg` for short
options, and `git foo --long-opt=Arg` instead of `git foo --long-opt Arg`
for long options. An option that takes optional option-argument must be
written in the 'stuck' form.
- * when you give a revision parameter to a command, make sure the parameter is
+ * When you give a revision parameter to a command, make sure the parameter is
not ambiguous with a name of a file in the work tree. E.g. do not write
`git log -1 HEAD` but write `git log -1 HEAD --`; the former will not work
if you happen to have a file called `HEAD` in the work tree.
- * many commands allow a long option `--option` to be abbreviated
+ * Many commands allow a long option `--option` to be abbreviated
only to their unique prefix (e.g. if there is no other option
whose name begins with `opt`, you may be able to spell `--opt` to
invoke the `--option` flag), but you should fully spell them out
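Putting the "options then args" rule and the 'stuck' form together, a hedged
example (the revision and options are arbitrary): the first command gives
dashed options before the revision argument and uses the stuck form, and the
second appends `--` so that `HEAD` cannot be mistaken for a file:

------------
$ git log --max-parents=2 --oneline v2.35.0..
$ git log -1 HEAD --
------------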
diff --git a/Documentation/githooks.txt b/Documentation/githooks.txt
index b51959f..a16e62b 100644
--- a/Documentation/githooks.txt
+++ b/Documentation/githooks.txt
@@ -698,6 +698,10 @@ and "0" meaning they were not.
Only one parameter should be set to "1" when the hook runs; it should
not be possible for the hook to be run with both parameters set to "1".
+SEE ALSO
+--------
+linkgit:git-hook[1]
+
GIT
---
Part of the linkgit:git[1] suite
diff --git a/Documentation/glossary-content.txt b/Documentation/glossary-content.txt
index c077971..aa2f41f 100644
--- a/Documentation/glossary-content.txt
+++ b/Documentation/glossary-content.txt
@@ -312,7 +312,7 @@ Pathspecs are used on the command line of "git ls-files", "git
ls-tree", "git add", "git grep", "git diff", "git checkout",
and many other commands to
limit the scope of operations to some subset of the tree or
-worktree. See the documentation of each command for whether
+working tree. See the documentation of each command for whether
paths are relative to the current directory or toplevel. The
pathspec syntax is as follows:
+
@@ -446,7 +446,7 @@ exclude;;
interface than the <<def_plumbing,plumbing>>.
[[def_per_worktree_ref]]per-worktree ref::
- Refs that are per-<<def_working_tree,worktree>>, rather than
+ Refs that are per-<<def_worktree,worktree>>, rather than
global. This is presently only <<def_HEAD,HEAD>> and any refs
that start with `refs/bisect/`, but might later include other
unusual refs.
@@ -669,3 +669,12 @@ The most notable example is `HEAD`.
The tree of actual checked out files. The working tree normally
contains the contents of the <<def_HEAD,HEAD>> commit's tree,
plus any local changes that you have made but not yet committed.
+
+[[def_worktree]]worktree::
+ A repository can have zero (i.e. bare repository) or one or
+ more worktrees attached to it. One "worktree" consists of a
+ "working tree" and repository metadata, most of which are
+ shared among other worktrees of a single repository, and
+ some of which are maintained separately per worktree
+ (e.g. the index, HEAD and pseudorefs like MERGE_HEAD,
+ per-worktree refs and per-worktree configuration file).
diff --git a/Documentation/rev-list-options.txt b/Documentation/rev-list-options.txt
index 43a86fa..fd4f4e2 100644
--- a/Documentation/rev-list-options.txt
+++ b/Documentation/rev-list-options.txt
@@ -122,19 +122,27 @@ again. Equivalent forms are `--min-parents=0` (any commit has 0 or more
parents) and `--max-parents=-1` (negative numbers denote no upper limit).
--first-parent::
- Follow only the first parent commit upon seeing a merge
- commit. This option can give a better overview when
- viewing the evolution of a particular topic branch,
- because merges into a topic branch tend to be only about
- adjusting to updated upstream from time to time, and
- this option allows you to ignore the individual commits
- brought in to your history by such a merge.
+ When finding commits to include, follow only the first
+ parent commit upon seeing a merge commit. This option
+ can give a better overview when viewing the evolution of
+ a particular topic branch, because merges into a topic
+ branch tend to be only about adjusting to updated upstream
+ from time to time, and this option allows you to ignore
+ the individual commits brought in to your history by such
+ a merge.
ifdef::git-log[]
+
This option also changes default diff format for merge commits
to `first-parent`, see `--diff-merges=first-parent` for details.
endif::git-log[]
+--exclude-first-parent-only::
+ When finding commits to exclude (with a '{caret}'), follow only
+ the first parent commit upon seeing a merge commit.
+ This can be used to find the set of changes in a topic branch
+ from the point where it diverged from the remote branch, given
+ that arbitrary merges can be valid topic branch changes.
+
--not::
Reverses the meaning of the '{caret}' prefix (or lack thereof)
for all following revision specifiers, up to the next `--not`.
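As a hedged illustration of the new option (the branch names are assumptions),
the commits reachable from `topic` but not via first-parent steps from
`origin/main` could be listed with:

------------
$ git rev-list --exclude-first-parent-only topic ^origin/main
------------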
diff --git a/Documentation/technical/bundle-format.txt b/Documentation/technical/bundle-format.txt
index bac558d..b9be864 100644
--- a/Documentation/technical/bundle-format.txt
+++ b/Documentation/technical/bundle-format.txt
@@ -71,6 +71,11 @@ and the Git bundle v2 format cannot represent a shallow clone repository.
== Capabilities
Because there is no opportunity for negotiation, unknown capabilities cause 'git
-bundle' to abort. The only known capability is `object-format`, which specifies
-the hash algorithm in use, and can take the same values as the
-`extensions.objectFormat` configuration value.
+bundle' to abort.
+
+* `object-format` specifies the hash algorithm in use, and can take the same
+ values as the `extensions.objectFormat` configuration value.
+
+* `filter` specifies an object filter as in the `--filter` option in
+ linkgit:git-rev-list[1]. The resulting pack-file must be marked as a
+ `.promisor` pack-file after it is unbundled.
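+
Hedged sketch only: based on the v3 header syntax in this document, a bundle
advertising these capabilities might begin with the lines below (the
prerequisites, references and packfile that follow are omitted):

------------
# v3 git bundle
@object-format=sha1
@filter=blob:none
------------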
diff --git a/Documentation/technical/commit-graph-format.txt b/Documentation/technical/commit-graph-format.txt
index 87971c2..484b185 100644
--- a/Documentation/technical/commit-graph-format.txt
+++ b/Documentation/technical/commit-graph-format.txt
@@ -93,7 +93,7 @@ CHUNK DATA:
2 bits of the lowest byte, storing the 33rd and 34th bit of the
commit time.
- Generation Data (ID: {'G', 'D', 'A', 'T' }) (N * 4 bytes) [Optional]
+ Generation Data (ID: {'G', 'D', 'A', '2' }) (N * 4 bytes) [Optional]
* This list of 4-byte values store corrected commit date offsets for the
commits, arranged in the same order as commit data chunk.
* If the corrected commit date offset cannot be stored within 31 bits,
@@ -104,7 +104,7 @@ CHUNK DATA:
by compatible versions of Git and in case of split commit-graph chains,
the topmost layer also has Generation Data chunk.
- Generation Data Overflow (ID: {'G', 'D', 'O', 'V' }) [Optional]
+ Generation Data Overflow (ID: {'G', 'D', 'O', '2' }) [Optional]
* This list of 8-byte values stores the corrected commit date offsets
for commits with corrected commit date offsets that cannot be
stored within 31 bits.
@@ -156,3 +156,11 @@ CHUNK DATA:
TRAILER:
H-byte HASH-checksum of all of the above.
+
+== Historical Notes:
+
+The Generation Data (GDA2) and Generation Data Overflow (GDO2) chunks have
+the number '2' in their chunk IDs because a previous version of Git wrote
+possibly erroneous data in these chunks with the IDs "GDAT" and "GDOV". By
+changing the IDs, newer versions of Git will silently ignore those older
+chunks and write the new information without trusting the incorrect data.
diff --git a/Documentation/technical/multi-pack-index.txt b/Documentation/technical/multi-pack-index.txt
index b39c69d..f2221d2 100644
--- a/Documentation/technical/multi-pack-index.txt
+++ b/Documentation/technical/multi-pack-index.txt
@@ -24,6 +24,7 @@ and their offsets into multiple packfiles. It contains:
** An offset within the jth packfile for the object.
* If large offsets are required, we use another list of large
offsets similar to version 2 pack-indexes.
+- An optional list of objects in pseudo-pack order (used with MIDX bitmaps).
Thus, we can provide O(log N) lookup time for any number
of packfiles.
diff --git a/Documentation/technical/pack-format.txt b/Documentation/technical/pack-format.txt
index 8d2f42f..6d3efb7 100644
--- a/Documentation/technical/pack-format.txt
+++ b/Documentation/technical/pack-format.txt
@@ -376,6 +376,11 @@ CHUNK DATA:
[Optional] Object Large Offsets (ID: {'L', 'O', 'F', 'F'})
8-byte offsets into large packfiles.
+ [Optional] Bitmap pack order (ID: {'R', 'I', 'D', 'X'})
+ A list of MIDX positions (one per object in the MIDX, num_objects in
+ total, each a 4-byte unsigned integer in network byte order), sorted
+ according to their relative bitmap/pseudo-pack positions.
+
TRAILER:
Index checksum of the above contents.
@@ -456,9 +461,5 @@ In short, a MIDX's pseudo-pack is the de-duplicated concatenation of
objects in packs stored by the MIDX, laid out in pack order, and the
packs arranged in MIDX order (with the preferred pack coming first).
-Finally, note that the MIDX's reverse index is not stored as a chunk in
-the multi-pack-index itself. This is done because the reverse index
-includes the checksum of the pack or MIDX to which it belongs, which
-makes it impossible to write in the MIDX. To avoid races when rewriting
-the MIDX, a MIDX reverse index includes the MIDX's checksum in its
-filename (e.g., `multi-pack-index-xyz.rev`).
+The MIDX's reverse index is stored in the optional 'RIDX' chunk within
+the MIDX itself.
diff --git a/Documentation/technical/partial-clone.txt b/Documentation/technical/partial-clone.txt
index a0dd7c6..99f0eb3 100644
--- a/Documentation/technical/partial-clone.txt
+++ b/Documentation/technical/partial-clone.txt
@@ -181,6 +181,9 @@ Fetching Missing Objects
currently fetches all objects referred to by the requested objects, even
though they are not necessary.
+- Fetching with `--refetch` will request a complete new filtered packfile from
+ the remote, which can be used to change a filter without needing to
+ dynamically fetch missing objects.
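For instance (the remote name and filter are examples), an existing partial
clone's filter can be changed and a fresh filtered pack requested with:

------------
$ git fetch --refetch --filter=blob:none origin
------------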
Using many promisor remotes
---------------------------
diff --git a/Documentation/technical/reftable.txt b/Documentation/technical/reftable.txt
index d7c3b64..6a67cc4 100644
--- a/Documentation/technical/reftable.txt
+++ b/Documentation/technical/reftable.txt
@@ -443,7 +443,7 @@ Obj block format
Object blocks are optional. Writers may choose to omit object blocks,
especially if readers will not use the object name to ref mapping.
-Object blocks use unique, abbreviated 2-32 object name keys, mapping to
+Object blocks use unique, abbreviated 2-31 byte object name keys, mapping to
ref blocks containing references pointing to that object directly, or as
the peeled value of an annotated tag. Like ref blocks, object blocks use
the file's standard block size. The abbreviation length is available in
diff --git a/GIT-VERSION-GEN b/GIT-VERSION-GEN
index 3a7a9f0..1c5f645 100755
--- a/GIT-VERSION-GEN
+++ b/GIT-VERSION-GEN
@@ -1,7 +1,7 @@
#!/bin/sh
GVF=GIT-VERSION-FILE
-DEF_VER=v2.35.2
+DEF_VER=v2.36.0-rc1
LF='
'
diff --git a/Makefile b/Makefile
index 5580859..f8bccfa 100644
--- a/Makefile
+++ b/Makefile
@@ -1,6 +1,9 @@
# The default target of this Makefile is...
all::
+# Import tree-wide shared Makefile behavior and libraries
+include shared.mak
+
# Define V=1 to have a more verbose compile.
#
# Define SHELL_PATH to a POSIX shell if your /bin/sh is broken.
@@ -234,6 +237,14 @@ all::
# Define NO_TRUSTABLE_FILEMODE if your filesystem may claim to support
# the executable mode bit, but doesn't really do so.
#
+# Define CSPRNG_METHOD to "arc4random" if your system has arc4random and
+# arc4random_buf, "libbsd" if your system has those functions from libbsd,
+# "getrandom" if your system has getrandom, "getentropy" if your system has
+# getentropy, "rtlgenrandom" for RtlGenRandom (Windows only), or "openssl" if
+# you'd want to use the OpenSSL CSPRNG. You may set multiple options with
+# spaces, in which case a suitable option will be chosen. If unset or set to
+# anything else, defaults to using "/dev/urandom".
+#
# Define NEEDS_MODE_TRANSLATION if your OS strays from the typical file type
# bits in mode values (e.g. z/OS defines I_SFMT to 0xFF000000 as opposed to the
# usual 0xF000).
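A hedged build-time example of the new `CSPRNG_METHOD` knob described above;
which methods are usable depends on the platform, and several may be listed so
that a suitable one is chosen:

------------
$ make CSPRNG_METHOD="arc4random getrandom"
------------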
@@ -256,8 +267,6 @@ all::
#
# Define NO_DEFLATE_BOUND if your zlib does not have deflateBound.
#
-# Define NO_UNCOMPRESS2 if your zlib does not have uncompress2.
-#
# Define NO_NORETURN if using buggy versions of gcc 4.6+ and profile feedback,
# as the compiler can crash (http://gcc.gnu.org/bugzilla/show_bug.cgi?id=49299)
#
@@ -405,6 +414,8 @@ all::
#
# Define HAVE_CLOCK_MONOTONIC if your platform has CLOCK_MONOTONIC.
#
+# Define HAVE_SYNC_FILE_RANGE if your platform has sync_file_range.
+#
# Define NEEDS_LIBRT if your platform requires linking with librt (glibc version
# before 2.17) for clock_gettime and CLOCK_MONOTONIC.
#
@@ -464,6 +475,11 @@ all::
# directory, and the JSON compilation database 'compile_commands.json' will be
# created at the root of the repository.
#
+# If your platform supports a built-in fsmonitor backend, set
+# FSMONITOR_DAEMON_BACKEND to the "<name>" of the corresponding
+# `compat/fsmonitor/fsm-listen-<name>.c` that implements the
+# `fsm_listen__*()` routines.
+#
# Define DEVELOPER to enable more compiler warnings. Compiler version
# and family are auto detected, but could be overridden by defining
# COMPILER_FEATURES (see config.mak.dev). You can still set
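A sketch of selecting a built-in fsmonitor backend at build time; the backend
name is an assumption and must match an existing
`compat/fsmonitor/fsm-listen-<name>.c` (for example `win32` on Windows):

------------
$ make FSMONITOR_DAEMON_BACKEND=win32
------------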
@@ -693,6 +709,7 @@ TEST_BUILTINS_OBJS += test-bloom.o
TEST_BUILTINS_OBJS += test-chmtime.o
TEST_BUILTINS_OBJS += test-config.o
TEST_BUILTINS_OBJS += test-crontab.o
+TEST_BUILTINS_OBJS += test-csprng.o
TEST_BUILTINS_OBJS += test-ctype.o
TEST_BUILTINS_OBJS += test-date.o
TEST_BUILTINS_OBJS += test-delta.o
@@ -704,6 +721,7 @@ TEST_BUILTINS_OBJS += test-dump-split-index.o
TEST_BUILTINS_OBJS += test-dump-untracked-cache.o
TEST_BUILTINS_OBJS += test-example-decorate.o
TEST_BUILTINS_OBJS += test-fast-rebase.o
+TEST_BUILTINS_OBJS += test-fsmonitor-client.o
TEST_BUILTINS_OBJS += test-genrandom.o
TEST_BUILTINS_OBJS += test-genzeros.o
TEST_BUILTINS_OBJS += test-getcwd.o
@@ -823,12 +841,33 @@ GENERATED_H += hook-list.h
.PHONY: generated-hdrs
generated-hdrs: $(GENERATED_H)
-LIB_H := $(sort $(patsubst ./%,%,$(shell git ls-files '*.h' ':!t/' ':!Documentation/' 2>/dev/null || \
+## Exhaustive lists of our source files, either dynamically generated,
+## or hardcoded.
+SOURCES_CMD = ( \
+ git ls-files \
+ '*.[hcS]' \
+ '*.sh' \
+ ':!*[tp][0-9][0-9][0-9][0-9]*' \
+ ':!contrib' \
+ 2>/dev/null || \
$(FIND) . \
- -name .git -prune -o \
- -name t -prune -o \
- -name Documentation -prune -o \
- -name '*.h' -print)))
+ \( -name .git -type d -prune \) \
+ -o \( -name '[tp][0-9][0-9][0-9][0-9]*' -prune \) \
+ -o \( -name contrib -type d -prune \) \
+ -o \( -name build -type d -prune \) \
+ -o \( -name 'trash*' -type d -prune \) \
+ -o \( -name '*.[hcS]' -type f -print \) \
+ -o \( -name '*.sh' -type f -print \) \
+ | sed -e 's|^\./||' \
+ )
+FOUND_SOURCE_FILES := $(shell $(SOURCES_CMD))
+
+FOUND_C_SOURCES = $(filter %.c,$(FOUND_SOURCE_FILES))
+FOUND_H_SOURCES = $(filter %.h,$(FOUND_SOURCE_FILES))
+
+COCCI_SOURCES = $(filter-out $(THIRD_PARTY_SOURCES),$(FOUND_C_SOURCES))
+
+LIB_H = $(FOUND_H_SOURCES)
LIB_OBJS += abspath.o
LIB_OBJS += add-interactive.o
@@ -862,6 +901,7 @@ LIB_OBJS += commit-reach.o
LIB_OBJS += commit.o
LIB_OBJS += compat/obstack.o
LIB_OBJS += compat/terminal.o
+LIB_OBJS += compat/zlib-uncompress2.o
LIB_OBJS += config.o
LIB_OBJS += connect.o
LIB_OBJS += connected.o
@@ -899,6 +939,8 @@ LIB_OBJS += fetch-pack.o
LIB_OBJS += fmt-merge-msg.o
LIB_OBJS += fsck.o
LIB_OBJS += fsmonitor.o
+LIB_OBJS += fsmonitor-ipc.o
+LIB_OBJS += fsmonitor-settings.o
LIB_OBJS += gettext.o
LIB_OBJS += gpg-interface.o
LIB_OBJS += graph.o
@@ -981,6 +1023,7 @@ LIB_OBJS += rebase-interactive.o
LIB_OBJS += rebase.o
LIB_OBJS += ref-filter.o
LIB_OBJS += reflog-walk.o
+LIB_OBJS += reflog.o
LIB_OBJS += refs.o
LIB_OBJS += refs/debug.o
LIB_OBJS += refs/files-backend.o
@@ -1104,11 +1147,13 @@ BUILTIN_OBJS += builtin/fmt-merge-msg.o
BUILTIN_OBJS += builtin/for-each-ref.o
BUILTIN_OBJS += builtin/for-each-repo.o
BUILTIN_OBJS += builtin/fsck.o
+BUILTIN_OBJS += builtin/fsmonitor--daemon.o
BUILTIN_OBJS += builtin/gc.o
BUILTIN_OBJS += builtin/get-tar-commit-id.o
BUILTIN_OBJS += builtin/grep.o
BUILTIN_OBJS += builtin/hash-object.o
BUILTIN_OBJS += builtin/help.o
+BUILTIN_OBJS += builtin/hook.o
BUILTIN_OBJS += builtin/index-pack.o
BUILTIN_OBJS += builtin/init-db.o
BUILTIN_OBJS += builtin/interpret-trailers.o
@@ -1194,7 +1239,8 @@ THIRD_PARTY_SOURCES += compat/regex/%
THIRD_PARTY_SOURCES += sha1collisiondetection/%
THIRD_PARTY_SOURCES += sha1dc/%
-GITLIBS = common-main.o $(LIB_FILE) $(XDIFF_LIB) $(REFTABLE_LIB)
+# xdiff and reftable libs may in turn depend on what is in libgit.a
+GITLIBS = common-main.o $(LIB_FILE) $(XDIFF_LIB) $(REFTABLE_LIB) $(LIB_FILE)
EXTLIBS =
GIT_USER_AGENT = git/$(GIT_VERSION)
@@ -1255,10 +1301,6 @@ endif
ALL_CFLAGS = $(DEVELOPER_CFLAGS) $(CPPFLAGS) $(CFLAGS)
ALL_LDFLAGS = $(LDFLAGS)
-comma := ,
-empty :=
-space := $(empty) $(empty)
-
ifdef SANITIZE
SANITIZERS := $(foreach flag,$(subst $(comma),$(space),$(SANITIZE)),$(flag))
BASIC_CFLAGS += -fsanitize=$(SANITIZE) -fno-sanitize-recover=$(SANITIZE)
@@ -1726,11 +1768,6 @@ ifdef NO_DEFLATE_BOUND
BASIC_CFLAGS += -DNO_DEFLATE_BOUND
endif
-ifdef NO_UNCOMPRESS2
- BASIC_CFLAGS += -DNO_UNCOMPRESS2
- REFTABLE_OBJS += compat/zlib-uncompress2.o
-endif
-
ifdef NO_POSIX_GOODIES
BASIC_CFLAGS += -DNO_POSIX_GOODIES
endif
@@ -1892,6 +1929,10 @@ ifdef HAVE_CLOCK_MONOTONIC
BASIC_CFLAGS += -DHAVE_CLOCK_MONOTONIC
endif
+ifdef HAVE_SYNC_FILE_RANGE
+ BASIC_CFLAGS += -DHAVE_SYNC_FILE_RANGE
+endif
+
ifdef NEEDS_LIBRT
EXTLIBS += -lrt
endif
@@ -1908,6 +1949,32 @@ ifdef HAVE_GETDELIM
BASIC_CFLAGS += -DHAVE_GETDELIM
endif
+ifneq ($(findstring arc4random,$(CSPRNG_METHOD)),)
+ BASIC_CFLAGS += -DHAVE_ARC4RANDOM
+endif
+
+ifneq ($(findstring libbsd,$(CSPRNG_METHOD)),)
+ BASIC_CFLAGS += -DHAVE_ARC4RANDOM_LIBBSD
+ EXTLIBS += -lbsd
+endif
+
+ifneq ($(findstring getrandom,$(CSPRNG_METHOD)),)
+ BASIC_CFLAGS += -DHAVE_GETRANDOM
+endif
+
+ifneq ($(findstring getentropy,$(CSPRNG_METHOD)),)
+ BASIC_CFLAGS += -DHAVE_GETENTROPY
+endif
+
+ifneq ($(findstring rtlgenrandom,$(CSPRNG_METHOD)),)
+ BASIC_CFLAGS += -DHAVE_RTLGENRANDOM
+endif
+
+ifneq ($(findstring openssl,$(CSPRNG_METHOD)),)
+ BASIC_CFLAGS += -DHAVE_OPENSSL_CSPRNG
+ EXTLIBS += -lcrypto -lssl
+endif
+
ifneq ($(PROCFS_EXECUTABLE_PATH),)
procfs_executable_path_SQ = $(subst ','\'',$(PROCFS_EXECUTABLE_PATH))
BASIC_CFLAGS += '-DPROCFS_EXECUTABLE_PATH="$(procfs_executable_path_SQ)"'
@@ -1935,6 +2002,11 @@ ifdef NEED_ACCESS_ROOT_HANDLER
COMPAT_OBJS += compat/access.o
endif
+ifdef FSMONITOR_DAEMON_BACKEND
+ COMPAT_CFLAGS += -DHAVE_FSMONITOR_DAEMON_BACKEND
+ COMPAT_OBJS += compat/fsmonitor/fsm-listen-$(FSMONITOR_DAEMON_BACKEND).o
+endif
+
ifeq ($(TCLTK_PATH),)
NO_TCLTK = NoThanks
endif
@@ -1951,39 +2023,6 @@ ifndef PAGER_ENV
PAGER_ENV = LESS=FRX LV=-c
endif
-QUIET_SUBDIR0 = +$(MAKE) -C # space to separate -C and subdir
-QUIET_SUBDIR1 =
-
-ifneq ($(findstring w,$(MAKEFLAGS)),w)
-PRINT_DIR = --no-print-directory
-else # "make -w"
-NO_SUBDIR = :
-endif
-
-ifneq ($(findstring s,$(MAKEFLAGS)),s)
-ifndef V
- QUIET_CC = @echo ' ' CC $@;
- QUIET_AR = @echo ' ' AR $@;
- QUIET_LINK = @echo ' ' LINK $@;
- QUIET_BUILT_IN = @echo ' ' BUILTIN $@;
- QUIET_GEN = @echo ' ' GEN $@;
- QUIET_LNCP = @echo ' ' LN/CP $@;
- QUIET_XGETTEXT = @echo ' ' XGETTEXT $@;
- QUIET_MSGFMT = @echo ' ' MSGFMT $@;
- QUIET_GCOV = @echo ' ' GCOV $@;
- QUIET_SP = @echo ' ' SP $<;
- QUIET_HDR = @echo ' ' HDR $(<:hcc=h);
- QUIET_RC = @echo ' ' RC $@;
- QUIET_SPATCH = @echo ' ' SPATCH $<;
- QUIET_SUBDIR0 = +@subdir=
- QUIET_SUBDIR1 = ;$(NO_SUBDIR) echo ' ' SUBDIR $$subdir; \
- $(MAKE) $(PRINT_DIR) -C $$subdir
- export V
- export QUIET_GEN
- export QUIET_BUILT_IN
-endif
-endif
-
ifdef NO_INSTALL_HARDLINKS
export NO_INSTALL_HARDLINKS
endif
@@ -2164,16 +2203,6 @@ shell_compatibility_test: please_set_SHELL_PATH_to_a_more_modern_shell
strip: $(PROGRAMS) git$X
$(STRIP) $(STRIP_OPTS) $^
-### Flags affecting all rules
-
-# A GNU make extension since gmake 3.72 (released in late 1994) to
-# remove the target of rules if commands in those rules fail. The
-# default is to only do that if make itself receives a signal. Affects
-# all targets, see:
-#
-# info make --index-search=.DELETE_ON_ERROR
-.DELETE_ON_ERROR:
-
### Target-specific flags and dependencies
# The generic compilation pattern rule and automatically
@@ -2536,8 +2565,6 @@ ASM_SRC := $(wildcard $(OBJECTS:o=S))
ASM_OBJ := $(ASM_SRC:S=o)
C_OBJ := $(filter-out $(ASM_OBJ),$(OBJECTS))
-.SUFFIXES:
-
$(C_OBJ): %.o: %.c GIT-CFLAGS $(missing_dep_dirs) $(missing_compdb_dir)
$(QUIET_CC)$(CC) -o $*.o -c $(dep_args) $(compdb_args) $(ALL_CFLAGS) $(EXTRA_CPPFLAGS) $<
$(ASM_OBJ): %.o: %.S GIT-CFLAGS $(missing_dep_dirs) $(missing_compdb_dir)
@@ -2740,7 +2767,8 @@ all:: $(MOFILES)
endif
po/build/locale/%/LC_MESSAGES/git.mo: po/%.po
- $(QUIET_MSGFMT)mkdir -p $(dir $@) && $(MSGFMT) -o $@ $<
+ $(call mkdir_p_parent_template)
+ $(QUIET_MSGFMT)$(MSGFMT) -o $@ $<
LIB_PERL := $(wildcard perl/Git.pm perl/Git/*.pm perl/Git/*/*.pm perl/Git/*/*/*.pm)
LIB_PERL_GEN := $(patsubst perl/%.pm,perl/build/lib/%.pm,$(LIB_PERL))
@@ -2756,35 +2784,16 @@ NO_PERL_CPAN_FALLBACKS_SQ = $(subst ','\'',$(NO_PERL_CPAN_FALLBACKS))
endif
perl/build/lib/%.pm: perl/%.pm GIT-PERL-DEFINES
- $(QUIET_GEN)mkdir -p $(dir $@) && \
+ $(call mkdir_p_parent_template)
+ $(QUIET_GEN) \
sed -e 's|@@LOCALEDIR@@|$(perl_localedir_SQ)|g' \
-e 's|@@NO_GETTEXT@@|$(NO_GETTEXT_SQ)|g' \
-e 's|@@NO_PERL_CPAN_FALLBACKS@@|$(NO_PERL_CPAN_FALLBACKS_SQ)|g' \
< $< > $@
perl/build/man/man3/Git.3pm: perl/Git.pm
- $(QUIET_GEN)mkdir -p $(dir $@) && \
- pod2man $< $@
-
-FIND_SOURCE_FILES = ( \
- git ls-files \
- '*.[hcS]' \
- '*.sh' \
- ':!*[tp][0-9][0-9][0-9][0-9]*' \
- ':!contrib' \
- 2>/dev/null || \
- $(FIND) . \
- \( -name .git -type d -prune \) \
- -o \( -name '[tp][0-9][0-9][0-9][0-9]*' -prune \) \
- -o \( -name contrib -type d -prune \) \
- -o \( -name build -type d -prune \) \
- -o \( -name 'trash*' -type d -prune \) \
- -o \( -name '*.[hcS]' -type f -print \) \
- -o \( -name '*.sh' -type f -print \) \
- | sed -e 's|^\./||' \
- )
-
-FOUND_SOURCE_FILES = $(shell $(FIND_SOURCE_FILES))
+ $(call mkdir_p_parent_template)
+ $(QUIET_GEN)pod2man $< $@
$(ETAGS_TARGET): $(FOUND_SOURCE_FILES)
$(QUIET_GEN)$(RM) $@+ && \
@@ -2854,6 +2863,9 @@ GIT-BUILD-OPTIONS: FORCE
@echo DC_SHA1=\''$(subst ','\'',$(subst ','\'',$(DC_SHA1)))'\' >>$@+
@echo SANITIZE_LEAK=\''$(subst ','\'',$(subst ','\'',$(SANITIZE_LEAK)))'\' >>$@+
@echo X=\'$(X)\' >>$@+
+ifdef FSMONITOR_DAEMON_BACKEND
+ @echo FSMONITOR_DAEMON_BACKEND=\''$(subst ','\'',$(subst ','\'',$(FSMONITOR_DAEMON_BACKEND)))'\' >>$@+
+endif
ifdef TEST_OUTPUT_DIRECTORY
@echo TEST_OUTPUT_DIRECTORY=\''$(subst ','\'',$(subst ','\'',$(TEST_OUTPUT_DIRECTORY)))'\' >>$@+
endif
@@ -2918,7 +2930,7 @@ test_bindir_programs := $(patsubst %,bin-wrappers/%,$(BINDIR_PROGRAMS_NEED_X) $(
all:: $(TEST_PROGRAMS) $(test_bindir_programs)
bin-wrappers/%: wrap-for-bin.sh
- @mkdir -p bin-wrappers
+ $(call mkdir_p_parent_template)
$(QUIET_GEN)sed -e '1s|#!.*/sh|#!$(SHELL_PATH_SQ)|' \
-e 's|@@BUILD_DIR@@|$(shell pwd)|' \
-e 's|@@PROG@@|$(patsubst test-%,t/helper/test-%$(X),$(@F))$(patsubst git%,$(X),$(filter $(@F),$(BINDIR_PROGRAMS_NEED_X)))|' < $< > $@ && \
@@ -2995,9 +3007,6 @@ check: $(GENERATED_H)
exit 1; \
fi
-FOUND_C_SOURCES = $(filter %.c,$(FOUND_SOURCE_FILES))
-COCCI_SOURCES = $(filter-out $(THIRD_PARTY_SOURCES),$(FOUND_C_SOURCES))
-
%.cocci.patch: %.cocci $(COCCI_SOURCES)
$(QUIET_SPATCH) \
if test $(SPATCH_BATCH_SIZE) = 0; then \
diff --git a/README.md b/README.md
index f6f43e7..7ce4f05 100644
--- a/README.md
+++ b/README.md
@@ -32,10 +32,16 @@ installed).
The user discussion and development of Git take place on the Git
mailing list -- everyone is welcome to post bug reports, feature
requests, comments and patches to git@vger.kernel.org (read
-[Documentation/SubmittingPatches][] for instructions on patch submission).
+[Documentation/SubmittingPatches][] for instructions on patch submission
+and [Documentation/CodingGuidelines][]).
+
+Those wishing to help with error message, usage and informational message
+string translations (localization l10n) should see [po/README.md][]
+(a `po` file is a Portable Object file that holds the translations).
+
To subscribe to the list, send an email with just "subscribe git" in
-the body to majordomo@vger.kernel.org. The mailing list archives are
-available at <https://lore.kernel.org/git/>,
+the body to majordomo@vger.kernel.org (not the Git list). The mailing
+list archives are available at <https://lore.kernel.org/git/>,
<http://marc.info/?l=git> and other archival sites.
Issues which are security relevant should be disclosed privately to
@@ -64,3 +70,5 @@ and the name as (depending on your mood):
[Documentation/giteveryday.txt]: Documentation/giteveryday.txt
[Documentation/gitcvs-migration.txt]: Documentation/gitcvs-migration.txt
[Documentation/SubmittingPatches]: Documentation/SubmittingPatches
+[Documentation/CodingGuidelines]: Documentation/CodingGuidelines
+[po/README.md]: po/README.md
diff --git a/RelNotes b/RelNotes
index 286559e..8105226 120000
--- a/RelNotes
+++ b/RelNotes
@@ -1 +1 @@
-Documentation/RelNotes/2.35.2.txt
\ No newline at end of file
+Documentation/RelNotes/2.36.0.txt
\ No newline at end of file
diff --git a/add-interactive.c b/add-interactive.c
index 6498ae1..7247210 100644
--- a/add-interactive.c
+++ b/add-interactive.c
@@ -70,6 +70,8 @@ void init_add_i_state(struct add_i_state *s, struct repository *r)
&s->interactive_diff_algorithm);
git_config_get_bool("interactive.singlekey", &s->use_single_key);
+ if (s->use_single_key)
+ setbuf(stdin, NULL);
}
void clear_add_i_state(struct add_i_state *s)
@@ -797,14 +799,14 @@ static int run_revert(struct add_i_state *s, const struct pathspec *ps,
diffopt.flags.override_submodule_config = 1;
diffopt.repo = s->r;
- if (do_diff_cache(&oid, &diffopt))
+ if (do_diff_cache(&oid, &diffopt)) {
+ diff_free(&diffopt);
res = -1;
- else {
+ } else {
diffcore_std(&diffopt);
diff_flush(&diffopt);
}
free(paths);
- clear_pathspec(&diffopt.pathspec);
if (!res && write_locked_index(s->r->index, &index_lock,
COMMIT_LOCK) < 0)
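
A side note on the interactive.singlekey hunk above; the rationale here is my reading, not a claim about this commit's motivation. With default block buffering the C library may read ahead past the single keystroke being requested, so disabling buffering keeps reads one byte at a time:

    #include <stdio.h>

    static void example_enable_single_key_input(void)
    {
    	/* unbuffered stdin: no stdio look-ahead beyond the current key */
    	setbuf(stdin, NULL);
    }
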
diff --git a/add-patch.c b/add-patch.c
index 573eef0..55d719f 100644
--- a/add-patch.c
+++ b/add-patch.c
@@ -383,6 +383,17 @@ static int is_octal(const char *p, size_t len)
return 1;
}
+static void complete_file(char marker, struct hunk *hunk)
+{
+ if (marker == '-' || marker == '+')
+ /*
+ * Last hunk ended in non-context line (i.e. it
+ * appended lines to the file, so there are no
+ * trailing context lines).
+ */
+ hunk->splittable_into++;
+}
+
static int parse_diff(struct add_p_state *s, const struct pathspec *ps)
{
struct strvec args = STRVEC_INIT;
@@ -472,6 +483,7 @@ static int parse_diff(struct add_p_state *s, const struct pathspec *ps)
eol = pend;
if (starts_with(p, "diff ")) {
+ complete_file(marker, hunk);
ALLOC_GROW_BY(s->file_diff, s->file_diff_nr, 1,
file_diff_alloc);
file_diff = s->file_diff + s->file_diff_nr - 1;
@@ -598,13 +610,7 @@ static int parse_diff(struct add_p_state *s, const struct pathspec *ps)
file_diff->hunk->colored_end = hunk->colored_end;
}
}
-
- if (marker == '-' || marker == '+')
- /*
- * Last hunk ended in non-context line (i.e. it appended lines
- * to the file, so there are no trailing context lines).
- */
- hunk->splittable_into++;
+ complete_file(marker, hunk);
/* non-colored shorter than colored? */
if (colored_p != colored_pend) {
diff --git a/advice.c b/advice.c
index 1dfc91d..6fda9ed 100644
--- a/advice.c
+++ b/advice.c
@@ -39,9 +39,11 @@ static struct {
[ADVICE_ADD_EMPTY_PATHSPEC] = { "addEmptyPathspec", 1 },
[ADVICE_ADD_IGNORED_FILE] = { "addIgnoredFile", 1 },
[ADVICE_AM_WORK_DIR] = { "amWorkDir", 1 },
+ [ADVICE_AMBIGUOUS_FETCH_REFSPEC] = { "ambiguousFetchRefspec", 1 },
[ADVICE_CHECKOUT_AMBIGUOUS_REMOTE_BRANCH_NAME] = { "checkoutAmbiguousRemoteBranchName", 1 },
[ADVICE_COMMIT_BEFORE_MERGE] = { "commitBeforeMerge", 1 },
[ADVICE_DETACHED_HEAD] = { "detachedHead", 1 },
+ [ADVICE_SUGGEST_DETACHING_HEAD] = { "suggestDetachingHead", 1 },
[ADVICE_FETCH_SHOW_FORCED_UPDATES] = { "fetchShowForcedUpdates", 1 },
[ADVICE_GRAFT_FILE_DEPRECATED] = { "graftFileDeprecated", 1 },
[ADVICE_IGNORED_HOOK] = { "ignoredHook", 1 },
@@ -60,7 +62,7 @@ static struct {
[ADVICE_PUSH_NON_FF_MATCHING] = { "pushNonFFMatching", 1 },
[ADVICE_PUSH_UNQUALIFIED_REF_NAME] = { "pushUnqualifiedRefName", 1 },
[ADVICE_PUSH_UPDATE_REJECTED] = { "pushUpdateRejected", 1 },
- [ADVICE_RESET_QUIET_WARNING] = { "resetQuiet", 1 },
+ [ADVICE_RESET_NO_REFRESH_WARNING] = { "resetNoRefresh", 1 },
[ADVICE_RESOLVE_CONFLICT] = { "resolveConflict", 1 },
[ADVICE_RM_HINTS] = { "rmHints", 1 },
[ADVICE_SEQUENCER_IN_USE] = { "sequencerInUse", 1 },
@@ -70,6 +72,7 @@ static struct {
[ADVICE_STATUS_HINTS] = { "statusHints", 1 },
[ADVICE_STATUS_U_OPTION] = { "statusUoption", 1 },
[ADVICE_SUBMODULE_ALTERNATE_ERROR_STRATEGY_DIE] = { "submoduleAlternateErrorStrategyDie", 1 },
+ [ADVICE_SUBMODULES_NOT_UPDATED] = { "submodulesNotUpdated", 1 },
[ADVICE_UPDATE_SPARSE_PATH] = { "updateSparsePath", 1 },
[ADVICE_WAITING_FOR_EDITOR] = { "waitingForEditor", 1 },
};
diff --git a/advice.h b/advice.h
index 601265f..7ddc6cb 100644
--- a/advice.h
+++ b/advice.h
@@ -17,9 +17,11 @@ struct string_list;
ADVICE_ADD_EMPTY_PATHSPEC,
ADVICE_ADD_IGNORED_FILE,
ADVICE_AM_WORK_DIR,
+ ADVICE_AMBIGUOUS_FETCH_REFSPEC,
ADVICE_CHECKOUT_AMBIGUOUS_REMOTE_BRANCH_NAME,
ADVICE_COMMIT_BEFORE_MERGE,
ADVICE_DETACHED_HEAD,
+ ADVICE_SUGGEST_DETACHING_HEAD,
ADVICE_FETCH_SHOW_FORCED_UPDATES,
ADVICE_GRAFT_FILE_DEPRECATED,
ADVICE_IGNORED_HOOK,
@@ -35,7 +37,7 @@ struct string_list;
ADVICE_PUSH_UPDATE_REJECTED_ALIAS,
ADVICE_PUSH_UPDATE_REJECTED,
ADVICE_PUSH_REF_NEEDS_UPDATE,
- ADVICE_RESET_QUIET_WARNING,
+ ADVICE_RESET_NO_REFRESH_WARNING,
ADVICE_RESOLVE_CONFLICT,
ADVICE_RM_HINTS,
ADVICE_SEQUENCER_IN_USE,
@@ -44,6 +46,7 @@ struct string_list;
ADVICE_STATUS_HINTS,
ADVICE_STATUS_U_OPTION,
ADVICE_SUBMODULE_ALTERNATE_ERROR_STRATEGY_DIE,
+ ADVICE_SUBMODULES_NOT_UPDATED,
ADVICE_UPDATE_SPARSE_PATH,
ADVICE_WAITING_FOR_EDITOR,
ADVICE_SKIPPED_CHERRY_PICKS,
diff --git a/apply.c b/apply.c
index 7ffadc3..d19c26d 100644
--- a/apply.c
+++ b/apply.c
@@ -103,7 +103,8 @@ int init_apply_state(struct apply_state *state,
state->linenr = 1;
string_list_init_nodup(&state->fn_table);
string_list_init_nodup(&state->limit_by_name);
- string_list_init_nodup(&state->symlink_changes);
+ strset_init(&state->removed_symlinks);
+ strset_init(&state->kept_symlinks);
strbuf_init(&state->root, 0);
git_apply_config();
@@ -117,7 +118,8 @@ int init_apply_state(struct apply_state *state,
void clear_apply_state(struct apply_state *state)
{
string_list_clear(&state->limit_by_name, 0);
- string_list_clear(&state->symlink_changes, 0);
+ strset_clear(&state->removed_symlinks);
+ strset_clear(&state->kept_symlinks);
strbuf_release(&state->root);
/* &state->fn_table is cleared at the end of apply_patch() */
@@ -217,13 +219,18 @@ static void free_fragment_list(struct fragment *list)
}
}
-static void free_patch(struct patch *patch)
+void release_patch(struct patch *patch)
{
free_fragment_list(patch->fragments);
free(patch->def_name);
free(patch->old_name);
free(patch->new_name);
free(patch->result);
+}
+
+static void free_patch(struct patch *patch)
+{
+ release_patch(patch);
free(patch);
}
@@ -3157,7 +3164,7 @@ static int apply_binary(struct apply_state *state,
* See if the old one matches what the patch
* applies to.
*/
- hash_object_file(the_hash_algo, img->buf, img->len, blob_type,
+ hash_object_file(the_hash_algo, img->buf, img->len, OBJ_BLOB,
&oid);
if (strcmp(oid_to_hex(&oid), patch->old_oid_prefix))
return error(_("the patch applies to '%s' (%s), "
@@ -3203,7 +3210,7 @@ static int apply_binary(struct apply_state *state,
name);
/* verify that the result matches */
- hash_object_file(the_hash_algo, img->buf, img->len, blob_type,
+ hash_object_file(the_hash_algo, img->buf, img->len, OBJ_BLOB,
&oid);
if (strcmp(oid_to_hex(&oid), patch->new_oid_prefix))
return error(_("binary patch to '%s' creates incorrect result (expecting %s, got %s)"),
@@ -3492,7 +3499,7 @@ static int three_way_merge(struct apply_state *state,
{
mmfile_t base_file, our_file, their_file;
mmbuffer_t result = { NULL };
- int status;
+ enum ll_merge_result status;
/* resolve trivial cases first */
if (oideq(base, ours))
@@ -3509,6 +3516,9 @@ static int three_way_merge(struct apply_state *state,
&their_file, "theirs",
state->repo->index,
NULL);
+ if (status == LL_MERGE_BINARY_CONFLICT)
+ warning("Cannot merge binary files: %s (%s vs. %s)",
+ path, "ours", "theirs");
free(base_file.ptr);
free(our_file.ptr);
free(their_file.ptr);
@@ -3589,7 +3599,7 @@ static int try_threeway(struct apply_state *state,
/* Preimage the patch was prepared for */
if (patch->is_new)
- write_object_file("", 0, blob_type, &pre_oid);
+ write_object_file("", 0, OBJ_BLOB, &pre_oid);
else if (get_oid(patch->old_oid_prefix, &pre_oid) ||
read_blob_object(&buf, &pre_oid, patch->old_mode))
return error(_("repository lacks the necessary blob to perform 3-way merge."));
@@ -3605,7 +3615,7 @@ static int try_threeway(struct apply_state *state,
return -1;
}
/* post_oid is theirs */
- write_object_file(tmp_image.buf, tmp_image.len, blob_type, &post_oid);
+ write_object_file(tmp_image.buf, tmp_image.len, OBJ_BLOB, &post_oid);
clear_image(&tmp_image);
/* our_oid is ours */
@@ -3618,7 +3628,7 @@ static int try_threeway(struct apply_state *state,
return error(_("cannot read the current contents of '%s'"),
patch->old_name);
}
- write_object_file(tmp_image.buf, tmp_image.len, blob_type, &our_oid);
+ write_object_file(tmp_image.buf, tmp_image.len, OBJ_BLOB, &our_oid);
clear_image(&tmp_image);
/* in-core three-way merge between post and our using pre as base */
@@ -3814,59 +3824,31 @@ static int check_to_create(struct apply_state *state,
return 0;
}
-static uintptr_t register_symlink_changes(struct apply_state *state,
- const char *path,
- uintptr_t what)
-{
- struct string_list_item *ent;
-
- ent = string_list_lookup(&state->symlink_changes, path);
- if (!ent) {
- ent = string_list_insert(&state->symlink_changes, path);
- ent->util = (void *)0;
- }
- ent->util = (void *)(what | ((uintptr_t)ent->util));
- return (uintptr_t)ent->util;
-}
-
-static uintptr_t check_symlink_changes(struct apply_state *state, const char *path)
-{
- struct string_list_item *ent;
-
- ent = string_list_lookup(&state->symlink_changes, path);
- if (!ent)
- return 0;
- return (uintptr_t)ent->util;
-}
-
static void prepare_symlink_changes(struct apply_state *state, struct patch *patch)
{
for ( ; patch; patch = patch->next) {
if ((patch->old_name && S_ISLNK(patch->old_mode)) &&
(patch->is_rename || patch->is_delete))
/* the symlink at patch->old_name is removed */
- register_symlink_changes(state, patch->old_name, APPLY_SYMLINK_GOES_AWAY);
+ strset_add(&state->removed_symlinks, patch->old_name);
if (patch->new_name && S_ISLNK(patch->new_mode))
/* the symlink at patch->new_name is created or remains */
- register_symlink_changes(state, patch->new_name, APPLY_SYMLINK_IN_RESULT);
+ strset_add(&state->kept_symlinks, patch->new_name);
}
}
static int path_is_beyond_symlink_1(struct apply_state *state, struct strbuf *name)
{
do {
- unsigned int change;
-
while (--name->len && name->buf[name->len] != '/')
; /* scan backwards */
if (!name->len)
break;
name->buf[name->len] = '\0';
- change = check_symlink_changes(state, name->buf);
- if (change & APPLY_SYMLINK_IN_RESULT)
+ if (strset_contains(&state->kept_symlinks, name->buf))
return 1;
- if (change & APPLY_SYMLINK_GOES_AWAY)
+ if (strset_contains(&state->removed_symlinks, name->buf))
/*
* This cannot be "return 0", because we may
* see a new one created at a higher level.
@@ -4346,7 +4328,7 @@ static int add_index_file(struct apply_state *state,
}
fill_stat_cache_info(state->repo->index, ce, &st);
}
- if (write_object_file(buf, size, blob_type, &ce->oid) < 0) {
+ if (write_object_file(buf, size, OBJ_BLOB, &ce->oid) < 0) {
discard_cache_entry(ce);
return error(_("unable to create backing store "
"for newly created file %s"), path);
diff --git a/apply.h b/apply.h
index 16202da..b9f18ce 100644
--- a/apply.h
+++ b/apply.h
@@ -4,6 +4,7 @@
#include "hash.h"
#include "lockfile.h"
#include "string-list.h"
+#include "strmap.h"
struct repository;
@@ -25,20 +26,6 @@ enum apply_verbosity {
verbosity_verbose = 1
};
-/*
- * We need to keep track of how symlinks in the preimage are
- * manipulated by the patches. A patch to add a/b/c where a/b
- * is a symlink should not be allowed to affect the directory
- * the symlink points at, but if the same patch removes a/b,
- * it is perfectly fine, as the patch removes a/b to make room
- * to create a directory a/b so that a/b/c can be created.
- *
- * See also "struct string_list symlink_changes" in "struct
- * apply_state".
- */
-#define APPLY_SYMLINK_GOES_AWAY 01
-#define APPLY_SYMLINK_IN_RESULT 02
-
struct apply_state {
const char *prefix;
@@ -86,7 +73,16 @@ struct apply_state {
/* Various "current state" */
int linenr; /* current line number */
- struct string_list symlink_changes; /* we have to track symlinks */
+ /*
+ * We need to keep track of how symlinks in the preimage are
+ * manipulated by the patches. A patch to add a/b/c where a/b
+ * is a symlink should not be allowed to affect the directory
+ * the symlink points at, but if the same patch removes a/b,
+ * it is perfectly fine, as the patch removes a/b to make room
+ * to create a directory a/b so that a/b/c can be created.
+ */
+ struct strset removed_symlinks;
+ struct strset kept_symlinks;
/*
* For "diff-stat" like behaviour, we keep track of the biggest change
@@ -177,6 +173,8 @@ int parse_git_diff_header(struct strbuf *root,
unsigned int size,
struct patch *patch);
+void release_patch(struct patch *patch);
+
/*
* Some aspects of the apply behavior are controlled by the following
* bits in the "options" parameter passed to apply_all_patches().
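
The apply.c/apply.h hunks above replace a string_list whose ->util bits encoded the APPLY_SYMLINK_* flags with two plain string sets. A minimal, function-local sketch of the strset calls involved (paths are placeholders):

    #include "strmap.h"

    static void example_symlink_sets(void)
    {
    	struct strset removed, kept;

    	strset_init(&removed);
    	strset_init(&kept);
    	strset_add(&removed, "a/b");		/* symlink that goes away */
    	strset_add(&kept, "a/b-link");		/* symlink created or kept */
    	if (strset_contains(&kept, "a/b-link"))
    		; /* a path underneath it would be "beyond a symlink" */
    	strset_clear(&removed);
    	strset_clear(&kept);
    }
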
diff --git a/archive-tar.c b/archive-tar.c
index 3c74db1..042feb6 100644
--- a/archive-tar.c
+++ b/archive-tar.c
@@ -461,9 +461,9 @@ static int write_tar_filter_archive(const struct archiver *ar,
}
static struct archiver tar_archiver = {
- "tar",
- write_tar_archive,
- ARCHIVER_REMOTE
+ .name = "tar",
+ .write_archive = write_tar_archive,
+ .flags = ARCHIVER_REMOTE,
};
void init_tar_archiver(void)
diff --git a/archive-zip.c b/archive-zip.c
index 2961e01..9fe43d7 100644
--- a/archive-zip.c
+++ b/archive-zip.c
@@ -9,6 +9,7 @@
#include "object-store.h"
#include "userdiff.h"
#include "xdiff-interface.h"
+#include "date.h"
static int zip_date;
static int zip_time;
@@ -637,9 +638,9 @@ static int write_zip_archive(const struct archiver *ar,
}
static struct archiver zip_archiver = {
- "zip",
- write_zip_archive,
- ARCHIVER_WANT_COMPRESSION_LEVELS|ARCHIVER_REMOTE
+ .name = "zip",
+ .write_archive = write_zip_archive,
+ .flags = ARCHIVER_WANT_COMPRESSION_LEVELS|ARCHIVER_REMOTE,
};
void init_zip_archiver(void)
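
The archiver hunks above switch from positional to designated struct initialization. A generic illustration of the difference, using a hypothetical type rather than the tree's struct archiver:

    struct example_archiver {
    	const char *name;
    	int (*write)(void);
    	unsigned flags;
    };

    static int example_write(void) { return 0; }

    /* positional: meaning depends on member order staying fixed */
    static struct example_archiver a = { "example", example_write, 1 };

    /* designated: each member is named; omitted members are zero-initialized */
    static struct example_archiver b = {
    	.name = "example",
    	.write = example_write,
    	.flags = 1,
    };
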
diff --git a/archive.c b/archive.c
index d571249..e29d0e0 100644
--- a/archive.c
+++ b/archive.c
@@ -12,7 +12,7 @@
static char const * const archive_usage[] = {
N_("git archive [<options>] <tree-ish> [<path>...]"),
- N_("git archive --list"),
+ "git archive --list",
N_("git archive --remote <repo> [--exec <cmd>] [<options>] <tree-ish> [<path>...]"),
N_("git archive --remote <repo> [--exec <cmd>] --list"),
NULL
diff --git a/attr.c b/attr.c
index 79adaa5..21e4ad2 100644
--- a/attr.c
+++ b/attr.c
@@ -14,7 +14,6 @@
#include "utf8.h"
#include "quote.h"
#include "thread-utils.h"
-#include "dir.h"
const char git_attr__true[] = "(builtin)true";
const char git_attr__false[] = "\0(builtin)false";
@@ -80,7 +79,7 @@ static int attr_hash_entry_cmp(const void *unused_cmp_data,
* Access to this dictionary must be surrounded with a mutex.
*/
static struct attr_hashmap g_attr_hashmap = {
- HASHMAP_INIT(attr_hash_entry_cmp, NULL)
+ .map = HASHMAP_INIT(attr_hash_entry_cmp, NULL),
};
/*
diff --git a/attr.h b/attr.h
index 3732505..3fb40cc 100644
--- a/attr.h
+++ b/attr.h
@@ -121,7 +121,6 @@ struct git_attr;
/* opaque structures used internally for attribute collection */
struct all_attrs_item;
struct attr_stack;
-struct index_state;
/*
* Given a string, return the gitattribute object that
diff --git a/banned.h b/banned.h
index 7ab4f2e..6ccf46b 100644
--- a/banned.h
+++ b/banned.h
@@ -21,13 +21,8 @@
#undef sprintf
#undef vsprintf
-#ifdef HAVE_VARIADIC_MACROS
#define sprintf(...) BANNED(sprintf)
#define vsprintf(...) BANNED(vsprintf)
-#else
-#define sprintf(buf,fmt,arg) BANNED(sprintf)
-#define vsprintf(buf,fmt,arg) BANNED(vsprintf)
-#endif
#undef gmtime
#define gmtime(t) BANNED(gmtime)
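
With variadic macros assumed to always exist, a single wrapper definition covers any call arity. Roughly how the banned-function trick works (the BANNED() expansion below is a sketch of the pattern, not a quote of this header):

    #define BANNED(func) sorry_##func##_is_a_banned_function
    #undef sprintf
    #define sprintf(...) BANNED(sprintf)

    /* sprintf(buf, "%d", 1);
     *   => a call to the undeclared sorry_sprintf_is_a_banned_function
     *   => compile (or link) error pointing at the banned call site */
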
diff --git a/bisect.c b/bisect.c
index 888949f..9e6a2b7 100644
--- a/bisect.c
+++ b/bisect.c
@@ -724,7 +724,8 @@ static int is_expected_rev(const struct object_id *oid)
return res;
}
-static enum bisect_error bisect_checkout(const struct object_id *bisect_rev, int no_checkout)
+enum bisect_error bisect_checkout(const struct object_id *bisect_rev,
+ int no_checkout)
{
char bisect_rev_hex[GIT_MAX_HEXSZ + 1];
struct commit *commit;
diff --git a/bisect.h b/bisect.h
index ec24ac2..1015aeb 100644
--- a/bisect.h
+++ b/bisect.h
@@ -3,6 +3,7 @@
struct commit_list;
struct repository;
+struct object_id;
/*
* Find bisection. If something is found, `reaches` will be the number of
@@ -69,4 +70,7 @@ void read_bisect_terms(const char **bad, const char **good);
int bisect_clean_state(void);
+enum bisect_error bisect_checkout(const struct object_id *bisect_rev,
+ int no_checkout);
+
#endif
diff --git a/blame.c b/blame.c
index 206c295..186ad96 100644
--- a/blame.c
+++ b/blame.c
@@ -1403,7 +1403,6 @@ static struct blame_origin *find_origin(struct repository *r,
}
}
diff_flush(&diff_opts);
- clear_pathspec(&diff_opts.pathspec);
return porigin;
}
@@ -1447,7 +1446,6 @@ static struct blame_origin *find_rename(struct repository *r,
}
}
diff_flush(&diff_opts);
- clear_pathspec(&diff_opts.pathspec);
return porigin;
}
@@ -2328,7 +2326,6 @@ static void find_copy_in_parent(struct blame_scoreboard *sb,
} while (unblamed);
target->suspects = reverse_blame(leftover, NULL);
diff_flush(&diff_opts);
- clear_pathspec(&diff_opts.pathspec);
}
/*
@@ -2615,7 +2612,7 @@ void assign_blame(struct blame_scoreboard *sb, int opt)
else {
commit->object.flags |= UNINTERESTING;
if (commit->object.parsed)
- mark_parents_uninteresting(commit);
+ mark_parents_uninteresting(sb->revs, commit);
}
/* treat root commit as boundary */
if (!commit->parents && !sb->show_root)
diff --git a/block-sha1/sha1.c b/block-sha1/sha1.c
index 1bb6e7c..5974cd7 100644
--- a/block-sha1/sha1.c
+++ b/block-sha1/sha1.c
@@ -11,27 +11,10 @@
#include "sha1.h"
-#if defined(__GNUC__) && (defined(__i386__) || defined(__x86_64__))
-
-/*
- * Force usage of rol or ror by selecting the one with the smaller constant.
- * It _can_ generate slightly smaller code (a constant of 1 is special), but
- * perhaps more importantly it's possibly faster on any uarch that does a
- * rotate with a loop.
- */
-
-#define SHA_ASM(op, x, n) ({ unsigned int __res; __asm__(op " %1,%0":"=r" (__res):"i" (n), "0" (x)); __res; })
-#define SHA_ROL(x,n) SHA_ASM("rol", x, n)
-#define SHA_ROR(x,n) SHA_ASM("ror", x, n)
-
-#else
-
#define SHA_ROT(X,l,r) (((X) << (l)) | ((X) >> (r)))
#define SHA_ROL(X,n) SHA_ROT(X,n,32-(n))
#define SHA_ROR(X,n) SHA_ROT(X,32-(n),n)
-#endif
-
/*
* If you have 32 registers or more, the compiler can (and should)
* try to change the array[] accesses into registers. However, on
diff --git a/branch.c b/branch.c
index 5d20a2e..01ecb81 100644
--- a/branch.c
+++ b/branch.c
@@ -8,6 +8,8 @@
#include "sequencer.h"
#include "commit.h"
#include "worktree.h"
+#include "submodule-config.h"
+#include "run-command.h"
struct tracking {
struct refspec_item spec;
@@ -16,17 +18,31 @@ struct tracking {
int matches;
};
+struct find_tracked_branch_cb {
+ struct tracking *tracking;
+ struct string_list ambiguous_remotes;
+};
+
static int find_tracked_branch(struct remote *remote, void *priv)
{
- struct tracking *tracking = priv;
+ struct find_tracked_branch_cb *ftb = priv;
+ struct tracking *tracking = ftb->tracking;
if (!remote_find_tracking(remote, &tracking->spec)) {
- if (++tracking->matches == 1) {
+ switch (++tracking->matches) {
+ case 1:
string_list_append(tracking->srcs, tracking->spec.src);
tracking->remote = remote->name;
- } else {
+ break;
+ case 2:
+ /* there are at least two remotes; backfill the first one */
+ string_list_append(&ftb->ambiguous_remotes, tracking->remote);
+ /* fall through */
+ default:
+ string_list_append(&ftb->ambiguous_remotes, remote->name);
free(tracking->spec.src);
string_list_clear(tracking->srcs, 0);
+ break;
}
tracking->spec.src = NULL;
}
@@ -218,9 +234,11 @@ static int inherit_tracking(struct tracking *tracking, const char *orig_ref)
}
/*
- * This is called when new_ref is branched off of orig_ref, and tries
- * to infer the settings for branch.<new_ref>.{remote,merge} from the
- * config.
+ * Used internally to set the branch.<new_ref>.{remote,merge} config
+ * settings so that branch 'new_ref' tracks 'orig_ref'. Unlike
+ * dwim_and_setup_tracking(), this does not do DWIM, i.e. "origin/main"
+ * will not be expanded to "refs/remotes/origin/main", so it is not safe
+ * for 'orig_ref' to be raw user input.
*/
static void setup_tracking(const char *new_ref, const char *orig_ref,
enum branch_track track, int quiet)
@@ -228,14 +246,21 @@ static void setup_tracking(const char *new_ref, const char *orig_ref,
struct tracking tracking;
struct string_list tracking_srcs = STRING_LIST_INIT_DUP;
int config_flags = quiet ? 0 : BRANCH_CONFIG_VERBOSE;
+ struct find_tracked_branch_cb ftb_cb = {
+ .tracking = &tracking,
+ .ambiguous_remotes = STRING_LIST_INIT_DUP,
+ };
+
+ if (!track)
+ BUG("asked to set up tracking, but tracking is disallowed");
memset(&tracking, 0, sizeof(tracking));
tracking.spec.dst = (char *)orig_ref;
tracking.srcs = &tracking_srcs;
if (track != BRANCH_TRACK_INHERIT)
- for_each_remote(find_tracked_branch, &tracking);
+ for_each_remote(find_tracked_branch, &ftb_cb);
else if (inherit_tracking(&tracking, orig_ref))
- return;
+ goto cleanup;
if (!tracking.matches)
switch (track) {
@@ -245,20 +270,52 @@ static void setup_tracking(const char *new_ref, const char *orig_ref,
case BRANCH_TRACK_INHERIT:
break;
default:
- return;
+ goto cleanup;
}
- if (tracking.matches > 1)
- die(_("not tracking: ambiguous information for ref %s"),
- orig_ref);
+ if (tracking.matches > 1) {
+ int status = die_message(_("not tracking: ambiguous information for ref '%s'"),
+ orig_ref);
+ if (advice_enabled(ADVICE_AMBIGUOUS_FETCH_REFSPEC)) {
+ struct strbuf remotes_advice = STRBUF_INIT;
+ struct string_list_item *item;
+
+ for_each_string_list_item(item, &ftb_cb.ambiguous_remotes)
+ /*
+ * TRANSLATORS: This is a line listing a remote with duplicate
+ * refspecs in the advice message below. For RTL languages you'll
+ * probably want to swap the "%s" and leading " " space around.
+ */
+ strbuf_addf(&remotes_advice, _(" %s\n"), item->string);
+
+ /*
+ * TRANSLATORS: The second argument is a \n-delimited list of
+ * duplicate refspecs, composed above.
+ */
+ advise(_("There are multiple remotes whose fetch refspecs map to the remote\n"
+ "tracking ref '%s':\n"
+ "%s"
+ "\n"
+ "This is typically a configuration error.\n"
+ "\n"
+ "To support setting up tracking branches, ensure that\n"
+ "different remotes' fetch refspecs map into different\n"
+ "tracking namespaces."), orig_ref,
+ remotes_advice.buf);
+ strbuf_release(&remotes_advice);
+ }
+ exit(status);
+ }
if (tracking.srcs->nr < 1)
string_list_append(tracking.srcs, orig_ref);
if (install_branch_config_multiple_remotes(config_flags, new_ref,
tracking.remote, tracking.srcs) < 0)
- exit(-1);
+ exit(1);
- string_list_clear(tracking.srcs, 0);
+cleanup:
+ string_list_clear(&tracking_srcs, 0);
+ string_list_clear(&ftb_cb.ambiguous_remotes, 0);
}
int read_branch_desc(struct strbuf *buf, const char *branch_name)
@@ -346,40 +403,44 @@ N_("\n"
"will track its remote counterpart, you may want to use\n"
"\"git push -u\" to set the upstream config as you push.");
-void create_branch(struct repository *r,
- const char *name, const char *start_name,
- int force, int clobber_head_ok, int reflog,
- int quiet, enum branch_track track)
+/**
+ * DWIMs a user-provided ref to determine the starting point for a
+ * branch and validates it, where:
+ *
+ * - r is the repository to validate the branch for
+ *
+ * - start_name is the ref that we would like to test. This is
+ * expanded with DWIM and assigned to out_real_ref.
+ *
+ * - track is the tracking mode of the new branch. If tracking is
+ * explicitly requested, start_name must be a branch (because
+ * otherwise start_name cannot be tracked)
+ *
+ * - out_oid is an out parameter containing the object_id of start_name
+ *
+ * - out_real_ref is an out parameter containing the full, 'real' form
+ * of start_name e.g. refs/heads/main instead of main
+ *
+ */
+static void dwim_branch_start(struct repository *r, const char *start_name,
+ enum branch_track track, char **out_real_ref,
+ struct object_id *out_oid)
{
struct commit *commit;
struct object_id oid;
char *real_ref;
- struct strbuf ref = STRBUF_INIT;
- int forcing = 0;
- int dont_change_ref = 0;
int explicit_tracking = 0;
if (track == BRANCH_TRACK_EXPLICIT || track == BRANCH_TRACK_OVERRIDE)
explicit_tracking = 1;
- if ((track == BRANCH_TRACK_OVERRIDE || clobber_head_ok)
- ? validate_branchname(name, &ref)
- : validate_new_branchname(name, &ref, force)) {
- if (!force)
- dont_change_ref = 1;
- else
- forcing = 1;
- }
-
real_ref = NULL;
if (get_oid_mb(start_name, &oid)) {
if (explicit_tracking) {
- if (advice_enabled(ADVICE_SET_UPSTREAM_FAILURE)) {
- error(_(upstream_missing), start_name);
- advise(_(upstream_advice));
- exit(1);
- }
- die(_(upstream_missing), start_name);
+ int code = die_message(_(upstream_missing), start_name);
+ advise_if_enabled(ADVICE_SET_UPSTREAM_FAILURE,
+ _(upstream_advice));
+ exit(code);
}
die(_("not a valid object name: '%s'"), start_name);
}
@@ -407,40 +468,240 @@ void create_branch(struct repository *r,
if ((commit = lookup_commit_reference(r, &oid)) == NULL)
die(_("not a valid branch point: '%s'"), start_name);
- oidcpy(&oid, &commit->object.oid);
+ if (out_real_ref) {
+ *out_real_ref = real_ref;
+ real_ref = NULL;
+ }
+ if (out_oid)
+ oidcpy(out_oid, &commit->object.oid);
+
+ FREE_AND_NULL(real_ref);
+}
+
+void create_branch(struct repository *r,
+ const char *name, const char *start_name,
+ int force, int clobber_head_ok, int reflog,
+ int quiet, enum branch_track track, int dry_run)
+{
+ struct object_id oid;
+ char *real_ref;
+ struct strbuf ref = STRBUF_INIT;
+ int forcing = 0;
+ struct ref_transaction *transaction;
+ struct strbuf err = STRBUF_INIT;
+ char *msg;
+
+ if (track == BRANCH_TRACK_OVERRIDE)
+ BUG("'track' cannot be BRANCH_TRACK_OVERRIDE. Did you mean to call dwim_and_setup_tracking()?");
+ if (clobber_head_ok && !force)
+ BUG("'clobber_head_ok' can only be used with 'force'");
+
+ if (clobber_head_ok ?
+ validate_branchname(name, &ref) :
+ validate_new_branchname(name, &ref, force)) {
+ forcing = 1;
+ }
+
+ dwim_branch_start(r, start_name, track, &real_ref, &oid);
+ if (dry_run)
+ goto cleanup;
if (reflog)
log_all_ref_updates = LOG_REFS_NORMAL;
- if (!dont_change_ref) {
- struct ref_transaction *transaction;
- struct strbuf err = STRBUF_INIT;
- char *msg;
-
- if (forcing)
- msg = xstrfmt("branch: Reset to %s", start_name);
- else
- msg = xstrfmt("branch: Created from %s", start_name);
-
- transaction = ref_transaction_begin(&err);
- if (!transaction ||
- ref_transaction_update(transaction, ref.buf,
- &oid, forcing ? NULL : null_oid(),
- 0, msg, &err) ||
- ref_transaction_commit(transaction, &err))
- die("%s", err.buf);
- ref_transaction_free(transaction);
- strbuf_release(&err);
- free(msg);
- }
+ if (forcing)
+ msg = xstrfmt("branch: Reset to %s", start_name);
+ else
+ msg = xstrfmt("branch: Created from %s", start_name);
+ transaction = ref_transaction_begin(&err);
+ if (!transaction ||
+ ref_transaction_update(transaction, ref.buf,
+ &oid, forcing ? NULL : null_oid(),
+ 0, msg, &err) ||
+ ref_transaction_commit(transaction, &err))
+ die("%s", err.buf);
+ ref_transaction_free(transaction);
+ strbuf_release(&err);
+ free(msg);
if (real_ref && track)
setup_tracking(ref.buf + 11, real_ref, track, quiet);
+cleanup:
strbuf_release(&ref);
free(real_ref);
}
+void dwim_and_setup_tracking(struct repository *r, const char *new_ref,
+ const char *orig_ref, enum branch_track track,
+ int quiet)
+{
+ char *real_orig_ref;
+ dwim_branch_start(r, orig_ref, track, &real_orig_ref, NULL);
+ setup_tracking(new_ref, real_orig_ref, track, quiet);
+}
+
+/**
+ * Creates a branch in a submodule by calling
+ * create_branches_recursively() in a child process. The child process
+ * is necessary because install_branch_config_multiple_remotes() (which
+ * is called by setup_tracking()) does not support writing configs to
+ * submodules.
+ */
+static int submodule_create_branch(struct repository *r,
+ const struct submodule *submodule,
+ const char *name, const char *start_oid,
+ const char *tracking_name, int force,
+ int reflog, int quiet,
+ enum branch_track track, int dry_run)
+{
+ int ret = 0;
+ struct child_process child = CHILD_PROCESS_INIT;
+ struct strbuf child_err = STRBUF_INIT;
+ struct strbuf out_buf = STRBUF_INIT;
+ char *out_prefix = xstrfmt("submodule '%s': ", submodule->name);
+ child.git_cmd = 1;
+ child.err = -1;
+ child.stdout_to_stderr = 1;
+
+ prepare_other_repo_env(&child.env_array, r->gitdir);
+ /*
+ * submodule_create_branch() is indirectly invoked by "git
+ * branch", but we cannot invoke "git branch" in the child
+ * process. "git branch" accepts a branch name and start point,
+ * where the start point is assumed to provide both the OID
+ * (start_oid) and the branch to use for tracking
+ * (tracking_name). But when recursing through submodules,
+ * start_oid and tracking name need to be specified separately
+ * (see create_branches_recursively()).
+ */
+ strvec_pushl(&child.args, "submodule--helper", "create-branch", NULL);
+ if (dry_run)
+ strvec_push(&child.args, "--dry-run");
+ if (force)
+ strvec_push(&child.args, "--force");
+ if (quiet)
+ strvec_push(&child.args, "--quiet");
+ if (reflog)
+ strvec_push(&child.args, "--create-reflog");
+
+ switch (track) {
+ case BRANCH_TRACK_NEVER:
+ strvec_push(&child.args, "--no-track");
+ break;
+ case BRANCH_TRACK_ALWAYS:
+ case BRANCH_TRACK_EXPLICIT:
+ strvec_push(&child.args, "--track=direct");
+ break;
+ case BRANCH_TRACK_OVERRIDE:
+ BUG("BRANCH_TRACK_OVERRIDE cannot be used when creating a branch.");
+ break;
+ case BRANCH_TRACK_INHERIT:
+ strvec_push(&child.args, "--track=inherit");
+ break;
+ case BRANCH_TRACK_UNSPECIFIED:
+ /* Default for "git checkout". Do not pass --track. */
+ case BRANCH_TRACK_REMOTE:
+ /* Default for "git branch". Do not pass --track. */
+ break;
+ }
+
+ strvec_pushl(&child.args, name, start_oid, tracking_name, NULL);
+
+ if ((ret = start_command(&child)))
+ return ret;
+ ret = finish_command(&child);
+ strbuf_read(&child_err, child.err, 0);
+ strbuf_add_lines(&out_buf, out_prefix, child_err.buf, child_err.len);
+
+ if (ret)
+ fprintf(stderr, "%s", out_buf.buf);
+ else
+ printf("%s", out_buf.buf);
+
+ strbuf_release(&child_err);
+ strbuf_release(&out_buf);
+ return ret;
+}
+
+void create_branches_recursively(struct repository *r, const char *name,
+ const char *start_commitish,
+ const char *tracking_name, int force,
+ int reflog, int quiet, enum branch_track track,
+ int dry_run)
+{
+ int i = 0;
+ char *branch_point = NULL;
+ struct object_id super_oid;
+ struct submodule_entry_list submodule_entry_list;
+
+ /* Perform dwim on start_commitish to get super_oid and branch_point. */
+ dwim_branch_start(r, start_commitish, BRANCH_TRACK_NEVER,
+ &branch_point, &super_oid);
+
+ /*
+ * If we were not given an explicit name to track, then assume we are at
+ * the top level and, just like the non-recursive case, the tracking
+ * name is the branch point.
+ */
+ if (!tracking_name)
+ tracking_name = branch_point;
+
+ submodules_of_tree(r, &super_oid, &submodule_entry_list);
+ /*
+ * Before creating any branches, first check that the branch can
+ * be created in every submodule.
+ */
+ for (i = 0; i < submodule_entry_list.entry_nr; i++) {
+ if (submodule_entry_list.entries[i].repo == NULL) {
+ int code = die_message(
+ _("submodule '%s': unable to find submodule"),
+ submodule_entry_list.entries[i].submodule->name);
+ if (advice_enabled(ADVICE_SUBMODULES_NOT_UPDATED))
+ advise(_("You may try updating the submodules using 'git checkout %s && git submodule update --init'"),
+ start_commitish);
+ exit(code);
+ }
+
+ if (submodule_create_branch(
+ submodule_entry_list.entries[i].repo,
+ submodule_entry_list.entries[i].submodule, name,
+ oid_to_hex(&submodule_entry_list.entries[i]
+ .name_entry->oid),
+ tracking_name, force, reflog, quiet, track, 1))
+ die(_("submodule '%s': cannot create branch '%s'"),
+ submodule_entry_list.entries[i].submodule->name,
+ name);
+ }
+
+ create_branch(the_repository, name, start_commitish, force, 0, reflog, quiet,
+ BRANCH_TRACK_NEVER, dry_run);
+ if (dry_run)
+ return;
+ /*
+ * NEEDSWORK If tracking was set up in the superproject but not the
+ * submodule, users might expect "git branch --recurse-submodules" to
+ * fail or give a warning, but this is not yet implemented because it is
+ * tedious to determine whether or not tracking was set up in the
+ * superproject.
+ */
+ if (track)
+ setup_tracking(name, tracking_name, track, quiet);
+
+ for (i = 0; i < submodule_entry_list.entry_nr; i++) {
+ if (submodule_create_branch(
+ submodule_entry_list.entries[i].repo,
+ submodule_entry_list.entries[i].submodule, name,
+ oid_to_hex(&submodule_entry_list.entries[i]
+ .name_entry->oid),
+ tracking_name, force, reflog, quiet, track, 0))
+ die(_("submodule '%s': cannot create branch '%s'"),
+ submodule_entry_list.entries[i].submodule->name,
+ name);
+ repo_clear(submodule_entry_list.entries[i].repo);
+ }
+}
+
void remove_merge_branch_state(struct repository *r)
{
unlink(git_path_merge_head(r));
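
submodule_create_branch() above follows the usual run-command pattern for spawning "git" in another repository's context. A trimmed sketch of just that pattern (error handling and output capture omitted; the "example_" name is invented):

    #include "run-command.h"
    #include "submodule.h"	/* prepare_other_repo_env() */

    static int example_spawn_in_repo(const char *gitdir)
    {
    	struct child_process child = CHILD_PROCESS_INIT;

    	child.git_cmd = 1;	/* run a git subcommand, not an arbitrary binary */
    	prepare_other_repo_env(&child.env_array, gitdir);
    	strvec_pushl(&child.args, "submodule--helper", "create-branch", NULL);
    	if (start_command(&child))
    		return -1;
    	return finish_command(&child);	/* child's exit status */
    }
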
diff --git a/branch.h b/branch.h
index 815dcd4..04df2aa5 100644
--- a/branch.h
+++ b/branch.h
@@ -18,6 +18,28 @@ extern enum branch_track git_branch_track;
/* Functions for acting on the information about branches. */
+/**
+ * Sets branch.<new_ref>.{remote,merge} config settings such that
+ * new_ref tracks orig_ref according to the specified tracking mode.
+ *
+ * - new_ref is the name of the branch that we are setting tracking
+ * for.
+ *
+ * - orig_ref is the name of the ref that is 'upstream' of new_ref.
+ * orig_ref will be expanded with DWIM so that the config settings
+ * are in the correct format e.g. "refs/remotes/origin/main" instead
+ * of "origin/main".
+ *
+ * - track is the tracking mode e.g. BRANCH_TRACK_REMOTE causes
+ * new_ref to track orig_ref directly, whereas BRANCH_TRACK_INHERIT
+ * causes new_ref to track whatever orig_ref tracks.
+ *
+ * - quiet suppresses tracking information.
+ */
+void dwim_and_setup_tracking(struct repository *r, const char *new_ref,
+ const char *orig_ref, enum branch_track track,
+ int quiet);
+
/*
* Creates a new branch, where:
*
@@ -30,8 +52,8 @@ extern enum branch_track git_branch_track;
*
* - force enables overwriting an existing (non-head) branch
*
- * - clobber_head_ok allows the currently checked out (hence existing)
- * branch to be overwritten; without 'force', it has no effect.
+ * - clobber_head_ok, when enabled with 'force', allows the currently
+ * checked out (head) branch to be overwritten
*
* - reflog creates a reflog for the branch
*
@@ -40,13 +62,45 @@ extern enum branch_track git_branch_track;
* - track causes the new branch to be configured to merge the remote branch
* that start_name is a tracking branch for (if any).
*
+ * - dry_run causes the branch to be validated but not created.
+ *
*/
void create_branch(struct repository *r,
const char *name, const char *start_name,
int force, int clobber_head_ok,
- int reflog, int quiet, enum branch_track track);
+ int reflog, int quiet, enum branch_track track,
+ int dry_run);
/*
+ * Creates a new branch in a repository and its submodules (and its
+ * submodules, recursively). The parameters are mostly analogous to
+ * those of create_branch() except for start_name, which is represented
+ * by two different parameters:
+ *
+ * - start_commitish is the commit-ish, in repository r, that determines
+ * which commits the branches will point to. The superproject branch
+ * will point to the commit of start_commitish and the submodule
+ * branches will point to the gitlink commit oids in start_commitish's
+ * tree.
+ *
+ * - tracking_name is the name of the ref, in repository r, that will be
+ * used to set up tracking information. This value is propagated to
+ * all submodules, which will evaluate the ref using their own ref
+ * stores. If NULL, this defaults to start_commitish.
+ *
+ * When this function is called on the superproject, start_commitish
+ * can be any user-provided ref and tracking_name can be NULL (similar
+ * to create_branch()). But when recursing through submodules,
+ * start_commitish is the plain gitlink commit oid. Since the oid cannot
+ * be used for tracking information, tracking_name is propagated and
+ * used for tracking instead.
+ */
+void create_branches_recursively(struct repository *r, const char *name,
+ const char *start_commitish,
+ const char *tracking_name, int force,
+ int reflog, int quiet, enum branch_track track,
+ int dry_run);
+/*
* Check if 'name' can be a valid name for a branch; die otherwise.
* Return 1 if the named branch already exists; return 0 otherwise.
* Fill ref with the full refname for the branch.
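
Going by the header comments above, a superproject-level call would look something like the following; branch name, start point, and tracking mode are chosen purely for illustration:

    /* create "topic" at origin/main in the superproject and, at the
     * gitlink commits recorded there, in each initialized submodule;
     * tracking_name == NULL falls back to start_commitish */
    create_branches_recursively(the_repository, "topic", "origin/main",
    			    NULL, 0, 0, 0, BRANCH_TRACK_REMOTE, 0);
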
diff --git a/builtin.h b/builtin.h
index 8a58743..40e9ecc 100644
--- a/builtin.h
+++ b/builtin.h
@@ -159,11 +159,13 @@ int cmd_for_each_ref(int argc, const char **argv, const char *prefix);
int cmd_for_each_repo(int argc, const char **argv, const char *prefix);
int cmd_format_patch(int argc, const char **argv, const char *prefix);
int cmd_fsck(int argc, const char **argv, const char *prefix);
+int cmd_fsmonitor__daemon(int argc, const char **argv, const char *prefix);
int cmd_gc(int argc, const char **argv, const char *prefix);
int cmd_get_tar_commit_id(int argc, const char **argv, const char *prefix);
int cmd_grep(int argc, const char **argv, const char *prefix);
int cmd_hash_object(int argc, const char **argv, const char *prefix);
int cmd_help(int argc, const char **argv, const char *prefix);
+int cmd_hook(int argc, const char **argv, const char *prefix);
int cmd_index_pack(int argc, const char **argv, const char *prefix);
int cmd_init_db(int argc, const char **argv, const char *prefix);
int cmd_interpret_trailers(int argc, const char **argv, const char *prefix);
diff --git a/builtin/add.c b/builtin/add.c
index 84dff3e..3ffb86a 100644
--- a/builtin/add.c
+++ b/builtin/add.c
@@ -32,7 +32,6 @@ static int add_renormalize;
static int pathspec_file_nul;
static int include_sparse;
static const char *pathspec_from_file;
-static int legacy_stash_p; /* support for the scripted `git stash` */
struct update_callback_data {
int flags;
@@ -388,8 +387,6 @@ static struct option builtin_add_options[] = {
N_("override the executable bit of the listed files")),
OPT_HIDDEN_BOOL(0, "warn-embedded-repo", &warn_on_embedded_repo,
N_("warn when adding an embedded repository")),
- OPT_HIDDEN_BOOL(0, "legacy-stash-p", &legacy_stash_p,
- N_("backend for `git stash -p`")),
OPT_PATHSPEC_FROM_FILE(&pathspec_from_file),
OPT_PATHSPEC_FILE_NUL(&pathspec_file_nul),
OPT_END(),
@@ -512,17 +509,6 @@ int cmd_add(int argc, const char **argv, const char *prefix)
die(_("options '%s' and '%s' cannot be used together"), "--pathspec-from-file", "--interactive/--patch");
exit(interactive_add(argv + 1, prefix, patch_interactive));
}
- if (legacy_stash_p) {
- struct pathspec pathspec;
-
- parse_pathspec(&pathspec, 0,
- PATHSPEC_PREFER_FULL |
- PATHSPEC_SYMLINK_LEADING_PATH |
- PATHSPEC_PREFIX_ORIGIN,
- prefix, argv);
-
- return run_add_interactive(NULL, "--patch=stash", &pathspec);
- }
if (edit_interactive) {
if (pathspec_from_file)
diff --git a/builtin/am.c b/builtin/am.c
index b6be1f1..0f4111b 100644
--- a/builtin/am.c
+++ b/builtin/am.c
@@ -34,6 +34,7 @@
#include "string-list.h"
#include "packfile.h"
#include "repository.h"
+#include "pretty.h"
/**
* Returns the length of the first line of msg.
@@ -199,7 +200,7 @@ static int am_option_parse_empty(const struct option *opt,
else if (!strcmp(arg, "keep"))
*opt_value = KEEP_EMPTY_COMMIT;
else
- return error(_("Invalid value for --empty: %s"), arg);
+ return error(_("invalid value for '%s': '%s'"), "--empty", arg);
return 0;
}
@@ -474,7 +475,7 @@ static int run_applypatch_msg_hook(struct am_state *state)
int ret;
assert(state->msg);
- ret = run_hook_le(NULL, "applypatch-msg", am_path(state, "final-commit"), NULL);
+ ret = run_hooks_l("applypatch-msg", am_path(state, "final-commit"), NULL);
if (!ret) {
FREE_AND_NULL(state->msg);
@@ -1636,7 +1637,7 @@ static void do_commit(const struct am_state *state)
const char *reflog_msg, *author, *committer = NULL;
struct strbuf sb = STRBUF_INIT;
- if (run_hook_le(NULL, "pre-applypatch", NULL))
+ if (run_hooks("pre-applypatch"))
exit(1);
if (write_cache_as_tree(&tree, 0, NULL))
@@ -1688,7 +1689,7 @@ static void do_commit(const struct am_state *state)
fclose(fp);
}
- run_hook_le(NULL, "post-applypatch", NULL);
+ run_hooks("post-applypatch");
strbuf_release(&sb);
}
@@ -2239,7 +2240,8 @@ static int parse_opt_patchformat(const struct option *opt, const char *arg, int
* when you add new options
*/
else
- return error(_("Invalid value for --patch-format: %s"), arg);
+ return error(_("invalid value for '%s': '%s'"),
+ "--patch-format", arg);
return 0;
}
@@ -2282,7 +2284,8 @@ static int parse_opt_show_current_patch(const struct option *opt, const char *ar
break;
}
if (new_value >= ARRAY_SIZE(valid_modes))
- return error(_("Invalid value for --show-current-patch: %s"), arg);
+ return error(_("invalid value for '%s': '%s'"),
+ "--show-current-patch", arg);
}
if (resume->mode == RESUME_SHOW_PATCH && new_value != resume->sub_mode)
diff --git a/builtin/bisect--helper.c b/builtin/bisect--helper.c
index 28a2e6a..8b2b259 100644
--- a/builtin/bisect--helper.c
+++ b/builtin/bisect--helper.c
@@ -22,15 +22,15 @@ static GIT_PATH_FUNC(git_path_bisect_run, "BISECT_RUN")
static const char * const git_bisect_helper_usage[] = {
N_("git bisect--helper --bisect-reset [<commit>]"),
- N_("git bisect--helper --bisect-terms [--term-good | --term-old | --term-bad | --term-new]"),
+ "git bisect--helper --bisect-terms [--term-good | --term-old | --term-bad | --term-new]",
N_("git bisect--helper --bisect-start [--term-{new,bad}=<term> --term-{old,good}=<term>]"
" [--no-checkout] [--first-parent] [<bad> [<good>...]] [--] [<paths>...]"),
- N_("git bisect--helper --bisect-next"),
+ "git bisect--helper --bisect-next",
N_("git bisect--helper --bisect-state (bad|new) [<rev>]"),
N_("git bisect--helper --bisect-state (good|old) [<rev>...]"),
N_("git bisect--helper --bisect-replay <filename>"),
N_("git bisect--helper --bisect-skip [(<rev>|<range>)...]"),
- N_("git bisect--helper --bisect-visualize"),
+ "git bisect--helper --bisect-visualize",
N_("git bisect--helper --bisect-run <cmd>..."),
NULL
};
@@ -1089,14 +1089,52 @@ static int bisect_visualize(struct bisect_terms *terms, const char **argv, int a
return res;
}
+static int get_first_good(const char *refname, const struct object_id *oid,
+ int flag, void *cb_data)
+{
+ oidcpy(cb_data, oid);
+ return 1;
+}
+
+static int verify_good(const struct bisect_terms *terms,
+ const char **quoted_argv)
+{
+ int rc;
+ enum bisect_error res;
+ struct object_id good_rev;
+ struct object_id current_rev;
+ char *good_glob = xstrfmt("%s-*", terms->term_good);
+ int no_checkout = ref_exists("BISECT_HEAD");
+
+ for_each_glob_ref_in(get_first_good, good_glob, "refs/bisect/",
+ &good_rev);
+ free(good_glob);
+
+ if (read_ref(no_checkout ? "BISECT_HEAD" : "HEAD", &current_rev))
+ return -1;
+
+ res = bisect_checkout(&good_rev, no_checkout);
+ if (res != BISECT_OK)
+ return -1;
+
+ printf(_("running %s\n"), quoted_argv[0]);
+ rc = run_command_v_opt(quoted_argv, RUN_USING_SHELL);
+
+ res = bisect_checkout(&current_rev, no_checkout);
+ if (res != BISECT_OK)
+ return -1;
+
+ return rc;
+}
+
static int bisect_run(struct bisect_terms *terms, const char **argv, int argc)
{
int res = BISECT_OK;
struct strbuf command = STRBUF_INIT;
- struct strvec args = STRVEC_INIT;
struct strvec run_args = STRVEC_INIT;
const char *new_state;
int temporary_stdout_fd, saved_stdout;
+ int is_first_run = 1;
if (bisect_next_check(terms, NULL))
return BISECT_FAILED;
@@ -1111,16 +1149,37 @@ static int bisect_run(struct bisect_terms *terms, const char **argv, int argc)
strvec_push(&run_args, command.buf);
while (1) {
- strvec_clear(&args);
-
printf(_("running %s\n"), command.buf);
res = run_command_v_opt(run_args.v, RUN_USING_SHELL);
+ /*
+ * Exit code 126 and 127 can either come from the shell
+ * if it was unable to execute or even find the script,
+ * or from the script itself. Check with a known-good
+ * revision to avoid trashing the bisect run due to a
+ * missing or non-executable script.
+ */
+ if (is_first_run && (res == 126 || res == 127)) {
+ int rc = verify_good(terms, run_args.v);
+ is_first_run = 0;
+ if (rc < 0) {
+ error(_("unable to verify '%s' on good"
+ " revision"), command.buf);
+ res = BISECT_FAILED;
+ break;
+ }
+ if (rc == res) {
+ error(_("bogus exit code %d for good revision"),
+ rc);
+ res = BISECT_FAILED;
+ break;
+ }
+ }
+
if (res < 0 || 128 <= res) {
error(_("bisect run failed: exit code %d from"
" '%s' is < 0 or >= 128"), res, command.buf);
- strbuf_release(&command);
- return res;
+ break;
}
if (res == 125)
@@ -1132,8 +1191,10 @@ static int bisect_run(struct bisect_terms *terms, const char **argv, int argc)
temporary_stdout_fd = open(git_path_bisect_run(), O_CREAT | O_WRONLY | O_TRUNC, 0666);
- if (temporary_stdout_fd < 0)
- return error_errno(_("cannot open file '%s' for writing"), git_path_bisect_run());
+ if (temporary_stdout_fd < 0) {
+ res = error_errno(_("cannot open file '%s' for writing"), git_path_bisect_run());
+ break;
+ }
fflush(stdout);
saved_stdout = dup(1);
@@ -1158,16 +1219,16 @@ static int bisect_run(struct bisect_terms *terms, const char **argv, int argc)
res = BISECT_OK;
} else if (res) {
error(_("bisect run failed: 'git bisect--helper --bisect-state"
- " %s' exited with error code %d"), args.v[0], res);
+ " %s' exited with error code %d"), new_state, res);
} else {
continue;
}
-
- strbuf_release(&command);
- strvec_clear(&args);
- strvec_clear(&run_args);
- return res;
+ break;
}
+
+ strbuf_release(&command);
+ strvec_clear(&run_args);
+ return res;
}
int cmd_bisect__helper(int argc, const char **argv, const char *prefix)
@@ -1209,7 +1270,7 @@ int cmd_bisect__helper(int argc, const char **argv, const char *prefix)
OPT_CMDMODE(0, "bisect-visualize", &cmdmode,
N_("visualize the bisection"), BISECT_VISUALIZE),
OPT_CMDMODE(0, "bisect-run", &cmdmode,
- N_("use <cmd>... to automatically bisect."), BISECT_RUN),
+ N_("use <cmd>... to automatically bisect"), BISECT_RUN),
OPT_BOOL(0, "no-log", &nolog,
N_("no log for BISECT_WRITE")),
OPT_END()
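
The 126/127 handling added above leans on the shell's conventional exit codes. A purely illustrative helper spelling out that convention:

    /* POSIX shell convention: 127 = command not found,
     * 126 = command found but not executable; bisect run additionally
     * rejects anything < 0 or >= 128 as outside the good/bad/skip range. */
    static const char *example_classify_status(int status)
    {
    	if (status == 127)
    		return "command not found";
    	if (status == 126)
    		return "command found but not executable";
    	if (status < 0 || status >= 128)
    		return "outside the range accepted by bisect run";
    	return "ordinary exit code";
    }
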
diff --git a/builtin/blame.c b/builtin/blame.c
index 7fafeac..8d15b68 100644
--- a/builtin/blame.c
+++ b/builtin/blame.c
@@ -721,8 +721,8 @@ static int git_blame_config(const char *var, const char *value, void *cb)
}
if (!strcmp(var, "color.blame.repeatedlines")) {
if (color_parse_mem(value, strlen(value), repeated_meta_color))
- warning(_("invalid color '%s' in color.blame.repeatedLines"),
- value);
+ warning(_("invalid value for '%s': '%s'"),
+ "color.blame.repeatedLines", value);
return 0;
}
if (!strcmp(var, "color.blame.highlightrecent")) {
@@ -739,7 +739,8 @@ static int git_blame_config(const char *var, const char *value, void *cb)
coloring_mode &= ~(OUTPUT_COLOR_LINE |
OUTPUT_SHOW_AGE_WITH_COLOR);
} else {
- warning(_("invalid value for blame.coloring"));
+ warning(_("invalid value for '%s': '%s'"),
+ "blame.coloring", value);
return 0;
}
}
@@ -934,6 +935,7 @@ int cmd_blame(int argc, const char **argv, const char *prefix)
parse_revision_opt(&revs, &ctx, options, blame_opt_usage);
}
parse_done:
+ revision_opts_finish(&revs);
no_whole_file_rename = !revs.diffopt.flags.follow_renames;
xdl_opts |= revs.diffopt.xdl_opts & XDF_INDENT_HEURISTIC;
revs.diffopt.flags.follow_renames = 0;
diff --git a/builtin/branch.c b/builtin/branch.c
index 4ce2a24..5d00d0b 100644
--- a/builtin/branch.c
+++ b/builtin/branch.c
@@ -27,7 +27,8 @@
static const char * const builtin_branch_usage[] = {
N_("git branch [<options>] [-r | -a] [--merged] [--no-merged]"),
- N_("git branch [<options>] [-l] [-f] <branch-name> [<start-point>]"),
+ N_("git branch [<options>] [-f] [--recurse-submodules] <branch-name> [<start-point>]"),
+ N_("git branch [<options>] [-l] [<pattern>...]"),
N_("git branch [<options>] [-r] (-d | -D) <branch-name>..."),
N_("git branch [<options>] (-m | -M) [<old-branch>] <new-branch>"),
N_("git branch [<options>] (-c | -C) [<old-branch>] <new-branch>"),
@@ -38,6 +39,8 @@ static const char * const builtin_branch_usage[] = {
static const char *head;
static struct object_id head_oid;
+static int recurse_submodules = 0;
+static int submodule_propagate_branches = 0;
static int branch_use_color = -1;
static char branch_colors[][COLOR_MAXLEN] = {
@@ -99,6 +102,15 @@ static int git_branch_config(const char *var, const char *value, void *cb)
return config_error_nonbool(var);
return color_parse(value, branch_colors[slot]);
}
+ if (!strcmp(var, "submodule.recurse")) {
+ recurse_submodules = git_config_bool(var, value);
+ return 0;
+ }
+ if (!strcasecmp(var, "submodule.propagateBranches")) {
+ submodule_propagate_branches = git_config_bool(var, value);
+ return 0;
+ }
+
return git_color_default_config(var, value, cb);
}
@@ -621,14 +633,16 @@ static int edit_branch_description(const char *branch_name)
int cmd_branch(int argc, const char **argv, const char *prefix)
{
- int delete = 0, rename = 0, copy = 0, force = 0, list = 0;
- int show_current = 0;
- int reflog = 0, edit_description = 0;
- int quiet = 0, unset_upstream = 0;
+ /* possible actions */
+ int delete = 0, rename = 0, copy = 0, list = 0,
+ unset_upstream = 0, show_current = 0, edit_description = 0;
const char *new_upstream = NULL;
+ int noncreate_actions = 0;
+ /* possible options */
+ int reflog = 0, quiet = 0, icase = 0, force = 0,
+ recurse_submodules_explicit = 0;
enum branch_track track;
struct ref_filter filter;
- int icase = 0;
static struct ref_sorting *sorting;
struct string_list sorting_options = STRING_LIST_INIT_DUP;
struct ref_format format = REF_FORMAT_INIT;
@@ -677,6 +691,7 @@ int cmd_branch(int argc, const char **argv, const char *prefix)
OPT_CALLBACK(0, "points-at", &filter.points_at, N_("object"),
N_("print only branches of the object"), parse_opt_object_name),
OPT_BOOL('i', "ignore-case", &icase, N_("sorting and filtering are case insensitive")),
+ OPT_BOOL(0, "recurse-submodules", &recurse_submodules_explicit, N_("recurse through submodules")),
OPT_STRING( 0 , "format", &format.format, N_("format"), N_("format to use for the output")),
OPT_END(),
};
@@ -713,10 +728,23 @@ int cmd_branch(int argc, const char **argv, const char *prefix)
filter.reachable_from || filter.unreachable_from || filter.points_at.nr)
list = 1;
- if (!!delete + !!rename + !!copy + !!new_upstream + !!show_current +
- list + edit_description + unset_upstream > 1)
+ noncreate_actions = !!delete + !!rename + !!copy + !!new_upstream +
+ !!show_current + !!list + !!edit_description +
+ !!unset_upstream;
+ if (noncreate_actions > 1)
usage_with_options(builtin_branch_usage, options);
+ if (recurse_submodules_explicit) {
+ if (!submodule_propagate_branches)
+ die(_("branch with --recurse-submodules can only be used if submodule.propagateBranches is enabled"));
+ if (noncreate_actions)
+ die(_("--recurse-submodules can only be used to create branches"));
+ }
+
+ recurse_submodules =
+ (recurse_submodules || recurse_submodules_explicit) &&
+ submodule_propagate_branches;
+
if (filter.abbrev == -1)
filter.abbrev = DEFAULT_ABBREV;
filter.ignore_case = icase;
@@ -828,12 +856,9 @@ int cmd_branch(int argc, const char **argv, const char *prefix)
if (!ref_exists(branch->refname))
die(_("branch '%s' does not exist"), branch->name);
- /*
- * create_branch takes care of setting up the tracking
- * info and making sure new_upstream is correct
- */
- create_branch(the_repository, branch->name, new_upstream,
- 0, 0, 0, quiet, BRANCH_TRACK_OVERRIDE);
+ dwim_and_setup_tracking(the_repository, branch->name,
+ new_upstream, BRANCH_TRACK_OVERRIDE,
+ quiet);
} else if (unset_upstream) {
struct branch *branch = branch_get(argv[0]);
struct strbuf buf = STRBUF_INIT;
@@ -857,7 +882,10 @@ int cmd_branch(int argc, const char **argv, const char *prefix)
strbuf_addf(&buf, "branch.%s.merge", branch->name);
git_config_set_multivar(buf.buf, NULL, NULL, CONFIG_FLAGS_MULTI_REPLACE);
strbuf_release(&buf);
- } else if (argc > 0 && argc <= 2) {
+ } else if (!noncreate_actions && argc > 0 && argc <= 2) {
+ const char *branch_name = argv[0];
+ const char *start_name = argc == 2 ? argv[1] : head;
+
if (filter.kind != FILTER_REFS_BRANCHES)
die(_("The -a, and -r, options to 'git branch' do not take a branch name.\n"
"Did you mean to use: -a|-r --list <pattern>?"));
@@ -865,10 +893,14 @@ int cmd_branch(int argc, const char **argv, const char *prefix)
if (track == BRANCH_TRACK_OVERRIDE)
die(_("the '--set-upstream' option is no longer supported. Please use '--track' or '--set-upstream-to' instead."));
- create_branch(the_repository,
- argv[0], (argc == 2) ? argv[1] : head,
- force, 0, reflog, quiet, track);
-
+ if (recurse_submodules) {
+ create_branches_recursively(the_repository, branch_name,
+ start_name, NULL, force,
+ reflog, quiet, track, 0);
+ return 0;
+ }
+ create_branch(the_repository, branch_name, start_name, force, 0,
+ reflog, quiet, track, 0);
} else
usage_with_options(builtin_branch_usage, options);
diff --git a/builtin/bundle.c b/builtin/bundle.c
index 5a85d7c..2adad54 100644
--- a/builtin/bundle.c
+++ b/builtin/bundle.c
@@ -93,6 +93,7 @@ static int cmd_bundle_create(int argc, const char **argv, const char *prefix) {
if (!startup_info->have_repository)
die(_("Need a repository to create a bundle."));
ret = !!create_bundle(the_repository, bundle_file, argc, argv, &pack_opts, version);
+ strvec_clear(&pack_opts);
free(bundle_file);
return ret;
}
diff --git a/builtin/cat-file.c b/builtin/cat-file.c
index d94050e..50cf389 100644
--- a/builtin/cat-file.c
+++ b/builtin/cat-file.c
@@ -17,14 +17,20 @@
#include "object-store.h"
#include "promisor-remote.h"
+enum batch_mode {
+ BATCH_MODE_CONTENTS,
+ BATCH_MODE_INFO,
+ BATCH_MODE_QUEUE_AND_DISPATCH,
+};
+
struct batch_options {
int enabled;
int follow_symlinks;
- int print_contents;
+ enum batch_mode batch_mode;
int buffer_output;
int all_objects;
int unordered;
- int cmdmode; /* may be 'w' or 'c' for --filters or --textconv */
+ int transform_mode; /* may be 'w' or 'c' for --filters or --textconv */
const char *format;
};
@@ -73,14 +79,17 @@ static int cat_one_file(int opt, const char *exp_type, const char *obj_name,
struct object_info oi = OBJECT_INFO_INIT;
struct strbuf sb = STRBUF_INIT;
unsigned flags = OBJECT_INFO_LOOKUP_REPLACE;
+ unsigned get_oid_flags = GET_OID_RECORD_PATH | GET_OID_ONLY_TO_DIE;
const char *path = force_path;
+ const int opt_cw = (opt == 'c' || opt == 'w');
+ if (!path && opt_cw)
+ get_oid_flags |= GET_OID_REQUIRE_PATH;
if (unknown_type)
flags |= OBJECT_INFO_ALLOW_UNKNOWN_TYPE;
- if (get_oid_with_context(the_repository, obj_name,
- GET_OID_RECORD_PATH,
- &oid, &obj_context))
+ if (get_oid_with_context(the_repository, obj_name, get_oid_flags, &oid,
+ &obj_context))
die("Not a valid object name %s", obj_name);
if (!path)
@@ -112,9 +121,6 @@ static int cat_one_file(int opt, const char *exp_type, const char *obj_name,
return !has_object_file(&oid);
case 'w':
- if (!path)
- die("git cat-file --filters %s: <object> must be "
- "<sha1:path>", obj_name);
if (filter_object(path, obj_context.mode,
&oid, &buf, &size))
@@ -122,10 +128,6 @@ static int cat_one_file(int opt, const char *exp_type, const char *obj_name,
break;
case 'c':
- if (!path)
- die("git cat-file --textconv %s: <object> must be <sha1:path>",
- obj_name);
-
if (textconv_object(the_repository, path, obj_context.mode,
&oid, 1, &buf, &size))
break;
@@ -154,7 +156,10 @@ static int cat_one_file(int opt, const char *exp_type, const char *obj_name,
break;
case 0:
- if (type_from_string(exp_type) == OBJ_BLOB) {
+ {
+ enum object_type exp_type_id = type_from_string(exp_type);
+
+ if (exp_type_id == OBJ_BLOB) {
struct object_id blob_oid;
if (oid_object_info(the_repository, &oid, NULL) == OBJ_TAG) {
char *buffer = read_object_file(&oid, &type,
@@ -176,10 +181,10 @@ static int cat_one_file(int opt, const char *exp_type, const char *obj_name,
* fall-back to the usual case.
*/
}
- buf = read_object_with_reference(the_repository,
- &oid, exp_type, &size, NULL);
+ buf = read_object_with_reference(the_repository, &oid,
+ exp_type_id, &size, NULL);
break;
-
+ }
default:
die("git cat-file: unknown option: %s", exp_type);
}
@@ -306,19 +311,19 @@ static void print_object_or_die(struct batch_options *opt, struct expand_data *d
if (data->type == OBJ_BLOB) {
if (opt->buffer_output)
fflush(stdout);
- if (opt->cmdmode) {
+ if (opt->transform_mode) {
char *contents;
unsigned long size;
if (!data->rest)
die("missing path for '%s'", oid_to_hex(oid));
- if (opt->cmdmode == 'w') {
+ if (opt->transform_mode == 'w') {
if (filter_object(data->rest, 0100644, oid,
&contents, &size))
die("could not convert '%s' %s",
oid_to_hex(oid), data->rest);
- } else if (opt->cmdmode == 'c') {
+ } else if (opt->transform_mode == 'c') {
enum object_type type;
if (!textconv_object(the_repository,
data->rest, 0100644, oid,
@@ -330,7 +335,7 @@ static void print_object_or_die(struct batch_options *opt, struct expand_data *d
die("could not convert '%s' %s",
oid_to_hex(oid), data->rest);
} else
- BUG("invalid cmdmode: %c", opt->cmdmode);
+ BUG("invalid transform_mode: %c", opt->transform_mode);
batch_write(opt, contents, size);
free(contents);
} else {
@@ -355,6 +360,13 @@ static void print_object_or_die(struct batch_options *opt, struct expand_data *d
}
}
+static void print_default_format(struct strbuf *scratch, struct expand_data *data)
+{
+ strbuf_addf(scratch, "%s %s %"PRIuMAX"\n", oid_to_hex(&data->oid),
+ type_name(data->type),
+ (uintmax_t)data->size);
+}
+
/*
* If "pack" is non-NULL, then "offset" is the byte offset within the pack from
* which the object may be accessed (though note that we may also rely on
@@ -386,11 +398,17 @@ static void batch_object_write(const char *obj_name,
}
strbuf_reset(scratch);
- strbuf_expand(scratch, opt->format, expand_format, data);
- strbuf_addch(scratch, '\n');
+
+ if (!opt->format) {
+ print_default_format(scratch, data);
+ } else {
+ strbuf_expand(scratch, opt->format, expand_format, data);
+ strbuf_addch(scratch, '\n');
+ }
+
batch_write(opt, scratch->buf, scratch->len);
- if (opt->print_contents) {
+ if (opt->batch_mode == BATCH_MODE_CONTENTS) {
print_object_or_die(opt, data);
batch_write(opt, "\n", 1);
}
@@ -512,6 +530,137 @@ static int batch_unordered_packed(const struct object_id *oid,
data);
}
+typedef void (*parse_cmd_fn_t)(struct batch_options *, const char *,
+ struct strbuf *, struct expand_data *);
+
+struct queued_cmd {
+ parse_cmd_fn_t fn;
+ char *line;
+};
+
+static void parse_cmd_contents(struct batch_options *opt,
+ const char *line,
+ struct strbuf *output,
+ struct expand_data *data)
+{
+ opt->batch_mode = BATCH_MODE_CONTENTS;
+ batch_one_object(line, output, opt, data);
+}
+
+static void parse_cmd_info(struct batch_options *opt,
+ const char *line,
+ struct strbuf *output,
+ struct expand_data *data)
+{
+ opt->batch_mode = BATCH_MODE_INFO;
+ batch_one_object(line, output, opt, data);
+}
+
+static void dispatch_calls(struct batch_options *opt,
+ struct strbuf *output,
+ struct expand_data *data,
+ struct queued_cmd *cmd,
+ int nr)
+{
+ int i;
+
+ if (!opt->buffer_output)
+ die(_("flush is only for --buffer mode"));
+
+ for (i = 0; i < nr; i++)
+ cmd[i].fn(opt, cmd[i].line, output, data);
+
+ fflush(stdout);
+}
+
+static void free_cmds(struct queued_cmd *cmd, size_t *nr)
+{
+ size_t i;
+
+ for (i = 0; i < *nr; i++)
+ FREE_AND_NULL(cmd[i].line);
+
+ *nr = 0;
+}
+
+
+static const struct parse_cmd {
+ const char *name;
+ parse_cmd_fn_t fn;
+ unsigned takes_args;
+} commands[] = {
+ { "contents", parse_cmd_contents, 1},
+ { "info", parse_cmd_info, 1},
+ { "flush", NULL, 0},
+};
+
+static void batch_objects_command(struct batch_options *opt,
+ struct strbuf *output,
+ struct expand_data *data)
+{
+ struct strbuf input = STRBUF_INIT;
+ struct queued_cmd *queued_cmd = NULL;
+ size_t alloc = 0, nr = 0;
+
+ while (!strbuf_getline(&input, stdin)) {
+ int i;
+ const struct parse_cmd *cmd = NULL;
+ const char *p = NULL, *cmd_end;
+ struct queued_cmd call = {0};
+
+ if (!input.len)
+ die(_("empty command in input"));
+ if (isspace(*input.buf))
+ die(_("whitespace before command: '%s'"), input.buf);
+
+ for (i = 0; i < ARRAY_SIZE(commands); i++) {
+ if (!skip_prefix(input.buf, commands[i].name, &cmd_end))
+ continue;
+
+ cmd = &commands[i];
+ if (cmd->takes_args) {
+ if (*cmd_end != ' ')
+ die(_("%s requires arguments"),
+ commands[i].name);
+
+ p = cmd_end + 1;
+ } else if (*cmd_end) {
+ die(_("%s takes no arguments"),
+ commands[i].name);
+ }
+
+ break;
+ }
+
+ if (!cmd)
+ die(_("unknown command: '%s'"), input.buf);
+
+ if (!strcmp(cmd->name, "flush")) {
+ dispatch_calls(opt, output, data, queued_cmd, nr);
+ free_cmds(queued_cmd, &nr);
+ } else if (!opt->buffer_output) {
+ cmd->fn(opt, p, output, data);
+ } else {
+ ALLOC_GROW(queued_cmd, nr + 1, alloc);
+ call.fn = cmd->fn;
+ call.line = xstrdup_or_null(p);
+ queued_cmd[nr++] = call;
+ }
+ }
+
+ if (opt->buffer_output &&
+ nr &&
+ !git_env_bool("GIT_TEST_CAT_FILE_NO_FLUSH_ON_EXIT", 0)) {
+ dispatch_calls(opt, output, data, queued_cmd, nr);
+ free_cmds(queued_cmd, &nr);
+ }
+
+ free(queued_cmd);
+ strbuf_release(&input);
+}
+
+#define DEFAULT_FORMAT "%(objectname) %(objecttype) %(objectsize)"
+
static int batch_objects(struct batch_options *opt)
{
struct strbuf input = STRBUF_INIT;
@@ -520,9 +669,6 @@ static int batch_objects(struct batch_options *opt)
int save_warning;
int retval = 0;
- if (!opt->format)
- opt->format = "%(objectname) %(objecttype) %(objectsize)";
-
/*
* Expand once with our special mark_query flag, which will prime the
* object_info to be handed to oid_object_info_extended for each
@@ -530,17 +676,22 @@ static int batch_objects(struct batch_options *opt)
*/
memset(&data, 0, sizeof(data));
data.mark_query = 1;
- strbuf_expand(&output, opt->format, expand_format, &data);
+ strbuf_expand(&output,
+ opt->format ? opt->format : DEFAULT_FORMAT,
+ expand_format,
+ &data);
data.mark_query = 0;
strbuf_release(&output);
- if (opt->cmdmode)
+ if (opt->transform_mode)
data.split_on_whitespace = 1;
+ if (opt->format && !strcmp(opt->format, DEFAULT_FORMAT))
+ opt->format = NULL;
/*
* If we are printing out the object, then always fill in the type,
* since we will want to decide whether or not to stream.
*/
- if (opt->print_contents)
+ if (opt->batch_mode == BATCH_MODE_CONTENTS)
data.info.typep = &data.type;
if (opt->all_objects) {
@@ -594,6 +745,11 @@ static int batch_objects(struct batch_options *opt)
save_warning = warn_on_object_refname_ambiguity;
warn_on_object_refname_ambiguity = 0;
+ if (opt->batch_mode == BATCH_MODE_QUEUE_AND_DISPATCH) {
+ batch_objects_command(opt, &output, &data);
+ goto cleanup;
+ }
+
while (strbuf_getline(&input, stdin) != EOF) {
if (data.split_on_whitespace) {
/*
@@ -612,18 +768,13 @@ static int batch_objects(struct batch_options *opt)
batch_one_object(input.buf, &output, opt, &data);
}
+ cleanup:
strbuf_release(&input);
strbuf_release(&output);
warn_on_object_refname_ambiguity = save_warning;
return retval;
}
-static const char * const cat_file_usage[] = {
- N_("git cat-file (-t [--allow-unknown-type] | -s [--allow-unknown-type] | -e | -p | <type> | --textconv | --filters) [--path=<path>] <object>"),
- N_("git cat-file (--batch[=<format>] | --batch-check[=<format>]) [--follow-symlinks] [--textconv | --filters]"),
- NULL
-};
-
static int git_cat_file_config(const char *var, const char *value, void *cb)
{
if (userdiff_config(var, value) < 0)
@@ -645,7 +796,16 @@ static int batch_option_callback(const struct option *opt,
}
bo->enabled = 1;
- bo->print_contents = !strcmp(opt->long_name, "batch");
+
+ if (!strcmp(opt->long_name, "batch"))
+ bo->batch_mode = BATCH_MODE_CONTENTS;
+ else if (!strcmp(opt->long_name, "batch-check"))
+ bo->batch_mode = BATCH_MODE_INFO;
+ else if (!strcmp(opt->long_name, "batch-command"))
+ bo->batch_mode = BATCH_MODE_QUEUE_AND_DISPATCH;
+ else
+ BUG("%s given to batch-option-callback", opt->long_name);
+
bo->format = arg;
return 0;
@@ -654,90 +814,142 @@ static int batch_option_callback(const struct option *opt,
int cmd_cat_file(int argc, const char **argv, const char *prefix)
{
int opt = 0;
+ int opt_cw = 0;
+ int opt_epts = 0;
const char *exp_type = NULL, *obj_name = NULL;
struct batch_options batch = {0};
int unknown_type = 0;
+ const char * const usage[] = {
+ N_("git cat-file <type> <object>"),
+ N_("git cat-file (-e | -p) <object>"),
+ N_("git cat-file (-t | -s) [--allow-unknown-type] <object>"),
+ N_("git cat-file (--batch | --batch-check | --batch-command) [--batch-all-objects]\n"
+ " [--buffer] [--follow-symlinks] [--unordered]\n"
+ " [--textconv | --filters]"),
+ N_("git cat-file (--textconv | --filters)\n"
+ " [<rev>:<path|tree-ish> | --path=<path|tree-ish> <rev>]"),
+ NULL
+ };
const struct option options[] = {
- OPT_GROUP(N_("<type> can be one of: blob, tree, commit, tag")),
- OPT_CMDMODE('t', NULL, &opt, N_("show object type"), 't'),
- OPT_CMDMODE('s', NULL, &opt, N_("show object size"), 's'),
+ /* Simple queries */
+ OPT_GROUP(N_("Check object existence or emit object contents")),
OPT_CMDMODE('e', NULL, &opt,
- N_("exit with zero when there's no error"), 'e'),
- OPT_CMDMODE('p', NULL, &opt, N_("pretty-print object's content"), 'p'),
- OPT_CMDMODE(0, "textconv", &opt,
- N_("for blob objects, run textconv on object's content"), 'c'),
- OPT_CMDMODE(0, "filters", &opt,
- N_("for blob objects, run filters on object's content"), 'w'),
- OPT_STRING(0, "path", &force_path, N_("blob"),
- N_("use a specific path for --textconv/--filters")),
+ N_("check if <object> exists"), 'e'),
+ OPT_CMDMODE('p', NULL, &opt, N_("pretty-print <object> content"), 'p'),
+
+ OPT_GROUP(N_("Emit [broken] object attributes")),
+ OPT_CMDMODE('t', NULL, &opt, N_("show object type (one of 'blob', 'tree', 'commit', 'tag', ...)"), 't'),
+ OPT_CMDMODE('s', NULL, &opt, N_("show object size"), 's'),
OPT_BOOL(0, "allow-unknown-type", &unknown_type,
N_("allow -s and -t to work with broken/corrupt objects")),
- OPT_BOOL(0, "buffer", &batch.buffer_output, N_("buffer --batch output")),
- OPT_CALLBACK_F(0, "batch", &batch, "format",
- N_("show info and content of objects fed from the standard input"),
+ /* Batch mode */
+ OPT_GROUP(N_("Batch objects requested on stdin (or --batch-all-objects)")),
+ OPT_CALLBACK_F(0, "batch", &batch, N_("format"),
+ N_("show full <object> or <rev> contents"),
+ PARSE_OPT_OPTARG | PARSE_OPT_NONEG,
+ batch_option_callback),
+ OPT_CALLBACK_F(0, "batch-check", &batch, N_("format"),
+ N_("like --batch, but don't emit <contents>"),
PARSE_OPT_OPTARG | PARSE_OPT_NONEG,
batch_option_callback),
- OPT_CALLBACK_F(0, "batch-check", &batch, "format",
- N_("show info about objects fed from the standard input"),
+ OPT_CALLBACK_F(0, "batch-command", &batch, N_("format"),
+ N_("read commands from stdin"),
PARSE_OPT_OPTARG | PARSE_OPT_NONEG,
batch_option_callback),
+ OPT_CMDMODE(0, "batch-all-objects", &opt,
+ N_("with --batch[-check]: ignores stdin, batches all known objects"), 'b'),
+ /* Batch-specific options */
+ OPT_GROUP(N_("Change or optimize batch output")),
+ OPT_BOOL(0, "buffer", &batch.buffer_output, N_("buffer --batch output")),
OPT_BOOL(0, "follow-symlinks", &batch.follow_symlinks,
- N_("follow in-tree symlinks (used with --batch or --batch-check)")),
- OPT_BOOL(0, "batch-all-objects", &batch.all_objects,
- N_("show all objects with --batch or --batch-check")),
+ N_("follow in-tree symlinks")),
OPT_BOOL(0, "unordered", &batch.unordered,
- N_("do not order --batch-all-objects output")),
+ N_("do not order objects before emitting them")),
+ /* Textconv options, stand-alone or with batch */
+ OPT_GROUP(N_("Emit object (blob or tree) with conversion or filter (stand-alone, or with batch)")),
+ OPT_CMDMODE(0, "textconv", &opt,
+ N_("run textconv on object's content"), 'c'),
+ OPT_CMDMODE(0, "filters", &opt,
+ N_("run filters on object's content"), 'w'),
+ OPT_STRING(0, "path", &force_path, N_("blob|tree"),
+ N_("use a <path> for (--textconv | --filters); Not with 'batch'")),
OPT_END()
};
git_config(git_cat_file_config, NULL);
batch.buffer_output = -1;
- argc = parse_options(argc, argv, prefix, options, cat_file_usage, 0);
-
- if (opt) {
- if (batch.enabled && (opt == 'c' || opt == 'w'))
- batch.cmdmode = opt;
- else if (argc == 1)
- obj_name = argv[0];
- else
- usage_with_options(cat_file_usage, options);
- }
- if (!opt && !batch.enabled) {
- if (argc == 2) {
- exp_type = argv[0];
- obj_name = argv[1];
- } else
- usage_with_options(cat_file_usage, options);
- }
- if (batch.enabled) {
- if (batch.cmdmode != opt || argc)
- usage_with_options(cat_file_usage, options);
- if (batch.cmdmode && batch.all_objects)
- die("--batch-all-objects cannot be combined with "
- "--textconv nor with --filters");
- }
- if ((batch.follow_symlinks || batch.all_objects) && !batch.enabled) {
- usage_with_options(cat_file_usage, options);
- }
+ argc = parse_options(argc, argv, prefix, options, usage, 0);
+ opt_cw = (opt == 'c' || opt == 'w');
+ opt_epts = (opt == 'e' || opt == 'p' || opt == 't' || opt == 's');
- if (force_path && opt != 'c' && opt != 'w') {
- error("--path=<path> needs --textconv or --filters");
- usage_with_options(cat_file_usage, options);
- }
+ /* --batch-all-objects? */
+ if (opt == 'b')
+ batch.all_objects = 1;
- if (force_path && batch.enabled) {
- error("options '--path=<path>' and '--batch' cannot be used together");
- usage_with_options(cat_file_usage, options);
- }
+ /* Option compatibility */
+ if (force_path && !opt_cw)
+ usage_msg_optf(_("'%s=<%s>' needs '%s' or '%s'"),
+ usage, options,
+ "--path", _("path|tree-ish"), "--filters",
+ "--textconv");
+ /* Option compatibility with batch mode */
+ if (batch.enabled)
+ ;
+ else if (batch.follow_symlinks)
+ usage_msg_optf(_("'%s' requires a batch mode"), usage, options,
+ "--follow-symlinks");
+ else if (batch.buffer_output >= 0)
+ usage_msg_optf(_("'%s' requires a batch mode"), usage, options,
+ "--buffer");
+ else if (batch.all_objects)
+ usage_msg_optf(_("'%s' requires a batch mode"), usage, options,
+ "--batch-all-objects");
+
+ /* Batch defaults */
if (batch.buffer_output < 0)
batch.buffer_output = batch.all_objects;
- if (batch.enabled)
+ /* Return early if we're in batch mode? */
+ if (batch.enabled) {
+ if (opt_cw)
+ batch.transform_mode = opt;
+ else if (opt && opt != 'b')
+ usage_msg_optf(_("'-%c' is incompatible with batch mode"),
+ usage, options, opt);
+ else if (argc)
+ usage_msg_opt(_("batch modes take no arguments"), usage,
+ options);
+
return batch_objects(&batch);
+ }
+
+ if (opt) {
+ if (!argc && opt == 'c')
+ usage_msg_optf(_("<rev> required with '%s'"),
+ usage, options, "--textconv");
+ else if (!argc && opt == 'w')
+ usage_msg_optf(_("<rev> required with '%s'"),
+ usage, options, "--filters");
+ else if (!argc && opt_epts)
+ usage_msg_optf(_("<object> required with '-%c'"),
+ usage, options, opt);
+ else if (argc == 1)
+ obj_name = argv[0];
+ else
+ usage_msg_opt(_("too many arguments"), usage, options);
+ } else if (!argc) {
+ usage_with_options(usage, options);
+ } else if (argc != 2) {
+ usage_msg_optf(_("only two arguments allowed in <type> <object> mode, not %d"),
+ usage, options, argc);
+ } else if (argc) {
+ exp_type = argv[0];
+ obj_name = argv[1];
+ }
if (unknown_type && opt != 't' && opt != 's')
die("git cat-file --allow-unknown-type: use with -s or -t");
diff --git a/builtin/checkout-index.c b/builtin/checkout-index.c
index e21620d..97e06e8 100644
--- a/builtin/checkout-index.c
+++ b/builtin/checkout-index.c
@@ -7,6 +7,7 @@
#define USE_THE_INDEX_COMPATIBILITY_MACROS
#include "builtin.h"
#include "config.h"
+#include "dir.h"
#include "lockfile.h"
#include "quote.h"
#include "cache-tree.h"
@@ -17,6 +18,7 @@
#define CHECKOUT_ALL 4
static int nul_term_line;
static int checkout_stage; /* default to checkout stage0 */
+static int ignore_skip_worktree; /* default to 0 */
static int to_tempfile;
static char topath[4][TEMPORARY_FILENAME_LENGTH + 1];
@@ -65,6 +67,8 @@ static int checkout_file(const char *name, const char *prefix)
int namelen = strlen(name);
int pos = cache_name_pos(name, namelen);
int has_same_name = 0;
+ int is_file = 0;
+ int is_skipped = 1;
int did_checkout = 0;
int errs = 0;
@@ -78,6 +82,12 @@ static int checkout_file(const char *name, const char *prefix)
break;
has_same_name = 1;
pos++;
+ if (S_ISSPARSEDIR(ce->ce_mode))
+ break;
+ is_file = 1;
+ if (!ignore_skip_worktree && ce_skip_worktree(ce))
+ break;
+ is_skipped = 0;
if (ce_stage(ce) != checkout_stage
&& (CHECKOUT_ALL != checkout_stage || !ce_stage(ce)))
continue;
@@ -106,6 +116,11 @@ static int checkout_file(const char *name, const char *prefix)
fprintf(stderr, "git checkout-index: %s ", name);
if (!has_same_name)
fprintf(stderr, "is not in the cache");
+ else if (!is_file)
+ fprintf(stderr, "is a sparse directory");
+ else if (is_skipped)
+ fprintf(stderr, "has skip-worktree enabled; "
+ "use '--ignore-skip-worktree-bits' to checkout");
else if (checkout_stage)
fprintf(stderr, "does not exist at stage %d",
checkout_stage);
@@ -121,10 +136,27 @@ static int checkout_all(const char *prefix, int prefix_length)
int i, errs = 0;
struct cache_entry *last_ce = NULL;
- /* TODO: audit for interaction with sparse-index. */
- ensure_full_index(&the_index);
for (i = 0; i < active_nr ; i++) {
struct cache_entry *ce = active_cache[i];
+
+ if (S_ISSPARSEDIR(ce->ce_mode)) {
+ if (!ce_skip_worktree(ce))
+ BUG("sparse directory '%s' does not have skip-worktree set", ce->name);
+
+ /*
+ * If the current entry is a sparse directory and skip-worktree
+ * entries are being checked out, expand the index and continue
+ * the loop on the current index position (now pointing to the
+ * first entry inside the expanded sparse directory).
+ */
+ if (ignore_skip_worktree) {
+ ensure_full_index(&the_index);
+ ce = active_cache[i];
+ }
+ }
+
+ if (!ignore_skip_worktree && ce_skip_worktree(ce))
+ continue;
if (ce_stage(ce) != checkout_stage
&& (CHECKOUT_ALL != checkout_stage || !ce_stage(ce)))
continue;
@@ -185,6 +217,8 @@ int cmd_checkout_index(int argc, const char **argv, const char *prefix)
struct option builtin_checkout_index_options[] = {
OPT_BOOL('a', "all", &all,
N_("check out all files in the index")),
+ OPT_BOOL(0, "ignore-skip-worktree-bits", &ignore_skip_worktree,
+ N_("do not skip files with skip-worktree set")),
OPT__FORCE(&force, N_("force overwrite of existing files"), 0),
OPT__QUIET(&quiet,
N_("no warning for existing files and files not in index")),
@@ -212,6 +246,9 @@ int cmd_checkout_index(int argc, const char **argv, const char *prefix)
git_config(git_default_config, NULL);
prefix_length = prefix ? strlen(prefix) : 0;
+ prepare_repo_settings(the_repository);
+ the_repository->settings.command_requires_full_index = 0;
+
if (read_cache() < 0) {
die("invalid cache");
}
diff --git a/builtin/checkout.c b/builtin/checkout.c
index cc804ba..7976814 100644
--- a/builtin/checkout.c
+++ b/builtin/checkout.c
@@ -9,6 +9,7 @@
#include "config.h"
#include "diff.h"
#include "dir.h"
+#include "hook.h"
#include "ll-merge.h"
#include "lockfile.h"
#include "merge-recursive.h"
@@ -114,7 +115,7 @@ static void branch_info_release(struct branch_info *info)
static int post_checkout_hook(struct commit *old_commit, struct commit *new_commit,
int changed)
{
- return run_hook_le(NULL, "post-checkout",
+ return run_hooks_l("post-checkout",
oid_to_hex(old_commit ? &old_commit->object.oid : null_oid()),
oid_to_hex(new_commit ? &new_commit->object.oid : null_oid()),
changed ? "1" : "0", NULL);
@@ -245,6 +246,7 @@ static int checkout_merged(int pos, const struct checkout *state,
struct cache_entry *ce = active_cache[pos];
const char *path = ce->name;
mmfile_t ancestor, ours, theirs;
+ enum ll_merge_result merge_status;
int status;
struct object_id oid;
mmbuffer_t result_buf;
@@ -275,13 +277,16 @@ static int checkout_merged(int pos, const struct checkout *state,
memset(&ll_opts, 0, sizeof(ll_opts));
git_config_get_bool("merge.renormalize", &renormalize);
ll_opts.renormalize = renormalize;
- status = ll_merge(&result_buf, path, &ancestor, "base",
- &ours, "ours", &theirs, "theirs",
- state->istate, &ll_opts);
+ merge_status = ll_merge(&result_buf, path, &ancestor, "base",
+ &ours, "ours", &theirs, "theirs",
+ state->istate, &ll_opts);
free(ancestor.ptr);
free(ours.ptr);
free(theirs.ptr);
- if (status < 0 || !result_buf.ptr) {
+ if (merge_status == LL_MERGE_BINARY_CONFLICT)
+ warning("Cannot merge binary files: %s (%s vs. %s)",
+ path, "ours", "theirs");
+ if (merge_status < 0 || !result_buf.ptr) {
free(result_buf.ptr);
return error(_("path '%s': cannot merge"), path);
}
@@ -298,7 +303,7 @@ static int checkout_merged(int pos, const struct checkout *state,
* (it also writes the merge result to the object database even
* when it may contain conflicts).
*/
- if (write_object_file(result_buf.ptr, result_buf.size, blob_type, &oid))
+ if (write_object_file(result_buf.ptr, result_buf.size, OBJ_BLOB, &oid))
die(_("Unable to add merge result for '%s'"), path);
free(result_buf.ptr);
ce = make_transient_cache_entry(mode, &oid, path, 2, ce_mem_pool);
@@ -733,6 +738,7 @@ static int merge_working_tree(const struct checkout_opts *opts,
struct tree_desc trees[2];
struct tree *tree;
struct unpack_trees_options topts;
+ const struct object_id *old_commit_oid;
memset(&topts, 0, sizeof(topts));
topts.head_idx = -1;
@@ -760,9 +766,15 @@ static int merge_working_tree(const struct checkout_opts *opts,
&new_branch_info->commit->object.oid :
&new_branch_info->oid, NULL);
topts.preserve_ignored = !opts->overwrite_ignore;
- tree = parse_tree_indirect(old_branch_info->commit ?
- &old_branch_info->commit->object.oid :
- the_hash_algo->empty_tree);
+
+ old_commit_oid = old_branch_info->commit ?
+ &old_branch_info->commit->object.oid :
+ the_hash_algo->empty_tree;
+ tree = parse_tree_indirect(old_commit_oid);
+ if (!tree)
+ die(_("unable to parse commit %s"),
+ oid_to_hex(old_commit_oid));
+
init_tree_desc(&trees[0], tree->buffer, tree->size);
parse_tree(new_tree);
tree = new_tree;
@@ -904,7 +916,8 @@ static void update_refs_for_switch(const struct checkout_opts *opts,
opts->new_branch_force ? 1 : 0,
opts->new_branch_log,
opts->quiet,
- opts->track);
+ opts->track,
+ 0);
free(new_branch_info->name);
free(new_branch_info->refname);
new_branch_info->name = xstrdup(opts->new_branch);
@@ -1391,23 +1404,31 @@ static void die_expecting_a_branch(const struct branch_info *branch_info)
{
struct object_id oid;
char *to_free;
+ int code;
if (dwim_ref(branch_info->name, strlen(branch_info->name), &oid, &to_free, 0) == 1) {
const char *ref = to_free;
if (skip_prefix(ref, "refs/tags/", &ref))
- die(_("a branch is expected, got tag '%s'"), ref);
- if (skip_prefix(ref, "refs/remotes/", &ref))
- die(_("a branch is expected, got remote branch '%s'"), ref);
- die(_("a branch is expected, got '%s'"), ref);
+ code = die_message(_("a branch is expected, got tag '%s'"), ref);
+ else if (skip_prefix(ref, "refs/remotes/", &ref))
+ code = die_message(_("a branch is expected, got remote branch '%s'"), ref);
+ else
+ code = die_message(_("a branch is expected, got '%s'"), ref);
}
- if (branch_info->commit)
- die(_("a branch is expected, got commit '%s'"), branch_info->name);
- /*
- * This case should never happen because we already die() on
- * non-commit, but just in case.
- */
- die(_("a branch is expected, got '%s'"), branch_info->name);
+ else if (branch_info->commit)
+ code = die_message(_("a branch is expected, got commit '%s'"), branch_info->name);
+ else
+ /*
+ * This case should never happen because we already die() on
+ * non-commit, but just in case.
+ */
+ code = die_message(_("a branch is expected, got '%s'"), branch_info->name);
+
+ if (advice_enabled(ADVICE_SUGGEST_DETACHING_HEAD))
+ advise(_("If you want to detach HEAD at the commit, try again with the --detach option."));
+
+ exit(code);
}
static void die_if_some_operation_in_progress(void)
@@ -1602,9 +1623,10 @@ static int checkout_main(int argc, const char **argv, const char *prefix,
opts->show_progress = -1;
git_config(git_checkout_config, opts);
-
- prepare_repo_settings(the_repository);
- the_repository->settings.command_requires_full_index = 0;
+ if (the_repository->gitdir) {
+ prepare_repo_settings(the_repository);
+ the_repository->settings.command_requires_full_index = 0;
+ }
opts->track = BRANCH_TRACK_UNSPECIFIED;
diff --git a/builtin/clean.c b/builtin/clean.c
index 3ff02bb..5466636 100644
--- a/builtin/clean.c
+++ b/builtin/clean.c
@@ -1009,6 +1009,9 @@ int cmd_clean(int argc, const char **argv, const char *prefix)
dir.flags |= DIR_KEEP_UNTRACKED_CONTENTS;
}
+ prepare_repo_settings(the_repository);
+ the_repository->settings.command_requires_full_index = 0;
+
if (read_cache() < 0)
die(_("index file corrupt"));
diff --git a/builtin/clone.c b/builtin/clone.c
index 727e16e..5231656 100644
--- a/builtin/clone.c
+++ b/builtin/clone.c
@@ -32,6 +32,8 @@
#include "connected.h"
#include "packfile.h"
#include "list-objects-filter-options.h"
+#include "hook.h"
+#include "bundle.h"
/*
* Overall FIXMEs:
@@ -71,6 +73,8 @@ static int option_dissociate;
static int max_jobs = -1;
static struct string_list option_recurse_submodules = STRING_LIST_INIT_NODUP;
static struct list_objects_filter_options filter_options;
+static int option_filter_submodules = -1; /* unspecified */
+static int config_filter_submodules = -1; /* unspecified */
static struct string_list server_options = STRING_LIST_INIT_NODUP;
static int option_remote_submodules;
@@ -150,6 +154,8 @@ static struct option builtin_clone_options[] = {
OPT_SET_INT('6', "ipv6", &family, N_("use IPv6 addresses only"),
TRANSPORT_FAMILY_IPV6),
OPT_PARSE_LIST_OBJECTS_FILTER(&filter_options),
+ OPT_BOOL(0, "also-filter-submodules", &option_filter_submodules,
+ N_("apply partial clone filters to submodules")),
OPT_BOOL(0, "remote-submodules", &option_remote_submodules,
N_("any cloned submodules will use their remote-tracking branch")),
OPT_BOOL(0, "sparse", &option_sparse_checkout,
@@ -650,7 +656,7 @@ static int git_sparse_checkout_init(const char *repo)
return result;
}
-static int checkout(int submodule_progress)
+static int checkout(int submodule_progress, int filter_submodules)
{
struct object_id oid;
char *head;
@@ -695,6 +701,8 @@ static int checkout(int submodule_progress)
init_checkout_metadata(&opts.meta, head, &oid, NULL);
tree = parse_tree_indirect(&oid);
+ if (!tree)
+ die(_("unable to parse commit %s"), oid_to_hex(&oid));
parse_tree(tree);
init_tree_desc(&t, tree->buffer, tree->size);
if (unpack_trees(1, &t, &opts) < 0)
@@ -705,7 +713,7 @@ static int checkout(int submodule_progress)
if (write_locked_index(&the_index, &lock_file, COMMIT_LOCK))
die(_("unable to write new index file"));
- err |= run_hook_le(NULL, "post-checkout", oid_to_hex(null_oid()),
+ err |= run_hooks_l("post-checkout", oid_to_hex(null_oid()),
oid_to_hex(&oid), "1", NULL);
if (!err && (option_recurse_submodules.nr > 0)) {
@@ -729,6 +737,10 @@ static int checkout(int submodule_progress)
strvec_push(&args, "--no-fetch");
}
+ if (filter_submodules && filter_options.choice)
+ strvec_pushf(&args, "--filter=%s",
+ expand_list_objects_filter_spec(&filter_options));
+
if (option_single_branch >= 0)
strvec_push(&args, option_single_branch ?
"--single-branch" :
@@ -749,6 +761,8 @@ static int git_clone_config(const char *k, const char *v, void *cb)
}
if (!strcmp(k, "clone.rejectshallow"))
config_reject_shallow = git_config_bool(k, v);
+ if (!strcmp(k, "clone.filtersubmodules"))
+ config_filter_submodules = git_config_bool(k, v);
return git_default_config(k, v, cb);
}
@@ -862,7 +876,7 @@ int cmd_clone(int argc, const char **argv, const char *prefix)
const struct ref *refs, *remote_head;
struct ref *remote_head_points_at = NULL;
const struct ref *our_head_points_at;
- struct ref *mapped_refs;
+ struct ref *mapped_refs = NULL;
const struct ref *ref;
struct strbuf key = STRBUF_INIT;
struct strbuf branch_top = STRBUF_INIT, reflog_msg = STRBUF_INIT;
@@ -871,6 +885,7 @@ int cmd_clone(int argc, const char **argv, const char *prefix)
struct remote *remote;
int err = 0, complete_refs_before_fetch = 1;
int submodule_progress;
+ int filter_submodules = 0;
struct transport_ls_refs_options transport_ls_refs_options =
TRANSPORT_LS_REFS_OPTIONS_INIT;
@@ -1067,6 +1082,27 @@ int cmd_clone(int argc, const char **argv, const char *prefix)
reject_shallow = option_reject_shallow;
/*
+ * If option_filter_submodules is specified from CLI option,
+ * ignore config_filter_submodules from git_clone_config.
+ */
+ if (config_filter_submodules != -1)
+ filter_submodules = config_filter_submodules;
+ if (option_filter_submodules != -1)
+ filter_submodules = option_filter_submodules;
+
+ /*
+ * Exit if the user seems to be doing something silly with submodule
+ * filter flags (but not with filter configs, as those should be
+ * set-and-forget).
+ */
+ if (option_filter_submodules > 0 && !filter_options.choice)
+ die(_("the option '%s' requires '%s'"),
+ "--also-filter-submodules", "--filter");
+ if (option_filter_submodules > 0 && !option_recurse_submodules.nr)
+ die(_("the option '%s' requires '%s'"),
+ "--also-filter-submodules", "--recurse-submodules");
+
+ /*
* apply the remote name provided by --origin only after this second
* call to git_config, to ensure it overrides all config-based values.
*/
@@ -1137,6 +1173,18 @@ int cmd_clone(int argc, const char **argv, const char *prefix)
warning(_("--local is ignored"));
transport->cloning = 1;
+ if (is_bundle) {
+ struct bundle_header header = BUNDLE_HEADER_INIT;
+ int fd = read_bundle_header(path, &header);
+ int has_filter = header.filter.choice != LOFC_DISABLED;
+
+ if (fd > 0)
+ close(fd);
+ bundle_header_release(&header);
+ if (has_filter)
+ die(_("cannot clone from filtered bundle"));
+ }
+
transport_set_option(transport, TRANS_OPT_KEEP, "yes");
if (reject_shallow)
@@ -1184,7 +1232,10 @@ int cmd_clone(int argc, const char **argv, const char *prefix)
refs = transport_get_remote_refs(transport, &transport_ls_refs_options);
- if (refs) {
+ if (refs)
+ mapped_refs = wanted_peer_refs(refs, &remote->fetch);
+
+ if (mapped_refs) {
int hash_algo = hash_algo_by_ptr(transport_get_hash_algo(transport));
/*
@@ -1193,8 +1244,6 @@ int cmd_clone(int argc, const char **argv, const char *prefix)
*/
initialize_repository_version(hash_algo, 1);
repo_set_hash_algo(the_repository, hash_algo);
-
- mapped_refs = wanted_peer_refs(refs, &remote->fetch);
/*
* transport_get_remote_refs() may return refs with null sha-1
* in mapped_refs (see struct transport->get_refs_list
@@ -1233,14 +1282,14 @@ int cmd_clone(int argc, const char **argv, const char *prefix)
}
else {
const char *branch;
- char *ref;
+ const char *ref;
+ char *ref_free = NULL;
if (option_branch)
die(_("Remote branch %s not found in upstream %s"),
option_branch, remote_name);
warning(_("You appear to have cloned an empty repository."));
- mapped_refs = NULL;
our_head_points_at = NULL;
remote_head_points_at = NULL;
remote_head = NULL;
@@ -1250,17 +1299,16 @@ int cmd_clone(int argc, const char **argv, const char *prefix)
skip_prefix(transport_ls_refs_options.unborn_head_target,
"refs/heads/", &branch)) {
ref = transport_ls_refs_options.unborn_head_target;
- transport_ls_refs_options.unborn_head_target = NULL;
create_symref("HEAD", ref, reflog_msg.buf);
} else {
branch = git_default_branch_name(0);
- ref = xstrfmt("refs/heads/%s", branch);
+ ref_free = xstrfmt("refs/heads/%s", branch);
+ ref = ref_free;
}
if (!option_bare)
install_branch_config(0, branch, remote_name, ref);
-
- free(ref);
+ free(ref_free);
}
write_refspec_config(src_ref_prefix, our_head_points_at,
@@ -1271,7 +1319,7 @@ int cmd_clone(int argc, const char **argv, const char *prefix)
if (is_local)
clone_local(path, git_dir);
- else if (refs && complete_refs_before_fetch) {
+ else if (mapped_refs && complete_refs_before_fetch) {
if (transport_fetch_refs(transport, mapped_refs))
die(_("remote transport reported error"));
}
@@ -1299,7 +1347,7 @@ int cmd_clone(int argc, const char **argv, const char *prefix)
}
junk_mode = JUNK_LEAVE_REPO;
- err = checkout(submodule_progress);
+ err = checkout(submodule_progress, filter_submodules);
free(remote_name);
strbuf_release(&reflog_msg);
@@ -1312,7 +1360,6 @@ int cmd_clone(int argc, const char **argv, const char *prefix)
UNLEAK(repo);
junk_mode = JUNK_LEAVE_ALL;
- strvec_clear(&transport_ls_refs_options.ref_prefixes);
- free(transport_ls_refs_options.unborn_head_target);
+ transport_ls_refs_options_release(&transport_ls_refs_options);
return err;
}
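The clone hunks above resolve --also-filter-submodules by letting an explicit command-line value override clone.filterSubmodules from the configuration, with -1 standing for "unspecified" in both variables. Below is a small stand-alone C sketch of that precedence rule; it is illustrative only and does not use any git internals.

/* precedence-sketch.c: "-1 means unspecified" CLI-over-config pattern */
#include <stdio.h>

static int option_from_cli = -1;    /* -1: unspecified */
static int option_from_config = -1; /* -1: unspecified */

static int resolve(void)
{
	int value = 0;                  /* built-in default */
	if (option_from_config != -1)
		value = option_from_config;
	if (option_from_cli != -1)      /* an explicit CLI value wins */
		value = option_from_cli;
	return value;
}

int main(void)
{
	option_from_config = 1;
	printf("config only: %d\n", resolve());   /* prints 1 */
	option_from_cli = 0;
	printf("cli overrides: %d\n", resolve()); /* prints 0 */
	return 0;
}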
diff --git a/builtin/commit-graph.c b/builtin/commit-graph.c
index 4247fbd..51c4040 100644
--- a/builtin/commit-graph.c
+++ b/builtin/commit-graph.c
@@ -192,7 +192,7 @@ static int git_commit_graph_write_config(const char *var, const char *value,
static int graph_write(int argc, const char **argv)
{
- struct string_list pack_indexes = STRING_LIST_INIT_NODUP;
+ struct string_list pack_indexes = STRING_LIST_INIT_DUP;
struct strbuf buf = STRBUF_INIT;
struct oidset commits = OIDSET_INIT;
struct object_directory *odb = NULL;
@@ -273,8 +273,8 @@ static int graph_write(int argc, const char **argv)
if (opts.stdin_packs) {
while (strbuf_getline(&buf, stdin) != EOF)
- string_list_append(&pack_indexes,
- strbuf_detach(&buf, NULL));
+ string_list_append_nodup(&pack_indexes,
+ strbuf_detach(&buf, NULL));
} else if (opts.stdin_commits) {
oidset_init(&commits, 0);
if (opts.progress)
diff --git a/builtin/commit.c b/builtin/commit.c
index b9ed037..009a1de 100644
--- a/builtin/commit.c
+++ b/builtin/commit.c
@@ -37,6 +37,7 @@
#include "help.h"
#include "commit-reach.h"
#include "commit-graph.h"
+#include "pretty.h"
static const char * const builtin_commit_usage[] = {
N_("git commit [<options>] [--] <pathspec>..."),
@@ -725,11 +726,13 @@ static int prepare_to_commit(const char *index_file, const char *prefix,
int clean_message_contents = (cleanup_mode != COMMIT_MSG_CLEANUP_NONE);
int old_display_comment_prefix;
int merge_contains_scissors = 0;
+ int invoked_hook;
/* This checks and barfs if author is badly specified */
determine_author_info(author_ident);
- if (!no_verify && run_commit_hook(use_editor, index_file, "pre-commit", NULL))
+ if (!no_verify && run_commit_hook(use_editor, index_file, &invoked_hook,
+ "pre-commit", NULL))
return 0;
if (squash_message) {
@@ -1052,10 +1055,10 @@ static int prepare_to_commit(const char *index_file, const char *prefix,
return 0;
}
- if (!no_verify && hook_exists("pre-commit")) {
+ if (!no_verify && invoked_hook) {
/*
- * Re-read the index as pre-commit hook could have updated it,
- * and write it out as a tree. We must do this before we invoke
+ * Re-read the index as the pre-commit hook was invoked
+ * and could have updated it. We must do this before we invoke
* the editor and after we invoke run_status above.
*/
discard_cache();
@@ -1067,7 +1070,7 @@ static int prepare_to_commit(const char *index_file, const char *prefix,
return 0;
}
- if (run_commit_hook(use_editor, index_file, "prepare-commit-msg",
+ if (run_commit_hook(use_editor, index_file, NULL, "prepare-commit-msg",
git_path_commit_editmsg(), hook_arg1, hook_arg2, NULL))
return 0;
@@ -1084,7 +1087,8 @@ static int prepare_to_commit(const char *index_file, const char *prefix,
}
if (!no_verify &&
- run_commit_hook(use_editor, index_file, "commit-msg", git_path_commit_editmsg(), NULL)) {
+ run_commit_hook(use_editor, index_file, NULL, "commit-msg",
+ git_path_commit_editmsg(), NULL)) {
return 0;
}
@@ -1242,8 +1246,6 @@ static int parse_and_validate_options(int argc, const char *argv[],
struct commit *current_head,
struct wt_status *s)
{
- int f = 0;
-
argc = parse_options(argc, argv, prefix, options, usage, 0);
finalize_deferred_config(s);
@@ -1251,7 +1253,7 @@ static int parse_and_validate_options(int argc, const char *argv[],
force_author = find_author_by_nickname(force_author);
if (force_author && renew_authorship)
- die(_("Using both --reset-author and --author does not make sense"));
+ die(_("options '%s' and '%s' cannot be used together"), "--reset-author", "--author");
if (logfile || have_option_m || use_message)
use_editor = 0;
@@ -1268,20 +1270,16 @@ static int parse_and_validate_options(int argc, const char *argv[],
die(_("You are in the middle of a rebase -- cannot amend."));
}
if (fixup_message && squash_message)
- die(_("Options --squash and --fixup cannot be used together"));
- if (use_message)
- f++;
- if (edit_message)
- f++;
- if (fixup_message)
- f++;
- if (logfile)
- f++;
- if (f > 1)
- die(_("Only one of -c/-C/-F/--fixup can be used."));
- if (have_option_m && (edit_message || use_message || logfile))
- die((_("Option -m cannot be combined with -c/-C/-F.")));
- if (f || have_option_m)
+ die(_("options '%s' and '%s' cannot be used together"), "--squash", "--fixup");
+ die_for_incompatible_opt4(!!use_message, "-C",
+ !!edit_message, "-c",
+ !!logfile, "-F",
+ !!fixup_message, "--fixup");
+ die_for_incompatible_opt4(have_option_m, "-m",
+ !!edit_message, "-c",
+ !!use_message, "-C",
+ !!logfile, "-F");
+ if (use_message || edit_message || logfile || fixup_message || have_option_m)
template_file = NULL;
if (edit_message)
use_message = edit_message;
@@ -1306,9 +1304,10 @@ static int parse_and_validate_options(int argc, const char *argv[],
if (patch_interactive)
interactive = 1;
- if (also + only + all + interactive > 1)
- die(_("Only one of --include/--only/--all/--interactive/--patch can be used."));
-
+ die_for_incompatible_opt4(also, "-i/--include",
+ only, "-o/--only",
+ all, "-a/--all",
+ interactive, "--interactive/-p/--patch");
if (fixup_message) {
/*
* We limit --fixup's suboptions to only alpha characters.
@@ -1845,7 +1844,8 @@ int cmd_commit(int argc, const char **argv, const char *prefix)
repo_rerere(the_repository, 0);
run_auto_maintenance(quiet);
- run_commit_hook(use_editor, get_index_file(), "post-commit", NULL);
+ run_commit_hook(use_editor, get_index_file(), NULL, "post-commit",
+ NULL);
if (amend && !no_post_rewrite) {
commit_post_rewrite(the_repository, current_head, &oid);
}
diff --git a/builtin/config.c b/builtin/config.c
index 542d8d0..e7b88a9 100644
--- a/builtin/config.c
+++ b/builtin/config.c
@@ -151,7 +151,7 @@ static struct option builtin_config_options[] = {
OPT_BIT(0, "get-color", &actions, N_("find the color configured: slot [default]"), ACTION_GET_COLOR),
OPT_BIT(0, "get-colorbool", &actions, N_("find the color setting: slot [stdout-is-tty]"), ACTION_GET_COLORBOOL),
OPT_GROUP(N_("Type")),
- OPT_CALLBACK('t', "type", &type, "", N_("value is given this type"), option_parse_type),
+ OPT_CALLBACK('t', "type", &type, N_("type"), N_("value is given this type"), option_parse_type),
OPT_CALLBACK_VALUE(0, "bool", &type, N_("value is \"true\" or \"false\""), TYPE_BOOL),
OPT_CALLBACK_VALUE(0, "int", &type, N_("value is decimal number"), TYPE_INT),
OPT_CALLBACK_VALUE(0, "bool-or-int", &type, N_("value is --bool or --int"), TYPE_BOOL_OR_INT),
@@ -612,7 +612,7 @@ static int get_urlmatch(const char *var, const char *url)
strbuf_release(&matched->value);
}
- string_list_clear(&config.vars, 1);
+ urlmatch_config_release(&config);
string_list_clear(&values, 1);
free(config.url.url);
diff --git a/builtin/count-objects.c b/builtin/count-objects.c
index 3fae474..07b9419 100644
--- a/builtin/count-objects.c
+++ b/builtin/count-objects.c
@@ -87,7 +87,7 @@ static int print_alternate(struct object_directory *odb, void *data)
}
static char const * const count_objects_usage[] = {
- N_("git count-objects [-v] [-H | --human-readable]"),
+ "git count-objects [-v] [-H | --human-readable]",
NULL
};
diff --git a/builtin/diff.c b/builtin/diff.c
index fa46833..bb7fafc 100644
--- a/builtin/diff.c
+++ b/builtin/diff.c
@@ -28,9 +28,9 @@ static const char builtin_diff_usage[] =
"git diff [<options>] [<commit>] [--] [<path>...]\n"
" or: git diff [<options>] --cached [--merge-base] [<commit>] [--] [<path>...]\n"
" or: git diff [<options>] [--merge-base] <commit> [<commit>...] <commit> [--] [<path>...]\n"
-" or: git diff [<options>] <commit>...<commit>] [--] [<path>...]\n"
-" or: git diff [<options>] <blob> <blob>]\n"
-" or: git diff [<options>] --no-index [--] <path> <path>]\n"
+" or: git diff [<options>] <commit>...<commit> [--] [<path>...]\n"
+" or: git diff [<options>] <blob> <blob>\n"
+" or: git diff [<options>] --no-index [--] <path> <path>\n"
COMMON_DIFF_OPTIONS_HELP;
static const char *blob_path(struct object_array_entry *entry)
diff --git a/builtin/difftool.c b/builtin/difftool.c
index c79fbbf..faa3507 100644
--- a/builtin/difftool.c
+++ b/builtin/difftool.c
@@ -732,8 +732,9 @@ int cmd_difftool(int argc, const char **argv, const char *prefix)
} else if (dir_diff)
die(_("options '%s' and '%s' cannot be used together"), "--dir-diff", "--no-index");
- if (use_gui_tool + !!difftool_cmd + !!extcmd > 1)
- die(_("options '%s', '%s', and '%s' cannot be used together"), "--gui", "--tool", "--extcmd");
+ die_for_incompatible_opt3(use_gui_tool, "--gui",
+ !!difftool_cmd, "--tool",
+ !!extcmd, "--extcmd");
if (use_gui_tool)
setenv("GIT_MERGETOOL_GUI", "true", 1);
diff --git a/builtin/fast-export.c b/builtin/fast-export.c
index 9f1c730..a7d7269 100644
--- a/builtin/fast-export.c
+++ b/builtin/fast-export.c
@@ -26,7 +26,7 @@
#include "commit-slab.h"
static const char *fast_export_usage[] = {
- N_("git fast-export [rev-list-opts]"),
+ N_("git fast-export [<rev-list-opts>]"),
NULL
};
@@ -300,7 +300,7 @@ static void export_blob(const struct object_id *oid)
if (!buf)
die("could not read blob %s", oid_to_hex(oid));
if (check_object_signature(the_repository, oid, buf, size,
- type_name(type), NULL) < 0)
+ type) < 0)
die("oid mismatch in blob %s", oid_to_hex(oid));
object = parse_object_buffer(the_repository, oid, type,
size, buf, &eaten);
diff --git a/builtin/fast-import.c b/builtin/fast-import.c
index 2b2e28b..28d3193 100644
--- a/builtin/fast-import.c
+++ b/builtin/fast-import.c
@@ -19,6 +19,7 @@
#include "mem-pool.h"
#include "commit-reach.h"
#include "khash.h"
+#include "date.h"
#define PACK_ID_BITS 16
#define MAX_PACK_ID ((1<<PACK_ID_BITS)-1)
@@ -176,8 +177,9 @@ static int global_argc;
static const char **global_argv;
/* Memory pools */
-static struct mem_pool fi_mem_pool = {NULL, 2*1024*1024 -
- sizeof(struct mp_block), 0 };
+static struct mem_pool fi_mem_pool = {
+ .block_alloc = 2*1024*1024 - sizeof(struct mp_block),
+};
/* Atom management */
static unsigned int atom_table_sz = 4451;
@@ -205,7 +207,9 @@ static int import_marks_file_done;
static int relative_marks_paths;
/* Our last blob */
-static struct last_object last_blob = { STRBUF_INIT, 0, 0, 0 };
+static struct last_object last_blob = {
+ .data = STRBUF_INIT,
+ };
/* Tree management */
static unsigned int tree_entry_alloc = 1000;
@@ -231,7 +235,10 @@ static struct tag *last_tag;
static whenspec_type whenspec = WHENSPEC_RAW;
static struct strbuf command_buf = STRBUF_INIT;
static int unread_command_buf;
-static struct recent_command cmd_hist = {&cmd_hist, &cmd_hist, NULL};
+static struct recent_command cmd_hist = {
+ .prev = &cmd_hist,
+ .next = &cmd_hist,
+};
static struct recent_command *cmd_tail = &cmd_hist;
static struct recent_command *rc_free;
static unsigned int cmd_save = 100;
@@ -858,7 +865,7 @@ static void end_packfile(void)
struct tag *t;
close_pack_windows(pack_data);
- finalize_hashfile(pack_file, cur_pack_oid.hash, 0);
+ finalize_hashfile(pack_file, cur_pack_oid.hash, FSYNC_COMPONENT_PACK, 0);
fixup_pack_header_footer(pack_data->pack_fd, pack_data->hash,
pack_data->pack_name, object_count,
cur_pack_oid.hash, pack_size);
@@ -937,8 +944,8 @@ static int store_object(
git_hash_ctx c;
git_zstream s;
- hdrlen = xsnprintf((char *)hdr, sizeof(hdr), "%s %lu",
- type_name(type), (unsigned long)dat->len) + 1;
+ hdrlen = format_object_header((char *)hdr, sizeof(hdr), type,
+ dat->len);
the_hash_algo->init_fn(&c);
the_hash_algo->update_fn(&c, hdr, hdrlen);
the_hash_algo->update_fn(&c, dat->buf, dat->len);
@@ -1091,7 +1098,7 @@ static void stream_blob(uintmax_t len, struct object_id *oidout, uintmax_t mark)
hashfile_checkpoint(pack_file, &checkpoint);
offset = checkpoint.offset;
- hdrlen = xsnprintf((char *)out_buf, out_sz, "blob %" PRIuMAX, len) + 1;
+ hdrlen = format_object_header((char *)out_buf, out_sz, OBJ_BLOB, len);
the_hash_algo->init_fn(&c);
the_hash_algo->update_fn(&c, out_buf, hdrlen);
@@ -2483,7 +2490,7 @@ static void note_change_n(const char *p, struct branch *b, unsigned char *old_fa
unsigned long size;
char *buf = read_object_with_reference(the_repository,
&commit_oid,
- commit_type, &size,
+ OBJ_COMMIT, &size,
&commit_oid);
if (!buf || size < the_hash_algo->hexsz + 6)
die("Not a valid commit: %s", p);
@@ -2555,7 +2562,7 @@ static void parse_from_existing(struct branch *b)
char *buf;
buf = read_object_with_reference(the_repository,
- &b->oid, commit_type, &size,
+ &b->oid, OBJ_COMMIT, &size,
&b->oid);
parse_from_commit(b, buf, size);
free(buf);
@@ -2651,7 +2658,7 @@ static struct hash_list *parse_merge(unsigned int *count)
unsigned long size;
char *buf = read_object_with_reference(the_repository,
&n->oid,
- commit_type,
+ OBJ_COMMIT,
&size, &n->oid);
if (!buf || size < the_hash_algo->hexsz + 6)
die("Not a valid commit: %s", from);
diff --git a/builtin/fetch-pack.c b/builtin/fetch-pack.c
index c2d96f4..f045bbb 100644
--- a/builtin/fetch-pack.c
+++ b/builtin/fetch-pack.c
@@ -153,11 +153,15 @@ int cmd_fetch_pack(int argc, const char **argv, const char *prefix)
args.from_promisor = 1;
continue;
}
- if (skip_prefix(arg, ("--" CL_ARG__FILTER "="), &arg)) {
+ if (!strcmp("--refetch", arg)) {
+ args.refetch = 1;
+ continue;
+ }
+ if (skip_prefix(arg, ("--filter="), &arg)) {
parse_list_objects_filter(&args.filter_options, arg);
continue;
}
- if (!strcmp(arg, ("--no-" CL_ARG__FILTER))) {
+ if (!strcmp(arg, ("--no-filter"))) {
list_objects_filter_set_no_filter(&args.filter_options);
continue;
}
diff --git a/builtin/fetch.c b/builtin/fetch.c
index 5f06b21..e3791f0 100644
--- a/builtin/fetch.c
+++ b/builtin/fetch.c
@@ -59,7 +59,7 @@ static int prune_tags = -1; /* unspecified */
static int all, append, dry_run, force, keep, multiple, update_head_ok;
static int write_fetch_head = 1;
-static int verbosity, deepen_relative, set_upstream;
+static int verbosity, deepen_relative, set_upstream, refetch;
static int progress = -1;
static int enable_auto_gc = 1;
static int tags = TAGS_DEFAULT, unshallow, update_shallow, deepen;
@@ -76,6 +76,7 @@ static struct transport *gtransport;
static struct transport *gsecondary;
static const char *submodule_prefix = "";
static int recurse_submodules = RECURSE_SUBMODULES_DEFAULT;
+static int recurse_submodules_cli = RECURSE_SUBMODULES_DEFAULT;
static int recurse_submodules_default = RECURSE_SUBMODULES_ON_DEMAND;
static int shown_url = 0;
static struct refspec refmap = REFSPEC_INIT_FETCH;
@@ -167,7 +168,7 @@ static struct option builtin_fetch_options[] = {
N_("prune remote-tracking branches no longer on remote")),
OPT_BOOL('P', "prune-tags", &prune_tags,
N_("prune local tags no longer on remote and clobber changed tags")),
- OPT_CALLBACK_F(0, "recurse-submodules", &recurse_submodules, N_("on-demand"),
+ OPT_CALLBACK_F(0, "recurse-submodules", &recurse_submodules_cli, N_("on-demand"),
N_("control recursive fetching of submodules"),
PARSE_OPT_OPTARG, option_fetch_parse_recurse_submodules),
OPT_BOOL(0, "dry-run", &dry_run,
@@ -189,6 +190,9 @@ static struct option builtin_fetch_options[] = {
OPT_SET_INT_F(0, "unshallow", &unshallow,
N_("convert to a complete repository"),
1, PARSE_OPT_NONEG),
+ OPT_SET_INT_F(0, "refetch", &refetch,
+ N_("re-fetch without negotiating common commits"),
+ 1, PARSE_OPT_NONEG),
{ OPTION_STRING, 0, "submodule-prefix", &submodule_prefix, N_("dir"),
N_("prepend this to submodule path output"), PARSE_OPT_HIDDEN },
OPT_CALLBACK_F(0, "recurse-submodules-default",
@@ -348,7 +352,19 @@ static void clear_item(struct refname_hash_entry *item)
item->ignore = 1;
}
+
+static void add_already_queued_tags(const char *refname,
+ const struct object_id *old_oid,
+ const struct object_id *new_oid,
+ void *cb_data)
+{
+ struct hashmap *queued_tags = cb_data;
+ if (starts_with(refname, "refs/tags/") && new_oid)
+ (void) refname_hash_add(queued_tags, refname, new_oid);
+}
+
static void find_non_local_tags(const struct ref *refs,
+ struct ref_transaction *transaction,
struct ref **head,
struct ref ***tail)
{
@@ -366,6 +382,16 @@ static void find_non_local_tags(const struct ref *refs,
create_fetch_oidset(head, &fetch_oids);
for_each_ref(add_one_refname, &existing_refs);
+
+ /*
+ * If we already have a transaction, then we need to filter out all
+ * tags which have already been queued up.
+ */
+ if (transaction)
+ ref_transaction_for_each_queued_update(transaction,
+ add_already_queued_tags,
+ &existing_refs);
+
for (ref = refs; ref; ref = ref->next) {
if (!starts_with(ref->name, "refs/tags/"))
continue;
@@ -599,7 +625,7 @@ static struct ref *get_ref_map(struct remote *remote,
/* also fetch all tags */
get_fetch_map(remote_refs, tag_refspec, &tail, 0);
else if (tags == TAGS_DEFAULT && *autotags)
- find_non_local_tags(remote_refs, &ref_map, &tail);
+ find_non_local_tags(remote_refs, NULL, &ref_map, &tail);
/* Now append any refs to be updated opportunistically: */
*tail = orefs;
@@ -763,8 +789,8 @@ static void prepare_format_display(struct ref *ref_map)
else if (!strcasecmp(format, "compact"))
compact_format = 1;
else
- die(_("configuration fetch.output contains invalid value %s"),
- format);
+ die(_("invalid value for '%s': '%s'"),
+ "fetch.output", format);
for (rm = ref_map; rm; rm = rm->next) {
if (rm->status == REF_STATUS_REJECT_SHALLOW ||
@@ -1082,22 +1108,20 @@ N_("it took %.2f seconds to check forced updates; you can use\n"
"to avoid this check\n");
static int store_updated_refs(const char *raw_url, const char *remote_name,
- int connectivity_checked, struct ref *ref_map,
- struct worktree **worktrees)
+ int connectivity_checked,
+ struct ref_transaction *transaction, struct ref *ref_map,
+ struct fetch_head *fetch_head, struct worktree **worktrees)
{
- struct fetch_head fetch_head;
int url_len, i, rc = 0;
struct strbuf note = STRBUF_INIT, err = STRBUF_INIT;
- struct ref_transaction *transaction = NULL;
const char *what, *kind;
struct ref *rm;
char *url;
int want_status;
- int summary_width = transport_summary_width(ref_map);
+ int summary_width = 0;
- rc = open_fetch_head(&fetch_head);
- if (rc)
- return -1;
+ if (verbosity >= 0)
+ summary_width = transport_summary_width(ref_map);
if (raw_url)
url = transport_anonymize_url(raw_url);
@@ -1114,14 +1138,6 @@ static int store_updated_refs(const char *raw_url, const char *remote_name,
}
}
- if (atomic_fetch) {
- transaction = ref_transaction_begin(&err);
- if (!transaction) {
- error("%s", err.buf);
- goto abort;
- }
- }
-
prepare_format_display(ref_map);
/*
@@ -1133,7 +1149,6 @@ static int store_updated_refs(const char *raw_url, const char *remote_name,
want_status <= FETCH_HEAD_IGNORE;
want_status++) {
for (rm = ref_map; rm; rm = rm->next) {
- struct commit *commit = NULL;
struct ref *ref = NULL;
if (rm->status == REF_STATUS_REJECT_SHALLOW) {
@@ -1144,21 +1159,34 @@ static int store_updated_refs(const char *raw_url, const char *remote_name,
}
/*
- * References in "refs/tags/" are often going to point
- * to annotated tags, which are not part of the
- * commit-graph. We thus only try to look up refs in
- * the graph which are not in that namespace to not
- * regress performance in repositories with many
- * annotated tags.
+ * When writing FETCH_HEAD we need to determine whether
+ * we already have the commit or not. If not, then the
+ * reference is not for merge and needs to be written
+ * to FETCH_HEAD after other commits which we already
+ * have. We're not interested in this property though
+ * in case FETCH_HEAD is not to be updated, so we can
+ * skip the classification in that case.
*/
- if (!starts_with(rm->name, "refs/tags/"))
- commit = lookup_commit_in_graph(the_repository, &rm->old_oid);
- if (!commit) {
- commit = lookup_commit_reference_gently(the_repository,
- &rm->old_oid,
- 1);
- if (!commit)
- rm->fetch_head_status = FETCH_HEAD_NOT_FOR_MERGE;
+ if (fetch_head->fp) {
+ struct commit *commit = NULL;
+
+ /*
+ * References in "refs/tags/" are often going to point
+ * to annotated tags, which are not part of the
+ * commit-graph. We thus only try to look up refs in
+ * the graph which are not in that namespace to not
+ * regress performance in repositories with many
+ * annotated tags.
+ */
+ if (!starts_with(rm->name, "refs/tags/"))
+ commit = lookup_commit_in_graph(the_repository, &rm->old_oid);
+ if (!commit) {
+ commit = lookup_commit_reference_gently(the_repository,
+ &rm->old_oid,
+ 1);
+ if (!commit)
+ rm->fetch_head_status = FETCH_HEAD_NOT_FOR_MERGE;
+ }
}
if (rm->fetch_head_status != want_status)
@@ -1205,7 +1233,7 @@ static int store_updated_refs(const char *raw_url, const char *remote_name,
strbuf_addf(&note, "'%s' of ", what);
}
- append_fetch_head(&fetch_head, &rm->old_oid,
+ append_fetch_head(fetch_head, &rm->old_oid,
rm->fetch_head_status,
note.buf, url, url_len);
@@ -1237,17 +1265,6 @@ static int store_updated_refs(const char *raw_url, const char *remote_name,
}
}
- if (!rc && transaction) {
- rc = ref_transaction_commit(transaction, &err);
- if (rc) {
- error("%s", err.buf);
- goto abort;
- }
- }
-
- if (!rc)
- commit_fetch_head(&fetch_head);
-
if (rc & STORE_REF_ERROR_DF_CONFLICT)
error(_("some local refs could not be updated; try running\n"
" 'git remote prune %s' to remove any old, conflicting "
@@ -1265,9 +1282,7 @@ static int store_updated_refs(const char *raw_url, const char *remote_name,
abort:
strbuf_release(&note);
strbuf_release(&err);
- ref_transaction_free(transaction);
free(url);
- close_fetch_head(&fetch_head);
return rc;
}
@@ -1293,6 +1308,14 @@ static int check_exist_and_connected(struct ref *ref_map)
return -1;
/*
+ * Similarly, if we need to refetch, we always want to perform a full
+ * fetch ignoring existing objects.
+ */
+ if (refetch)
+ return -1;
+
+ /*
* check_connected() allows objects to merely be promised, but
* we need all direct targets to exist.
*/
@@ -1307,7 +1330,9 @@ static int check_exist_and_connected(struct ref *ref_map)
}
static int fetch_and_consume_refs(struct transport *transport,
+ struct ref_transaction *transaction,
struct ref *ref_map,
+ struct fetch_head *fetch_head,
struct worktree **worktrees)
{
int connectivity_checked = 1;
@@ -1330,7 +1355,8 @@ static int fetch_and_consume_refs(struct transport *transport,
trace2_region_enter("fetch", "consume_refs", the_repository);
ret = store_updated_refs(transport->url, transport->remote->name,
- connectivity_checked, ref_map, worktrees);
+ connectivity_checked, transaction, ref_map,
+ fetch_head, worktrees);
trace2_region_leave("fetch", "consume_refs", the_repository);
out:
@@ -1338,13 +1364,15 @@ out:
return ret;
}
-static int prune_refs(struct refspec *rs, struct ref *ref_map,
+static int prune_refs(struct refspec *rs,
+ struct ref_transaction *transaction,
+ struct ref *ref_map,
const char *raw_url)
{
int url_len, i, result = 0;
struct ref *ref, *stale_refs = get_stale_heads(rs, ref_map);
+ struct strbuf err = STRBUF_INIT;
char *url;
- int summary_width = transport_summary_width(stale_refs);
const char *dangling_msg = dry_run
? _(" (%s will become dangling)")
: _(" (%s has become dangling)");
@@ -1363,16 +1391,27 @@ static int prune_refs(struct refspec *rs, struct ref *ref_map,
url_len = i - 3;
if (!dry_run) {
- struct string_list refnames = STRING_LIST_INIT_NODUP;
+ if (transaction) {
+ for (ref = stale_refs; ref; ref = ref->next) {
+ result = ref_transaction_delete(transaction, ref->name, NULL, 0,
+ "fetch: prune", &err);
+ if (result)
+ goto cleanup;
+ }
+ } else {
+ struct string_list refnames = STRING_LIST_INIT_NODUP;
- for (ref = stale_refs; ref; ref = ref->next)
- string_list_append(&refnames, ref->name);
+ for (ref = stale_refs; ref; ref = ref->next)
+ string_list_append(&refnames, ref->name);
- result = delete_refs("fetch: prune", &refnames, 0);
- string_list_clear(&refnames, 0);
+ result = delete_refs("fetch: prune", &refnames, 0);
+ string_list_clear(&refnames, 0);
+ }
}
if (verbosity >= 0) {
+ int summary_width = transport_summary_width(stale_refs);
+
for (ref = stale_refs; ref; ref = ref->next) {
struct strbuf sb = STRBUF_INIT;
if (!shown_url) {
@@ -1388,6 +1427,8 @@ static int prune_refs(struct refspec *rs, struct ref *ref_map,
}
}
+cleanup:
+ strbuf_release(&err);
free(url);
free_refs(stale_refs);
return result;
@@ -1487,6 +1528,8 @@ static struct transport *prepare_transport(struct remote *remote, int deepen)
set_option(transport, TRANS_OPT_DEEPEN_RELATIVE, "yes");
if (update_shallow)
set_option(transport, TRANS_OPT_UPDATE_SHALLOW, "yes");
+ if (refetch)
+ set_option(transport, TRANS_OPT_REFETCH, "yes");
if (filter_options.choice) {
const char *spec =
expand_list_objects_filter_spec(&filter_options);
@@ -1502,10 +1545,13 @@ static struct transport *prepare_transport(struct remote *remote, int deepen)
return transport;
}
-static void backfill_tags(struct transport *transport, struct ref *ref_map,
- struct worktree **worktrees)
+static int backfill_tags(struct transport *transport,
+ struct ref_transaction *transaction,
+ struct ref *ref_map,
+ struct fetch_head *fetch_head,
+ struct worktree **worktrees)
{
- int cannot_reuse;
+ int retcode, cannot_reuse;
/*
* Once we have set TRANS_OPT_DEEPEN_SINCE, we can't unset it
@@ -1524,18 +1570,21 @@ static void backfill_tags(struct transport *transport, struct ref *ref_map,
transport_set_option(transport, TRANS_OPT_FOLLOWTAGS, NULL);
transport_set_option(transport, TRANS_OPT_DEPTH, "0");
transport_set_option(transport, TRANS_OPT_DEEPEN_RELATIVE, NULL);
- fetch_and_consume_refs(transport, ref_map, worktrees);
+ retcode = fetch_and_consume_refs(transport, transaction, ref_map, fetch_head, worktrees);
if (gsecondary) {
transport_disconnect(gsecondary);
gsecondary = NULL;
}
+
+ return retcode;
}
static int do_fetch(struct transport *transport,
struct refspec *rs)
{
- struct ref *ref_map;
+ struct ref_transaction *transaction = NULL;
+ struct ref *ref_map = NULL;
int autotags = (transport->remote->fetch_tags == 1);
int retcode = 0;
const struct ref *remote_refs;
@@ -1543,6 +1592,8 @@ static int do_fetch(struct transport *transport,
TRANSPORT_LS_REFS_OPTIONS_INIT;
int must_list_refs = 1;
struct worktree **worktrees = get_worktrees();
+ struct fetch_head fetch_head = { 0 };
+ struct strbuf err = STRBUF_INIT;
if (tags == TAGS_DEFAULT) {
if (transport->remote->fetch_tags == 2)
@@ -1593,13 +1644,25 @@ static int do_fetch(struct transport *transport,
} else
remote_refs = NULL;
- strvec_clear(&transport_ls_refs_options.ref_prefixes);
+ transport_ls_refs_options_release(&transport_ls_refs_options);
ref_map = get_ref_map(transport->remote, remote_refs, rs,
tags, &autotags);
if (!update_head_ok)
check_not_current_branch(ref_map, worktrees);
+ retcode = open_fetch_head(&fetch_head);
+ if (retcode)
+ goto cleanup;
+
+ if (atomic_fetch) {
+ transaction = ref_transaction_begin(&err);
+ if (!transaction) {
+ retcode = error("%s", err.buf);
+ goto cleanup;
+ }
+ }
+
if (tags == TAGS_DEFAULT && autotags)
transport_set_option(transport, TRANS_OPT_FOLLOWTAGS, "1");
if (prune) {
@@ -1609,19 +1672,61 @@ static int do_fetch(struct transport *transport,
* don't care whether --tags was specified.
*/
if (rs->nr) {
- prune_refs(rs, ref_map, transport->url);
+ retcode = prune_refs(rs, transaction, ref_map, transport->url);
} else {
- prune_refs(&transport->remote->fetch,
- ref_map,
- transport->url);
+ retcode = prune_refs(&transport->remote->fetch,
+ transaction, ref_map,
+ transport->url);
}
+ if (retcode != 0)
+ retcode = 1;
}
- if (fetch_and_consume_refs(transport, ref_map, worktrees)) {
- free_refs(ref_map);
+
+ if (fetch_and_consume_refs(transport, transaction, ref_map, &fetch_head, worktrees)) {
retcode = 1;
goto cleanup;
}
+ /*
+ * If neither --no-tags nor --tags was specified, do automated tag
+ * following.
+ */
+ if (tags == TAGS_DEFAULT && autotags) {
+ struct ref *tags_ref_map = NULL, **tail = &tags_ref_map;
+
+ find_non_local_tags(remote_refs, transaction, &tags_ref_map, &tail);
+ if (tags_ref_map) {
+ /*
+ * If backfilling of tags fails then we want to tell
+ * the user so, but we have to continue regardless to
+ * populate upstream information of the references we
+ * have already fetched above. The exception though is
+ * when `--atomic` is passed: in that case we'll abort
+ * the transaction and don't commit anything.
+ */
+ if (backfill_tags(transport, transaction, tags_ref_map,
+ &fetch_head, worktrees))
+ retcode = 1;
+ }
+
+ free_refs(tags_ref_map);
+ }
+
+ if (transaction) {
+ if (retcode)
+ goto cleanup;
+
+ retcode = ref_transaction_commit(transaction, &err);
+ if (retcode) {
+ error("%s", err.buf);
+ ref_transaction_free(transaction);
+ transaction = NULL;
+ goto cleanup;
+ }
+ }
+
+ commit_fetch_head(&fetch_head);
+
if (set_upstream) {
struct branch *branch = branch_get("HEAD");
struct ref *rm;
@@ -1641,7 +1746,7 @@ static int do_fetch(struct transport *transport,
if (!rm->peer_ref) {
if (source_ref) {
warning(_("multiple branches detected, incompatible with --set-upstream"));
- goto skip;
+ goto cleanup;
} else {
source_ref = rm;
}
@@ -1655,7 +1760,7 @@ static int do_fetch(struct transport *transport,
warning(_("could not set upstream of HEAD to '%s' from '%s' when "
"it does not point to any branch."),
shortname, transport->remote->name);
- goto skip;
+ goto cleanup;
}
if (!strcmp(source_ref->name, "HEAD") ||
@@ -1675,21 +1780,16 @@ static int do_fetch(struct transport *transport,
"you need to specify exactly one branch with the --set-upstream option"));
}
}
-skip:
- free_refs(ref_map);
- /* if neither --no-tags nor --tags was specified, do automated tag
- * following ... */
- if (tags == TAGS_DEFAULT && autotags) {
- struct ref **tail = &ref_map;
- ref_map = NULL;
- find_non_local_tags(remote_refs, &ref_map, &tail);
- if (ref_map)
- backfill_tags(transport, ref_map, worktrees);
- free_refs(ref_map);
+cleanup:
+ if (retcode && transaction) {
+ ref_transaction_abort(transaction, &err);
+ error("%s", err.buf);
}
-cleanup:
+ close_fetch_head(&fetch_head);
+ strbuf_release(&err);
+ free_refs(ref_map);
free_worktrees(worktrees);
return retcode;
}
@@ -2014,11 +2114,35 @@ int cmd_fetch(int argc, const char **argv, const char *prefix)
}
git_config(git_fetch_config, NULL);
- prepare_repo_settings(the_repository);
- the_repository->settings.command_requires_full_index = 0;
+ if (the_repository->gitdir) {
+ prepare_repo_settings(the_repository);
+ the_repository->settings.command_requires_full_index = 0;
+ }
argc = parse_options(argc, argv, prefix,
builtin_fetch_options, builtin_fetch_usage, 0);
+
+ if (recurse_submodules_cli != RECURSE_SUBMODULES_DEFAULT)
+ recurse_submodules = recurse_submodules_cli;
+
+ if (negotiate_only) {
+ switch (recurse_submodules_cli) {
+ case RECURSE_SUBMODULES_OFF:
+ case RECURSE_SUBMODULES_DEFAULT:
+ /*
+ * --negotiate-only should never recurse into
+ * submodules. Skip it by setting recurse_submodules to
+ * RECURSE_SUBMODULES_OFF.
+ */
+ recurse_submodules = RECURSE_SUBMODULES_OFF;
+ break;
+
+ default:
+ die(_("options '%s' and '%s' cannot be used together"),
+ "--negotiate-only", "--recurse-submodules");
+ }
+ }
+
if (recurse_submodules != RECURSE_SUBMODULES_OFF) {
int *sfjc = submodule_fetch_jobs_config == -1
? &submodule_fetch_jobs_config : NULL;
@@ -2029,7 +2153,7 @@ int cmd_fetch(int argc, const char **argv, const char *prefix)
}
if (negotiate_only && !negotiation_tip.nr)
- die(_("--negotiate-only needs one or more --negotiate-tip=*"));
+ die(_("--negotiate-only needs one or more --negotiation-tip=*"));
if (deepen_relative) {
if (deepen_relative < 0)
@@ -2100,7 +2224,8 @@ int cmd_fetch(int argc, const char **argv, const char *prefix)
gtransport->smart_options->acked_commits = &acked_commits;
} else {
warning(_("protocol does not support --negotiate-only, exiting"));
- return 1;
+ result = 1;
+ goto cleanup;
}
if (server_options.nr)
gtransport->server_options = &server_options;
@@ -2146,17 +2271,26 @@ int cmd_fetch(int argc, const char **argv, const char *prefix)
max_children = fetch_parallel_config;
add_options_to_argv(&options);
- result = fetch_populated_submodules(the_repository,
- &options,
- submodule_prefix,
- recurse_submodules,
- recurse_submodules_default,
- verbosity < 0,
- max_children);
+ result = fetch_submodules(the_repository,
+ &options,
+ submodule_prefix,
+ recurse_submodules,
+ recurse_submodules_default,
+ verbosity < 0,
+ max_children);
strvec_clear(&options);
}
- string_list_clear(&list, 0);
+ /*
+ * Skip irrelevant tasks because we know objects were not
+ * fetched.
+ *
+ * NEEDSWORK: as a future optimization, we can return early
+ * whenever objects were not fetched e.g. if we already have all
+ * of them.
+ */
+ if (negotiate_only)
+ goto cleanup;
prepare_repo_settings(the_repository);
if (fetch_write_commit_graph > 0 ||
@@ -2172,8 +2306,27 @@ int cmd_fetch(int argc, const char **argv, const char *prefix)
NULL);
}
- if (enable_auto_gc)
+ if (enable_auto_gc) {
+ if (refetch) {
+ /*
+ * Hint auto-maintenance strongly to encourage repacking,
+ * but respect config settings disabling it.
+ */
+ int opt_val;
+ if (git_config_get_int("gc.autopacklimit", &opt_val))
+ opt_val = -1;
+ if (opt_val != 0)
+ git_config_push_parameter("gc.autoPackLimit=1");
+
+ if (git_config_get_int("maintenance.incremental-repack.auto", &opt_val))
+ opt_val = -1;
+ if (opt_val != 0)
+ git_config_push_parameter("maintenance.incremental-repack.auto=-1");
+ }
run_auto_maintenance(verbosity < 0);
+ }
+ cleanup:
+ string_list_clear(&list, 0);
return result;
}
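
The fetch.c changes above consolidate FETCH_HEAD writing, pruning, and tag backfilling under one optional ref transaction: do_fetch() begins the transaction when --atomic is given, prune_refs() and store_updated_refs() queue their updates into it, and the transaction is committed (or aborted on any failure) in a single place. The general shape of that refs API round trip is sketched below; it is illustrative only, assumes the refs.h signatures of this Git version, and uses made-up ref names and a made-up helper name.

	#include "cache.h"
	#include "refs.h"

	/* Hypothetical helper showing the begin/queue/commit/abort pattern. */
	static int update_refs_atomically(const struct object_id *new_oid)
	{
		struct strbuf err = STRBUF_INIT;
		struct ref_transaction *transaction;
		int ret = 0;

		transaction = ref_transaction_begin(&err);
		if (!transaction) {
			ret = error("%s", err.buf);
			goto out;
		}

		/* Queue updates and deletions; nothing is applied yet. */
		if (ref_transaction_update(transaction, "refs/remotes/origin/topic",
					   new_oid, NULL, 0, "fetch", &err) ||
		    ref_transaction_delete(transaction, "refs/remotes/origin/gone",
					   NULL, 0, "fetch: prune", &err)) {
			ret = error("%s", err.buf);
			goto out;
		}

		/* Either every queued update becomes visible, or none does. */
		ret = ref_transaction_commit(transaction, &err);
		if (ret)
			error("%s", err.buf);

	out:
		ref_transaction_free(transaction);
		strbuf_release(&err);
		return ret;
	}

Without --atomic the behaviour is unchanged: no transaction is created, prune_refs() falls back to delete_refs(), and each ref is updated individually as before.
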
diff --git a/builtin/fsmonitor--daemon.c b/builtin/fsmonitor--daemon.c
new file mode 100644
index 0000000..46be55a
--- /dev/null
+++ b/builtin/fsmonitor--daemon.c
@@ -0,0 +1,1479 @@
+#include "builtin.h"
+#include "config.h"
+#include "parse-options.h"
+#include "fsmonitor.h"
+#include "fsmonitor-ipc.h"
+#include "compat/fsmonitor/fsm-listen.h"
+#include "fsmonitor--daemon.h"
+#include "simple-ipc.h"
+#include "khash.h"
+#include "pkt-line.h"
+
+static const char * const builtin_fsmonitor__daemon_usage[] = {
+ N_("git fsmonitor--daemon start [<options>]"),
+ N_("git fsmonitor--daemon run [<options>]"),
+ N_("git fsmonitor--daemon stop"),
+ N_("git fsmonitor--daemon status"),
+ NULL
+};
+
+#ifdef HAVE_FSMONITOR_DAEMON_BACKEND
+/*
+ * Global state loaded from config.
+ */
+#define FSMONITOR__IPC_THREADS "fsmonitor.ipcthreads"
+static int fsmonitor__ipc_threads = 8;
+
+#define FSMONITOR__START_TIMEOUT "fsmonitor.starttimeout"
+static int fsmonitor__start_timeout_sec = 60;
+
+#define FSMONITOR__ANNOUNCE_STARTUP "fsmonitor.announcestartup"
+static int fsmonitor__announce_startup = 0;
+
+static int fsmonitor_config(const char *var, const char *value, void *cb)
+{
+ if (!strcmp(var, FSMONITOR__IPC_THREADS)) {
+ int i = git_config_int(var, value);
+ if (i < 1)
+ return error(_("value of '%s' out of range: %d"),
+ FSMONITOR__IPC_THREADS, i);
+ fsmonitor__ipc_threads = i;
+ return 0;
+ }
+
+ if (!strcmp(var, FSMONITOR__START_TIMEOUT)) {
+ int i = git_config_int(var, value);
+ if (i < 0)
+ return error(_("value of '%s' out of range: %d"),
+ FSMONITOR__START_TIMEOUT, i);
+ fsmonitor__start_timeout_sec = i;
+ return 0;
+ }
+
+ if (!strcmp(var, FSMONITOR__ANNOUNCE_STARTUP)) {
+ int is_bool;
+ int i = git_config_bool_or_int(var, value, &is_bool);
+ if (i < 0)
+ return error(_("value of '%s' not bool or int: %d"),
+ var, i);
+ fsmonitor__announce_startup = i;
+ return 0;
+ }
+
+ return git_default_config(var, value, cb);
+}
+
+/*
+ * Acting as a CLIENT.
+ *
+ * Send a "quit" command to the `git-fsmonitor--daemon` (if running)
+ * and wait for it to shutdown.
+ */
+static int do_as_client__send_stop(void)
+{
+ struct strbuf answer = STRBUF_INIT;
+ int ret;
+
+ ret = fsmonitor_ipc__send_command("quit", &answer);
+
+ /* The quit command does not return any response data. */
+ strbuf_release(&answer);
+
+ if (ret)
+ return ret;
+
+ trace2_region_enter("fsm_client", "polling-for-daemon-exit", NULL);
+ while (fsmonitor_ipc__get_state() == IPC_STATE__LISTENING)
+ sleep_millisec(50);
+ trace2_region_leave("fsm_client", "polling-for-daemon-exit", NULL);
+
+ return 0;
+}
+
+static int do_as_client__status(void)
+{
+ enum ipc_active_state state = fsmonitor_ipc__get_state();
+
+ switch (state) {
+ case IPC_STATE__LISTENING:
+ printf(_("fsmonitor-daemon is watching '%s'\n"),
+ the_repository->worktree);
+ return 0;
+
+ default:
+ printf(_("fsmonitor-daemon is not watching '%s'\n"),
+ the_repository->worktree);
+ return 1;
+ }
+}
+
+enum fsmonitor_cookie_item_result {
+ FCIR_ERROR = -1, /* could not create cookie file ? */
+ FCIR_INIT,
+ FCIR_SEEN,
+ FCIR_ABORT,
+};
+
+struct fsmonitor_cookie_item {
+ struct hashmap_entry entry;
+ char *name;
+ enum fsmonitor_cookie_item_result result;
+};
+
+static int cookies_cmp(const void *data, const struct hashmap_entry *he1,
+ const struct hashmap_entry *he2, const void *keydata)
+{
+ const struct fsmonitor_cookie_item *a =
+ container_of(he1, const struct fsmonitor_cookie_item, entry);
+ const struct fsmonitor_cookie_item *b =
+ container_of(he2, const struct fsmonitor_cookie_item, entry);
+
+ return strcmp(a->name, keydata ? keydata : b->name);
+}
+
+static enum fsmonitor_cookie_item_result with_lock__wait_for_cookie(
+ struct fsmonitor_daemon_state *state)
+{
+ /* assert current thread holding state->main_lock */
+
+ int fd;
+ struct fsmonitor_cookie_item *cookie;
+ struct strbuf cookie_pathname = STRBUF_INIT;
+ struct strbuf cookie_filename = STRBUF_INIT;
+ enum fsmonitor_cookie_item_result result;
+ int my_cookie_seq;
+
+ CALLOC_ARRAY(cookie, 1);
+
+ my_cookie_seq = state->cookie_seq++;
+
+ strbuf_addf(&cookie_filename, "%i-%i", getpid(), my_cookie_seq);
+
+ strbuf_addbuf(&cookie_pathname, &state->path_cookie_prefix);
+ strbuf_addbuf(&cookie_pathname, &cookie_filename);
+
+ cookie->name = strbuf_detach(&cookie_filename, NULL);
+ cookie->result = FCIR_INIT;
+ hashmap_entry_init(&cookie->entry, strhash(cookie->name));
+
+ hashmap_add(&state->cookies, &cookie->entry);
+
+ trace_printf_key(&trace_fsmonitor, "cookie-wait: '%s' '%s'",
+ cookie->name, cookie_pathname.buf);
+
+ /*
+ * Create the cookie file on disk and then wait for a notification
+ * that the listener thread has seen it.
+ */
+ fd = open(cookie_pathname.buf, O_WRONLY | O_CREAT | O_EXCL, 0600);
+ if (fd < 0) {
+ error_errno(_("could not create fsmonitor cookie '%s'"),
+ cookie->name);
+
+ cookie->result = FCIR_ERROR;
+ goto done;
+ }
+
+ /*
+ * Technically, close() and unlink() can fail, but we don't
+ * care here. We only created the file to trigger a watch
+ * event from the FS to know when we're up to date.
+ */
+ close(fd);
+ unlink(cookie_pathname.buf);
+
+ /*
+ * Technically, this is an infinite wait (well, unless another
+ * thread sends us an abort). I'd like to change this to
+ * use `pthread_cond_timedwait()` and return an error/timeout
+ * and let the caller do the trivial response thing, but we
+ * don't have that routine in our thread-utils.
+ *
+ * After extensive beta testing I'm not really worried about
+ * this. Also note that the above open() and unlink() calls
+ * will cause at least two FS events on that path, so the odds
+ * of getting stuck are pretty slim.
+ */
+ while (cookie->result == FCIR_INIT)
+ pthread_cond_wait(&state->cookies_cond,
+ &state->main_lock);
+
+done:
+ hashmap_remove(&state->cookies, &cookie->entry, NULL);
+
+ result = cookie->result;
+
+ free(cookie->name);
+ free(cookie);
+ strbuf_release(&cookie_pathname);
+
+ return result;
+}
+
+/*
+ * Mark these cookies as _SEEN and wake up the corresponding client threads.
+ */
+static void with_lock__mark_cookies_seen(struct fsmonitor_daemon_state *state,
+ const struct string_list *cookie_names)
+{
+ /* assert current thread holding state->main_lock */
+
+ int k;
+ int nr_seen = 0;
+
+ for (k = 0; k < cookie_names->nr; k++) {
+ struct fsmonitor_cookie_item key;
+ struct fsmonitor_cookie_item *cookie;
+
+ key.name = cookie_names->items[k].string;
+ hashmap_entry_init(&key.entry, strhash(key.name));
+
+ cookie = hashmap_get_entry(&state->cookies, &key, entry, NULL);
+ if (cookie) {
+ trace_printf_key(&trace_fsmonitor, "cookie-seen: '%s'",
+ cookie->name);
+ cookie->result = FCIR_SEEN;
+ nr_seen++;
+ }
+ }
+
+ if (nr_seen)
+ pthread_cond_broadcast(&state->cookies_cond);
+}
+
+/*
+ * Set _ABORT on all pending cookies and wake up all client threads.
+ */
+static void with_lock__abort_all_cookies(struct fsmonitor_daemon_state *state)
+{
+ /* assert current thread holding state->main_lock */
+
+ struct hashmap_iter iter;
+ struct fsmonitor_cookie_item *cookie;
+ int nr_aborted = 0;
+
+ hashmap_for_each_entry(&state->cookies, &iter, cookie, entry) {
+ trace_printf_key(&trace_fsmonitor, "cookie-abort: '%s'",
+ cookie->name);
+ cookie->result = FCIR_ABORT;
+ nr_aborted++;
+ }
+
+ if (nr_aborted)
+ pthread_cond_broadcast(&state->cookies_cond);
+}
+
+/*
+ * Requests to and from a FSMonitor Protocol V2 provider use an opaque
+ * "token" as a virtual timestamp. Clients can request a summary of all
+ * created/deleted/modified files relative to a token. In the response,
+ * clients receive a new token for the next (relative) request.
+ *
+ *
+ * Token Format
+ * ============
+ *
+ * The contents of the token are private and provider-specific.
+ *
+ * For the built-in fsmonitor--daemon, we define a token as follows:
+ *
+ * "builtin" ":" <token_id> ":" <sequence_nr>
+ *
+ * The "builtin" prefix is used as a namespace to avoid conflicts
+ * with other providers (such as Watchman).
+ *
+ * The <token_id> is an arbitrary OPAQUE string, such as a GUID,
+ * UUID, or {timestamp,pid}. It is used to group all filesystem
+ * events that happened while the daemon was monitoring (and in-sync
+ * with the filesystem).
+ *
+ * Unlike FSMonitor Protocol V1, it is not defined as a timestamp
+ * and does not define less-than/greater-than relationships.
+ * (There are too many race conditions to rely on file system
+ * event timestamps.)
+ *
+ * The <sequence_nr> is a simple integer incremented whenever the
+ * daemon needs to make its state public. For example, if 1000 file
+ * system events come in, but no clients have requested the data,
+ * the daemon can continue to accumulate file changes in the same
+ * bin and does not need to advance the sequence number. However,
+ * as soon as a client does arrive, the daemon needs to start a new
+ * bin and increment the sequence number.
+ *
+ * The sequence number serves as the boundary between 2 sets
+ * of bins -- the older ones that the client has already seen
+ * and the newer ones that it hasn't.
+ *
+ * When a new <token_id> is created, the <sequence_nr> is reset to
+ * zero.
+ *
+ *
+ * About Token Ids
+ * ===============
+ *
+ * A new token_id is created:
+ *
+ * [1] each time the daemon is started.
+ *
+ * [2] any time that the daemon must re-sync with the filesystem
+ * (such as when the kernel drops or we miss events on a very
+ * active volume).
+ *
+ * [3] in response to a client "flush" command (for dropped event
+ * testing).
+ *
+ * When a new token_id is created, the daemon is free to discard all
+ * cached filesystem events associated with any previous token_ids.
+ * Events associated with a non-current token_id will never be sent
+ * to a client. A token_id change implicitly means that the daemon
+ * has a gap in its event history.
+ *
+ * Therefore, clients that present a token with a stale (non-current)
+ * token_id will always be given a trivial response.
+ */
+struct fsmonitor_token_data {
+ struct strbuf token_id;
+ struct fsmonitor_batch *batch_head;
+ struct fsmonitor_batch *batch_tail;
+ uint64_t client_ref_count;
+};
+
+struct fsmonitor_batch {
+ struct fsmonitor_batch *next;
+ uint64_t batch_seq_nr;
+ const char **interned_paths;
+ size_t nr, alloc;
+ time_t pinned_time;
+};
+
+static struct fsmonitor_token_data *fsmonitor_new_token_data(void)
+{
+ static int test_env_value = -1;
+ static uint64_t flush_count = 0;
+ struct fsmonitor_token_data *token;
+ struct fsmonitor_batch *batch;
+
+ CALLOC_ARRAY(token, 1);
+ batch = fsmonitor_batch__new();
+
+ strbuf_init(&token->token_id, 0);
+ token->batch_head = batch;
+ token->batch_tail = batch;
+ token->client_ref_count = 0;
+
+ if (test_env_value < 0)
+ test_env_value = git_env_bool("GIT_TEST_FSMONITOR_TOKEN", 0);
+
+ if (!test_env_value) {
+ struct timeval tv;
+ struct tm tm;
+ time_t secs;
+
+ gettimeofday(&tv, NULL);
+ secs = tv.tv_sec;
+ gmtime_r(&secs, &tm);
+
+ strbuf_addf(&token->token_id,
+ "%"PRIu64".%d.%4d%02d%02dT%02d%02d%02d.%06ldZ",
+ flush_count++,
+ getpid(),
+ tm.tm_year + 1900, tm.tm_mon + 1, tm.tm_mday,
+ tm.tm_hour, tm.tm_min, tm.tm_sec,
+ (long)tv.tv_usec);
+ } else {
+ strbuf_addf(&token->token_id, "test_%08x", test_env_value++);
+ }
+
+ /*
+ * We created a new <token_id> and are starting a new series
+ * of tokens with a zero <seq_nr>.
+ *
+ * Since clients cannot guess our new (non test) <token_id>
+ * they will always receive a trivial response (because of the
+ * mismatch on the <token_id>). The trivial response will
+ * tell them our new <token_id> so that subsequent requests
+ * will be relative to our new series. (And when sending that
+ * response, we pin the current head of the batch list.)
+ *
+ * Even if the client correctly guesses the <token_id>, their
+ * request of "builtin:<token_id>:0" asks for all changes MORE
+ * RECENT than batch/bin 0.
+ *
+ * This implies that it is a waste to accumulate paths in the
+ * initial batch/bin (because they will never be transmitted).
+ *
+ * So the daemon could be running for days and watching the
+ * file system, but doesn't need to actually accumulate any
+ * paths UNTIL we need to set a reference point for a later
+ * relative request.
+ *
+ * However, it is very useful for testing to always have a
+ * reference point set. Pin batch 0 to force early file system
+ * events to accumulate.
+ */
+ if (test_env_value)
+ batch->pinned_time = time(NULL);
+
+ return token;
+}
+
+struct fsmonitor_batch *fsmonitor_batch__new(void)
+{
+ struct fsmonitor_batch *batch;
+
+ CALLOC_ARRAY(batch, 1);
+
+ return batch;
+}
+
+void fsmonitor_batch__free_list(struct fsmonitor_batch *batch)
+{
+ while (batch) {
+ struct fsmonitor_batch *next = batch->next;
+
+ /*
+ * The actual strings within the array of this batch
+ * are interned, so we don't own them. We only own
+ * the array.
+ */
+ free(batch->interned_paths);
+ free(batch);
+
+ batch = next;
+ }
+}
+
+void fsmonitor_batch__add_path(struct fsmonitor_batch *batch,
+ const char *path)
+{
+ const char *interned_path = strintern(path);
+
+ trace_printf_key(&trace_fsmonitor, "event: %s", interned_path);
+
+ ALLOC_GROW(batch->interned_paths, batch->nr + 1, batch->alloc);
+ batch->interned_paths[batch->nr++] = interned_path;
+}
+
+static void fsmonitor_batch__combine(struct fsmonitor_batch *batch_dest,
+ const struct fsmonitor_batch *batch_src)
+{
+ size_t k;
+
+ ALLOC_GROW(batch_dest->interned_paths,
+ batch_dest->nr + batch_src->nr + 1,
+ batch_dest->alloc);
+
+ for (k = 0; k < batch_src->nr; k++)
+ batch_dest->interned_paths[batch_dest->nr++] =
+ batch_src->interned_paths[k];
+}
+
+/*
+ * To keep the batch list from growing unbounded in response to filesystem
+ * activity, we try to truncate old batches from the end of the list as
+ * they become irrelevant.
+ *
+ * We assume that the .git/index will be updated with the most recent token
+ * any time the index is updated. And future commands will only ask for
+ * recent changes *since* that new token. So as tokens advance into the
+ * future, older batch items will never be requested/needed. So we can
+ * truncate them without loss of functionality.
+ *
+ * However, multiple commands may be talking to the daemon concurrently
+ * or performing a slow command, so a little "token skew" is possible.
+ * Therefore, we want this to be a little bit lazy and have a generous
+ * delay.
+ *
+ * The current reader thread walked backwards in time from `token->batch_head`
+ * back to `batch_marker` somewhere in the middle of the batch list.
+ *
+ * Let's walk backwards in time from that marker an arbitrary delay
+ * and truncate the list there. Note that these timestamps are completely
+ * artificial (based on when we pinned the batch item) and not on any
+ * filesystem activity.
+ *
+ * Return the obsolete portion of the list after we have removed it from
+ * the official list so that the caller can free it after leaving the lock.
+ */
+#define MY_TIME_DELAY_SECONDS (5 * 60) /* seconds */
+
+static struct fsmonitor_batch *with_lock__truncate_old_batches(
+ struct fsmonitor_daemon_state *state,
+ const struct fsmonitor_batch *batch_marker)
+{
+ /* assert current thread holding state->main_lock */
+
+ const struct fsmonitor_batch *batch;
+ struct fsmonitor_batch *remainder;
+
+ if (!batch_marker)
+ return NULL;
+
+ trace_printf_key(&trace_fsmonitor, "Truncate: mark (%"PRIu64",%"PRIu64")",
+ batch_marker->batch_seq_nr,
+ (uint64_t)batch_marker->pinned_time);
+
+ for (batch = batch_marker; batch; batch = batch->next) {
+ time_t t;
+
+ if (!batch->pinned_time) /* an overflow batch */
+ continue;
+
+ t = batch->pinned_time + MY_TIME_DELAY_SECONDS;
+ if (t > batch_marker->pinned_time) /* too close to marker */
+ continue;
+
+ goto truncate_past_here;
+ }
+
+ return NULL;
+
+truncate_past_here:
+ state->current_token_data->batch_tail = (struct fsmonitor_batch *)batch;
+
+ remainder = ((struct fsmonitor_batch *)batch)->next;
+ ((struct fsmonitor_batch *)batch)->next = NULL;
+
+ return remainder;
+}
+
+static void fsmonitor_free_token_data(struct fsmonitor_token_data *token)
+{
+ if (!token)
+ return;
+
+ assert(token->client_ref_count == 0);
+
+ strbuf_release(&token->token_id);
+
+ fsmonitor_batch__free_list(token->batch_head);
+
+ free(token);
+}
+
+/*
+ * Flush all of our cached data about the filesystem. Call this if we
+ * lose sync with the filesystem and miss some notification events.
+ *
+ * [1] If we are missing events, then we no longer have a complete
+ * history of the directory (relative to our current start token).
+ * We should create a new token and start fresh (as if we just
+ * booted up).
+ *
+ * [2] Some of those lost events may have been for cookie files. We
+ * should assume the worst and abort them rather than letting them starve.
+ *
+ * If there are no concurrent threads reading the current token data
+ * series, we can free it now. Otherwise, let the last reader free
+ * it.
+ *
+ * Either way, the old token data series is no longer associated with
+ * our state data.
+ */
+static void with_lock__do_force_resync(struct fsmonitor_daemon_state *state)
+{
+ /* assert current thread holding state->main_lock */
+
+ struct fsmonitor_token_data *free_me = NULL;
+ struct fsmonitor_token_data *new_one = NULL;
+
+ new_one = fsmonitor_new_token_data();
+
+ if (state->current_token_data->client_ref_count == 0)
+ free_me = state->current_token_data;
+ state->current_token_data = new_one;
+
+ fsmonitor_free_token_data(free_me);
+
+ with_lock__abort_all_cookies(state);
+}
+
+void fsmonitor_force_resync(struct fsmonitor_daemon_state *state)
+{
+ pthread_mutex_lock(&state->main_lock);
+ with_lock__do_force_resync(state);
+ pthread_mutex_unlock(&state->main_lock);
+}
+
+/*
+ * Format an opaque token string to send to the client.
+ */
+static void with_lock__format_response_token(
+ struct strbuf *response_token,
+ const struct strbuf *response_token_id,
+ const struct fsmonitor_batch *batch)
+{
+ /* assert current thread holding state->main_lock */
+
+ strbuf_reset(response_token);
+ strbuf_addf(response_token, "builtin:%s:%"PRIu64,
+ response_token_id->buf, batch->batch_seq_nr);
+}
+
+/*
+ * Parse an opaque token from the client.
+ * Returns -1 on error.
+ */
+static int fsmonitor_parse_client_token(const char *buf_token,
+ struct strbuf *requested_token_id,
+ uint64_t *seq_nr)
+{
+ const char *p;
+ char *p_end;
+
+ strbuf_reset(requested_token_id);
+ *seq_nr = 0;
+
+ if (!skip_prefix(buf_token, "builtin:", &p))
+ return -1;
+
+ while (*p && *p != ':')
+ strbuf_addch(requested_token_id, *p++);
+ if (!*p++)
+ return -1;
+
+ *seq_nr = (uint64_t)strtoumax(p, &p_end, 10);
+ if (*p_end)
+ return -1;
+
+ return 0;
+}
+
+KHASH_INIT(str, const char *, int, 0, kh_str_hash_func, kh_str_hash_equal)
+
+static int do_handle_client(struct fsmonitor_daemon_state *state,
+ const char *command,
+ ipc_server_reply_cb *reply,
+ struct ipc_server_reply_data *reply_data)
+{
+ struct fsmonitor_token_data *token_data = NULL;
+ struct strbuf response_token = STRBUF_INIT;
+ struct strbuf requested_token_id = STRBUF_INIT;
+ struct strbuf payload = STRBUF_INIT;
+ uint64_t requested_oldest_seq_nr = 0;
+ uint64_t total_response_len = 0;
+ const char *p;
+ const struct fsmonitor_batch *batch_head;
+ const struct fsmonitor_batch *batch;
+ struct fsmonitor_batch *remainder = NULL;
+ intmax_t count = 0, duplicates = 0;
+ kh_str_t *shown;
+ int hash_ret;
+ int do_trivial = 0;
+ int do_flush = 0;
+ int do_cookie = 0;
+ enum fsmonitor_cookie_item_result cookie_result;
+
+ /*
+ * We expect `command` to be of the form:
+ *
+ * <command> := quit NUL
+ * | flush NUL
+ * | <V1-time-since-epoch-ns> NUL
+ * | <V2-opaque-fsmonitor-token> NUL
+ */
+
+ if (!strcmp(command, "quit")) {
+ /*
+ * A client has requested over the socket/pipe that the
+ * daemon shutdown.
+ *
+ * Tell the IPC thread pool to shutdown (which completes
+ * the await in the main thread (which can stop the
+ * fsmonitor listener thread)).
+ *
+ * There is no reply to the client.
+ */
+ return SIMPLE_IPC_QUIT;
+
+ } else if (!strcmp(command, "flush")) {
+ /*
+ * Flush all of our cached data and generate a new token
+ * just like if we lost sync with the filesystem.
+ *
+ * Then send a trivial response using the new token.
+ */
+ do_flush = 1;
+ do_trivial = 1;
+
+ } else if (!skip_prefix(command, "builtin:", &p)) {
+ /* assume V1 timestamp or garbage */
+
+ char *p_end;
+
+ strtoumax(command, &p_end, 10);
+ trace_printf_key(&trace_fsmonitor,
+ ((*p_end) ?
+ "fsmonitor: invalid command line '%s'" :
+ "fsmonitor: unsupported V1 protocol '%s'"),
+ command);
+ do_trivial = 1;
+
+ } else {
+ /* We have "builtin:*" */
+ if (fsmonitor_parse_client_token(command, &requested_token_id,
+ &requested_oldest_seq_nr)) {
+ trace_printf_key(&trace_fsmonitor,
+ "fsmonitor: invalid V2 protocol token '%s'",
+ command);
+ do_trivial = 1;
+
+ } else {
+ /*
+ * We have a V2 valid token:
+ * "builtin:<token_id>:<seq_nr>"
+ */
+ do_cookie = 1;
+ }
+ }
+
+ pthread_mutex_lock(&state->main_lock);
+
+ if (!state->current_token_data)
+ BUG("fsmonitor state does not have a current token");
+
+ /*
+ * Write a cookie file inside the directory being watched in
+ * an effort to flush out existing filesystem events that we
+ * actually care about. Suspend this client thread until we
+ * see the filesystem events for this cookie file.
+ *
+ * Creating the cookie lets us guarantee that our FS listener
+ * thread has drained the kernel queue and we are caught up
+ * with the kernel.
+ *
+ * If we cannot create the cookie (or otherwise guarantee that
+ * we are caught up), we send a trivial response. We have to
+ * assume that there might be some very, very recent activity
+ * on the FS still in flight.
+ */
+ if (do_cookie) {
+ cookie_result = with_lock__wait_for_cookie(state);
+ if (cookie_result != FCIR_SEEN) {
+ error(_("fsmonitor: cookie_result '%d' != SEEN"),
+ cookie_result);
+ do_trivial = 1;
+ }
+ }
+
+ if (do_flush)
+ with_lock__do_force_resync(state);
+
+ /*
+ * We mark the current head of the batch list as "pinned" so
+ * that the listener thread will treat this item as read-only
+ * (and prevent any more paths from being added to it) from
+ * now on.
+ */
+ token_data = state->current_token_data;
+ batch_head = token_data->batch_head;
+ ((struct fsmonitor_batch *)batch_head)->pinned_time = time(NULL);
+
+ /*
+ * FSMonitor Protocol V2 requires that we send a response header
+ * with a "new current token" and then all of the paths that changed
+ * since the "requested token". We send the seq_nr of the just-pinned
+ * head batch so that future requests from a client will be relative
+ * to it.
+ */
+ with_lock__format_response_token(&response_token,
+ &token_data->token_id, batch_head);
+
+ reply(reply_data, response_token.buf, response_token.len + 1);
+ total_response_len += response_token.len + 1;
+
+ trace2_data_string("fsmonitor", the_repository, "response/token",
+ response_token.buf);
+ trace_printf_key(&trace_fsmonitor, "response token: %s",
+ response_token.buf);
+
+ if (!do_trivial) {
+ if (strcmp(requested_token_id.buf, token_data->token_id.buf)) {
+ /*
+ * The client last spoke to a different daemon
+ * instance -OR- the daemon had to resync with
+ * the filesystem (and lost events), so reject.
+ */
+ trace2_data_string("fsmonitor", the_repository,
+ "response/token", "different");
+ do_trivial = 1;
+
+ } else if (requested_oldest_seq_nr <
+ token_data->batch_tail->batch_seq_nr) {
+ /*
+ * The client wants older events than we have for
+ * this token_id. This means that the end of our
+ * batch list was truncated and we cannot give the
+ * client a complete snapshot relative to their
+ * request.
+ */
+ trace_printf_key(&trace_fsmonitor,
+ "client requested truncated data");
+ do_trivial = 1;
+ }
+ }
+
+ if (do_trivial) {
+ pthread_mutex_unlock(&state->main_lock);
+
+ reply(reply_data, "/", 2);
+
+ trace2_data_intmax("fsmonitor", the_repository,
+ "response/trivial", 1);
+
+ goto cleanup;
+ }
+
+ /*
+ * We're going to hold onto a pointer to the current
+ * token-data while we walk the list of batches of files.
+ * During this time, we will NOT be under the lock.
+ * So we ref-count it.
+ *
+ * This allows the listener thread to continue prepending
+ * new batches of items to the token-data (which we'll ignore).
+ *
+ * AND it allows the listener thread to do a token-reset
+ * (and install a new `current_token_data`).
+ */
+ token_data->client_ref_count++;
+
+ pthread_mutex_unlock(&state->main_lock);
+
+ /*
+ * The client request is relative to the token that they sent,
+ * so walk the batch list backwards from the current head back
+ * to the batch (sequence number) they named.
+ *
+ * We use khash to de-dup the list of pathnames.
+ *
+ * NEEDSWORK: each batch contains a list of interned strings,
+ * so we only need to do pointer comparisons here to build the
+ * hash table. Currently, we're still comparing the string
+ * values.
+ */
+ shown = kh_init_str();
+ for (batch = batch_head;
+ batch && batch->batch_seq_nr > requested_oldest_seq_nr;
+ batch = batch->next) {
+ size_t k;
+
+ for (k = 0; k < batch->nr; k++) {
+ const char *s = batch->interned_paths[k];
+ size_t s_len;
+
+ if (kh_get_str(shown, s) != kh_end(shown))
+ duplicates++;
+ else {
+ kh_put_str(shown, s, &hash_ret);
+
+ trace_printf_key(&trace_fsmonitor,
+ "send[%"PRIuMAX"]: %s",
+ count, s);
+
+ /* Each path gets written with a trailing NUL */
+ s_len = strlen(s) + 1;
+
+ if (payload.len + s_len >=
+ LARGE_PACKET_DATA_MAX) {
+ reply(reply_data, payload.buf,
+ payload.len);
+ total_response_len += payload.len;
+ strbuf_reset(&payload);
+ }
+
+ strbuf_add(&payload, s, s_len);
+ count++;
+ }
+ }
+ }
+
+ if (payload.len) {
+ reply(reply_data, payload.buf, payload.len);
+ total_response_len += payload.len;
+ }
+
+ kh_release_str(shown);
+
+ pthread_mutex_lock(&state->main_lock);
+
+ if (token_data->client_ref_count > 0)
+ token_data->client_ref_count--;
+
+ if (token_data->client_ref_count == 0) {
+ if (token_data != state->current_token_data) {
+ /*
+ * The listener thread did a token-reset while we were
+ * walking the batch list. Therefore, this token is
+ * stale and can be discarded completely. If we are
+ * the last reader thread using this token, we own
+ * that work.
+ */
+ fsmonitor_free_token_data(token_data);
+ } else if (batch) {
+ /*
+ * We are holding the lock and are the only
+ * reader of the ref-counted portion of the
+ * list, so we get the honor of seeing if the
+ * list can be truncated to save memory.
+ *
+ * The main loop did not walk to the end of the
+ * list, so this batch is the first item in the
+ * batch-list that is older than the requested
+ * end-point sequence number. See if the tail
+ * end of the list is obsolete.
+ */
+ remainder = with_lock__truncate_old_batches(state,
+ batch);
+ }
+ }
+
+ pthread_mutex_unlock(&state->main_lock);
+
+ if (remainder)
+ fsmonitor_batch__free_list(remainder);
+
+ trace2_data_intmax("fsmonitor", the_repository, "response/length", total_response_len);
+ trace2_data_intmax("fsmonitor", the_repository, "response/count/files", count);
+ trace2_data_intmax("fsmonitor", the_repository, "response/count/duplicates", duplicates);
+
+cleanup:
+ strbuf_release(&response_token);
+ strbuf_release(&requested_token_id);
+ strbuf_release(&payload);
+
+ return 0;
+}
+
+static ipc_server_application_cb handle_client;
+
+static int handle_client(void *data,
+ const char *command, size_t command_len,
+ ipc_server_reply_cb *reply,
+ struct ipc_server_reply_data *reply_data)
+{
+ struct fsmonitor_daemon_state *state = data;
+ int result;
+
+ /*
+ * The Simple IPC API now supports {char*, len} arguments, but
+ * FSMonitor always uses proper null-terminated strings, so
+ * we can ignore the command_len argument. (Trust, but verify.)
+ */
+ if (command_len != strlen(command))
+ BUG("FSMonitor assumes text messages");
+
+ trace_printf_key(&trace_fsmonitor, "requested token: %s", command);
+
+ trace2_region_enter("fsmonitor", "handle_client", the_repository);
+ trace2_data_string("fsmonitor", the_repository, "request", command);
+
+ result = do_handle_client(state, command, reply, reply_data);
+
+ trace2_region_leave("fsmonitor", "handle_client", the_repository);
+
+ return result;
+}
+
+#define FSMONITOR_DIR "fsmonitor--daemon"
+#define FSMONITOR_COOKIE_DIR "cookies"
+#define FSMONITOR_COOKIE_PREFIX (FSMONITOR_DIR "/" FSMONITOR_COOKIE_DIR "/")
+
+enum fsmonitor_path_type fsmonitor_classify_path_workdir_relative(
+ const char *rel)
+{
+ if (fspathncmp(rel, ".git", 4))
+ return IS_WORKDIR_PATH;
+ rel += 4;
+
+ if (!*rel)
+ return IS_DOT_GIT;
+ if (*rel != '/')
+ return IS_WORKDIR_PATH; /* e.g. .gitignore */
+ rel++;
+
+ if (!fspathncmp(rel, FSMONITOR_COOKIE_PREFIX,
+ strlen(FSMONITOR_COOKIE_PREFIX)))
+ return IS_INSIDE_DOT_GIT_WITH_COOKIE_PREFIX;
+
+ return IS_INSIDE_DOT_GIT;
+}
+
+enum fsmonitor_path_type fsmonitor_classify_path_gitdir_relative(
+ const char *rel)
+{
+ if (!fspathncmp(rel, FSMONITOR_COOKIE_PREFIX,
+ strlen(FSMONITOR_COOKIE_PREFIX)))
+ return IS_INSIDE_GITDIR_WITH_COOKIE_PREFIX;
+
+ return IS_INSIDE_GITDIR;
+}
+
+static enum fsmonitor_path_type try_classify_workdir_abs_path(
+ struct fsmonitor_daemon_state *state,
+ const char *path)
+{
+ const char *rel;
+
+ if (fspathncmp(path, state->path_worktree_watch.buf,
+ state->path_worktree_watch.len))
+ return IS_OUTSIDE_CONE;
+
+ rel = path + state->path_worktree_watch.len;
+
+ if (!*rel)
+ return IS_WORKDIR_PATH; /* it is the root dir exactly */
+ if (*rel != '/')
+ return IS_OUTSIDE_CONE;
+ rel++;
+
+ return fsmonitor_classify_path_workdir_relative(rel);
+}
+
+enum fsmonitor_path_type fsmonitor_classify_path_absolute(
+ struct fsmonitor_daemon_state *state,
+ const char *path)
+{
+ const char *rel;
+ enum fsmonitor_path_type t;
+
+ t = try_classify_workdir_abs_path(state, path);
+ if (state->nr_paths_watching == 1)
+ return t;
+ if (t != IS_OUTSIDE_CONE)
+ return t;
+
+ if (fspathncmp(path, state->path_gitdir_watch.buf,
+ state->path_gitdir_watch.len))
+ return IS_OUTSIDE_CONE;
+
+ rel = path + state->path_gitdir_watch.len;
+
+ if (!*rel)
+ return IS_GITDIR; /* it is the <gitdir> exactly */
+ if (*rel != '/')
+ return IS_OUTSIDE_CONE;
+ rel++;
+
+ return fsmonitor_classify_path_gitdir_relative(rel);
+}
+
+/*
+ * We try to combine small batches at the front of the batch-list to avoid
+ * having a long list. This hopefully makes it a little easier when we want
+ * to truncate and maintain the list. However, we don't want the paths array
+ * to just keep growing and growing with realloc, so we insert an arbitrary
+ * limit.
+ */
+#define MY_COMBINE_LIMIT (1024)
+
+void fsmonitor_publish(struct fsmonitor_daemon_state *state,
+ struct fsmonitor_batch *batch,
+ const struct string_list *cookie_names)
+{
+ if (!batch && !cookie_names->nr)
+ return;
+
+ pthread_mutex_lock(&state->main_lock);
+
+ if (batch) {
+ struct fsmonitor_batch *head;
+
+ head = state->current_token_data->batch_head;
+ if (!head) {
+ BUG("token does not have batch");
+ } else if (head->pinned_time) {
+ /*
+ * We cannot alter the current batch list
+ * because:
+ *
+ * [a] it is being transmitted to at least one
+ * client and the handle_client() thread has a
+ * ref-count, but not a lock on the batch list
+ * starting with this item.
+ *
+ * [b] it has been transmitted in the past to
+ * at least one client such that future
+ * requests are relative to this head batch.
+ *
+ * So, we can only prepend a new batch onto
+ * the front of the list.
+ */
+ batch->batch_seq_nr = head->batch_seq_nr + 1;
+ batch->next = head;
+ state->current_token_data->batch_head = batch;
+ } else if (!head->batch_seq_nr) {
+ /*
+ * Batch 0 is unpinned. See the note in
+ * `fsmonitor_new_token_data()` about why we
+ * don't need to accumulate these paths.
+ */
+ fsmonitor_batch__free_list(batch);
+ } else if (head->nr + batch->nr > MY_COMBINE_LIMIT) {
+ /*
+ * The head batch in the list has never been
+ * transmitted to a client, but folding the
+ * contents of the new batch onto it would
+ * exceed our arbitrary limit, so just prepend
+ * the new batch onto the list.
+ */
+ batch->batch_seq_nr = head->batch_seq_nr + 1;
+ batch->next = head;
+ state->current_token_data->batch_head = batch;
+ } else {
+ /*
+ * We are free to add the paths in the given
+ * batch onto the end of the current head batch.
+ */
+ fsmonitor_batch__combine(head, batch);
+ fsmonitor_batch__free_list(batch);
+ }
+ }
+
+ if (cookie_names->nr)
+ with_lock__mark_cookies_seen(state, cookie_names);
+
+ pthread_mutex_unlock(&state->main_lock);
+}
+
+static void *fsm_listen__thread_proc(void *_state)
+{
+ struct fsmonitor_daemon_state *state = _state;
+
+ trace2_thread_start("fsm-listen");
+
+ trace_printf_key(&trace_fsmonitor, "Watching: worktree '%s'",
+ state->path_worktree_watch.buf);
+ if (state->nr_paths_watching > 1)
+ trace_printf_key(&trace_fsmonitor, "Watching: gitdir '%s'",
+ state->path_gitdir_watch.buf);
+
+ fsm_listen__loop(state);
+
+ pthread_mutex_lock(&state->main_lock);
+ if (state->current_token_data &&
+ state->current_token_data->client_ref_count == 0)
+ fsmonitor_free_token_data(state->current_token_data);
+ state->current_token_data = NULL;
+ pthread_mutex_unlock(&state->main_lock);
+
+ trace2_thread_exit();
+ return NULL;
+}
+
+static int fsmonitor_run_daemon_1(struct fsmonitor_daemon_state *state)
+{
+ struct ipc_server_opts ipc_opts = {
+ .nr_threads = fsmonitor__ipc_threads,
+
+ /*
+ * We know that there are no other active threads yet,
+ * so we can let the IPC layer temporarily chdir() if
+ * it needs to when creating the server side of the
+ * Unix domain socket.
+ */
+ .uds_disallow_chdir = 0
+ };
+
+ /*
+ * Start the IPC thread pool before we start the file
+ * system event listener thread so that we have the IPC handle
+ * before we need it.
+ */
+ if (ipc_server_run_async(&state->ipc_server_data,
+ fsmonitor_ipc__get_path(), &ipc_opts,
+ handle_client, state))
+ return error_errno(
+ _("could not start IPC thread pool on '%s'"),
+ fsmonitor_ipc__get_path());
+
+ /*
+ * Start the fsmonitor listener thread to collect filesystem
+ * events.
+ */
+ if (pthread_create(&state->listener_thread, NULL,
+ fsm_listen__thread_proc, state) < 0) {
+ ipc_server_stop_async(state->ipc_server_data);
+ ipc_server_await(state->ipc_server_data);
+
+ return error(_("could not start fsmonitor listener thread"));
+ }
+
+ /*
+ * The daemon is now fully functional in background threads.
+ * Wait for the IPC thread pool to shutdown (whether by client
+ * request or from filesystem activity).
+ */
+ ipc_server_await(state->ipc_server_data);
+
+ /*
+ * The fsmonitor listener thread may have received a shutdown
+ * event from the IPC thread pool, but it doesn't hurt to tell
+ * it again. And wait for it to shutdown.
+ */
+ fsm_listen__stop_async(state);
+ pthread_join(state->listener_thread, NULL);
+
+ return state->error_code;
+}
+
+static int fsmonitor_run_daemon(void)
+{
+ struct fsmonitor_daemon_state state;
+ int err;
+
+ memset(&state, 0, sizeof(state));
+
+ hashmap_init(&state.cookies, cookies_cmp, NULL, 0);
+ pthread_mutex_init(&state.main_lock, NULL);
+ pthread_cond_init(&state.cookies_cond, NULL);
+ state.error_code = 0;
+ state.current_token_data = fsmonitor_new_token_data();
+
+ /* Prepare to (recursively) watch the <worktree-root> directory. */
+ strbuf_init(&state.path_worktree_watch, 0);
+ strbuf_addstr(&state.path_worktree_watch, absolute_path(get_git_work_tree()));
+ state.nr_paths_watching = 1;
+
+ /*
+ * We create and delete cookie files somewhere inside the .git
+ * directory to help us keep sync with the file system. If
+ * ".git" is not a directory, then <gitdir> is not inside the
+ * cone of <worktree-root>, so set up a second watch to watch
+ * the <gitdir> so that we get events for the cookie files.
+ */
+ strbuf_init(&state.path_gitdir_watch, 0);
+ strbuf_addbuf(&state.path_gitdir_watch, &state.path_worktree_watch);
+ strbuf_addstr(&state.path_gitdir_watch, "/.git");
+ if (!is_directory(state.path_gitdir_watch.buf)) {
+ strbuf_reset(&state.path_gitdir_watch);
+ strbuf_addstr(&state.path_gitdir_watch, absolute_path(get_git_dir()));
+ state.nr_paths_watching = 2;
+ }
+
+ /*
+ * We will write filesystem syncing cookie files into
+ * <gitdir>/<fsmonitor-dir>/<cookie-dir>/<pid>-<seq>.
+ *
+ * The extra layers of subdirectories here keep us from
+ * changing the mtime on ".git/" or ".git/foo/" when we create
+ * or delete cookie files.
+ *
+ * There have been problems with some IDEs that do a
+ * non-recursive watch of the ".git/" directory and run a
+ * series of commands any time something happens.
+ *
+ * For example, if we place our cookie files directly in
+ * ".git/" or ".git/foo/" then a `git status` (or similar
+ * command) from the IDE will cause a cookie file to be
+ * created in one of those dirs. This causes the mtime of
+ * those dirs to change. This triggers the IDE's watch
+ * notification. This triggers the IDE to run those commands
+ * again. And the process repeats and the machine never goes
+ * idle.
+ *
+ * Adding the extra layers of subdirectories prevents the
+ * mtime of ".git/" and ".git/foo" from changing when a
+ * cookie file is created.
+ */
+ strbuf_init(&state.path_cookie_prefix, 0);
+ strbuf_addbuf(&state.path_cookie_prefix, &state.path_gitdir_watch);
+
+ strbuf_addch(&state.path_cookie_prefix, '/');
+ strbuf_addstr(&state.path_cookie_prefix, FSMONITOR_DIR);
+ mkdir(state.path_cookie_prefix.buf, 0777);
+
+ strbuf_addch(&state.path_cookie_prefix, '/');
+ strbuf_addstr(&state.path_cookie_prefix, FSMONITOR_COOKIE_DIR);
+ mkdir(state.path_cookie_prefix.buf, 0777);
+
+ strbuf_addch(&state.path_cookie_prefix, '/');
+
+ /*
+ * Confirm that we can create platform-specific resources for the
+ * filesystem listener before we bother starting all the threads.
+ */
+ if (fsm_listen__ctor(&state)) {
+ err = error(_("could not initialize listener thread"));
+ goto done;
+ }
+
+ err = fsmonitor_run_daemon_1(&state);
+
+done:
+ pthread_cond_destroy(&state.cookies_cond);
+ pthread_mutex_destroy(&state.main_lock);
+ fsm_listen__dtor(&state);
+
+ ipc_server_free(state.ipc_server_data);
+
+ strbuf_release(&state.path_worktree_watch);
+ strbuf_release(&state.path_gitdir_watch);
+ strbuf_release(&state.path_cookie_prefix);
+
+ return err;
+}
+
+static int try_to_run_foreground_daemon(int detach_console)
+{
+ /*
+ * Technically, we don't need to probe for an existing daemon
+ * process, since we could just call `fsmonitor_run_daemon()`
+ * and let it fail if the pipe/socket is busy.
+ *
+ * However, this method gives us a nicer error message for a
+ * common error case.
+ */
+ if (fsmonitor_ipc__get_state() == IPC_STATE__LISTENING)
+ die(_("fsmonitor--daemon is already running '%s'"),
+ the_repository->worktree);
+
+ if (fsmonitor__announce_startup) {
+ fprintf(stderr, _("running fsmonitor-daemon in '%s'\n"),
+ the_repository->worktree);
+ fflush(stderr);
+ }
+
+#ifdef GIT_WINDOWS_NATIVE
+ if (detach_console)
+ FreeConsole();
+#endif
+
+ return !!fsmonitor_run_daemon();
+}
+
+static start_bg_wait_cb bg_wait_cb;
+
+static int bg_wait_cb(const struct child_process *cp, void *cb_data)
+{
+ enum ipc_active_state s = fsmonitor_ipc__get_state();
+
+ switch (s) {
+ case IPC_STATE__LISTENING:
+ /* child is "ready" */
+ return 0;
+
+ case IPC_STATE__NOT_LISTENING:
+ case IPC_STATE__PATH_NOT_FOUND:
+ /* give child more time */
+ return 1;
+
+ default:
+ case IPC_STATE__INVALID_PATH:
+ case IPC_STATE__OTHER_ERROR:
+ /* all the time in the world won't help */
+ return -1;
+ }
+}
+
+static int try_to_start_background_daemon(void)
+{
+ struct child_process cp = CHILD_PROCESS_INIT;
+ enum start_bg_result sbgr;
+
+ /*
+ * Before we try to create a background daemon process, see
+ * if a daemon process is already listening. This makes it
+ * easier for us to report an already-listening error to the
+ * console, since our spawn/daemon can only report the success
+ * of creating the background process (and not whether it
+ * immediately exited).
+ */
+ if (fsmonitor_ipc__get_state() == IPC_STATE__LISTENING)
+ die(_("fsmonitor--daemon is already running '%s'"),
+ the_repository->worktree);
+
+ if (fsmonitor__announce_startup) {
+ fprintf(stderr, _("starting fsmonitor-daemon in '%s'\n"),
+ the_repository->worktree);
+ fflush(stderr);
+ }
+
+ cp.git_cmd = 1;
+
+ strvec_push(&cp.args, "fsmonitor--daemon");
+ strvec_push(&cp.args, "run");
+ strvec_push(&cp.args, "--detach");
+ strvec_pushf(&cp.args, "--ipc-threads=%d", fsmonitor__ipc_threads);
+
+ cp.no_stdin = 1;
+ cp.no_stdout = 1;
+ cp.no_stderr = 1;
+
+ sbgr = start_bg_command(&cp, bg_wait_cb, NULL,
+ fsmonitor__start_timeout_sec);
+
+ switch (sbgr) {
+ case SBGR_READY:
+ return 0;
+
+ default:
+ case SBGR_ERROR:
+ case SBGR_CB_ERROR:
+ return error(_("daemon failed to start"));
+
+ case SBGR_TIMEOUT:
+ return error(_("daemon not online yet"));
+
+ case SBGR_DIED:
+ return error(_("daemon terminated"));
+ }
+}
+
+int cmd_fsmonitor__daemon(int argc, const char **argv, const char *prefix)
+{
+ const char *subcmd;
+ int detach_console = 0;
+
+ struct option options[] = {
+ OPT_BOOL(0, "detach", &detach_console, N_("detach from console")),
+ OPT_INTEGER(0, "ipc-threads",
+ &fsmonitor__ipc_threads,
+ N_("use <n> ipc worker threads")),
+ OPT_INTEGER(0, "start-timeout",
+ &fsmonitor__start_timeout_sec,
+ N_("max seconds to wait for background daemon startup")),
+
+ OPT_END()
+ };
+
+ git_config(fsmonitor_config, NULL);
+
+ argc = parse_options(argc, argv, prefix, options,
+ builtin_fsmonitor__daemon_usage, 0);
+ if (argc != 1)
+ usage_with_options(builtin_fsmonitor__daemon_usage, options);
+ subcmd = argv[0];
+
+ if (fsmonitor__ipc_threads < 1)
+ die(_("invalid 'ipc-threads' value (%d)"),
+ fsmonitor__ipc_threads);
+
+ if (!strcmp(subcmd, "start"))
+ return !!try_to_start_background_daemon();
+
+ if (!strcmp(subcmd, "run"))
+ return !!try_to_run_foreground_daemon(detach_console);
+
+ if (!strcmp(subcmd, "stop"))
+ return !!do_as_client__send_stop();
+
+ if (!strcmp(subcmd, "status"))
+ return !!do_as_client__status();
+
+ die(_("Unhandled subcommand '%s'"), subcmd);
+}
+
+#else
+int cmd_fsmonitor__daemon(int argc, const char **argv, const char *prefix)
+{
+ struct option options[] = {
+ OPT_END()
+ };
+
+ if (argc == 2 && !strcmp(argv[1], "-h"))
+ usage_with_options(builtin_fsmonitor__daemon_usage, options);
+
+ die(_("fsmonitor--daemon not supported on this platform"));
+}
+#endif
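
The bg_wait_cb() callback above encodes a simple readiness-polling contract for start_bg_command(): return 0 once the child is listening, 1 to keep waiting, and a negative value when waiting longer cannot help. A minimal standalone sketch of that contract, with a hypothetical probe() standing in for fsmonitor_ipc__get_state():

#include <stdio.h>
#include <unistd.h>

enum probe_state { PROBE_READY, PROBE_NOT_YET, PROBE_FATAL };

/* Hypothetical stand-in for fsmonitor_ipc__get_state(). */
static enum probe_state probe(void)
{
	static int calls;
	return ++calls < 3 ? PROBE_NOT_YET : PROBE_READY;
}

/* Same contract as bg_wait_cb(): 0 = ready, 1 = wait longer, -1 = give up. */
static int wait_cb(void)
{
	switch (probe()) {
	case PROBE_READY:
		return 0;
	case PROBE_NOT_YET:
		return 1;
	default:
		return -1;
	}
}

int main(void)
{
	int rc;

	while ((rc = wait_cb()) == 1)
		sleep(1); /* start_bg_command() would enforce its timeout here */
	printf("daemon %s\n", rc ? "unavailable" : "ready");
	return rc ? 1 : 0;
}
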
diff --git a/builtin/gc.c b/builtin/gc.c
index 8e60ef1..b335cff 100644
--- a/builtin/gc.c
+++ b/builtin/gc.c
@@ -30,8 +30,8 @@
#include "promisor-remote.h"
#include "refs.h"
#include "remote.h"
-#include "object-store.h"
#include "exec-cmd.h"
+#include "hook.h"
#define FAILED_RUN "failed to run %s"
@@ -394,7 +394,7 @@ static int need_to_gc(void)
else
return 0;
- if (run_hook_le(NULL, "pre-auto-gc", NULL))
+ if (run_hooks("pre-auto-gc"))
return 0;
return 1;
}
diff --git a/builtin/grep.c b/builtin/grep.c
index 9e34a82..bcb07ea 100644
--- a/builtin/grep.c
+++ b/builtin/grep.c
@@ -26,6 +26,8 @@
#include "object-store.h"
#include "packfile.h"
+static const char *grep_prefix;
+
static char const * const grep_usage[] = {
N_("git grep [<options>] [-e] <pattern> [<rev>...] [[--] <path>...]"),
NULL
@@ -284,7 +286,7 @@ static int wait_all(void)
static int grep_cmd_config(const char *var, const char *value, void *cb)
{
int st = grep_config(var, value, cb);
- if (git_color_default_config(var, value, cb) < 0)
+ if (git_color_default_config(var, value, NULL) < 0)
st = -1;
if (!strcmp(var, "grep.threads")) {
@@ -315,11 +317,11 @@ static void grep_source_name(struct grep_opt *opt, const char *filename,
strbuf_reset(out);
if (opt->null_following_name) {
- if (opt->relative && opt->prefix_length) {
+ if (opt->relative && grep_prefix) {
struct strbuf rel_buf = STRBUF_INIT;
const char *rel_name =
relative_path(filename + tree_name_len,
- opt->prefix, &rel_buf);
+ grep_prefix, &rel_buf);
if (tree_name_len)
strbuf_add(out, filename, tree_name_len);
@@ -332,8 +334,8 @@ static void grep_source_name(struct grep_opt *opt, const char *filename,
return;
}
- if (opt->relative && opt->prefix_length)
- quote_path(filename + tree_name_len, opt->prefix, out, 0);
+ if (opt->relative && grep_prefix)
+ quote_path(filename + tree_name_len, grep_prefix, out, 0);
else
quote_c_style(filename + tree_name_len, out, NULL, 0);
@@ -482,7 +484,7 @@ static int grep_submodule(struct grep_opt *opt,
object_type = oid_object_info(subrepo, oid, NULL);
obj_read_unlock();
data = read_object_with_reference(subrepo,
- oid, tree_type,
+ oid, OBJ_TREE,
&size, NULL);
if (!data)
die(_("unable to read tree (%s)"), oid_to_hex(oid));
@@ -651,7 +653,7 @@ static int grep_object(struct grep_opt *opt, const struct pathspec *pathspec,
int hit, len;
data = read_object_with_reference(opt->repo,
- &obj->oid, tree_type,
+ &obj->oid, OBJ_TREE,
&size, NULL);
if (!data)
die(_("unable to read tree (%s)"), oid_to_hex(&obj->oid));
@@ -843,7 +845,6 @@ int cmd_grep(int argc, const char **argv, const char *prefix)
int i;
int dummy;
int use_index = 1;
- int pattern_type_arg = GREP_PATTERN_TYPE_UNSPECIFIED;
int allow_revs;
struct option options[] = {
@@ -877,16 +878,16 @@ int cmd_grep(int argc, const char **argv, const char *prefix)
N_("descend at most <depth> levels"), PARSE_OPT_NONEG,
NULL, 1 },
OPT_GROUP(""),
- OPT_SET_INT('E', "extended-regexp", &pattern_type_arg,
+ OPT_SET_INT('E', "extended-regexp", &opt.pattern_type_option,
N_("use extended POSIX regular expressions"),
GREP_PATTERN_TYPE_ERE),
- OPT_SET_INT('G', "basic-regexp", &pattern_type_arg,
+ OPT_SET_INT('G', "basic-regexp", &opt.pattern_type_option,
N_("use basic POSIX regular expressions (default)"),
GREP_PATTERN_TYPE_BRE),
- OPT_SET_INT('F', "fixed-strings", &pattern_type_arg,
+ OPT_SET_INT('F', "fixed-strings", &opt.pattern_type_option,
N_("interpret patterns as fixed strings"),
GREP_PATTERN_TYPE_FIXED),
- OPT_SET_INT('P', "perl-regexp", &pattern_type_arg,
+ OPT_SET_INT('P', "perl-regexp", &opt.pattern_type_option,
N_("use Perl-compatible regular expressions"),
GREP_PATTERN_TYPE_PCRE),
OPT_GROUP(""),
@@ -962,9 +963,10 @@ int cmd_grep(int argc, const char **argv, const char *prefix)
PARSE_OPT_NOCOMPLETE),
OPT_END()
};
+ grep_prefix = prefix;
- git_config(grep_cmd_config, NULL);
- grep_init(&opt, the_repository, prefix);
+ grep_init(&opt, the_repository);
+ git_config(grep_cmd_config, &opt);
/*
* If there is no -- then the paths must exist in the working
@@ -979,7 +981,6 @@ int cmd_grep(int argc, const char **argv, const char *prefix)
argc = parse_options(argc, argv, prefix, options, grep_usage,
PARSE_OPT_KEEP_DASHDASH |
PARSE_OPT_STOP_AT_NON_OPTION);
- grep_commit_pattern_type(pattern_type_arg, &opt);
if (use_index && !startup_info->have_repository) {
int fallback = 0;
@@ -1167,11 +1168,9 @@ int cmd_grep(int argc, const char **argv, const char *prefix)
if (!show_in_pager && !opt.status_only)
setup_pager();
- if (!use_index && (untracked || cached))
- die(_("--cached or --untracked cannot be used with --no-index"));
-
- if (untracked && cached)
- die(_("--untracked cannot be used with --cached"));
+ die_for_incompatible_opt3(!use_index, "--no-index",
+ untracked, "--untracked",
+ cached, "--cached");
if (!use_index || untracked) {
int use_exclude = (opt_exclude < 0) ? use_index : !!opt_exclude;
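
The die_for_incompatible_opt3() call above folds the two hand-written checks into one helper that refuses any two of --no-index, --untracked and --cached being set together. A standalone sketch of that idea, with a hypothetical check_at_most_one() in place of the real helper:

#include <stdio.h>
#include <stdlib.h>

/* Hypothetical stand-in for die_for_incompatible_opt3(). */
static void check_at_most_one(int a, const char *an,
			      int b, const char *bn,
			      int c, const char *cn)
{
	if (!!a + !!b + !!c > 1) {
		fprintf(stderr, "options %s, %s and %s cannot be combined\n",
			an, bn, cn);
		exit(129);
	}
}

int main(void)
{
	int no_index = 0, untracked = 1, cached = 0; /* sample flag values */

	check_at_most_one(no_index, "--no-index",
			  untracked, "--untracked",
			  cached, "--cached");
	puts("flags are compatible");
	return 0;
}
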
diff --git a/builtin/hash-object.c b/builtin/hash-object.c
index c7b3ad7..fbae878 100644
--- a/builtin/hash-object.c
+++ b/builtin/hash-object.c
@@ -25,7 +25,7 @@ static int hash_literally(struct object_id *oid, int fd, const char *type, unsig
if (strbuf_read(&buf, fd, 4096) < 0)
ret = -1;
else
- ret = hash_object_file_literally(buf.buf, buf.len, type, oid,
+ ret = write_object_file_literally(buf.buf, buf.len, type, oid,
flags);
strbuf_release(&buf);
return ret;
@@ -81,7 +81,7 @@ int cmd_hash_object(int argc, const char **argv, const char *prefix)
{
static const char * const hash_object_usage[] = {
N_("git hash-object [-t <type>] [-w] [--path=<file> | --no-filters] [--stdin] [--] <file>..."),
- N_("git hash-object --stdin-paths"),
+ "git hash-object --stdin-paths",
NULL
};
const char *type = blob_type;
@@ -92,6 +92,7 @@ int cmd_hash_object(int argc, const char **argv, const char *prefix)
int nongit = 0;
unsigned flags = HASH_FORMAT_CHECK;
const char *vpath = NULL;
+ char *vpath_free = NULL;
const struct option hash_object_options[] = {
OPT_STRING('t', NULL, &type, N_("type"), N_("object type")),
OPT_BIT('w', NULL, &flags, N_("write the object into the object database"),
@@ -114,8 +115,10 @@ int cmd_hash_object(int argc, const char **argv, const char *prefix)
else
prefix = setup_git_directory_gently(&nongit);
- if (vpath && prefix)
- vpath = prefix_filename(prefix, vpath);
+ if (vpath && prefix) {
+ vpath_free = prefix_filename(prefix, vpath);
+ vpath = vpath_free;
+ }
git_config(git_default_config, NULL);
@@ -156,5 +159,7 @@ int cmd_hash_object(int argc, const char **argv, const char *prefix)
if (stdin_paths)
hash_stdin_paths(type, no_filters, flags, literally);
+ free(vpath_free);
+
return 0;
}
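
The hash-object hunk above adopts a common leak-fix pattern: vpath stays a read-only alias while vpath_free owns whatever prefix_filename() allocated, so a single unconditional free() at the end is always safe. A tiny standalone illustration of the pattern, with a hypothetical maybe_build_path() in place of prefix_filename():

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Hypothetical stand-in for prefix_filename(): may or may not allocate. */
static char *maybe_build_path(const char *prefix, const char *path)
{
	char *out;

	if (!prefix)
		return NULL;
	out = malloc(strlen(prefix) + strlen(path) + 1);
	if (!out)
		return NULL;
	sprintf(out, "%s%s", prefix, path);
	return out;
}

int main(void)
{
	const char *vpath = "file.c"; /* read-only view of the path in use */
	char *vpath_free = NULL;      /* owns the heap copy, if one was made */
	char *built = maybe_build_path("sub/dir/", vpath);

	if (built) {
		vpath_free = built;
		vpath = vpath_free;
	}

	printf("using path: %s\n", vpath);
	free(vpath_free); /* safe either way: free(NULL) is a no-op */
	return 0;
}
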
diff --git a/builtin/help.c b/builtin/help.c
index d387131..222f994 100644
--- a/builtin/help.c
+++ b/builtin/help.c
@@ -51,9 +51,14 @@ static const char *html_path;
static int verbose = 1;
static enum help_format help_format = HELP_FORMAT_NONE;
static int exclude_guides;
+static int show_external_commands = -1;
+static int show_aliases = -1;
static struct option builtin_help_options[] = {
OPT_CMDMODE('a', "all", &cmd_mode, N_("print all available commands"),
HELP_ACTION_ALL),
+ OPT_BOOL(0, "external-commands", &show_external_commands,
+ N_("show external commands in --all")),
+ OPT_BOOL(0, "aliases", &show_aliases, N_("show aliases in --all")),
OPT_HIDDEN_BOOL(0, "exclude-guides", &exclude_guides, N_("exclude guides")),
OPT_SET_INT('m', "man", &help_format, N_("show man page"), HELP_FORMAT_MAN),
OPT_SET_INT('w', "web", &help_format, N_("show manual in web browser"),
@@ -75,10 +80,10 @@ static struct option builtin_help_options[] = {
};
static const char * const builtin_help_usage[] = {
- N_("git help [-a|--all] [--[no-]verbose]]\n"
- " [[-i|--info] [-m|--man] [-w|--web]] [<command>]"),
- N_("git help [-g|--guides]"),
- N_("git help [-c|--config]"),
+ "git help [-a|--all] [--[no-]verbose]] [--[no-]external-commands] [--[no-]aliases]",
+ N_("git help [[-i|--info] [-m|--man] [-w|--web]] [<command>]"),
+ "git help [-g|--guides]",
+ "git help [-c|--config]",
NULL
};
@@ -574,11 +579,40 @@ static const char *check_git_cmd(const char* cmd)
return cmd;
}
-static void no_extra_argc(int argc)
+static void no_help_format(const char *opt_mode, enum help_format fmt)
+{
+ const char *opt_fmt;
+
+ switch (fmt) {
+ case HELP_FORMAT_NONE:
+ return;
+ case HELP_FORMAT_MAN:
+ opt_fmt = "--man";
+ break;
+ case HELP_FORMAT_INFO:
+ opt_fmt = "--info";
+ break;
+ case HELP_FORMAT_WEB:
+ opt_fmt = "--web";
+ break;
+ default:
+ BUG("unreachable");
+ }
+
+ usage_msg_optf(_("options '%s' and '%s' cannot be used together"),
+ builtin_help_usage, builtin_help_options, opt_mode,
+ opt_fmt);
+}
+
+static void opt_mode_usage(int argc, const char *opt_mode,
+ enum help_format fmt)
{
if (argc)
- usage_msg_opt(_("this option doesn't take any other arguments"),
- builtin_help_usage, builtin_help_options);
+ usage_msg_optf(_("the '%s' option doesn't take any non-option arguments"),
+ builtin_help_usage, builtin_help_options,
+ opt_mode);
+
+ no_help_format(opt_mode, fmt);
}
int cmd_help(int argc, const char **argv, const char *prefix)
@@ -591,11 +625,19 @@ int cmd_help(int argc, const char **argv, const char *prefix)
builtin_help_usage, 0);
parsed_help_format = help_format;
+ if (cmd_mode != HELP_ACTION_ALL &&
+ (show_external_commands >= 0 ||
+ show_aliases >= 0))
+ usage_msg_opt(_("the '--no-[external-commands|aliases]' options can only be used with '--all'"),
+ builtin_help_usage, builtin_help_options);
+
switch (cmd_mode) {
case HELP_ACTION_ALL:
+ opt_mode_usage(argc, "--all", help_format);
if (verbose) {
setup_pager();
- list_all_cmds_help();
+ list_all_cmds_help(show_external_commands,
+ show_aliases);
return 0;
}
printf(_("usage: %s%s"), _(git_usage_string), "\n\n");
@@ -604,20 +646,21 @@ int cmd_help(int argc, const char **argv, const char *prefix)
printf("%s\n", _(git_more_info_string));
break;
case HELP_ACTION_GUIDES:
- no_extra_argc(argc);
+ opt_mode_usage(argc, "--guides", help_format);
list_guides_help();
printf("%s\n", _(git_more_info_string));
return 0;
case HELP_ACTION_CONFIG_FOR_COMPLETION:
- no_extra_argc(argc);
+ opt_mode_usage(argc, "--config-for-completion", help_format);
list_config_help(SHOW_CONFIG_VARS);
return 0;
case HELP_ACTION_CONFIG_SECTIONS_FOR_COMPLETION:
- no_extra_argc(argc);
+ opt_mode_usage(argc, "--config-sections-for-completion",
+ help_format);
list_config_help(SHOW_CONFIG_SECTIONS);
return 0;
case HELP_ACTION_CONFIG:
- no_extra_argc(argc);
+ opt_mode_usage(argc, "--config", help_format);
setup_pager();
list_config_help(SHOW_CONFIG_HUMAN);
printf("\n%s\n", _("'git help config' for more information"));
diff --git a/builtin/hook.c b/builtin/hook.c
new file mode 100644
index 0000000..54e5c6e
--- /dev/null
+++ b/builtin/hook.c
@@ -0,0 +1,84 @@
+#include "cache.h"
+#include "builtin.h"
+#include "config.h"
+#include "hook.h"
+#include "parse-options.h"
+#include "strbuf.h"
+#include "strvec.h"
+
+#define BUILTIN_HOOK_RUN_USAGE \
+ N_("git hook run [--ignore-missing] <hook-name> [-- <hook-args>]")
+
+static const char * const builtin_hook_usage[] = {
+ BUILTIN_HOOK_RUN_USAGE,
+ NULL
+};
+
+static const char * const builtin_hook_run_usage[] = {
+ BUILTIN_HOOK_RUN_USAGE,
+ NULL
+};
+
+static int run(int argc, const char **argv, const char *prefix)
+{
+ int i;
+ struct run_hooks_opt opt = RUN_HOOKS_OPT_INIT;
+ int ignore_missing = 0;
+ const char *hook_name;
+ struct option run_options[] = {
+ OPT_BOOL(0, "ignore-missing", &ignore_missing,
+ N_("silently ignore missing requested <hook-name>")),
+ OPT_END(),
+ };
+ int ret;
+
+ argc = parse_options(argc, argv, prefix, run_options,
+ builtin_hook_run_usage,
+ PARSE_OPT_KEEP_DASHDASH);
+
+ if (!argc)
+ goto usage;
+
+ /*
+ * Having a -- for "run" when providing <hook-args> is
+ * mandatory.
+ */
+ if (argc > 1 && strcmp(argv[1], "--") &&
+ strcmp(argv[1], "--end-of-options"))
+ goto usage;
+
+ /* Add our arguments, start after -- */
+ for (i = 2 ; i < argc; i++)
+ strvec_push(&opt.args, argv[i]);
+
+ /* Need to take into account core.hooksPath */
+ git_config(git_default_config, NULL);
+
+ hook_name = argv[0];
+ if (!ignore_missing)
+ opt.error_if_missing = 1;
+ ret = run_hooks_opt(hook_name, &opt);
+ if (ret < 0) /* error() return */
+ ret = 1;
+ return ret;
+usage:
+ usage_with_options(builtin_hook_run_usage, run_options);
+}
+
+int cmd_hook(int argc, const char **argv, const char *prefix)
+{
+ struct option builtin_hook_options[] = {
+ OPT_END(),
+ };
+
+ argc = parse_options(argc, argv, NULL, builtin_hook_options,
+ builtin_hook_usage, PARSE_OPT_STOP_AT_NON_OPTION);
+ if (!argc)
+ goto usage;
+
+ if (!strcmp(argv[0], "run"))
+ return run(argc, argv, prefix);
+
+usage:
+ usage_with_options(builtin_hook_usage, builtin_hook_options);
+}
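
After parse_options(), the run() helper above expects argv[0] to be the hook name and, when hook arguments are supplied, argv[1] to be a literal "--" (or "--end-of-options"), with everything from argv[2] onwards forwarded to the hook. A small standalone sketch of that layout, using made-up sample arguments:

#include <stdio.h>
#include <string.h>

int main(void)
{
	/* Hypothetical post-parse_options() argv for:
	 *   git hook run pre-commit -- --verbose file.c
	 */
	const char *argv[] = { "pre-commit", "--", "--verbose", "file.c" };
	int argc = 4;
	int i;

	if (argc > 1 && strcmp(argv[1], "--") &&
	    strcmp(argv[1], "--end-of-options")) {
		fprintf(stderr, "hook arguments must follow a --\n");
		return 129;
	}

	printf("hook: %s\n", argv[0]);
	for (i = 2; i < argc; i++) /* mirrors the strvec_push() loop above */
		printf("arg:  %s\n", argv[i]);
	return 0;
}
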
diff --git a/builtin/index-pack.c b/builtin/index-pack.c
index 3c2e6ae..680b66b 100644
--- a/builtin/index-pack.c
+++ b/builtin/index-pack.c
@@ -323,8 +323,12 @@ static void use(int bytes)
if (signed_add_overflows(consumed_bytes, bytes))
die(_("pack too large for current definition of off_t"));
consumed_bytes += bytes;
- if (max_input_size && consumed_bytes > max_input_size)
- die(_("pack exceeds maximum allowed size"));
+ if (max_input_size && consumed_bytes > max_input_size) {
+ struct strbuf size_limit = STRBUF_INIT;
+ strbuf_humanise_bytes(&size_limit, max_input_size);
+ die(_("pack exceeds maximum allowed size (%s)"),
+ size_limit.buf);
+ }
}
static const char *open_pack_file(const char *pack_name)
@@ -449,8 +453,7 @@ static void *unpack_entry_data(off_t offset, unsigned long size,
int hdrlen;
if (!is_delta_type(type)) {
- hdrlen = xsnprintf(hdr, sizeof(hdr), "%s %"PRIuMAX,
- type_name(type),(uintmax_t)size) + 1;
+ hdrlen = format_object_header(hdr, sizeof(hdr), type, size);
the_hash_algo->init_fn(&c);
the_hash_algo->update_fn(&c, hdr, hdrlen);
} else
@@ -579,7 +582,7 @@ static void *unpack_data(struct object_entry *obj,
if (!n)
die(Q_("premature end of pack file, %"PRIuMAX" byte missing",
"premature end of pack file, %"PRIuMAX" bytes missing",
- (unsigned int)len),
+ len),
(uintmax_t)len);
from += n;
len -= n;
@@ -971,7 +974,7 @@ static struct base_data *resolve_delta(struct object_entry *delta_obj,
if (!result_data)
bad_object(delta_obj->idx.offset, _("failed to apply delta"));
hash_object_file(the_hash_algo, result_data, result_size,
- type_name(delta_obj->real_type), &delta_obj->idx.oid);
+ delta_obj->real_type, &delta_obj->idx.oid);
sha1_object(result_data, NULL, result_size, delta_obj->real_type,
&delta_obj->idx.oid);
@@ -1109,6 +1112,7 @@ static void *threaded_second_pass(void *data)
list_add(&child->list, &work_head);
base_cache_used += child->size;
prune_base_data(NULL);
+ free_base_data(child);
} else {
/*
* This child does not have its own children. It may be
@@ -1131,6 +1135,7 @@ static void *threaded_second_pass(void *data)
p = next_p;
}
+ FREE_AND_NULL(child);
}
work_unlock();
}
@@ -1286,7 +1291,7 @@ static void conclude_pack(int fix_thin_pack, const char *curr_pack, unsigned cha
nr_objects - nr_objects_initial);
stop_progress_msg(&progress, msg.buf);
strbuf_release(&msg);
- finalize_hashfile(f, tail_hash, 0);
+ finalize_hashfile(f, tail_hash, FSYNC_COMPONENT_PACK, 0);
hashcpy(read_hash, pack_hash);
fixup_pack_header_footer(output_fd, pack_hash,
curr_pack, nr_objects,
@@ -1413,9 +1418,8 @@ static void fix_unresolved_deltas(struct hashfile *f)
if (!data)
continue;
- if (check_object_signature(the_repository, &d->oid,
- data, size,
- type_name(type), NULL))
+ if (check_object_signature(the_repository, &d->oid, data, size,
+ type) < 0)
die(_("local object %s is corrupt"), oid_to_hex(&d->oid));
/*
@@ -1424,6 +1428,7 @@ static void fix_unresolved_deltas(struct hashfile *f)
* object).
*/
append_obj_to_pack(f, d->oid.hash, data, size, type);
+ free(data);
threaded_second_pass(NULL);
display_progress(progress, nr_resolved_deltas);
@@ -1508,7 +1513,7 @@ static void final(const char *final_pack_name, const char *curr_pack_name,
if (!from_stdin) {
close(input_fd);
} else {
- fsync_or_die(output_fd, curr_pack_name);
+ fsync_component_or_die(FSYNC_COMPONENT_PACK, output_fd, curr_pack_name);
err = close(output_fd);
if (err)
die_errno(_("error while closing pack file"));
@@ -1703,6 +1708,7 @@ static void show_pack_info(int stat_only)
i + 1,
chain_histogram[i]);
}
+ free(chain_histogram);
}
int cmd_index_pack(int argc, const char **argv, const char *prefix)
@@ -1932,6 +1938,7 @@ int cmd_index_pack(int argc, const char **argv, const char *prefix)
if (do_fsck_object && fsck_finish(&fsck_options))
die(_("fsck error in pack objects"));
+ free(opts.anomaly);
free(objects);
strbuf_release(&index_name_buf);
strbuf_release(&rev_index_name_buf);
diff --git a/builtin/log.c b/builtin/log.c
index 4b49340..c211d66 100644
--- a/builtin/log.c
+++ b/builtin/log.c
@@ -35,6 +35,7 @@
#include "repository.h"
#include "commit-reach.h"
#include "range-diff.h"
+#include "tmp-objdir.h"
#define MAIL_DEFAULT_WRAP 72
#define COVER_FROM_AUTO_MAX_SUBJECT_LEN 100
@@ -422,6 +423,13 @@ static int cmd_log_walk(struct rev_info *rev)
int saved_nrl = 0;
int saved_dcctc = 0;
+ if (rev->remerge_diff) {
+ rev->remerge_objdir = tmp_objdir_create("remerge-diff");
+ if (!rev->remerge_objdir)
+ die(_("unable to create temporary object directory"));
+ tmp_objdir_replace_primary_odb(rev->remerge_objdir, 1);
+ }
+
if (rev->early_output)
setup_early_output();
@@ -464,6 +472,11 @@ static int cmd_log_walk(struct rev_info *rev)
rev->diffopt.no_free = 0;
diff_free(&rev->diffopt);
+ if (rev->remerge_diff) {
+ tmp_objdir_destroy(rev->remerge_objdir);
+ rev->remerge_objdir = NULL;
+ }
+
if (rev->diffopt.output_format & DIFF_FORMAT_CHECKDIFF &&
rev->diffopt.flags.check_failed) {
return 02;
@@ -520,8 +533,6 @@ static int git_log_config(const char *var, const char *value, void *cb)
return 0;
}
- if (grep_config(var, value, cb) < 0)
- return -1;
if (git_gpg_config(var, value, cb) < 0)
return -1;
return git_diff_ui_config(var, value, cb);
@@ -536,6 +547,8 @@ int cmd_whatchanged(int argc, const char **argv, const char *prefix)
git_config(git_log_config, NULL);
repo_init_revisions(the_repository, &rev, prefix);
+ git_config(grep_config, &rev.grep_filter);
+
rev.diff = 1;
rev.simplify_history = 0;
memset(&opt, 0, sizeof(opt));
@@ -650,6 +663,8 @@ int cmd_show(int argc, const char **argv, const char *prefix)
memset(&match_all, 0, sizeof(match_all));
repo_init_revisions(the_repository, &rev, prefix);
+ git_config(grep_config, &rev.grep_filter);
+
rev.diff = 1;
rev.always_show_header = 1;
rev.no_walk = 1;
@@ -733,6 +748,8 @@ int cmd_log_reflog(int argc, const char **argv, const char *prefix)
repo_init_revisions(the_repository, &rev, prefix);
init_reflog_walk(&rev.reflog_info);
+ git_config(grep_config, &rev.grep_filter);
+
rev.verbose_header = 1;
memset(&opt, 0, sizeof(opt));
opt.def = "HEAD";
@@ -766,6 +783,8 @@ int cmd_log(int argc, const char **argv, const char *prefix)
git_config(git_log_config, NULL);
repo_init_revisions(the_repository, &rev, prefix);
+ git_config(grep_config, &rev.grep_filter);
+
rev.always_show_header = 1;
memset(&opt, 0, sizeof(opt));
opt.def = "HEAD";
@@ -1848,10 +1867,13 @@ int cmd_format_patch(int argc, const char **argv, const char *prefix)
extra_hdr.strdup_strings = 1;
extra_to.strdup_strings = 1;
extra_cc.strdup_strings = 1;
+
init_log_defaults();
init_display_notes(&notes_opt);
git_config(git_format_config, NULL);
repo_init_revisions(the_repository, &rev, prefix);
+ git_config(grep_config, &rev.grep_filter);
+
rev.show_notes = show_notes;
memcpy(&rev.notes_opt, &notes_opt, sizeof(notes_opt));
rev.commit_format = CMIT_FMT_EMAIL;
@@ -1958,6 +1980,8 @@ int cmd_format_patch(int argc, const char **argv, const char *prefix)
die(_("--name-status does not make sense"));
if (rev.diffopt.output_format & DIFF_FORMAT_CHECKDIFF)
die(_("--check does not make sense"));
+ if (rev.remerge_diff)
+ die(_("--remerge-diff does not make sense"));
if (!use_patch_format &&
(!rev.diffopt.output_format ||
@@ -1978,8 +2002,9 @@ int cmd_format_patch(int argc, const char **argv, const char *prefix)
if (rev.show_notes)
load_display_notes(&rev.notes_opt);
- if (use_stdout + rev.diffopt.close_file + !!output_directory > 1)
- die(_("options '%s', '%s', and '%s' cannot be used together"), "--stdout", "--output", "--output-directory");
+ die_for_incompatible_opt3(use_stdout, "--stdout",
+ rev.diffopt.close_file, "--output",
+ !!output_directory, "--output-directory");
if (use_stdout) {
setup_pager();
diff --git a/builtin/ls-files.c b/builtin/ls-files.c
index f7ea56c..e791b65 100644
--- a/builtin/ls-files.c
+++ b/builtin/ls-files.c
@@ -244,7 +244,7 @@ static void show_ce(struct repository *repo, struct dir_struct *dir,
printf("%s%06o %s %d\t",
tag,
ce->ce_mode,
- find_unique_abbrev(&ce->oid, abbrev),
+ repo_find_unique_abbrev(repo, &ce->oid, abbrev),
ce_stage(ce));
}
write_eolinfo(repo->index, ce, fullname);
@@ -726,7 +726,7 @@ int cmd_ls_files(int argc, const char **argv, const char *cmd_prefix)
setup_work_tree();
if (recurse_submodules &&
- (show_stage || show_deleted || show_others || show_unmerged ||
+ (show_deleted || show_others || show_unmerged ||
show_killed || show_modified || show_resolve_undo || with_tree))
die("ls-files --recurse-submodules unsupported mode");
diff --git a/builtin/ls-remote.c b/builtin/ls-remote.c
index 44448fa..d856085 100644
--- a/builtin/ls-remote.c
+++ b/builtin/ls-remote.c
@@ -155,6 +155,7 @@ int cmd_ls_remote(int argc, const char **argv, const char *prefix)
ref_array_clear(&ref_array);
if (transport_disconnect(transport))
- return 1;
+ status = 1;
+ transport_ls_refs_options_release(&transport_options);
return status;
}
diff --git a/builtin/ls-tree.c b/builtin/ls-tree.c
index 3a44263..e279be8 100644
--- a/builtin/ls-tree.c
+++ b/builtin/ls-tree.c
@@ -16,22 +16,102 @@
static int line_termination = '\n';
#define LS_RECURSIVE 1
-#define LS_TREE_ONLY 2
-#define LS_SHOW_TREES 4
-#define LS_NAME_ONLY 8
-#define LS_SHOW_SIZE 16
+#define LS_TREE_ONLY (1 << 1)
+#define LS_SHOW_TREES (1 << 2)
static int abbrev;
static int ls_options;
static struct pathspec pathspec;
static int chomp_prefix;
static const char *ls_tree_prefix;
+static const char *format;
+struct show_tree_data {
+ unsigned mode;
+ enum object_type type;
+ const struct object_id *oid;
+ const char *pathname;
+ struct strbuf *base;
+};
static const char * const ls_tree_usage[] = {
N_("git ls-tree [<options>] <tree-ish> [<path>...]"),
NULL
};
-static int show_recursive(const char *base, int baselen, const char *pathname)
+static enum ls_tree_cmdmode {
+ MODE_DEFAULT = 0,
+ MODE_LONG,
+ MODE_NAME_ONLY,
+ MODE_NAME_STATUS,
+ MODE_OBJECT_ONLY,
+} cmdmode;
+
+static void expand_objectsize(struct strbuf *line, const struct object_id *oid,
+ const enum object_type type, unsigned int padded)
+{
+ if (type == OBJ_BLOB) {
+ unsigned long size;
+ if (oid_object_info(the_repository, oid, &size) < 0)
+ die(_("could not get object info about '%s'"),
+ oid_to_hex(oid));
+ if (padded)
+ strbuf_addf(line, "%7"PRIuMAX, (uintmax_t)size);
+ else
+ strbuf_addf(line, "%"PRIuMAX, (uintmax_t)size);
+ } else if (padded) {
+ strbuf_addf(line, "%7s", "-");
+ } else {
+ strbuf_addstr(line, "-");
+ }
+}
+
+static size_t expand_show_tree(struct strbuf *sb, const char *start,
+ void *context)
+{
+ struct show_tree_data *data = context;
+ const char *end;
+ const char *p;
+ unsigned int errlen;
+ size_t len = strbuf_expand_literal_cb(sb, start, NULL);
+
+ if (len)
+ return len;
+ if (*start != '(')
+ die(_("bad ls-tree format: element '%s' does not start with '('"), start);
+
+ end = strchr(start + 1, ')');
+ if (!end)
+ die(_("bad ls-tree format: element '%s' does not end in ')'"), start);
+
+ len = end - start + 1;
+ if (skip_prefix(start, "(objectmode)", &p)) {
+ strbuf_addf(sb, "%06o", data->mode);
+ } else if (skip_prefix(start, "(objecttype)", &p)) {
+ strbuf_addstr(sb, type_name(data->type));
+ } else if (skip_prefix(start, "(objectsize:padded)", &p)) {
+ expand_objectsize(sb, data->oid, data->type, 1);
+ } else if (skip_prefix(start, "(objectsize)", &p)) {
+ expand_objectsize(sb, data->oid, data->type, 0);
+ } else if (skip_prefix(start, "(objectname)", &p)) {
+ strbuf_add_unique_abbrev(sb, data->oid, abbrev);
+ } else if (skip_prefix(start, "(path)", &p)) {
+ const char *name = data->base->buf;
+ const char *prefix = chomp_prefix ? ls_tree_prefix : NULL;
+ struct strbuf quoted = STRBUF_INIT;
+ struct strbuf sbuf = STRBUF_INIT;
+ strbuf_addstr(data->base, data->pathname);
+ name = relative_path(data->base->buf, prefix, &sbuf);
+ quote_c_style(name, &quoted, NULL, 0);
+ strbuf_addbuf(sb, &quoted);
+ strbuf_release(&sbuf);
+ strbuf_release(&quoted);
+ } else {
+ errlen = (unsigned long)len;
+ die(_("bad ls-tree format: %%%.*s"), errlen, start);
+ }
+ return len;
+}
+
+static int show_recursive(const char *base, size_t baselen, const char *pathname)
{
int i;
@@ -43,7 +123,7 @@ static int show_recursive(const char *base, int baselen, const char *pathname)
for (i = 0; i < pathspec.nr; i++) {
const char *spec = pathspec.items[i].match;
- int len, speclen;
+ size_t len, speclen;
if (strncmp(base, spec, baselen))
continue;
@@ -61,69 +141,197 @@ static int show_recursive(const char *base, int baselen, const char *pathname)
return 0;
}
-static int show_tree(const struct object_id *oid, struct strbuf *base,
- const char *pathname, unsigned mode, void *context)
+static int show_tree_fmt(const struct object_id *oid, struct strbuf *base,
+ const char *pathname, unsigned mode, void *context)
{
- int retval = 0;
- int baselen;
- const char *type = blob_type;
-
- if (S_ISGITLINK(mode)) {
- /*
- * Maybe we want to have some recursive version here?
- *
- * Something similar to this incomplete example:
- *
- if (show_subprojects(base, baselen, pathname))
- retval = READ_TREE_RECURSIVE;
- *
- */
- type = commit_type;
- } else if (S_ISDIR(mode)) {
- if (show_recursive(base->buf, base->len, pathname)) {
- retval = READ_TREE_RECURSIVE;
- if (!(ls_options & LS_SHOW_TREES))
- return retval;
- }
- type = tree_type;
- }
- else if (ls_options & LS_TREE_ONLY)
+ size_t baselen;
+ int recurse = 0;
+ struct strbuf sb = STRBUF_INIT;
+ enum object_type type = object_type(mode);
+
+ struct show_tree_data data = {
+ .mode = mode,
+ .type = type,
+ .oid = oid,
+ .pathname = pathname,
+ .base = base,
+ };
+
+ if (type == OBJ_TREE && show_recursive(base->buf, base->len, pathname))
+ recurse = READ_TREE_RECURSIVE;
+ if (type == OBJ_TREE && recurse && !(ls_options & LS_SHOW_TREES))
+ return recurse;
+ if (type == OBJ_BLOB && (ls_options & LS_TREE_ONLY))
return 0;
- if (!(ls_options & LS_NAME_ONLY)) {
- if (ls_options & LS_SHOW_SIZE) {
- char size_text[24];
- if (!strcmp(type, blob_type)) {
- unsigned long size;
- if (oid_object_info(the_repository, oid, &size) == OBJ_BAD)
- xsnprintf(size_text, sizeof(size_text),
- "BAD");
- else
- xsnprintf(size_text, sizeof(size_text),
- "%"PRIuMAX, (uintmax_t)size);
- } else
- xsnprintf(size_text, sizeof(size_text), "-");
- printf("%06o %s %s %7s\t", mode, type,
- find_unique_abbrev(oid, abbrev),
- size_text);
- } else
- printf("%06o %s %s\t", mode, type,
- find_unique_abbrev(oid, abbrev));
- }
baselen = base->len;
+ strbuf_expand(&sb, format, expand_show_tree, &data);
+ strbuf_addch(&sb, line_termination);
+ fwrite(sb.buf, sb.len, 1, stdout);
+ strbuf_release(&sb);
+ strbuf_setlen(base, baselen);
+ return recurse;
+}
+
+static int show_tree_common(struct show_tree_data *data, int *recurse,
+ const struct object_id *oid, struct strbuf *base,
+ const char *pathname, unsigned mode)
+{
+ enum object_type type = object_type(mode);
+ int ret = -1;
+
+ *recurse = 0;
+ data->mode = mode;
+ data->type = type;
+ data->oid = oid;
+ data->pathname = pathname;
+ data->base = base;
+
+ if (type == OBJ_BLOB) {
+ if (ls_options & LS_TREE_ONLY)
+ ret = 0;
+ } else if (type == OBJ_TREE &&
+ show_recursive(base->buf, base->len, pathname)) {
+ *recurse = READ_TREE_RECURSIVE;
+ if (!(ls_options & LS_SHOW_TREES))
+ ret = *recurse;
+ }
+
+ return ret;
+}
+
+static void show_tree_common_default_long(struct strbuf *base,
+ const char *pathname,
+ const size_t baselen)
+{
+ strbuf_addstr(base, pathname);
+ write_name_quoted_relative(base->buf,
+ chomp_prefix ? ls_tree_prefix : NULL, stdout,
+ line_termination);
+ strbuf_setlen(base, baselen);
+}
+
+static int show_tree_default(const struct object_id *oid, struct strbuf *base,
+ const char *pathname, unsigned mode,
+ void *context)
+{
+ int early;
+ int recurse;
+ struct show_tree_data data = { 0 };
+
+ early = show_tree_common(&data, &recurse, oid, base, pathname, mode);
+ if (early >= 0)
+ return early;
+
+ printf("%06o %s %s\t", data.mode, type_name(data.type),
+ find_unique_abbrev(data.oid, abbrev));
+ show_tree_common_default_long(base, pathname, data.base->len);
+ return recurse;
+}
+
+static int show_tree_long(const struct object_id *oid, struct strbuf *base,
+ const char *pathname, unsigned mode, void *context)
+{
+ int early;
+ int recurse;
+ struct show_tree_data data = { 0 };
+ char size_text[24];
+
+ early = show_tree_common(&data, &recurse, oid, base, pathname, mode);
+ if (early >= 0)
+ return early;
+
+ if (data.type == OBJ_BLOB) {
+ unsigned long size;
+ if (oid_object_info(the_repository, data.oid, &size) == OBJ_BAD)
+ xsnprintf(size_text, sizeof(size_text), "BAD");
+ else
+ xsnprintf(size_text, sizeof(size_text),
+ "%" PRIuMAX, (uintmax_t)size);
+ } else {
+ xsnprintf(size_text, sizeof(size_text), "-");
+ }
+
+ printf("%06o %s %s %7s\t", data.mode, type_name(data.type),
+ find_unique_abbrev(data.oid, abbrev), size_text);
+ show_tree_common_default_long(base, pathname, data.base->len);
+ return recurse;
+}
+
+static int show_tree_name_only(const struct object_id *oid, struct strbuf *base,
+ const char *pathname, unsigned mode, void *context)
+{
+ int early;
+ int recurse;
+ const size_t baselen = base->len;
+ struct show_tree_data data = { 0 };
+
+ early = show_tree_common(&data, &recurse, oid, base, pathname, mode);
+ if (early >= 0)
+ return early;
+
strbuf_addstr(base, pathname);
write_name_quoted_relative(base->buf,
chomp_prefix ? ls_tree_prefix : NULL,
stdout, line_termination);
strbuf_setlen(base, baselen);
- return retval;
+ return recurse;
+}
+
+static int show_tree_object(const struct object_id *oid, struct strbuf *base,
+ const char *pathname, unsigned mode, void *context)
+{
+ int early;
+ int recurse;
+ struct show_tree_data data = { 0 };
+
+ early = show_tree_common(&data, &recurse, oid, base, pathname, mode);
+ if (early >= 0)
+ return early;
+
+ printf("%s%c", find_unique_abbrev(oid, abbrev), line_termination);
+ return recurse;
}
+struct ls_tree_cmdmode_to_fmt {
+ enum ls_tree_cmdmode mode;
+ const char *const fmt;
+ read_tree_fn_t fn;
+};
+
+static struct ls_tree_cmdmode_to_fmt ls_tree_cmdmode_format[] = {
+ {
+ .mode = MODE_DEFAULT,
+ .fmt = "%(objectmode) %(objecttype) %(objectname)%x09%(path)",
+ .fn = show_tree_default,
+ },
+ {
+ .mode = MODE_LONG,
+ .fmt = "%(objectmode) %(objecttype) %(objectname) %(objectsize:padded)%x09%(path)",
+ .fn = show_tree_long,
+ },
+ {
+ .mode = MODE_NAME_ONLY, /* And MODE_NAME_STATUS */
+ .fmt = "%(path)",
+ .fn = show_tree_name_only,
+ },
+ {
+ .mode = MODE_OBJECT_ONLY,
+ .fmt = "%(objectname)",
+ .fn = show_tree_object
+ },
+ {
+ /* fallback */
+ .fn = show_tree_default,
+ },
+};
+
int cmd_ls_tree(int argc, const char **argv, const char *prefix)
{
struct object_id oid;
struct tree *tree;
int i, full_tree = 0;
+ read_tree_fn_t fn = NULL;
const struct option ls_tree_options[] = {
OPT_BIT('d', NULL, &ls_options, N_("only show trees"),
LS_TREE_ONLY),
@@ -133,24 +341,30 @@ int cmd_ls_tree(int argc, const char **argv, const char *prefix)
LS_SHOW_TREES),
OPT_SET_INT('z', NULL, &line_termination,
N_("terminate entries with NUL byte"), 0),
- OPT_BIT('l', "long", &ls_options, N_("include object size"),
- LS_SHOW_SIZE),
- OPT_BIT(0, "name-only", &ls_options, N_("list only filenames"),
- LS_NAME_ONLY),
- OPT_BIT(0, "name-status", &ls_options, N_("list only filenames"),
- LS_NAME_ONLY),
+ OPT_CMDMODE('l', "long", &cmdmode, N_("include object size"),
+ MODE_LONG),
+ OPT_CMDMODE(0, "name-only", &cmdmode, N_("list only filenames"),
+ MODE_NAME_ONLY),
+ OPT_CMDMODE(0, "name-status", &cmdmode, N_("list only filenames"),
+ MODE_NAME_STATUS),
+ OPT_CMDMODE(0, "object-only", &cmdmode, N_("list only objects"),
+ MODE_OBJECT_ONLY),
OPT_SET_INT(0, "full-name", &chomp_prefix,
N_("use full path names"), 0),
OPT_BOOL(0, "full-tree", &full_tree,
N_("list entire tree; not just current directory "
"(implies --full-name)")),
+ OPT_STRING_F(0, "format", &format, N_("format"),
+ N_("format to use for the output"),
+ PARSE_OPT_NONEG),
OPT__ABBREV(&abbrev),
OPT_END()
};
+ struct ls_tree_cmdmode_to_fmt *m2f = ls_tree_cmdmode_format;
git_config(git_default_config, NULL);
ls_tree_prefix = prefix;
- if (prefix && *prefix)
+ if (prefix)
chomp_prefix = strlen(prefix);
argc = parse_options(argc, argv, prefix, ls_tree_options,
@@ -159,11 +373,23 @@ int cmd_ls_tree(int argc, const char **argv, const char *prefix)
ls_tree_prefix = prefix = NULL;
chomp_prefix = 0;
}
+ /*
+ * We wanted to detect conflicts between --name-only and
+ * --name-status, but once we're done with that subsequent
+ * code should only need to check the primary name.
+ */
+ if (cmdmode == MODE_NAME_STATUS)
+ cmdmode = MODE_NAME_ONLY;
+
/* -d -r should imply -t, but -d by itself should not have to. */
if ( (LS_TREE_ONLY|LS_RECURSIVE) ==
((LS_TREE_ONLY|LS_RECURSIVE) & ls_options))
ls_options |= LS_SHOW_TREES;
+ if (format && cmdmode)
+ usage_msg_opt(
+ _("--format can't be combined with other format-altering options"),
+ ls_tree_usage, ls_tree_options);
if (argc < 1)
usage_with_options(ls_tree_usage, ls_tree_options);
if (get_oid(argv[0], &oid))
@@ -185,6 +411,24 @@ int cmd_ls_tree(int argc, const char **argv, const char *prefix)
tree = parse_tree_indirect(&oid);
if (!tree)
die("not a tree object");
- return !!read_tree(the_repository, tree,
- &pathspec, show_tree, NULL);
+ /*
+ * The generic show_tree_fmt() is slower than show_tree(), so
+ * take the fast path if possible.
+ */
+ while (m2f) {
+ if (!m2f->fmt) {
+ fn = format ? show_tree_fmt : show_tree_default;
+ } else if (format && !strcmp(format, m2f->fmt)) {
+ cmdmode = m2f->mode;
+ fn = m2f->fn;
+ } else if (!format && cmdmode == m2f->mode) {
+ fn = m2f->fn;
+ } else {
+ m2f++;
+ continue;
+ }
+ break;
+ }
+
+ return !!read_tree(the_repository, tree, &pathspec, fn, NULL);
}
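
The new --format support above is driven by strbuf_expand() calling expand_show_tree() once per %-placeholder; each callback consumes one "%(name)" token and appends its expansion. A rough standalone sketch of that placeholder scan — the plain buffer handling and the reduced atom set here are simplified stand-ins, not the real strbuf machinery:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Expand one "(name)" token starting at *start*; returns bytes consumed. */
static size_t expand_one(const char *start, const char *type, const char *path)
{
	const char *end = strchr(start, ')');
	size_t len;

	if (*start != '(' || !end) {
		fprintf(stderr, "bad format element: %s\n", start);
		exit(1);
	}
	len = end - start + 1;
	if (!strncmp(start, "(objecttype)", len))
		fputs(type, stdout);
	else if (!strncmp(start, "(path)", len))
		fputs(path, stdout);
	else
		fprintf(stderr, "unknown atom: %.*s\n", (int)len, start);
	return len;
}

int main(void)
{
	const char *fmt = "%(objecttype)\t%(path)"; /* sample format string */
	const char *p;

	for (p = fmt; *p; p++) {
		if (*p != '%') {
			putchar(*p);
			continue;
		}
		p += expand_one(p + 1, "blob", "Makefile");
	}
	putchar('\n');
	return 0;
}
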
diff --git a/builtin/mailsplit.c b/builtin/mailsplit.c
index 7baef30..3095235 100644
--- a/builtin/mailsplit.c
+++ b/builtin/mailsplit.c
@@ -223,6 +223,9 @@ static int split_mbox(const char *file, const char *dir, int allow_bare,
FILE *f = !strcmp(file, "-") ? stdin : fopen(file, "r");
int file_done = 0;
+ if (f && isatty(fileno(f)))
+ warning(_("reading patches from stdin/tty..."));
+
if (!f) {
error_errno("cannot open mbox %s", file);
goto out;
diff --git a/builtin/merge-base.c b/builtin/merge-base.c
index 6719ac1..a11f8c6 100644
--- a/builtin/merge-base.c
+++ b/builtin/merge-base.c
@@ -138,6 +138,7 @@ int cmd_merge_base(int argc, const char **argv, const char *prefix)
int rev_nr = 0;
int show_all = 0;
int cmdmode = 0;
+ int ret;
struct option options[] = {
OPT_BOOL('a', "all", &show_all, N_("output all common ancestors")),
@@ -159,12 +160,14 @@ int cmd_merge_base(int argc, const char **argv, const char *prefix)
if (argc < 2)
usage_with_options(merge_base_usage, options);
if (show_all)
- die("--is-ancestor cannot be used with --all");
+ die(_("options '%s' and '%s' cannot be used together"),
+ "--is-ancestor", "--all");
return handle_is_ancestor(argc, argv);
}
if (cmdmode == 'r' && show_all)
- die("--independent cannot be used with --all");
+ die(_("options '%s' and '%s' cannot be used together"),
+ "--independent", "--all");
if (cmdmode == 'o')
return handle_octopus(argc, argv, show_all);
@@ -184,5 +187,7 @@ int cmd_merge_base(int argc, const char **argv, const char *prefix)
ALLOC_ARRAY(rev, argc);
while (argc-- > 0)
rev[rev_nr++] = get_commit_reference(*argv++);
- return show_merge_base(rev, rev_nr, show_all);
+ ret = show_merge_base(rev, rev_nr, show_all);
+ free(rev);
+ return ret;
}
diff --git a/builtin/merge-recursive.c b/builtin/merge-recursive.c
index a4bfd8f..b9acbf5 100644
--- a/builtin/merge-recursive.c
+++ b/builtin/merge-recursive.c
@@ -58,7 +58,7 @@ int cmd_merge_recursive(int argc, const char **argv, const char *prefix)
"Ignoring %s.",
"cannot handle more than %d bases. "
"Ignoring %s.",
- (int)ARRAY_SIZE(bases)-1),
+ ARRAY_SIZE(bases)-1),
(int)ARRAY_SIZE(bases)-1, argv[i]);
}
if (argc - i != 3) /* "--" "<head>" "<remote>" */
diff --git a/builtin/merge.c b/builtin/merge.c
index 74e53cf..f178f5a 100644
--- a/builtin/merge.c
+++ b/builtin/merge.c
@@ -490,7 +490,7 @@ static void finish(struct commit *head_commit,
}
/* Run a post-merge hook */
- run_hook_le(NULL, "post-merge", squash ? "1" : "0", NULL);
+ run_hooks_l("post-merge", squash ? "1" : "0", NULL);
apply_autostash(git_path_merge_autostash(the_repository));
strbuf_release(&reflog_message);
@@ -845,15 +845,20 @@ static void prepare_to_commit(struct commit_list *remoteheads)
struct strbuf msg = STRBUF_INIT;
const char *index_file = get_index_file();
- if (!no_verify && run_commit_hook(0 < option_edit, index_file, "pre-merge-commit", NULL))
- abort_commit(remoteheads, NULL);
- /*
- * Re-read the index as pre-merge-commit hook could have updated it,
- * and write it out as a tree. We must do this before we invoke
- * the editor and after we invoke run_status above.
- */
- if (hook_exists("pre-merge-commit"))
- discard_cache();
+ if (!no_verify) {
+ int invoked_hook;
+
+ if (run_commit_hook(0 < option_edit, index_file, &invoked_hook,
+ "pre-merge-commit", NULL))
+ abort_commit(remoteheads, NULL);
+ /*
+ * Re-read the index as pre-merge-commit hook could have updated it,
+ * and write it out as a tree. We must do this before we invoke
+ * the editor and after we invoke run_status above.
+ */
+ if (invoked_hook)
+ discard_cache();
+ }
read_cache_from(index_file);
strbuf_addbuf(&msg, &merge_msg);
if (squash)
@@ -875,7 +880,8 @@ static void prepare_to_commit(struct commit_list *remoteheads)
append_signoff(&msg, ignore_non_trailer(msg.buf, msg.len), 0);
write_merge_heads(remoteheads);
write_file_buf(git_path_merge_msg(the_repository), msg.buf, msg.len);
- if (run_commit_hook(0 < option_edit, get_index_file(), "prepare-commit-msg",
+ if (run_commit_hook(0 < option_edit, get_index_file(), NULL,
+ "prepare-commit-msg",
git_path_merge_msg(the_repository), "merge", NULL))
abort_commit(remoteheads, NULL);
if (0 < option_edit) {
@@ -884,7 +890,7 @@ static void prepare_to_commit(struct commit_list *remoteheads)
}
if (!no_verify && run_commit_hook(0 < option_edit, get_index_file(),
- "commit-msg",
+ NULL, "commit-msg",
git_path_merge_msg(the_repository), NULL))
abort_commit(remoteheads, NULL);
@@ -1273,7 +1279,7 @@ int cmd_merge(int argc, const char **argv, const char *prefix)
int best_cnt = -1, merge_was_ok = 0, automerge_was_ok = 0;
struct commit_list *common = NULL;
const char *best_strategy = NULL, *wt_strategy = NULL;
- struct commit_list *remoteheads, *p;
+ struct commit_list *remoteheads = NULL, *p;
void *branch_to_free;
int orig_argc = argc;
@@ -1568,8 +1574,7 @@ int cmd_merge(int argc, const char **argv, const char *prefix)
if (autostash)
create_autostash(the_repository,
- git_path_merge_autostash(the_repository),
- "merge");
+ git_path_merge_autostash(the_repository));
if (checkout_fast_forward(the_repository,
&head_commit->object.oid,
&commit->object.oid,
@@ -1640,8 +1645,7 @@ int cmd_merge(int argc, const char **argv, const char *prefix)
if (autostash)
create_autostash(the_repository,
- git_path_merge_autostash(the_repository),
- "merge");
+ git_path_merge_autostash(the_repository));
/* We are going to make a new commit. */
git_committer_info(IDENT_STRICT);
@@ -1752,6 +1756,10 @@ int cmd_merge(int argc, const char **argv, const char *prefix)
ret = suggest_conflicts();
done:
+ if (!automerge_was_ok) {
+ free_commit_list(common);
+ free_commit_list(remoteheads);
+ }
strbuf_release(&buf);
free(branch_to_free);
return ret;
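
The prepare_to_commit() hunk above threads an invoked_hook out-parameter through run_commit_hook() so the index is only re-read when a pre-merge-commit hook actually ran, instead of probing again with hook_exists(). A minimal standalone sketch of that "report whether the optional step ran" pattern, with a hypothetical run_optional_step():

#include <stdio.h>

/* Hypothetical optional step; *invoked reports whether it ran at all. */
static int run_optional_step(int enabled, int *invoked)
{
	if (invoked)
		*invoked = enabled;
	if (!enabled)
		return 0; /* nothing to do; not an error */
	puts("optional step ran");
	return 0;         /* 0 = success, non-zero would abort the caller */
}

int main(void)
{
	int invoked = 0;

	if (run_optional_step(1, &invoked))
		return 1;
	if (invoked)
		puts("re-reading state the step may have modified");
	return 0;
}
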
diff --git a/builtin/mktag.c b/builtin/mktag.c
index 3b2dbbb..5d22909 100644
--- a/builtin/mktag.c
+++ b/builtin/mktag.c
@@ -7,7 +7,7 @@
#include "config.h"
static char const * const builtin_mktag_usage[] = {
- N_("git mktag"),
+ "git mktag",
NULL
};
static int option_strict = 1;
@@ -61,9 +61,8 @@ static int verify_object_in_tag(struct object_id *tagged_oid, int *tagged_type)
type_name(*tagged_type), type_name(type));
repl = lookup_replace_object(the_repository, tagged_oid);
- ret = check_object_signature(the_repository, repl,
- buffer, size, type_name(*tagged_type),
- NULL);
+ ret = check_object_signature(the_repository, repl, buffer, size,
+ *tagged_type);
free(buffer);
return ret;
@@ -97,10 +96,10 @@ int cmd_mktag(int argc, const char **argv, const char *prefix)
&tagged_oid, &tagged_type))
die(_("tag on stdin did not pass our strict fsck check"));
- if (verify_object_in_tag(&tagged_oid, &tagged_type))
+ if (verify_object_in_tag(&tagged_oid, &tagged_type) < 0)
die(_("tag on stdin did not refer to a valid object"));
- if (write_object_file(buf.buf, buf.len, tag_type, &result) < 0)
+ if (write_object_file(buf.buf, buf.len, OBJ_TAG, &result) < 0)
die(_("unable to write tag file"));
strbuf_release(&buf);
diff --git a/builtin/mktree.c b/builtin/mktree.c
index ae78ca1..902edba 100644
--- a/builtin/mktree.c
+++ b/builtin/mktree.c
@@ -58,12 +58,12 @@ static void write_tree(struct object_id *oid)
strbuf_add(&buf, ent->oid.hash, the_hash_algo->rawsz);
}
- write_object_file(buf.buf, buf.len, tree_type, oid);
+ write_object_file(buf.buf, buf.len, OBJ_TREE, oid);
strbuf_release(&buf);
}
static const char *mktree_usage[] = {
- N_("git mktree [-z] [--missing] [--batch]"),
+ "git mktree [-z] [--missing] [--batch]",
NULL
};
diff --git a/builtin/name-rev.c b/builtin/name-rev.c
index 27f6015..c59b569 100644
--- a/builtin/name-rev.c
+++ b/builtin/name-rev.c
@@ -9,6 +9,7 @@
#include "prio-queue.h"
#include "hash-lookup.h"
#include "commit-slab.h"
+#include "commit-graph.h"
/*
* One day. See the 'name a rev shortly after epoch' test in t6120 when
@@ -26,9 +27,58 @@ struct rev_name {
define_commit_slab(commit_rev_name, struct rev_name);
+static timestamp_t generation_cutoff = GENERATION_NUMBER_INFINITY;
static timestamp_t cutoff = TIME_MAX;
static struct commit_rev_name rev_names;
+/* Disable the cutoff checks entirely */
+static void disable_cutoff(void)
+{
+ generation_cutoff = 0;
+ cutoff = 0;
+}
+
+/* Cut off the search at commits older than this one */
+static void set_commit_cutoff(struct commit *commit)
+{
+
+ if (cutoff > commit->date)
+ cutoff = commit->date;
+
+ if (generation_cutoff) {
+ timestamp_t generation = commit_graph_generation(commit);
+
+ if (generation_cutoff > generation)
+ generation_cutoff = generation;
+ }
+}
+
+/* Adjust the commit date cutoff with a slop to allow for slightly
+ * incorrect commit timestamps in case of clock skew.
+ */
+static void adjust_cutoff_timestamp_for_slop(void)
+{
+ if (cutoff) {
+ /* check for underflow */
+ if (cutoff > TIME_MIN + CUTOFF_DATE_SLOP)
+ cutoff = cutoff - CUTOFF_DATE_SLOP;
+ else
+ cutoff = TIME_MIN;
+ }
+}
+
+/* Check if a commit is before the cutoff. Prioritize generation numbers
+ * first, but use the commit timestamp if we lack generation data.
+ */
+static int commit_is_before_cutoff(struct commit *commit)
+{
+ if (generation_cutoff < GENERATION_NUMBER_INFINITY)
+ return generation_cutoff &&
+ commit_graph_generation(commit) < generation_cutoff;
+
+ return commit->date < cutoff;
+}
+
/* How many generations are maximally preferred over _one_ merge traversal? */
#define MERGE_TRAVERSAL_WEIGHT 65535
@@ -151,7 +201,7 @@ static void name_rev(struct commit *start_commit,
struct rev_name *start_name;
parse_commit(start_commit);
- if (start_commit->date < cutoff)
+ if (commit_is_before_cutoff(start_commit))
return;
start_name = create_or_update_name(start_commit, taggerdate, 0, 0,
@@ -181,7 +231,7 @@ static void name_rev(struct commit *start_commit,
int generation, distance;
parse_commit(parent);
- if (parent->date < cutoff)
+ if (commit_is_before_cutoff(parent))
continue;
if (parent_number > 1) {
@@ -473,7 +523,7 @@ static void show_name(const struct object *obj,
static char const * const name_rev_usage[] = {
N_("git name-rev [<options>] <commit>..."),
N_("git name-rev [<options>] --all"),
- N_("git name-rev [<options>] --stdin"),
+ N_("git name-rev [<options>] --annotate-stdin"),
NULL
};
@@ -527,7 +577,7 @@ static void name_rev_line(char *p, struct name_ref_data *data)
int cmd_name_rev(int argc, const char **argv, const char *prefix)
{
struct object_array revs = OBJECT_ARRAY_INIT;
- int all = 0, transform_stdin = 0, allow_undefined = 1, always = 0, peel_tag = 0;
+ int all = 0, annotate_stdin = 0, transform_stdin = 0, allow_undefined = 1, always = 0, peel_tag = 0;
struct name_ref_data data = { 0, 0, STRING_LIST_INIT_NODUP, STRING_LIST_INIT_NODUP };
struct option opts[] = {
OPT_BOOL(0, "name-only", &data.name_only, N_("print only ref-based names (no object names)")),
@@ -538,7 +588,8 @@ int cmd_name_rev(int argc, const char **argv, const char *prefix)
N_("ignore refs matching <pattern>")),
OPT_GROUP(""),
OPT_BOOL(0, "all", &all, N_("list all commits reachable from all refs")),
- OPT_BOOL(0, "stdin", &transform_stdin, N_("read from stdin")),
+ OPT_BOOL(0, "stdin", &transform_stdin, N_("deprecated: use annotate-stdin instead")),
+ OPT_BOOL(0, "annotate-stdin", &annotate_stdin, N_("annotate text from stdin")),
OPT_BOOL(0, "undefined", &allow_undefined, N_("allow to print `undefined` names (default)")),
OPT_BOOL(0, "always", &always,
N_("show abbreviated commit object as fallback")),
@@ -554,12 +605,20 @@ int cmd_name_rev(int argc, const char **argv, const char *prefix)
init_commit_rev_name(&rev_names);
git_config(git_default_config, NULL);
argc = parse_options(argc, argv, prefix, opts, name_rev_usage, 0);
- if (all + transform_stdin + !!argc > 1) {
+
+ if (transform_stdin) {
+ warning("--stdin is deprecated. Please use --annotate-stdin instead, "
+ "which is functionally equivalent.\n"
+ "This option will be removed in a future release.");
+ annotate_stdin = 1;
+ }
+
+ if (all + annotate_stdin + !!argc > 1) {
error("Specify either a list, or --all, not both!");
usage_with_options(name_rev_usage, opts);
}
- if (all || transform_stdin)
- cutoff = 0;
+ if (all || annotate_stdin)
+ disable_cutoff();
for (; argc; argc--, argv++) {
struct object_id oid;
@@ -587,10 +646,8 @@ int cmd_name_rev(int argc, const char **argv, const char *prefix)
continue;
}
- if (commit) {
- if (cutoff > commit->date)
- cutoff = commit->date;
- }
+ if (commit)
+ set_commit_cutoff(commit);
if (peel_tag) {
if (!commit) {
@@ -603,25 +660,19 @@ int cmd_name_rev(int argc, const char **argv, const char *prefix)
add_object_array(object, *argv, &revs);
}
- if (cutoff) {
- /* check for undeflow */
- if (cutoff > TIME_MIN + CUTOFF_DATE_SLOP)
- cutoff = cutoff - CUTOFF_DATE_SLOP;
- else
- cutoff = TIME_MIN;
- }
+ adjust_cutoff_timestamp_for_slop();
+
for_each_ref(name_ref, &data);
name_tips();
- if (transform_stdin) {
- char buffer[2048];
+ if (annotate_stdin) {
+ struct strbuf sb = STRBUF_INIT;
- while (!feof(stdin)) {
- char *p = fgets(buffer, sizeof(buffer), stdin);
- if (!p)
- break;
- name_rev_line(p, &data);
+ while (strbuf_getline(&sb, stdin) != EOF) {
+ strbuf_addch(&sb, '\n');
+ name_rev_line(sb.buf, &data);
}
+ strbuf_release(&sb);
} else if (all) {
int i, max;
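
commit_is_before_cutoff() above prefers generation numbers whenever the commit graph supplied any (generation_cutoff has dropped below infinity) and only falls back to commit dates otherwise. A small standalone sketch of that two-tier cutoff, with made-up sentinel values in place of GENERATION_NUMBER_INFINITY and the commit-graph lookup:

#include <stdio.h>
#include <stdint.h>

#define GEN_INFINITY UINT64_MAX /* stands in for GENERATION_NUMBER_INFINITY */

static uint64_t generation_cutoff = GEN_INFINITY;
static uint64_t date_cutoff;

struct fake_commit { uint64_t generation; uint64_t date; };

static int is_before_cutoff(const struct fake_commit *c)
{
	if (generation_cutoff < GEN_INFINITY) /* graph data was available */
		return generation_cutoff && c->generation < generation_cutoff;
	return c->date < date_cutoff;         /* fall back to timestamps */
}

int main(void)
{
	struct fake_commit old = { .generation = 10, .date = 1000 };

	generation_cutoff = 50; /* as if set_commit_cutoff() saw generation 50 */
	date_cutoff = 2000;

	printf("before cutoff: %s\n", is_before_cutoff(&old) ? "yes" : "no");
	return 0;
}
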
diff --git a/builtin/notes.c b/builtin/notes.c
index 05d6048..a3d0d15 100644
--- a/builtin/notes.c
+++ b/builtin/notes.c
@@ -32,8 +32,8 @@ static const char * const git_notes_usage[] = {
N_("git notes [--ref <notes-ref>] edit [--allow-empty] [<object>]"),
N_("git notes [--ref <notes-ref>] show [<object>]"),
N_("git notes [--ref <notes-ref>] merge [-v | -q] [-s <strategy>] <notes-ref>"),
- N_("git notes merge --commit [-v | -q]"),
- N_("git notes merge --abort [-v | -q]"),
+ "git notes merge --commit [-v | -q]",
+ "git notes merge --abort [-v | -q]",
N_("git notes [--ref <notes-ref>] remove [<object>...]"),
N_("git notes [--ref <notes-ref>] prune [-n] [-v]"),
N_("git notes [--ref <notes-ref>] get-ref"),
@@ -89,7 +89,7 @@ static const char * const git_notes_prune_usage[] = {
};
static const char * const git_notes_get_ref_usage[] = {
- N_("git notes get-ref"),
+ "git notes get-ref",
NULL
};
@@ -199,7 +199,7 @@ static void prepare_note_data(const struct object_id *object, struct note_data *
static void write_note_data(struct note_data *d, struct object_id *oid)
{
- if (write_object_file(d->buf.buf, d->buf.len, blob_type, oid)) {
+ if (write_object_file(d->buf.buf, d->buf.len, OBJ_BLOB, oid)) {
int status = die_message(_("unable to write note object"));
if (d->edit_path)
diff --git a/builtin/pack-objects.c b/builtin/pack-objects.c
index ba2006f..014dcd4b 100644
--- a/builtin/pack-objects.c
+++ b/builtin/pack-objects.c
@@ -237,8 +237,6 @@ static unsigned long cache_max_small_delta_size = 1000;
static unsigned long window_memory_limit = 0;
-static struct list_objects_filter_options filter_options;
-
static struct string_list uri_protocols = STRING_LIST_INIT_NODUP;
enum missing_action {
@@ -1199,16 +1197,26 @@ static void write_pack_file(void)
display_progress(progress_state, written);
}
- /*
- * Did we write the wrong # entries in the header?
- * If so, rewrite it like in fast-import
- */
if (pack_to_stdout) {
- finalize_hashfile(f, hash, CSUM_HASH_IN_STREAM | CSUM_CLOSE);
+ /*
+ * We never fsync when writing to stdout since we may
+ * not be writing to an actual pack file. For instance,
+ * the upload-pack code passes a pipe here. Calling
+ * fsync on a pipe results in unnecessary
+ * synchronization with the reader on some platforms.
+ */
+ finalize_hashfile(f, hash, FSYNC_COMPONENT_NONE,
+ CSUM_HASH_IN_STREAM | CSUM_CLOSE);
} else if (nr_written == nr_remaining) {
- finalize_hashfile(f, hash, CSUM_HASH_IN_STREAM | CSUM_FSYNC | CSUM_CLOSE);
+ finalize_hashfile(f, hash, FSYNC_COMPONENT_PACK,
+ CSUM_HASH_IN_STREAM | CSUM_FSYNC | CSUM_CLOSE);
} else {
- int fd = finalize_hashfile(f, hash, 0);
+ /*
+ * If we wrote the wrong number of entries in the
+ * header, rewrite it like in fast-import.
+ */
+
+ int fd = finalize_hashfile(f, hash, FSYNC_COMPONENT_PACK, 0);
fixup_pack_header_footer(fd, hash, pack_tmp_name,
nr_written, hash, offset);
close(fd);
@@ -1802,7 +1810,7 @@ static void add_preferred_base(struct object_id *oid)
return;
data = read_object_with_reference(the_repository, oid,
- tree_type, &size, &tree_oid);
+ OBJ_TREE, &size, &tree_oid);
if (!data)
return;
@@ -3504,7 +3512,7 @@ static int option_parse_missing_action(const struct option *opt,
return 0;
}
- die(_("invalid value for --missing"));
+ die(_("invalid value for '%s': '%s'"), "--missing", arg);
return 0;
}
@@ -3651,7 +3659,7 @@ static int pack_options_allow_reuse(void)
static int get_object_list_from_bitmap(struct rev_info *revs)
{
- if (!(bitmap_git = prepare_bitmap_walk(revs, &filter_options, 0)))
+ if (!(bitmap_git = prepare_bitmap_walk(revs, 0)))
return -1;
if (pack_options_allow_reuse() &&
@@ -3714,9 +3722,8 @@ static void mark_bitmap_preferred_tips(void)
}
}
-static void get_object_list(int ac, const char **av)
+static void get_object_list(struct rev_info *revs, int ac, const char **av)
{
- struct rev_info revs;
struct setup_revision_opt s_r_opt = {
.allow_exclude_promisor_objects = 1,
};
@@ -3724,9 +3731,8 @@ static void get_object_list(int ac, const char **av)
int flags = 0;
int save_warning;
- repo_init_revisions(the_repository, &revs, NULL);
save_commit_buffer = 0;
- setup_revisions(ac, av, &revs, &s_r_opt);
+ setup_revisions(ac, av, revs, &s_r_opt);
/* make sure shallows are read */
is_repository_shallow(the_repository);
@@ -3756,13 +3762,13 @@ static void get_object_list(int ac, const char **av)
}
die(_("not a rev '%s'"), line);
}
- if (handle_revision_arg(line, &revs, flags, REVARG_CANNOT_BE_FILENAME))
+ if (handle_revision_arg(line, revs, flags, REVARG_CANNOT_BE_FILENAME))
die(_("bad revision '%s'"), line);
}
warn_on_object_refname_ambiguity = save_warning;
- if (use_bitmap_index && !get_object_list_from_bitmap(&revs))
+ if (use_bitmap_index && !get_object_list_from_bitmap(revs))
return;
if (use_delta_islands)
@@ -3771,24 +3777,24 @@ static void get_object_list(int ac, const char **av)
if (write_bitmap_index)
mark_bitmap_preferred_tips();
- if (prepare_revision_walk(&revs))
+ if (prepare_revision_walk(revs))
die(_("revision walk setup failed"));
- mark_edges_uninteresting(&revs, show_edge, sparse);
+ mark_edges_uninteresting(revs, show_edge, sparse);
if (!fn_show_object)
fn_show_object = show_object;
- traverse_commit_list_filtered(&filter_options, &revs,
- show_commit, fn_show_object, NULL,
- NULL);
+ traverse_commit_list(revs,
+ show_commit, fn_show_object,
+ NULL);
if (unpack_unreachable_expiration) {
- revs.ignore_missing_links = 1;
- if (add_unseen_recent_objects_to_traversal(&revs,
+ revs->ignore_missing_links = 1;
+ if (add_unseen_recent_objects_to_traversal(revs,
unpack_unreachable_expiration))
die(_("unable to add recent objects"));
- if (prepare_revision_walk(&revs))
+ if (prepare_revision_walk(revs))
die(_("revision walk setup failed"));
- traverse_commit_list(&revs, record_recent_commit,
+ traverse_commit_list(revs, record_recent_commit,
record_recent_object, NULL);
}
@@ -3861,6 +3867,21 @@ static int option_parse_unpack_unreachable(const struct option *opt,
return 0;
}
+struct po_filter_data {
+ unsigned have_revs:1;
+ struct rev_info revs;
+};
+
+static struct list_objects_filter_options *po_filter_revs_init(void *value)
+{
+ struct po_filter_data *data = value;
+
+ repo_init_revisions(the_repository, &data->revs, NULL);
+ data->have_revs = 1;
+
+ return &data->revs.filter;
+}
+
int cmd_pack_objects(int argc, const char **argv, const char *prefix)
{
int use_internal_rev_list = 0;
@@ -3871,6 +3892,8 @@ int cmd_pack_objects(int argc, const char **argv, const char *prefix)
int rev_list_index = 0;
int stdin_packs = 0;
struct string_list keep_pack_list = STRING_LIST_INIT_NODUP;
+ struct po_filter_data pfd = { .have_revs = 0 };
+
struct option pack_objects_options[] = {
OPT_SET_INT('q', "quiet", &progress,
N_("do not show progress meter"), 0),
@@ -3956,7 +3979,7 @@ int cmd_pack_objects(int argc, const char **argv, const char *prefix)
&write_bitmap_index,
N_("write a bitmap index if possible"),
WRITE_BITMAP_QUIET, PARSE_OPT_HIDDEN),
- OPT_PARSE_LIST_OBJECTS_FILTER(&filter_options),
+ OPT_PARSE_LIST_OBJECTS_FILTER_INIT(&pfd, po_filter_revs_init),
OPT_CALLBACK_F(0, "missing", NULL, N_("action"),
N_("handling for missing objects"), PARSE_OPT_NONEG,
option_parse_missing_action),
@@ -3976,9 +3999,11 @@ int cmd_pack_objects(int argc, const char **argv, const char *prefix)
read_replace_refs = 0;
sparse = git_env_bool("GIT_TEST_PACK_SPARSE", -1);
- prepare_repo_settings(the_repository);
- if (sparse < 0)
- sparse = the_repository->settings.pack_use_sparse;
+ if (the_repository->gitdir) {
+ prepare_repo_settings(the_repository);
+ if (sparse < 0)
+ sparse = the_repository->settings.pack_use_sparse;
+ }
reset_pack_idx_option(&pack_idx_opts);
git_config(git_pack_config, NULL);
@@ -4074,7 +4099,7 @@ int cmd_pack_objects(int argc, const char **argv, const char *prefix)
if (!rev_list_all || !rev_list_reflog || !rev_list_index)
unpack_unreachable_expiration = 0;
- if (filter_options.choice) {
+ if (pfd.have_revs && pfd.revs.filter.choice) {
if (!pack_to_stdout)
die(_("cannot use --filter without --stdout"));
if (stdin_packs)
@@ -4150,8 +4175,13 @@ int cmd_pack_objects(int argc, const char **argv, const char *prefix)
add_unreachable_loose_objects();
} else if (!use_internal_rev_list) {
read_object_list_from_stdin();
+ } else if (pfd.have_revs) {
+ get_object_list(&pfd.revs, rp.nr, rp.v);
} else {
- get_object_list(rp.nr, rp.v);
+ struct rev_info revs;
+
+ repo_init_revisions(the_repository, &revs, NULL);
+ get_object_list(&revs, rp.nr, rp.v);
}
cleanup_preferred_base();
if (include_tag && nr_result)
diff --git a/builtin/patch-id.c b/builtin/patch-id.c
index 822ffff..881fcf3 100644
--- a/builtin/patch-id.c
+++ b/builtin/patch-id.c
@@ -32,8 +32,12 @@ static int scan_hunk_header(const char *p, int *p_before, int *p_after)
n = strspn(q, digits);
if (q[n] == ',') {
q += n + 1;
+ *p_before = atoi(q);
n = strspn(q, digits);
+ } else {
+ *p_before = 1;
}
+
if (n == 0 || q[n] != ' ' || q[n+1] != '+')
return 0;
@@ -41,13 +45,14 @@ static int scan_hunk_header(const char *p, int *p_before, int *p_after)
n = strspn(r, digits);
if (r[n] == ',') {
r += n + 1;
+ *p_after = atoi(r);
n = strspn(r, digits);
+ } else {
+ *p_after = 1;
}
if (n == 0)
return 0;
- *p_before = atoi(q);
- *p_after = atoi(r);
return 1;
}
diff --git a/builtin/prune-packed.c b/builtin/prune-packed.c
index b7b9281..da3273a 100644
--- a/builtin/prune-packed.c
+++ b/builtin/prune-packed.c
@@ -3,7 +3,7 @@
#include "prune-packed.h"
static const char * const prune_packed_usage[] = {
- N_("git prune-packed [-n | --dry-run] [-q | --quiet]"),
+ "git prune-packed [-n | --dry-run] [-q | --quiet]",
NULL
};
diff --git a/builtin/pull.c b/builtin/pull.c
index 100cbf9..4d667ab 100644
--- a/builtin/pull.c
+++ b/builtin/pull.c
@@ -42,9 +42,9 @@ static enum rebase_type parse_config_rebase(const char *key, const char *value,
return v;
if (fatal)
- die(_("Invalid value for %s: %s"), key, value);
+ die(_("invalid value for '%s': '%s'"), key, value);
else
- error(_("Invalid value for %s: %s"), key, value);
+ error(_("invalid value for '%s': '%s'"), key, value);
return REBASE_INVALID;
}
@@ -318,7 +318,7 @@ static const char *config_get_ff(void)
if (!strcmp(value, "only"))
return "--ff-only";
- die(_("Invalid value for pull.ff: %s"), value);
+ die(_("invalid value for '%s': '%s'"), "pull.ff", value);
}
/**
@@ -994,8 +994,10 @@ int cmd_pull(int argc, const char **argv, const char *prefix)
set_reflog_message(argc, argv);
git_config(git_pull_config, NULL);
- prepare_repo_settings(the_repository);
- the_repository->settings.command_requires_full_index = 0;
+ if (the_repository->gitdir) {
+ prepare_repo_settings(the_repository);
+ the_repository->settings.command_requires_full_index = 0;
+ }
argc = parse_options(argc, argv, prefix, pull_options, pull_usage, 0);
@@ -1038,14 +1040,13 @@ int cmd_pull(int argc, const char **argv, const char *prefix)
oidclr(&orig_head);
if (opt_rebase) {
- int autostash = config_autostash;
- if (opt_autostash != -1)
- autostash = opt_autostash;
+ if (opt_autostash == -1)
+ opt_autostash = config_autostash;
if (is_null_oid(&orig_head) && !is_cache_unborn())
die(_("Updating an unborn branch with changes added to the index."));
- if (!autostash)
+ if (!opt_autostash)
require_clean_work_tree(the_repository,
N_("pull with rebase"),
_("please commit or stash them."), 1, 0);
diff --git a/builtin/push.c b/builtin/push.c
index 359db90..cad9979 100644
--- a/builtin/push.c
+++ b/builtin/push.c
@@ -486,7 +486,7 @@ static int git_push_config(const char *k, const char *v, void *cb)
if (value && !strcasecmp(value, "if-asked"))
set_push_cert_flags(flags, SEND_PACK_PUSH_CERT_IF_ASKED);
else
- return error("Invalid value for '%s'", k);
+ return error(_("invalid value for '%s'"), k);
}
}
} else if (!strcmp(k, "push.recursesubmodules")) {
diff --git a/builtin/read-tree.c b/builtin/read-tree.c
index 2109c4c..9f1f33e 100644
--- a/builtin/read-tree.c
+++ b/builtin/read-tree.c
@@ -160,15 +160,22 @@ int cmd_read_tree(int argc, const char **argv, const char *cmd_prefix)
argc = parse_options(argc, argv, cmd_prefix, read_tree_options,
read_tree_usage, 0);
- hold_locked_index(&lock_file, LOCK_DIE_ON_ERROR);
-
prefix_set = opts.prefix ? 1 : 0;
if (1 < opts.merge + opts.reset + prefix_set)
die("Which one? -m, --reset, or --prefix?");
+ /* Prefix should not start with a directory separator */
+ if (opts.prefix && opts.prefix[0] == '/')
+ die("Invalid prefix, prefix cannot start with '/'");
+
if (opts.reset)
opts.reset = UNPACK_RESET_OVERWRITE_UNTRACKED;
+ prepare_repo_settings(the_repository);
+ the_repository->settings.command_requires_full_index = 0;
+
+ hold_locked_index(&lock_file, LOCK_DIE_ON_ERROR);
+
/*
* NEEDSWORK
*
@@ -210,6 +217,9 @@ int cmd_read_tree(int argc, const char **argv, const char *cmd_prefix)
if (opts.merge && !opts.index_only)
setup_work_tree();
+ if (opts.skip_sparse_checkout)
+ ensure_full_index(&the_index);
+
if (opts.merge) {
switch (stage - 1) {
case 0:
diff --git a/builtin/rebase.c b/builtin/rebase.c
index 36490d0..27fde7b 100644
--- a/builtin/rebase.c
+++ b/builtin/rebase.c
@@ -28,6 +28,7 @@
#include "sequencer.h"
#include "rebase-interactive.h"
#include "reset.h"
+#include "hook.h"
#define DEFAULT_REFLOG_ACTION "rebase"
@@ -36,7 +37,7 @@ static char const * const builtin_rebase_usage[] = {
"[--onto <newbase> | --keep-base] [<upstream> [<branch>]]"),
N_("git rebase [-i] [options] [--exec <cmd>] [--onto <newbase>] "
"--root [<branch>]"),
- N_("git rebase --continue | --abort | --skip | --edit-todo"),
+ "git rebase --continue | --abort | --skip | --edit-todo",
NULL
};
@@ -570,7 +571,8 @@ static int finish_rebase(struct rebase_options *opts)
static int move_to_original_branch(struct rebase_options *opts)
{
- struct strbuf orig_head_reflog = STRBUF_INIT, head_reflog = STRBUF_INIT;
+ struct strbuf branch_reflog = STRBUF_INIT, head_reflog = STRBUF_INIT;
+ struct reset_head_opts ropts = { 0 };
int ret;
if (!opts->head_name)
@@ -579,16 +581,17 @@ static int move_to_original_branch(struct rebase_options *opts)
if (!opts->onto)
BUG("move_to_original_branch without onto");
- strbuf_addf(&orig_head_reflog, "rebase finished: %s onto %s",
+ strbuf_addf(&branch_reflog, "rebase finished: %s onto %s",
opts->head_name, oid_to_hex(&opts->onto->object.oid));
strbuf_addf(&head_reflog, "rebase finished: returning to %s",
opts->head_name);
- ret = reset_head(the_repository, NULL, "", opts->head_name,
- RESET_HEAD_REFS_ONLY,
- orig_head_reflog.buf, head_reflog.buf,
- DEFAULT_REFLOG_ACTION);
+ ropts.branch = opts->head_name;
+ ropts.flags = RESET_HEAD_REFS_ONLY;
+ ropts.branch_msg = branch_reflog.buf;
+ ropts.head_msg = head_reflog.buf;
+ ret = reset_head(the_repository, &ropts);
- strbuf_release(&orig_head_reflog);
+ strbuf_release(&branch_reflog);
strbuf_release(&head_reflog);
return ret;
}
@@ -670,13 +673,15 @@ static int run_am(struct rebase_options *opts)
status = run_command(&format_patch);
if (status) {
+ struct reset_head_opts ropts = { 0 };
unlink(rebased_patches);
free(rebased_patches);
strvec_clear(&am.args);
- reset_head(the_repository, &opts->orig_head, "checkout",
- opts->head_name, 0,
- "HEAD", NULL, DEFAULT_REFLOG_ACTION);
+ ropts.oid = &opts->orig_head;
+ ropts.branch = opts->head_name;
+ ropts.default_reflog_action = DEFAULT_REFLOG_ACTION;
+ reset_head(the_repository, &ropts);
error(_("\ngit encountered an error while preparing the "
"patches to replay\n"
"these revisions:\n"
@@ -812,6 +817,28 @@ static int rebase_config(const char *var, const char *value, void *data)
return git_default_config(var, value, data);
}
+static int checkout_up_to_date(struct rebase_options *options)
+{
+ struct strbuf buf = STRBUF_INIT;
+ struct reset_head_opts ropts = { 0 };
+ int ret = 0;
+
+ strbuf_addf(&buf, "%s: checkout %s",
+ getenv(GIT_REFLOG_ACTION_ENVIRONMENT),
+ options->switch_to);
+ ropts.oid = &options->orig_head;
+ ropts.branch = options->head_name;
+ ropts.flags = RESET_HEAD_RUN_POST_CHECKOUT_HOOK;
+ if (!ropts.branch)
+ ropts.flags |= RESET_HEAD_DETACH;
+ ropts.head_msg = buf.buf;
+ if (reset_head(the_repository, &ropts) < 0)
+ ret = error(_("could not switch to %s"), options->switch_to);
+ strbuf_release(&buf);
+
+ return ret;
+}
+
/*
* Determines whether the commits in from..to are linear, i.e. contain
* no merge commits. This function *expects* `from` to be an ancestor of
@@ -1017,6 +1044,7 @@ int cmd_rebase(int argc, const char **argv, const char *prefix)
int reschedule_failed_exec = -1;
int allow_preemptive_ff = 1;
int preserve_merges_selected = 0;
+ struct reset_head_opts ropts = { 0 };
struct option builtin_rebase_options[] = {
OPT_STRING(0, "onto", &options.onto_name,
N_("revision"),
@@ -1254,9 +1282,8 @@ int cmd_rebase(int argc, const char **argv, const char *prefix)
rerere_clear(the_repository, &merge_rr);
string_list_clear(&merge_rr, 1);
-
- if (reset_head(the_repository, NULL, "reset", NULL, RESET_HEAD_HARD,
- NULL, NULL, DEFAULT_REFLOG_ACTION) < 0)
+ ropts.flags = RESET_HEAD_HARD;
+ if (reset_head(the_repository, &ropts) < 0)
die(_("could not discard worktree changes"));
remove_branch_state(the_repository, 0);
if (read_basic_state(&options))
@@ -1273,9 +1300,11 @@ int cmd_rebase(int argc, const char **argv, const char *prefix)
if (read_basic_state(&options))
exit(1);
- if (reset_head(the_repository, &options.orig_head, "reset",
- options.head_name, RESET_HEAD_HARD,
- NULL, NULL, DEFAULT_REFLOG_ACTION) < 0)
+ ropts.oid = &options.orig_head;
+ ropts.branch = options.head_name;
+ ropts.flags = RESET_HEAD_HARD;
+ ropts.default_reflog_action = DEFAULT_REFLOG_ACTION;
+ if (reset_head(the_repository, &ropts) < 0)
die(_("could not move back to %s"),
oid_to_hex(&options.orig_head));
remove_branch_state(the_repository, 0);
@@ -1641,10 +1670,10 @@ int cmd_rebase(int argc, const char **argv, const char *prefix)
if (repo_read_index(the_repository) < 0)
die(_("could not read index"));
- if (options.autostash) {
- create_autostash(the_repository, state_dir_path("autostash", &options),
- DEFAULT_REFLOG_ACTION);
- }
+ if (options.autostash)
+ create_autostash(the_repository,
+ state_dir_path("autostash", &options));
+
if (require_clean_work_tree(the_repository, "rebase",
_("Please commit or stash them."), 1, 1)) {
@@ -1673,21 +1702,9 @@ int cmd_rebase(int argc, const char **argv, const char *prefix)
if (!(options.flags & REBASE_FORCE)) {
/* Lazily switch to the target branch if needed... */
if (options.switch_to) {
- strbuf_reset(&buf);
- strbuf_addf(&buf, "%s: checkout %s",
- getenv(GIT_REFLOG_ACTION_ENVIRONMENT),
- options.switch_to);
- if (reset_head(the_repository,
- &options.orig_head, "checkout",
- options.head_name,
- RESET_HEAD_RUN_POST_CHECKOUT_HOOK,
- NULL, buf.buf,
- DEFAULT_REFLOG_ACTION) < 0) {
- ret = error(_("could not switch to "
- "%s"),
- options.switch_to);
+ ret = checkout_up_to_date(&options);
+ if (ret)
goto cleanup;
- }
}
if (!(options.flags & REBASE_NO_QUIET))
@@ -1712,7 +1729,7 @@ int cmd_rebase(int argc, const char **argv, const char *prefix)
/* If a hook exists, give it a chance to interrupt */
if (!ok_to_skip_pre_rebase &&
- run_hook_le(NULL, "pre-rebase", options.upstream_arg,
+ run_hooks_l("pre-rebase", options.upstream_arg,
argc ? argv[0] : NULL, NULL))
die(_("The pre-rebase hook refused to rebase."));
@@ -1754,10 +1771,13 @@ int cmd_rebase(int argc, const char **argv, const char *prefix)
strbuf_addf(&msg, "%s: checkout %s",
getenv(GIT_REFLOG_ACTION_ENVIRONMENT), options.onto_name);
- if (reset_head(the_repository, &options.onto->object.oid, "checkout", NULL,
- RESET_HEAD_DETACH | RESET_ORIG_HEAD |
- RESET_HEAD_RUN_POST_CHECKOUT_HOOK,
- NULL, msg.buf, DEFAULT_REFLOG_ACTION))
+ ropts.oid = &options.onto->object.oid;
+ ropts.orig_head = &options.orig_head,
+ ropts.flags = RESET_HEAD_DETACH | RESET_ORIG_HEAD |
+ RESET_HEAD_RUN_POST_CHECKOUT_HOOK;
+ ropts.head_msg = msg.buf;
+ ropts.default_reflog_action = DEFAULT_REFLOG_ACTION;
+ if (reset_head(the_repository, &ropts))
die(_("Could not detach HEAD"));
strbuf_release(&msg);
@@ -1772,9 +1792,11 @@ int cmd_rebase(int argc, const char **argv, const char *prefix)
strbuf_addf(&msg, "rebase finished: %s onto %s",
options.head_name ? options.head_name : "detached HEAD",
oid_to_hex(&options.onto->object.oid));
- reset_head(the_repository, NULL, "Fast-forwarded", options.head_name,
- RESET_HEAD_REFS_ONLY, "HEAD", msg.buf,
- DEFAULT_REFLOG_ACTION);
+ memset(&ropts, 0, sizeof(ropts));
+ ropts.branch = options.head_name;
+ ropts.flags = RESET_HEAD_REFS_ONLY;
+ ropts.head_msg = msg.buf;
+ reset_head(the_repository, &ropts);
strbuf_release(&msg);
ret = finish_rebase(&options);
goto cleanup;
diff --git a/builtin/receive-pack.c b/builtin/receive-pack.c
index 9f4a0b8..9aabffa 100644
--- a/builtin/receive-pack.c
+++ b/builtin/receive-pack.c
@@ -581,32 +581,19 @@ static char *prepare_push_cert_nonce(const char *path, timestamp_t stamp)
return strbuf_detach(&buf, NULL);
}
-/*
- * NEEDSWORK: reuse find_commit_header() from jk/commit-author-parsing
- * after dropping "_commit" from its name and possibly moving it out
- * of commit.c
- */
static char *find_header(const char *msg, size_t len, const char *key,
const char **next_line)
{
- int key_len = strlen(key);
- const char *line = msg;
-
- while (line && line < msg + len) {
- const char *eol = strchrnul(line, '\n');
-
- if ((msg + len <= eol) || line == eol)
- return NULL;
- if (line + key_len < eol &&
- !memcmp(line, key, key_len) && line[key_len] == ' ') {
- int offset = key_len + 1;
- if (next_line)
- *next_line = *eol ? eol + 1 : eol;
- return xmemdupz(line + offset, (eol - line) - offset);
- }
- line = *eol ? eol + 1 : NULL;
- }
- return NULL;
+ size_t out_len;
+ const char *val = find_header_mem(msg, len, key, &out_len);
+
+ if (!val)
+ return NULL;
+
+ if (next_line)
+ *next_line = val + out_len + 1;
+
+ return xmemdupz(val, out_len);
}
/*
@@ -762,7 +749,7 @@ static void prepare_push_cert_sha1(struct child_process *proc)
int bogs /* beginning_of_gpg_sig */;
already_done = 1;
- if (write_object_file(push_cert.buf, push_cert.len, "blob",
+ if (write_object_file(push_cert.buf, push_cert.len, OBJ_BLOB,
&push_cert_oid))
oidclr(&push_cert_oid);
@@ -826,13 +813,14 @@ static int run_and_feed_hook(const char *hook_name, feed_fn feed,
proc.trace2_hook_name = hook_name;
if (feed_state->push_options) {
- int i;
+ size_t i;
for (i = 0; i < feed_state->push_options->nr; i++)
strvec_pushf(&proc.env_array,
- "GIT_PUSH_OPTION_%d=%s", i,
+ "GIT_PUSH_OPTION_%"PRIuMAX"=%s",
+ (uintmax_t)i,
feed_state->push_options->items[i].string);
- strvec_pushf(&proc.env_array, "GIT_PUSH_OPTION_COUNT=%d",
- feed_state->push_options->nr);
+ strvec_pushf(&proc.env_array, "GIT_PUSH_OPTION_COUNT=%"PRIuMAX"",
+ (uintmax_t)feed_state->push_options->nr);
} else
strvec_pushf(&proc.env_array, "GIT_PUSH_OPTION_COUNT");
@@ -1421,12 +1409,17 @@ static const char *push_to_deploy(unsigned char *sha1,
static const char *push_to_checkout_hook = "push-to-checkout";
static const char *push_to_checkout(unsigned char *hash,
+ int *invoked_hook,
struct strvec *env,
const char *work_tree)
{
+ struct run_hooks_opt opt = RUN_HOOKS_OPT_INIT;
+ opt.invoked_hook = invoked_hook;
+
strvec_pushf(env, "GIT_WORK_TREE=%s", absolute_path(work_tree));
- if (run_hook_le(env->v, push_to_checkout_hook,
- hash_to_hex(hash), NULL))
+ strvec_pushv(&opt.env, env->v);
+ strvec_push(&opt.args, hash_to_hex(hash));
+ if (run_hooks_opt(push_to_checkout_hook, &opt))
return "push-to-checkout hook declined";
else
return NULL;
@@ -1436,6 +1429,7 @@ static const char *update_worktree(unsigned char *sha1, const struct worktree *w
{
const char *retval, *git_dir;
struct strvec env = STRVEC_INIT;
+ int invoked_hook;
if (!worktree || !worktree->path)
BUG("worktree->path must be non-NULL");
@@ -1446,10 +1440,9 @@ static const char *update_worktree(unsigned char *sha1, const struct worktree *w
strvec_pushf(&env, "GIT_DIR=%s", absolute_path(git_dir));
- if (!hook_exists(push_to_checkout_hook))
+ retval = push_to_checkout(sha1, &invoked_hook, &env, worktree->path);
+ if (!invoked_hook)
retval = push_to_deploy(sha1, &env, worktree->path);
- else
- retval = push_to_checkout(sha1, &env, worktree->path);
strvec_clear(&env);
return retval;
@@ -1972,6 +1965,15 @@ static void execute_commands(struct command *commands,
}
/*
+ * If there is no command ready to run, we should return directly to
+ * destroy the temporary data in the quarantine area.
+ */
+ for (cmd = commands; cmd && cmd->error_string; cmd = cmd->next)
+ ; /* nothing */
+ if (!cmd)
+ return;
+
+ /*
* Now we'll start writing out refs, which means the objects need
* to be in their final positions so that other processes can see them.
*/
diff --git a/builtin/reflog.c b/builtin/reflog.c
index a4b1dd2..c943c2a 100644
--- a/builtin/reflog.c
+++ b/builtin/reflog.c
@@ -1,428 +1,60 @@
#include "builtin.h"
#include "config.h"
-#include "lockfile.h"
-#include "object-store.h"
-#include "repository.h"
-#include "commit.h"
-#include "refs.h"
-#include "dir.h"
-#include "tree-walk.h"
-#include "diff.h"
#include "revision.h"
#include "reachable.h"
#include "worktree.h"
+#include "reflog.h"
-/* NEEDSWORK: switch to using parse_options */
-static const char reflog_expire_usage[] =
-N_("git reflog expire [--expire=<time>] "
- "[--expire-unreachable=<time>] "
- "[--rewrite] [--updateref] [--stale-fix] [--dry-run | -n] "
- "[--verbose] [--all] <refs>...");
-static const char reflog_delete_usage[] =
-N_("git reflog delete [--rewrite] [--updateref] "
- "[--dry-run | -n] [--verbose] <refs>...");
-static const char reflog_exists_usage[] =
-N_("git reflog exists <ref>");
+#define BUILTIN_REFLOG_SHOW_USAGE \
+ N_("git reflog [show] [<log-options>] [<ref>]")
-static timestamp_t default_reflog_expire;
-static timestamp_t default_reflog_expire_unreachable;
+#define BUILTIN_REFLOG_EXPIRE_USAGE \
+ N_("git reflog expire [--expire=<time>] [--expire-unreachable=<time>]\n" \
+ " [--rewrite] [--updateref] [--stale-fix]\n" \
+ " [--dry-run | -n] [--verbose] [--all [--single-worktree] | <refs>...]")
-struct cmd_reflog_expire_cb {
- int stalefix;
- timestamp_t expire_total;
- timestamp_t expire_unreachable;
- int recno;
-};
+#define BUILTIN_REFLOG_DELETE_USAGE \
+ N_("git reflog delete [--rewrite] [--updateref]\n" \
+ " [--dry-run | -n] [--verbose] <ref>@{<specifier>}...")
-struct expire_reflog_policy_cb {
- enum {
- UE_NORMAL,
- UE_ALWAYS,
- UE_HEAD
- } unreachable_expire_kind;
- struct commit_list *mark_list;
- unsigned long mark_limit;
- struct cmd_reflog_expire_cb cmd;
- struct commit *tip_commit;
- struct commit_list *tips;
- unsigned int dry_run:1;
-};
+#define BUILTIN_REFLOG_EXISTS_USAGE \
+ N_("git reflog exists <ref>")
-struct worktree_reflogs {
- struct worktree *worktree;
- struct string_list reflogs;
+static const char *const reflog_show_usage[] = {
+ BUILTIN_REFLOG_SHOW_USAGE,
+ NULL,
};
-/* Remember to update object flag allocation in object.h */
-#define INCOMPLETE (1u<<10)
-#define STUDYING (1u<<11)
-#define REACHABLE (1u<<12)
-
-static int tree_is_complete(const struct object_id *oid)
-{
- struct tree_desc desc;
- struct name_entry entry;
- int complete;
- struct tree *tree;
-
- tree = lookup_tree(the_repository, oid);
- if (!tree)
- return 0;
- if (tree->object.flags & SEEN)
- return 1;
- if (tree->object.flags & INCOMPLETE)
- return 0;
-
- if (!tree->buffer) {
- enum object_type type;
- unsigned long size;
- void *data = read_object_file(oid, &type, &size);
- if (!data) {
- tree->object.flags |= INCOMPLETE;
- return 0;
- }
- tree->buffer = data;
- tree->size = size;
- }
- init_tree_desc(&desc, tree->buffer, tree->size);
- complete = 1;
- while (tree_entry(&desc, &entry)) {
- if (!has_object_file(&entry.oid) ||
- (S_ISDIR(entry.mode) && !tree_is_complete(&entry.oid))) {
- tree->object.flags |= INCOMPLETE;
- complete = 0;
- }
- }
- free_tree_buffer(tree);
-
- if (complete)
- tree->object.flags |= SEEN;
- return complete;
-}
-
-static int commit_is_complete(struct commit *commit)
-{
- struct object_array study;
- struct object_array found;
- int is_incomplete = 0;
- int i;
-
- /* early return */
- if (commit->object.flags & SEEN)
- return 1;
- if (commit->object.flags & INCOMPLETE)
- return 0;
- /*
- * Find all commits that are reachable and are not marked as
- * SEEN. Then make sure the trees and blobs contained are
- * complete. After that, mark these commits also as SEEN.
- * If some of the objects that are needed to complete this
- * commit are missing, mark this commit as INCOMPLETE.
- */
- memset(&study, 0, sizeof(study));
- memset(&found, 0, sizeof(found));
- add_object_array(&commit->object, NULL, &study);
- add_object_array(&commit->object, NULL, &found);
- commit->object.flags |= STUDYING;
- while (study.nr) {
- struct commit *c;
- struct commit_list *parent;
-
- c = (struct commit *)object_array_pop(&study);
- if (!c->object.parsed && !parse_object(the_repository, &c->object.oid))
- c->object.flags |= INCOMPLETE;
-
- if (c->object.flags & INCOMPLETE) {
- is_incomplete = 1;
- break;
- }
- else if (c->object.flags & SEEN)
- continue;
- for (parent = c->parents; parent; parent = parent->next) {
- struct commit *p = parent->item;
- if (p->object.flags & STUDYING)
- continue;
- p->object.flags |= STUDYING;
- add_object_array(&p->object, NULL, &study);
- add_object_array(&p->object, NULL, &found);
- }
- }
- if (!is_incomplete) {
- /*
- * make sure all commits in "found" array have all the
- * necessary objects.
- */
- for (i = 0; i < found.nr; i++) {
- struct commit *c =
- (struct commit *)found.objects[i].item;
- if (!tree_is_complete(get_commit_tree_oid(c))) {
- is_incomplete = 1;
- c->object.flags |= INCOMPLETE;
- }
- }
- if (!is_incomplete) {
- /* mark all found commits as complete, iow SEEN */
- for (i = 0; i < found.nr; i++)
- found.objects[i].item->flags |= SEEN;
- }
- }
- /* clear flags from the objects we traversed */
- for (i = 0; i < found.nr; i++)
- found.objects[i].item->flags &= ~STUDYING;
- if (is_incomplete)
- commit->object.flags |= INCOMPLETE;
- else {
- /*
- * If we come here, we have (1) traversed the ancestry chain
- * from the "commit" until we reach SEEN commits (which are
- * known to be complete), and (2) made sure that the commits
- * encountered during the above traversal refer to trees that
- * are complete. Which means that we know *all* the commits
- * we have seen during this process are complete.
- */
- for (i = 0; i < found.nr; i++)
- found.objects[i].item->flags |= SEEN;
- }
- /* free object arrays */
- object_array_clear(&study);
- object_array_clear(&found);
- return !is_incomplete;
-}
-
-static int keep_entry(struct commit **it, struct object_id *oid)
-{
- struct commit *commit;
-
- if (is_null_oid(oid))
- return 1;
- commit = lookup_commit_reference_gently(the_repository, oid, 1);
- if (!commit)
- return 0;
-
- /*
- * Make sure everything in this commit exists.
- *
- * We have walked all the objects reachable from the refs
- * and cache earlier. The commits reachable by this commit
- * must meet SEEN commits -- and then we should mark them as
- * SEEN as well.
- */
- if (!commit_is_complete(commit))
- return 0;
- *it = commit;
- return 1;
-}
-
-/*
- * Starting from commits in the cb->mark_list, mark commits that are
- * reachable from them. Stop the traversal at commits older than
- * the expire_limit and queue them back, so that the caller can call
- * us again to restart the traversal with longer expire_limit.
- */
-static void mark_reachable(struct expire_reflog_policy_cb *cb)
-{
- struct commit_list *pending;
- timestamp_t expire_limit = cb->mark_limit;
- struct commit_list *leftover = NULL;
-
- for (pending = cb->mark_list; pending; pending = pending->next)
- pending->item->object.flags &= ~REACHABLE;
-
- pending = cb->mark_list;
- while (pending) {
- struct commit_list *parent;
- struct commit *commit = pop_commit(&pending);
- if (commit->object.flags & REACHABLE)
- continue;
- if (parse_commit(commit))
- continue;
- commit->object.flags |= REACHABLE;
- if (commit->date < expire_limit) {
- commit_list_insert(commit, &leftover);
- continue;
- }
- commit->object.flags |= REACHABLE;
- parent = commit->parents;
- while (parent) {
- commit = parent->item;
- parent = parent->next;
- if (commit->object.flags & REACHABLE)
- continue;
- commit_list_insert(commit, &pending);
- }
- }
- cb->mark_list = leftover;
-}
-
-static int unreachable(struct expire_reflog_policy_cb *cb, struct commit *commit, struct object_id *oid)
-{
- /*
- * We may or may not have the commit yet - if not, look it
- * up using the supplied sha1.
- */
- if (!commit) {
- if (is_null_oid(oid))
- return 0;
-
- commit = lookup_commit_reference_gently(the_repository, oid,
- 1);
-
- /* Not a commit -- keep it */
- if (!commit)
- return 0;
- }
-
- /* Reachable from the current ref? Don't prune. */
- if (commit->object.flags & REACHABLE)
- return 0;
-
- if (cb->mark_list && cb->mark_limit) {
- cb->mark_limit = 0; /* dig down to the root */
- mark_reachable(cb);
- }
-
- return !(commit->object.flags & REACHABLE);
-}
-
-/*
- * Return true iff the specified reflog entry should be expired.
- */
-static int should_expire_reflog_ent(struct object_id *ooid, struct object_id *noid,
- const char *email, timestamp_t timestamp, int tz,
- const char *message, void *cb_data)
-{
- struct expire_reflog_policy_cb *cb = cb_data;
- struct commit *old_commit, *new_commit;
-
- if (timestamp < cb->cmd.expire_total)
- return 1;
-
- old_commit = new_commit = NULL;
- if (cb->cmd.stalefix &&
- (!keep_entry(&old_commit, ooid) || !keep_entry(&new_commit, noid)))
- return 1;
-
- if (timestamp < cb->cmd.expire_unreachable) {
- switch (cb->unreachable_expire_kind) {
- case UE_ALWAYS:
- return 1;
- case UE_NORMAL:
- case UE_HEAD:
- if (unreachable(cb, old_commit, ooid) || unreachable(cb, new_commit, noid))
- return 1;
- break;
- }
- }
-
- if (cb->cmd.recno && --(cb->cmd.recno) == 0)
- return 1;
-
- return 0;
-}
-
-static int should_expire_reflog_ent_verbose(struct object_id *ooid,
- struct object_id *noid,
- const char *email,
- timestamp_t timestamp, int tz,
- const char *message, void *cb_data)
-{
- struct expire_reflog_policy_cb *cb = cb_data;
- int expire;
-
- expire = should_expire_reflog_ent(ooid, noid, email, timestamp, tz,
- message, cb);
-
- if (!expire)
- printf("keep %s", message);
- else if (cb->dry_run)
- printf("would prune %s", message);
- else
- printf("prune %s", message);
-
- return expire;
-}
-
-static int push_tip_to_list(const char *refname, const struct object_id *oid,
- int flags, void *cb_data)
-{
- struct commit_list **list = cb_data;
- struct commit *tip_commit;
- if (flags & REF_ISSYMREF)
- return 0;
- tip_commit = lookup_commit_reference_gently(the_repository, oid, 1);
- if (!tip_commit)
- return 0;
- commit_list_insert(tip_commit, list);
- return 0;
-}
-
-static int is_head(const char *refname)
-{
- switch (ref_type(refname)) {
- case REF_TYPE_OTHER_PSEUDOREF:
- case REF_TYPE_MAIN_PSEUDOREF:
- if (parse_worktree_ref(refname, NULL, NULL, &refname))
- BUG("not a worktree ref: %s", refname);
- break;
- default:
- break;
- }
- return !strcmp(refname, "HEAD");
-}
+static const char *const reflog_expire_usage[] = {
+ BUILTIN_REFLOG_EXPIRE_USAGE,
+ NULL
+};
-static void reflog_expiry_prepare(const char *refname,
- const struct object_id *oid,
- void *cb_data)
-{
- struct expire_reflog_policy_cb *cb = cb_data;
- struct commit_list *elem;
- struct commit *commit = NULL;
-
- if (!cb->cmd.expire_unreachable || is_head(refname)) {
- cb->unreachable_expire_kind = UE_HEAD;
- } else {
- commit = lookup_commit(the_repository, oid);
- cb->unreachable_expire_kind = commit ? UE_NORMAL : UE_ALWAYS;
- }
+static const char *const reflog_delete_usage[] = {
+ BUILTIN_REFLOG_DELETE_USAGE,
+ NULL
+};
- if (cb->cmd.expire_unreachable <= cb->cmd.expire_total)
- cb->unreachable_expire_kind = UE_ALWAYS;
+static const char *const reflog_exists_usage[] = {
+ BUILTIN_REFLOG_EXISTS_USAGE,
+ NULL,
+};
- switch (cb->unreachable_expire_kind) {
- case UE_ALWAYS:
- return;
- case UE_HEAD:
- for_each_ref(push_tip_to_list, &cb->tips);
- for (elem = cb->tips; elem; elem = elem->next)
- commit_list_insert(elem->item, &cb->mark_list);
- break;
- case UE_NORMAL:
- commit_list_insert(commit, &cb->mark_list);
- /* For reflog_expiry_cleanup() below */
- cb->tip_commit = commit;
- }
- cb->mark_limit = cb->cmd.expire_total;
- mark_reachable(cb);
-}
+static const char *const reflog_usage[] = {
+ BUILTIN_REFLOG_SHOW_USAGE,
+ BUILTIN_REFLOG_EXPIRE_USAGE,
+ BUILTIN_REFLOG_DELETE_USAGE,
+ BUILTIN_REFLOG_EXISTS_USAGE,
+ NULL
+};
-static void reflog_expiry_cleanup(void *cb_data)
-{
- struct expire_reflog_policy_cb *cb = cb_data;
- struct commit_list *elem;
+static timestamp_t default_reflog_expire;
+static timestamp_t default_reflog_expire_unreachable;
- switch (cb->unreachable_expire_kind) {
- case UE_ALWAYS:
- return;
- case UE_HEAD:
- for (elem = cb->tips; elem; elem = elem->next)
- clear_commit_marks(elem->item, REACHABLE);
- free_commit_list(cb->tips);
- break;
- case UE_NORMAL:
- clear_commit_marks(cb->tip_commit, REACHABLE);
- break;
- }
-}
+struct worktree_reflogs {
+ struct worktree *worktree;
+ struct string_list reflogs;
+};
static int collect_reflog(const char *ref, const struct object_id *oid, int unused, void *cb_data)
{
@@ -520,18 +152,18 @@ static int reflog_expire_config(const char *var, const char *value, void *cb)
return 0;
}
-static void set_reflog_expiry_param(struct cmd_reflog_expire_cb *cb, int slot, const char *ref)
+static void set_reflog_expiry_param(struct cmd_reflog_expire_cb *cb, const char *ref)
{
struct reflog_expire_cfg *ent;
- if (slot == (EXPIRE_TOTAL|EXPIRE_UNREACH))
+ if (cb->explicit_expiry == (EXPIRE_TOTAL|EXPIRE_UNREACH))
return; /* both given explicitly -- nothing to tweak */
for (ent = reflog_expire_cfg; ent; ent = ent->next) {
if (!wildmatch(ent->pattern, ref, 0)) {
- if (!(slot & EXPIRE_TOTAL))
+ if (!(cb->explicit_expiry & EXPIRE_TOTAL))
cb->expire_total = ent->expire_total;
- if (!(slot & EXPIRE_UNREACH))
+ if (!(cb->explicit_expiry & EXPIRE_UNREACH))
cb->expire_unreachable = ent->expire_unreachable;
return;
}
@@ -541,29 +173,94 @@ static void set_reflog_expiry_param(struct cmd_reflog_expire_cb *cb, int slot, c
* If unconfigured, make stash never expire
*/
if (!strcmp(ref, "refs/stash")) {
- if (!(slot & EXPIRE_TOTAL))
+ if (!(cb->explicit_expiry & EXPIRE_TOTAL))
cb->expire_total = 0;
- if (!(slot & EXPIRE_UNREACH))
+ if (!(cb->explicit_expiry & EXPIRE_UNREACH))
cb->expire_unreachable = 0;
return;
}
/* Nothing matched -- use the default value */
- if (!(slot & EXPIRE_TOTAL))
+ if (!(cb->explicit_expiry & EXPIRE_TOTAL))
cb->expire_total = default_reflog_expire;
- if (!(slot & EXPIRE_UNREACH))
+ if (!(cb->explicit_expiry & EXPIRE_UNREACH))
cb->expire_unreachable = default_reflog_expire_unreachable;
}
+static int expire_unreachable_callback(const struct option *opt,
+ const char *arg,
+ int unset)
+{
+ struct cmd_reflog_expire_cb *cmd = opt->value;
+
+ if (parse_expiry_date(arg, &cmd->expire_unreachable))
+ die(_("invalid timestamp '%s' given to '--%s'"),
+ arg, opt->long_name);
+
+ cmd->explicit_expiry |= EXPIRE_UNREACH;
+ return 0;
+}
+
+static int expire_total_callback(const struct option *opt,
+ const char *arg,
+ int unset)
+{
+ struct cmd_reflog_expire_cb *cmd = opt->value;
+
+ if (parse_expiry_date(arg, &cmd->expire_total))
+ die(_("invalid timestamp '%s' given to '--%s'"),
+ arg, opt->long_name);
+
+ cmd->explicit_expiry |= EXPIRE_TOTAL;
+ return 0;
+}
+
+static int cmd_reflog_show(int argc, const char **argv, const char *prefix)
+{
+ struct option options[] = {
+ OPT_END()
+ };
+
+ parse_options(argc, argv, prefix, options, reflog_show_usage,
+ PARSE_OPT_KEEP_DASHDASH | PARSE_OPT_KEEP_ARGV0 |
+ PARSE_OPT_KEEP_UNKNOWN);
+
+ return cmd_log_reflog(argc, argv, prefix);
+}
+
static int cmd_reflog_expire(int argc, const char **argv, const char *prefix)
{
struct cmd_reflog_expire_cb cmd = { 0 };
timestamp_t now = time(NULL);
int i, status, do_all, all_worktrees = 1;
- int explicit_expiry = 0;
unsigned int flags = 0;
int verbose = 0;
reflog_expiry_should_prune_fn *should_prune_fn = should_expire_reflog_ent;
+ const struct option options[] = {
+ OPT_BIT(0, "dry-run", &flags, N_("do not actually prune any entries"),
+ EXPIRE_REFLOGS_DRY_RUN),
+ OPT_BIT(0, "rewrite", &flags,
+ N_("rewrite the old SHA1 with the new SHA1 of the entry that now precedes it"),
+ EXPIRE_REFLOGS_REWRITE),
+ OPT_BIT(0, "updateref", &flags,
+ N_("update the reference to the value of the top reflog entry"),
+ EXPIRE_REFLOGS_UPDATE_REF),
+ OPT_BOOL(0, "verbose", &verbose, N_("print extra information on screen")),
+ OPT_CALLBACK_F(0, "expire", &cmd, N_("timestamp"),
+ N_("prune entries older than the specified time"),
+ PARSE_OPT_NONEG,
+ expire_total_callback),
+ OPT_CALLBACK_F(0, "expire-unreachable", &cmd, N_("timestamp"),
+ N_("prune entries older than <time> that are not reachable from the current tip of the branch"),
+ PARSE_OPT_NONEG,
+ expire_unreachable_callback),
+ OPT_BOOL(0, "stale-fix", &cmd.stalefix,
+ N_("prune any reflog entries that point to broken commits")),
+ OPT_BOOL(0, "all", &do_all, N_("process the reflogs of all references")),
+ OPT_BOOL(1, "single-worktree", &all_worktrees,
+ N_("limits processing to reflogs from the current worktree only")),
+ OPT_END()
+ };
default_reflog_expire_unreachable = now - 30 * 24 * 3600;
default_reflog_expire = now - 90 * 24 * 3600;
@@ -572,45 +269,11 @@ static int cmd_reflog_expire(int argc, const char **argv, const char *prefix)
save_commit_buffer = 0;
do_all = status = 0;
+ cmd.explicit_expiry = 0;
cmd.expire_total = default_reflog_expire;
cmd.expire_unreachable = default_reflog_expire_unreachable;
- for (i = 1; i < argc; i++) {
- const char *arg = argv[i];
-
- if (!strcmp(arg, "--dry-run") || !strcmp(arg, "-n"))
- flags |= EXPIRE_REFLOGS_DRY_RUN;
- else if (skip_prefix(arg, "--expire=", &arg)) {
- if (parse_expiry_date(arg, &cmd.expire_total))
- die(_("'%s' is not a valid timestamp"), arg);
- explicit_expiry |= EXPIRE_TOTAL;
- }
- else if (skip_prefix(arg, "--expire-unreachable=", &arg)) {
- if (parse_expiry_date(arg, &cmd.expire_unreachable))
- die(_("'%s' is not a valid timestamp"), arg);
- explicit_expiry |= EXPIRE_UNREACH;
- }
- else if (!strcmp(arg, "--stale-fix"))
- cmd.stalefix = 1;
- else if (!strcmp(arg, "--rewrite"))
- flags |= EXPIRE_REFLOGS_REWRITE;
- else if (!strcmp(arg, "--updateref"))
- flags |= EXPIRE_REFLOGS_UPDATE_REF;
- else if (!strcmp(arg, "--all"))
- do_all = 1;
- else if (!strcmp(arg, "--single-worktree"))
- all_worktrees = 0;
- else if (!strcmp(arg, "--verbose"))
- verbose = 1;
- else if (!strcmp(arg, "--")) {
- i++;
- break;
- }
- else if (arg[0] == '-')
- usage(_(reflog_expire_usage));
- else
- break;
- }
+ argc = parse_options(argc, argv, prefix, options, reflog_expire_usage, 0);
if (verbose)
should_prune_fn = should_expire_reflog_ent_verbose;
@@ -657,7 +320,7 @@ static int cmd_reflog_expire(int argc, const char **argv, const char *prefix)
.dry_run = !!(flags & EXPIRE_REFLOGS_DRY_RUN),
};
- set_reflog_expiry_param(&cb.cmd, explicit_expiry, item->string);
+ set_reflog_expiry_param(&cb.cmd, item->string);
status |= reflog_expire(item->string, flags,
reflog_expiry_prepare,
should_prune_fn,
@@ -667,7 +330,7 @@ static int cmd_reflog_expire(int argc, const char **argv, const char *prefix)
string_list_clear(&collected.reflogs, 0);
}
- for (; i < argc; i++) {
+ for (i = 0; i < argc; i++) {
char *ref;
struct expire_reflog_policy_cb cb = { .cmd = cmd };
@@ -675,7 +338,7 @@ static int cmd_reflog_expire(int argc, const char **argv, const char *prefix)
status |= error(_("%s points nowhere!"), argv[i]);
continue;
}
- set_reflog_expiry_param(&cb.cmd, explicit_expiry, ref);
+ set_reflog_expiry_param(&cb.cmd, ref);
status |= reflog_expire(ref, flags,
reflog_expiry_prepare,
should_prune_fn,
@@ -686,142 +349,94 @@ static int cmd_reflog_expire(int argc, const char **argv, const char *prefix)
return status;
}
-static int count_reflog_ent(struct object_id *ooid, struct object_id *noid,
- const char *email, timestamp_t timestamp, int tz,
- const char *message, void *cb_data)
-{
- struct cmd_reflog_expire_cb *cb = cb_data;
- if (!cb->expire_total || timestamp < cb->expire_total)
- cb->recno++;
- return 0;
-}
-
static int cmd_reflog_delete(int argc, const char **argv, const char *prefix)
{
- struct cmd_reflog_expire_cb cmd = { 0 };
int i, status = 0;
unsigned int flags = 0;
int verbose = 0;
- reflog_expiry_should_prune_fn *should_prune_fn = should_expire_reflog_ent;
-
- for (i = 1; i < argc; i++) {
- const char *arg = argv[i];
- if (!strcmp(arg, "--dry-run") || !strcmp(arg, "-n"))
- flags |= EXPIRE_REFLOGS_DRY_RUN;
- else if (!strcmp(arg, "--rewrite"))
- flags |= EXPIRE_REFLOGS_REWRITE;
- else if (!strcmp(arg, "--updateref"))
- flags |= EXPIRE_REFLOGS_UPDATE_REF;
- else if (!strcmp(arg, "--verbose"))
- verbose = 1;
- else if (!strcmp(arg, "--")) {
- i++;
- break;
- }
- else if (arg[0] == '-')
- usage(_(reflog_delete_usage));
- else
- break;
- }
-
- if (verbose)
- should_prune_fn = should_expire_reflog_ent_verbose;
- if (argc - i < 1)
+ const struct option options[] = {
+ OPT_BIT(0, "dry-run", &flags, N_("do not actually prune any entries"),
+ EXPIRE_REFLOGS_DRY_RUN),
+ OPT_BIT(0, "rewrite", &flags,
+ N_("rewrite the old SHA1 with the new SHA1 of the entry that now precedes it"),
+ EXPIRE_REFLOGS_REWRITE),
+ OPT_BIT(0, "updateref", &flags,
+ N_("update the reference to the value of the top reflog entry"),
+ EXPIRE_REFLOGS_UPDATE_REF),
+ OPT_BOOL(0, "verbose", &verbose, N_("print extra information on screen")),
+ OPT_END()
+ };
+
+ argc = parse_options(argc, argv, prefix, options, reflog_delete_usage, 0);
+
+ if (argc < 1)
return error(_("no reflog specified to delete"));
- for ( ; i < argc; i++) {
- const char *spec = strstr(argv[i], "@{");
- char *ep, *ref;
- int recno;
- struct expire_reflog_policy_cb cb = {
- .dry_run = !!(flags & EXPIRE_REFLOGS_DRY_RUN),
- };
-
- if (!spec) {
- status |= error(_("not a reflog: %s"), argv[i]);
- continue;
- }
-
- if (!dwim_log(argv[i], spec - argv[i], NULL, &ref)) {
- status |= error(_("no reflog for '%s'"), argv[i]);
- continue;
- }
-
- recno = strtoul(spec + 2, &ep, 10);
- if (*ep == '}') {
- cmd.recno = -recno;
- for_each_reflog_ent(ref, count_reflog_ent, &cmd);
- } else {
- cmd.expire_total = approxidate(spec + 2);
- for_each_reflog_ent(ref, count_reflog_ent, &cmd);
- cmd.expire_total = 0;
- }
+ for (i = 0; i < argc; i++)
+ status |= reflog_delete(argv[i], flags, verbose);
- cb.cmd = cmd;
- status |= reflog_expire(ref, flags,
- reflog_expiry_prepare,
- should_prune_fn,
- reflog_expiry_cleanup,
- &cb);
- free(ref);
- }
return status;
}
static int cmd_reflog_exists(int argc, const char **argv, const char *prefix)
{
- int i, start = 0;
-
- for (i = 1; i < argc; i++) {
- const char *arg = argv[i];
- if (!strcmp(arg, "--")) {
- i++;
- break;
- }
- else if (arg[0] == '-')
- usage(_(reflog_exists_usage));
- else
- break;
- }
-
- start = i;
-
- if (argc - start != 1)
- usage(_(reflog_exists_usage));
-
- if (check_refname_format(argv[start], REFNAME_ALLOW_ONELEVEL))
- die(_("invalid ref format: %s"), argv[start]);
- return !reflog_exists(argv[start]);
+ struct option options[] = {
+ OPT_END()
+ };
+ const char *refname;
+
+ argc = parse_options(argc, argv, prefix, options, reflog_exists_usage,
+ 0);
+ if (!argc)
+ usage_with_options(reflog_exists_usage, options);
+
+ refname = argv[0];
+ if (check_refname_format(refname, REFNAME_ALLOW_ONELEVEL))
+ die(_("invalid ref format: %s"), refname);
+ return !reflog_exists(refname);
}
/*
* main "reflog"
*/
-static const char reflog_usage[] =
-N_("git reflog [ show | expire | delete | exists ]");
-
int cmd_reflog(int argc, const char **argv, const char *prefix)
{
- if (argc > 1 && !strcmp(argv[1], "-h"))
- usage(_(reflog_usage));
+ struct option options[] = {
+ OPT_END()
+ };
- /* With no command, we default to showing it. */
- if (argc < 2 || *argv[1] == '-')
- return cmd_log_reflog(argc, argv, prefix);
+ argc = parse_options(argc, argv, prefix, options, reflog_usage,
+ PARSE_OPT_KEEP_DASHDASH | PARSE_OPT_KEEP_ARGV0 |
+ PARSE_OPT_KEEP_UNKNOWN |
+ PARSE_OPT_NO_INTERNAL_HELP);
- if (!strcmp(argv[1], "show"))
- return cmd_log_reflog(argc - 1, argv + 1, prefix);
+ /*
+ * With "git reflog" we default to showing it. !argc is
+ * impossible with PARSE_OPT_KEEP_ARGV0.
+ */
+ if (argc == 1)
+ goto log_reflog;
- if (!strcmp(argv[1], "expire"))
- return cmd_reflog_expire(argc - 1, argv + 1, prefix);
+ if (!strcmp(argv[1], "-h"))
+ usage_with_options(reflog_usage, options);
+ else if (*argv[1] == '-')
+ goto log_reflog;
- if (!strcmp(argv[1], "delete"))
+ if (!strcmp(argv[1], "show"))
+ return cmd_reflog_show(argc - 1, argv + 1, prefix);
+ else if (!strcmp(argv[1], "expire"))
+ return cmd_reflog_expire(argc - 1, argv + 1, prefix);
+ else if (!strcmp(argv[1], "delete"))
return cmd_reflog_delete(argc - 1, argv + 1, prefix);
-
- if (!strcmp(argv[1], "exists"))
+ else if (!strcmp(argv[1], "exists"))
return cmd_reflog_exists(argc - 1, argv + 1, prefix);
+ /*
+ * Fall-through for e.g. "git reflog -1", "git reflog master",
+ * as well as the plain "git reflog" case that jumps here via the goto above.
+ */
+log_reflog:
return cmd_log_reflog(argc, argv, prefix);
}
diff --git a/builtin/remote.c b/builtin/remote.c
index 299c466..5f4cde9 100644
--- a/builtin/remote.c
+++ b/builtin/remote.c
@@ -12,11 +12,12 @@
#include "object-store.h"
#include "strvec.h"
#include "commit-reach.h"
+#include "progress.h"
static const char * const builtin_remote_usage[] = {
- N_("git remote [-v | --verbose]"),
+ "git remote [-v | --verbose]",
N_("git remote add [-t <branch>] [-m <master>] [-f] [--tags | --no-tags] [--mirror=<fetch|push>] <name> <url>"),
- N_("git remote rename <old> <new>"),
+ N_("git remote rename [--[no-]progress] <old> <new>"),
N_("git remote remove <name>"),
N_("git remote set-head <name> (-a | --auto | -d | --delete | <branch>)"),
N_("git remote [-v | --verbose] show [-n] <name>"),
@@ -36,7 +37,7 @@ static const char * const builtin_remote_add_usage[] = {
};
static const char * const builtin_remote_rename_usage[] = {
- N_("git remote rename <old> <new>"),
+ N_("git remote rename [--[no-]progress] <old> <new>"),
NULL
};
@@ -571,6 +572,7 @@ struct rename_info {
const char *old_name;
const char *new_name;
struct string_list *remote_branches;
+ uint32_t symrefs_nr;
};
static int read_remote_branches(const char *refname,
@@ -587,10 +589,12 @@ static int read_remote_branches(const char *refname,
item = string_list_append(rename->remote_branches, refname);
symref = resolve_ref_unsafe(refname, RESOLVE_REF_READING,
NULL, &flag);
- if (symref && (flag & REF_ISSYMREF))
+ if (symref && (flag & REF_ISSYMREF)) {
item->util = xstrdup(symref);
- else
+ rename->symrefs_nr++;
+ } else {
item->util = NULL;
+ }
}
strbuf_release(&buf);
@@ -674,7 +678,9 @@ static void handle_push_default(const char* old_name, const char* new_name)
static int mv(int argc, const char **argv)
{
+ int show_progress = isatty(2);
struct option options[] = {
+ OPT_BOOL(0, "progress", &show_progress, N_("force progress reporting")),
OPT_END()
};
struct remote *oldremote, *newremote;
@@ -682,14 +688,19 @@ static int mv(int argc, const char **argv)
old_remote_context = STRBUF_INIT;
struct string_list remote_branches = STRING_LIST_INIT_DUP;
struct rename_info rename;
- int i, refspec_updated = 0;
+ int i, refs_renamed_nr = 0, refspec_updated = 0;
+ struct progress *progress = NULL;
+
+ argc = parse_options(argc, argv, NULL, options,
+ builtin_remote_rename_usage, 0);
- if (argc != 3)
+ if (argc != 2)
usage_with_options(builtin_remote_rename_usage, options);
- rename.old_name = argv[1];
- rename.new_name = argv[2];
+ rename.old_name = argv[0];
+ rename.new_name = argv[1];
rename.remote_branches = &remote_branches;
+ rename.symrefs_nr = 0;
oldremote = remote_get(rename.old_name);
if (!remote_is_configured(oldremote, 1)) {
@@ -764,15 +775,26 @@ static int mv(int argc, const char **argv)
* the new symrefs.
*/
for_each_ref(read_remote_branches, &rename);
+ if (show_progress) {
+ /*
+ * Count symrefs twice, since "renaming" them is done by
+ * deleting and recreating them in two separate passes.
+ */
+ progress = start_progress(_("Renaming remote references"),
+ rename.remote_branches->nr + rename.symrefs_nr);
+ }
for (i = 0; i < remote_branches.nr; i++) {
struct string_list_item *item = remote_branches.items + i;
- int flag = 0;
+ struct strbuf referent = STRBUF_INIT;
- read_ref_full(item->string, RESOLVE_REF_READING, NULL, &flag);
- if (!(flag & REF_ISSYMREF))
+ if (refs_read_symbolic_ref(get_main_ref_store(the_repository), item->string,
+ &referent))
continue;
if (delete_ref(NULL, item->string, NULL, REF_NO_DEREF))
die(_("deleting '%s' failed"), item->string);
+
+ strbuf_release(&referent);
+ display_progress(progress, ++refs_renamed_nr);
}
for (i = 0; i < remote_branches.nr; i++) {
struct string_list_item *item = remote_branches.items + i;
@@ -788,6 +810,7 @@ static int mv(int argc, const char **argv)
item->string, buf.buf);
if (rename_ref(item->string, buf.buf, buf2.buf))
die(_("renaming '%s' failed"), item->string);
+ display_progress(progress, ++refs_renamed_nr);
}
for (i = 0; i < remote_branches.nr; i++) {
struct string_list_item *item = remote_branches.items + i;
@@ -807,7 +830,9 @@ static int mv(int argc, const char **argv)
item->string, buf.buf);
if (create_symref(buf.buf, buf2.buf, buf3.buf))
die(_("creating '%s' failed"), buf.buf);
+ display_progress(progress, ++refs_renamed_nr);
}
+ stop_progress(&progress);
string_list_clear(&remote_branches, 1);
handle_push_default(rename.old_name, rename.new_name);
diff --git a/builtin/repack.c b/builtin/repack.c
index da1e364..d1a563d 100644
--- a/builtin/repack.c
+++ b/builtin/repack.c
@@ -22,6 +22,7 @@ static int delta_base_offset = 1;
static int pack_kept_objects = -1;
static int write_bitmaps = -1;
static int use_delta_islands;
+static int run_update_server_info = 1;
static char *packdir, *packtmp_name, *packtmp;
static const char *const git_repack_usage[] = {
@@ -54,6 +55,10 @@ static int repack_config(const char *var, const char *value, void *cb)
use_delta_islands = git_config_bool(var, value);
return 0;
}
+ if (strcmp(var, "repack.updateserverinfo") == 0) {
+ run_update_server_info = git_config_bool(var, value);
+ return 0;
+ }
return git_default_config(var, value, cb);
}
@@ -620,7 +625,6 @@ int cmd_repack(int argc, const char **argv, const char *prefix)
const char *unpack_unreachable = NULL;
int keep_unreachable = 0;
struct string_list keep_pack_list = STRING_LIST_INIT_NODUP;
- int no_update_server_info = 0;
struct pack_objects_args po_args = {NULL};
int geometric_factor = 0;
int write_midx = 0;
@@ -637,8 +641,8 @@ int cmd_repack(int argc, const char **argv, const char *prefix)
N_("pass --no-reuse-delta to git-pack-objects")),
OPT_BOOL('F', NULL, &po_args.no_reuse_object,
N_("pass --no-reuse-object to git-pack-objects")),
- OPT_BOOL('n', NULL, &no_update_server_info,
- N_("do not run git-update-server-info")),
+ OPT_NEGBIT('n', NULL, &run_update_server_info,
+ N_("do not run git-update-server-info"), 1),
OPT__QUIET(&po_args.quiet, N_("be quiet")),
OPT_BOOL('l', "local", &po_args.local,
N_("pass --local to git-pack-objects")),
@@ -939,7 +943,7 @@ int cmd_repack(int argc, const char **argv, const char *prefix)
prune_shallow(PRUNE_QUICK);
}
- if (!no_update_server_info)
+ if (run_update_server_info)
update_server_info(0);
remove_temporary_files();
diff --git a/builtin/replace.c b/builtin/replace.c
index 6ff1734..5068f4f 100644
--- a/builtin/replace.c
+++ b/builtin/replace.c
@@ -22,7 +22,7 @@ static const char * const git_replace_usage[] = {
N_("git replace [-f] <object> <replacement>"),
N_("git replace [-f] --edit <object>"),
N_("git replace [-f] --graft <commit> [<parent>...]"),
- N_("git replace [-f] --convert-graft-file"),
+ "git replace [-f] --convert-graft-file",
N_("git replace -d <object>..."),
N_("git replace [--format=<format>] [-l [<pattern>]]"),
NULL
@@ -409,7 +409,7 @@ static int check_one_mergetag(struct commit *commit,
int i;
hash_object_file(the_hash_algo, extra->value, extra->len,
- type_name(OBJ_TAG), &tag_oid);
+ OBJ_TAG, &tag_oid);
tag = lookup_tag(the_repository, &tag_oid);
if (!tag)
return error(_("bad mergetag in commit '%s'"), ref);
@@ -474,7 +474,7 @@ static int create_graft(int argc, const char **argv, int force, int gentle)
return -1;
}
- if (write_object_file(buf.buf, buf.len, commit_type, &new_oid)) {
+ if (write_object_file(buf.buf, buf.len, OBJ_COMMIT, &new_oid)) {
strbuf_release(&buf);
return error(_("could not write replacement commit for: '%s'"),
old_ref);
diff --git a/builtin/reset.c b/builtin/reset.c
index b97745e..344fff8 100644
--- a/builtin/reset.c
+++ b/builtin/reset.c
@@ -204,10 +204,16 @@ static int pathspec_needs_expanded_index(const struct pathspec *pathspec)
/*
* Special case: if the pattern is a path inside the cone
* followed by only wildcards, the pattern cannot match
- * partial sparse directories, so we don't expand the index.
+ * partial sparse directories, so we know we don't need to
+ * expand the index.
+ *
+ * Examples:
+ * - in-cone/foo***: doesn't need expanded index
+ * - not-in-cone/bar*: may need expanded index
+ * - **.c: may need expanded index
*/
- if (path_in_cone_mode_sparse_checkout(item.original, &the_index) &&
- strspn(item.original + item.nowildcard_len, "*") == item.len - item.nowildcard_len)
+ if (strspn(item.original + item.nowildcard_len, "*") == item.len - item.nowildcard_len &&
+ path_in_cone_mode_sparse_checkout(item.original, &the_index))
continue;
for (pos = 0; pos < active_nr; pos++) {
@@ -274,7 +280,6 @@ static int read_from_tree(const struct pathspec *pathspec,
return 1;
diffcore_std(&opt);
diff_flush(&opt);
- clear_pathspec(&opt.pathspec);
return 0;
}
@@ -387,6 +392,7 @@ static int git_reset_config(const char *var, const char *value, void *cb)
int cmd_reset(int argc, const char **argv, const char *prefix)
{
int reset_type = NONE, update_ref_status = 0, quiet = 0;
+ int no_refresh = 0;
int patch_mode = 0, pathspec_file_nul = 0, unborn;
const char *rev, *pathspec_from_file = NULL;
struct object_id oid;
@@ -394,6 +400,8 @@ int cmd_reset(int argc, const char **argv, const char *prefix)
int intent_to_add = 0;
const struct option options[] = {
OPT__QUIET(&quiet, N_("be quiet, only report errors")),
+ OPT_BOOL(0, "no-refresh", &no_refresh,
+ N_("skip refreshing the index after reset")),
OPT_SET_INT(0, "mixed", &reset_type,
N_("reset HEAD and index"), MIXED),
OPT_SET_INT(0, "soft", &reset_type, N_("reset only HEAD"), SOFT),
@@ -415,7 +423,6 @@ int cmd_reset(int argc, const char **argv, const char *prefix)
};
git_config(git_reset_config, NULL);
- git_config_get_bool("reset.quiet", &quiet);
argc = parse_options(argc, argv, prefix, options, git_reset_usage,
PARSE_OPT_KEEP_DASHDASH);
@@ -512,17 +519,16 @@ int cmd_reset(int argc, const char **argv, const char *prefix)
if (read_from_tree(&pathspec, &oid, intent_to_add))
return 1;
the_index.updated_skipworktree = 1;
- if (!quiet && get_git_work_tree()) {
+ if (!no_refresh && get_git_work_tree()) {
uint64_t t_begin, t_delta_in_ms;
t_begin = getnanotime();
refresh_index(&the_index, flags, NULL, NULL,
_("Unstaged changes after reset:"));
t_delta_in_ms = (getnanotime() - t_begin) / 1000000;
- if (advice_enabled(ADVICE_RESET_QUIET_WARNING) && t_delta_in_ms > REFRESH_INDEX_DELAY_WARNING_IN_MS) {
- printf(_("\nIt took %.2f seconds to enumerate unstaged changes after reset. You can\n"
- "use '--quiet' to avoid this. Set the config setting reset.quiet to true\n"
- "to make this the default.\n"), t_delta_in_ms / 1000.0);
+ if (!quiet && advice_enabled(ADVICE_RESET_NO_REFRESH_WARNING) && t_delta_in_ms > REFRESH_INDEX_DELAY_WARNING_IN_MS) {
+ advise(_("It took %.2f seconds to refresh the index after reset. You can use\n"
+ "'--no-refresh' to avoid this."), t_delta_in_ms / 1000.0);
}
}
} else {
diff --git a/builtin/rev-list.c b/builtin/rev-list.c
index 777558e..572da14 100644
--- a/builtin/rev-list.c
+++ b/builtin/rev-list.c
@@ -20,7 +20,7 @@
#include "packfile.h"
static const char rev_list_usage[] =
-"git rev-list [OPTION] <commit-id>... [ -- paths... ]\n"
+"git rev-list [<options>] <commit-id>... [-- <path>...]\n"
" limiting output:\n"
" --max-count=<n>\n"
" --max-age=<epoch>\n"
@@ -62,7 +62,6 @@ static const char rev_list_usage[] =
static struct progress *progress;
static unsigned progress_counter;
-static struct list_objects_filter_options filter_options;
static struct oidset omitted_objects;
static int arg_print_omitted; /* print objects omitted by filter */
@@ -400,7 +399,6 @@ static inline int parse_missing_action_value(const char *value)
}
static int try_bitmap_count(struct rev_info *revs,
- struct list_objects_filter_options *filter,
int filter_provided_objects)
{
uint32_t commit_count = 0,
@@ -436,7 +434,7 @@ static int try_bitmap_count(struct rev_info *revs,
*/
max_count = revs->max_count;
- bitmap_git = prepare_bitmap_walk(revs, filter, filter_provided_objects);
+ bitmap_git = prepare_bitmap_walk(revs, filter_provided_objects);
if (!bitmap_git)
return -1;
@@ -453,7 +451,6 @@ static int try_bitmap_count(struct rev_info *revs,
}
static int try_bitmap_traversal(struct rev_info *revs,
- struct list_objects_filter_options *filter,
int filter_provided_objects)
{
struct bitmap_index *bitmap_git;
@@ -465,7 +462,7 @@ static int try_bitmap_traversal(struct rev_info *revs,
if (revs->max_count >= 0)
return -1;
- bitmap_git = prepare_bitmap_walk(revs, filter, filter_provided_objects);
+ bitmap_git = prepare_bitmap_walk(revs, filter_provided_objects);
if (!bitmap_git)
return -1;
@@ -475,7 +472,6 @@ static int try_bitmap_traversal(struct rev_info *revs,
}
static int try_bitmap_disk_usage(struct rev_info *revs,
- struct list_objects_filter_options *filter,
int filter_provided_objects)
{
struct bitmap_index *bitmap_git;
@@ -483,7 +479,7 @@ static int try_bitmap_disk_usage(struct rev_info *revs,
if (!show_disk_usage)
return -1;
- bitmap_git = prepare_bitmap_walk(revs, filter, filter_provided_objects);
+ bitmap_git = prepare_bitmap_walk(revs, filter_provided_objects);
if (!bitmap_git)
return -1;
@@ -595,17 +591,6 @@ int cmd_rev_list(int argc, const char **argv, const char *prefix)
show_progress = arg;
continue;
}
-
- if (skip_prefix(arg, ("--" CL_ARG__FILTER "="), &arg)) {
- parse_list_objects_filter(&filter_options, arg);
- if (filter_options.choice && !revs.blob_objects)
- die(_("object filtering requires --objects"));
- continue;
- }
- if (!strcmp(arg, ("--no-" CL_ARG__FILTER))) {
- list_objects_filter_set_no_filter(&filter_options);
- continue;
- }
if (!strcmp(arg, "--filter-provided-objects")) {
filter_provided_objects = 1;
continue;
@@ -688,11 +673,11 @@ int cmd_rev_list(int argc, const char **argv, const char *prefix)
progress = start_delayed_progress(show_progress, 0);
if (use_bitmap_index) {
- if (!try_bitmap_count(&revs, &filter_options, filter_provided_objects))
+ if (!try_bitmap_count(&revs, filter_provided_objects))
return 0;
- if (!try_bitmap_disk_usage(&revs, &filter_options, filter_provided_objects))
+ if (!try_bitmap_disk_usage(&revs, filter_provided_objects))
return 0;
- if (!try_bitmap_traversal(&revs, &filter_options, filter_provided_objects))
+ if (!try_bitmap_traversal(&revs, filter_provided_objects))
return 0;
}
@@ -733,7 +718,7 @@ int cmd_rev_list(int argc, const char **argv, const char *prefix)
oidset_init(&missing_objects, DEFAULT_OIDSET_SIZE);
traverse_commit_list_filtered(
- &filter_options, &revs, show_commit, show_object, &info,
+ &revs, show_commit, show_object, &info,
(arg_print_omitted ? &omitted_objects : NULL));
if (arg_print_omitted) {
diff --git a/builtin/send-pack.c b/builtin/send-pack.c
index 69c432e..64962be 100644
--- a/builtin/send-pack.c
+++ b/builtin/send-pack.c
@@ -145,7 +145,7 @@ static int send_pack_config(const char *k, const char *v, void *cb)
if (value && !strcasecmp(value, "if-asked"))
args.push_cert = SEND_PACK_PUSH_CERT_IF_ASKED;
else
- return error("Invalid value for '%s'", k);
+ return error(_("invalid value for '%s'"), k);
}
}
}
diff --git a/builtin/shortlog.c b/builtin/shortlog.c
index e7f7af5..26c5c0c 100644
--- a/builtin/shortlog.c
+++ b/builtin/shortlog.c
@@ -388,6 +388,7 @@ int cmd_shortlog(int argc, const char **argv, const char *prefix)
parse_revision_opt(&rev, &ctx, options, shortlog_usage);
}
parse_done:
+ revision_opts_finish(&rev);
argc = parse_options_end(&ctx);
if (nongit && argc > 1) {
@@ -434,7 +435,7 @@ static void add_wrapped_shortlog_msg(struct strbuf *sb, const char *s,
void shortlog_output(struct shortlog *log)
{
- int i, j;
+ size_t i, j;
struct strbuf sb = STRBUF_INIT;
if (log->sort_by_number)
@@ -447,10 +448,10 @@ void shortlog_output(struct shortlog *log)
(int)UTIL_TO_INT(item), item->string);
} else {
struct string_list *onelines = item->util;
- fprintf(log->file, "%s (%d):\n",
- item->string, onelines->nr);
- for (j = onelines->nr - 1; j >= 0; j--) {
- const char *msg = onelines->items[j].string;
+ fprintf(log->file, "%s (%"PRIuMAX"):\n",
+ item->string, (uintmax_t)onelines->nr);
+ for (j = onelines->nr; j >= 1; j--) {
+ const char *msg = onelines->items[j - 1].string;
if (log->wrap_lines) {
strbuf_reset(&sb);
diff --git a/builtin/show-branch.c b/builtin/show-branch.c
index e12c5e8..330b055 100644
--- a/builtin/show-branch.c
+++ b/builtin/show-branch.c
@@ -8,6 +8,7 @@
#include "parse-options.h"
#include "dir.h"
#include "commit-slab.h"
+#include "date.h"
static const char* show_branch_usage[] = {
N_("git show-branch [-a | --all] [-r | --remotes] [--topo-order | --date-order]\n"
diff --git a/builtin/sparse-checkout.c b/builtin/sparse-checkout.c
index 679c107..0217d44 100644
--- a/builtin/sparse-checkout.c
+++ b/builtin/sparse-checkout.c
@@ -1,4 +1,5 @@
#include "builtin.h"
+#include "cache.h"
#include "config.h"
#include "dir.h"
#include "parse-options.h"
@@ -7,7 +8,6 @@
#include "run-command.h"
#include "strbuf.h"
#include "string-list.h"
-#include "cache.h"
#include "cache-tree.h"
#include "lockfile.h"
#include "resolve-undo.h"
@@ -15,6 +15,7 @@
#include "wt-status.h"
#include "quote.h"
#include "sparse-index.h"
+#include "worktree.h"
static const char *empty_base = "";
@@ -43,7 +44,7 @@ static void write_patterns_to_file(FILE *fp, struct pattern_list *pl)
}
static char const * const builtin_sparse_checkout_list_usage[] = {
- N_("git sparse-checkout list"),
+ "git sparse-checkout list",
NULL
};
@@ -185,6 +186,8 @@ static void clean_tracked_sparse_directories(struct repository *r)
item->string);
}
+ strvec_clear(&s);
+ clear_pathspec(&p);
dir_clear(&dir);
}
@@ -325,11 +328,11 @@ static int write_patterns_and_update(struct pattern_list *pl)
fd = hold_lock_file_for_update(&lk, sparse_filename,
LOCK_DIE_ON_ERROR);
+ free(sparse_filename);
result = update_working_directory(pl);
if (result) {
rollback_lock_file(&lk);
- free(sparse_filename);
clear_pattern_list(pl);
update_working_directory(NULL);
return result;
@@ -345,7 +348,6 @@ static int write_patterns_and_update(struct pattern_list *pl)
fflush(fp);
commit_lock_file(&lk);
- free(sparse_filename);
clear_pattern_list(pl);
return 0;
@@ -359,26 +361,23 @@ enum sparse_checkout_mode {
static int set_config(enum sparse_checkout_mode mode)
{
- const char *config_path;
-
- if (upgrade_repository_format(1) < 0)
- die(_("unable to upgrade repository format to enable worktreeConfig"));
- if (git_config_set_gently("extensions.worktreeConfig", "true")) {
- error(_("failed to set extensions.worktreeConfig setting"));
+ /* Update to use worktree config, if not already. */
+ if (init_worktree_config(the_repository)) {
+ error(_("failed to initialize worktree config"));
return 1;
}
- config_path = git_path("config.worktree");
- git_config_set_in_file_gently(config_path,
- "core.sparseCheckout",
- mode ? "true" : NULL);
-
- git_config_set_in_file_gently(config_path,
- "core.sparseCheckoutCone",
- mode == MODE_CONE_PATTERNS ? "true" : NULL);
+ if (repo_config_set_worktree_gently(the_repository,
+ "core.sparseCheckout",
+ mode ? "true" : "false") ||
+ repo_config_set_worktree_gently(the_repository,
+ "core.sparseCheckoutCone",
+ mode == MODE_CONE_PATTERNS ?
+ "true" : "false"))
+ return 1;
if (mode == MODE_NO_PATTERNS)
- set_sparse_index_config(the_repository, 0);
+ return set_sparse_index_config(the_repository, 0);
return 0;
}
@@ -401,6 +400,7 @@ static int update_modes(int *cone_mode, int *sparse_index)
core_sparse_checkout_cone = 1;
} else {
mode = MODE_ALL_PATTERNS;
+ core_sparse_checkout_cone = 0;
}
if (record_mode && set_config(mode))
return 1;
@@ -419,7 +419,7 @@ static int update_modes(int *cone_mode, int *sparse_index)
}
static char const * const builtin_sparse_checkout_init_usage[] = {
- N_("git sparse-checkout init [--cone] [--[no-]sparse-index]"),
+ "git sparse-checkout init [--cone] [--[no-]sparse-index]",
NULL
};
@@ -471,6 +471,9 @@ static int sparse_checkout_init(int argc, const char **argv)
FILE *fp;
/* assume we are in a fresh repo, but update the sparse-checkout file */
+ if (safe_create_leading_directories(sparse_filename))
+ die(_("unable to create leading directories of %s"),
+ sparse_filename);
fp = xfopen(sparse_filename, "w");
if (!fp)
die(_("failed to open '%s'"), sparse_filename);
@@ -678,18 +681,76 @@ static int modify_pattern_list(int argc, const char **argv, int use_stdin,
return result;
}
+static void sanitize_paths(int argc, const char **argv,
+ const char *prefix, int skip_checks)
+{
+ int i;
+
+ if (!argc)
+ return;
+
+ if (prefix && *prefix && core_sparse_checkout_cone) {
+ /*
+ * The args are not pathspecs, so unfortunately we
+ * cannot imitate how cmd_add() uses parse_pathspec().
+ */
+ int prefix_len = strlen(prefix);
+
+ for (i = 0; i < argc; i++)
+ argv[i] = prefix_path(prefix, prefix_len, argv[i]);
+ }
+
+ if (skip_checks)
+ return;
+
+ if (prefix && *prefix && !core_sparse_checkout_cone)
+ die(_("please run from the toplevel directory in non-cone mode"));
+
+ if (core_sparse_checkout_cone) {
+ for (i = 0; i < argc; i++) {
+ if (argv[i][0] == '/')
+ die(_("specify directories rather than patterns (no leading slash)"));
+ if (argv[i][0] == '!')
+ die(_("specify directories rather than patterns. If your directory starts with a '!', pass --skip-checks"));
+ if (strpbrk(argv[i], "*?[]"))
+ die(_("specify directories rather than patterns. If your directory really has any of '*?[]\\' in it, pass --skip-checks"));
+ }
+ }
+
+ for (i = 0; i < argc; i++) {
+ struct cache_entry *ce;
+ struct index_state *index = the_repository->index;
+ int pos = index_name_pos(index, argv[i], strlen(argv[i]));
+
+ if (pos < 0)
+ continue;
+ ce = index->cache[pos];
+ if (S_ISSPARSEDIR(ce->ce_mode))
+ continue;
+
+ if (core_sparse_checkout_cone)
+ die(_("'%s' is not a directory; to treat it as a directory anyway, rerun with --skip-checks"), argv[i]);
+ else
+ warning(_("pass a leading slash before paths such as '%s' if you want a single file (see NON-CONE PROBLEMS in the git-sparse-checkout manual)."), argv[i]);
+ }
+}
+
static char const * const builtin_sparse_checkout_add_usage[] = {
- N_("git sparse-checkout add (--stdin | <patterns>)"),
+ N_("git sparse-checkout add [--skip-checks] (--stdin | <patterns>)"),
NULL
};
static struct sparse_checkout_add_opts {
+ int skip_checks;
int use_stdin;
} add_opts;
static int sparse_checkout_add(int argc, const char **argv, const char *prefix)
{
static struct option builtin_sparse_checkout_add_options[] = {
+ OPT_BOOL_F(0, "skip-checks", &add_opts.skip_checks,
+ N_("skip some sanity checks on the given paths that might give false positives"),
+ PARSE_OPT_NONEG),
OPT_BOOL(0, "stdin", &add_opts.use_stdin,
N_("read patterns from standard in")),
OPT_END(),
@@ -705,17 +766,20 @@ static int sparse_checkout_add(int argc, const char **argv, const char *prefix)
builtin_sparse_checkout_add_usage,
PARSE_OPT_KEEP_UNKNOWN);
+ sanitize_paths(argc, argv, prefix, add_opts.skip_checks);
+
return modify_pattern_list(argc, argv, add_opts.use_stdin, ADD);
}
static char const * const builtin_sparse_checkout_set_usage[] = {
- N_("git sparse-checkout set [--[no-]cone] [--[no-]sparse-index] (--stdin | <patterns>)"),
+ N_("git sparse-checkout set [--[no-]cone] [--[no-]sparse-index] [--skip-checks] (--stdin | <patterns>)"),
NULL
};
static struct sparse_checkout_set_opts {
int cone_mode;
int sparse_index;
+ int skip_checks;
int use_stdin;
} set_opts;
@@ -729,6 +793,9 @@ static int sparse_checkout_set(int argc, const char **argv, const char *prefix)
N_("initialize the sparse-checkout in cone mode")),
OPT_BOOL(0, "sparse-index", &set_opts.sparse_index,
N_("toggle the use of a sparse index")),
+ OPT_BOOL_F(0, "skip-checks", &set_opts.skip_checks,
+ N_("skip some sanity checks on the given paths that might give false positives"),
+ PARSE_OPT_NONEG),
OPT_BOOL_F(0, "stdin", &set_opts.use_stdin,
N_("read patterns from standard in"),
PARSE_OPT_NONEG),
@@ -756,13 +823,15 @@ static int sparse_checkout_set(int argc, const char **argv, const char *prefix)
if (!core_sparse_checkout_cone && argc == 0) {
argv = default_patterns;
argc = default_patterns_nr;
+ } else {
+ sanitize_paths(argc, argv, prefix, set_opts.skip_checks);
}
return modify_pattern_list(argc, argv, set_opts.use_stdin, REPLACE);
}
static char const * const builtin_sparse_checkout_reapply_usage[] = {
- N_("git sparse-checkout reapply [--[no-]cone] [--[no-]sparse-index]"),
+ "git sparse-checkout reapply [--[no-]cone] [--[no-]sparse-index]",
NULL
};
@@ -784,15 +853,15 @@ static int sparse_checkout_reapply(int argc, const char **argv)
if (!core_apply_sparse_checkout)
die(_("must be in a sparse-checkout to reapply sparsity patterns"));
+ reapply_opts.cone_mode = -1;
+ reapply_opts.sparse_index = -1;
+
argc = parse_options(argc, argv, NULL,
builtin_sparse_checkout_reapply_options,
builtin_sparse_checkout_reapply_usage, 0);
repo_read_index(the_repository);
- reapply_opts.cone_mode = -1;
- reapply_opts.sparse_index = -1;
-
if (update_modes(&reapply_opts.cone_mode, &reapply_opts.sparse_index))
return 1;
@@ -800,7 +869,7 @@ static int sparse_checkout_reapply(int argc, const char **argv)
}
static char const * const builtin_sparse_checkout_disable_usage[] = {
- N_("git sparse-checkout disable"),
+ "git sparse-checkout disable",
NULL
};
diff --git a/builtin/stash.c b/builtin/stash.c
index 86cd0b4..0c7b6a9 100644
--- a/builtin/stash.c
+++ b/builtin/stash.c
@@ -16,7 +16,7 @@
#include "log-tree.h"
#include "diffcore.h"
#include "exec-cmd.h"
-#include "entry.h"
+#include "reflog.h"
#define INCLUDE_ALL_FILES 2
@@ -310,7 +310,7 @@ static int reset_head(void)
* API for resetting.
*/
cp.git_cmd = 1;
- strvec_push(&cp.args, "reset");
+ strvec_pushl(&cp.args, "reset", "--quiet", "--refresh", NULL);
return run_command(&cp);
}
@@ -634,20 +634,9 @@ static int reflog_is_empty(const char *refname)
static int do_drop_stash(struct stash_info *info, int quiet)
{
- int ret;
- struct child_process cp_reflog = CHILD_PROCESS_INIT;
-
- /*
- * reflog does not provide a simple function for deleting refs. One will
- * need to be added to avoid implementing too much reflog code here
- */
-
- cp_reflog.git_cmd = 1;
- strvec_pushl(&cp_reflog.args, "reflog", "delete", "--updateref",
- "--rewrite", NULL);
- strvec_push(&cp_reflog.args, info->revision.buf);
- ret = run_command(&cp_reflog);
- if (!ret) {
+ if (!reflog_delete(info->revision.buf,
+ EXPIRE_REFLOGS_REWRITE | EXPIRE_REFLOGS_UPDATE_REF,
+ 0)) {
if (!quiet)
printf_ln(_("Dropped %s (%s)"), info->revision.buf,
oid_to_hex(&info->w_commit));
@@ -788,7 +777,6 @@ static int list_stash(int argc, const char **argv, const char *prefix)
static int show_stat = 1;
static int show_patch;
static int show_include_untracked;
-static int use_legacy_stash;
static int git_stash_config(const char *var, const char *value, void *cb)
{
@@ -804,10 +792,6 @@ static int git_stash_config(const char *var, const char *value, void *cb)
show_include_untracked = git_config_bool(var, value);
return 0;
}
- if (!strcmp(var, "stash.usebuiltin")) {
- use_legacy_stash = !git_config_bool(var, value);
- return 0;
- }
return git_diff_basic_config(var, value, cb);
}
@@ -1332,7 +1316,7 @@ static int do_create_stash(const struct pathspec *ps, struct strbuf *stash_msg_b
branch_ref = resolve_ref_unsafe("HEAD", 0, NULL, &flags);
if (flags & REF_ISSYMREF)
- branch_name = strrchr(branch_ref, '/') + 1;
+ skip_prefix(branch_ref, "refs/heads/", &branch_name);
head_short_sha1 = find_unique_abbrev(&head_commit->object.oid,
DEFAULT_ABBREV);
strbuf_addf(&msg, "%s: %s ", branch_name, head_short_sha1);
@@ -1638,7 +1622,8 @@ static int do_push_stash(const struct pathspec *ps, const char *stash_msg, int q
struct child_process cp = CHILD_PROCESS_INIT;
cp.git_cmd = 1;
- strvec_pushl(&cp.args, "reset", "-q", "--", NULL);
+ strvec_pushl(&cp.args, "reset", "-q", "--refresh", "--",
+ NULL);
add_pathspecs(&cp.args, ps);
if (run_command(&cp)) {
ret = -1;
@@ -1782,11 +1767,6 @@ int cmd_stash(int argc, const char **argv, const char *prefix)
git_config(git_stash_config, NULL);
- if (use_legacy_stash ||
- !git_env_bool("GIT_TEST_STASH_USE_BUILTIN", -1))
- warning(_("the stash.useBuiltin support has been removed!\n"
- "See its entry in 'git help config' for details."));
-
argc = parse_options(argc, argv, prefix, options, git_stash_usage,
PARSE_OPT_KEEP_UNKNOWN | PARSE_OPT_KEEP_DASHDASH);
@@ -1819,8 +1799,8 @@ int cmd_stash(int argc, const char **argv, const char *prefix)
else if (!strcmp(argv[0], "save"))
return !!save_stash(argc, argv, prefix);
else if (*argv[0] != '-')
- usage_msg_opt(xstrfmt(_("unknown subcommand: %s"), argv[0]),
- git_stash_usage, options);
+ usage_msg_optf(_("unknown subcommand: %s"),
+ git_stash_usage, options, argv[0]);
/* Assume 'stash push' */
strvec_push(&args, "push");
diff --git a/builtin/stripspace.c b/builtin/stripspace.c
index be33eb8..1e34cf2 100644
--- a/builtin/stripspace.c
+++ b/builtin/stripspace.c
@@ -15,8 +15,8 @@ static void comment_lines(struct strbuf *buf)
}
static const char * const stripspace_usage[] = {
- N_("git stripspace [-s | --strip-comments]"),
- N_("git stripspace [-c | --comment-lines]"),
+ "git stripspace [-s | --strip-comments]",
+ "git stripspace [-c | --comment-lines]",
NULL
};
diff --git a/builtin/submodule--helper.c b/builtin/submodule--helper.c
index c5d3fc3..0b8b226 100644
--- a/builtin/submodule--helper.c
+++ b/builtin/submodule--helper.c
@@ -20,6 +20,8 @@
#include "diff.h"
#include "object-store.h"
#include "advice.h"
+#include "branch.h"
+#include "list-objects-filter-options.h"
#define OPT_QUIET (1 << 0)
#define OPT_CACHED (1 << 1)
@@ -29,11 +31,13 @@
typedef void (*each_submodule_fn)(const struct cache_entry *list_item,
void *cb_data);
-static char *get_default_remote(void)
+static char *repo_get_default_remote(struct repository *repo)
{
char *dest = NULL, *ret;
struct strbuf sb = STRBUF_INIT;
- const char *refname = resolve_ref_unsafe("HEAD", 0, NULL, NULL);
+ struct ref_store *store = get_main_ref_store(repo);
+ const char *refname = refs_resolve_ref_unsafe(store, "HEAD", 0, NULL,
+ NULL);
if (!refname)
die(_("No such ref: %s"), "HEAD");
@@ -46,7 +50,7 @@ static char *get_default_remote(void)
die(_("Expecting a full ref name, got %s"), refname);
strbuf_addf(&sb, "branch.%s.remote", refname);
- if (git_config_get_string(sb.buf, &dest))
+ if (repo_config_get_string(repo, sb.buf, &dest))
ret = xstrdup("origin");
else
ret = dest;
@@ -55,19 +59,17 @@ static char *get_default_remote(void)
return ret;
}
-static int print_default_remote(int argc, const char **argv, const char *prefix)
+static char *get_default_remote_submodule(const char *module_path)
{
- char *remote;
-
- if (argc != 1)
- die(_("submodule--helper print-default-remote takes no arguments"));
+ struct repository subrepo;
- remote = get_default_remote();
- if (remote)
- printf("%s\n", remote);
+ repo_submodule_init(&subrepo, the_repository, module_path, null_oid());
+ return repo_get_default_remote(&subrepo);
+}
- free(remote);
- return 0;
+static char *get_default_remote(void)
+{
+ return repo_get_default_remote(the_repository);
}
static int starts_with_dot_slash(const char *str)
@@ -245,11 +247,10 @@ static int resolve_relative_url_test(int argc, const char **argv, const char *pr
return 0;
}
-/* the result should be freed by the caller. */
-static char *get_submodule_displaypath(const char *path, const char *prefix)
+static char *do_get_submodule_displaypath(const char *path,
+ const char *prefix,
+ const char *super_prefix)
{
- const char *super_prefix = get_super_prefix();
-
if (prefix && super_prefix) {
BUG("cannot have prefix '%s' and superprefix '%s'",
prefix, super_prefix);
@@ -265,6 +266,13 @@ static char *get_submodule_displaypath(const char *path, const char *prefix)
}
}
+/* the result should be freed by the caller. */
+static char *get_submodule_displaypath(const char *path, const char *prefix)
+{
+ const char *super_prefix = get_super_prefix();
+ return do_get_submodule_displaypath(path, prefix, super_prefix);
+}
+
static char *compute_rev_name(const char *sub_path, const char* object_id)
{
struct strbuf sb = STRBUF_INIT;
@@ -586,18 +594,22 @@ static int module_foreach(int argc, const char **argv, const char *prefix)
struct init_cb {
const char *prefix;
+ const char *superprefix;
unsigned int flags;
};
#define INIT_CB_INIT { 0 }
static void init_submodule(const char *path, const char *prefix,
- unsigned int flags)
+ const char *superprefix, unsigned int flags)
{
const struct submodule *sub;
struct strbuf sb = STRBUF_INIT;
char *upd = NULL, *url = NULL, *displaypath;
- displaypath = get_submodule_displaypath(path, prefix);
+ /* try superprefix from the environment, if it is not passed explicitly */
+ if (!superprefix)
+ superprefix = get_super_prefix();
+ displaypath = do_get_submodule_displaypath(path, prefix, superprefix);
sub = submodule_from_path(the_repository, null_oid(), path);
@@ -671,7 +683,7 @@ static void init_submodule(const char *path, const char *prefix,
static void init_submodule_cb(const struct cache_entry *list_item, void *cb_data)
{
struct init_cb *info = cb_data;
- init_submodule(list_item->name, info->prefix, info->flags);
+ init_submodule(list_item->name, info->prefix, info->superprefix, info->flags);
}
static int module_init(int argc, const char **argv, const char *prefix)
@@ -1341,9 +1353,8 @@ static void sync_submodule(const char *path, const char *prefix,
{
const struct submodule *sub;
char *remote_key = NULL;
- char *sub_origin_url, *super_config_url, *displaypath;
+ char *sub_origin_url, *super_config_url, *displaypath, *default_remote;
struct strbuf sb = STRBUF_INIT;
- struct child_process cp = CHILD_PROCESS_INIT;
char *sub_config_path = NULL;
if (!is_submodule_active(the_repository, path))
@@ -1382,21 +1393,15 @@ static void sync_submodule(const char *path, const char *prefix,
if (!is_submodule_populated_gently(path, NULL))
goto cleanup;
- prepare_submodule_repo_env(&cp.env_array);
- cp.git_cmd = 1;
- cp.dir = path;
- strvec_pushl(&cp.args, "submodule--helper",
- "print-default-remote", NULL);
-
strbuf_reset(&sb);
- if (capture_command(&cp, &sb, 0))
+ default_remote = get_default_remote_submodule(path);
+ if (!default_remote)
die(_("failed to get the default remote for submodule '%s'"),
path);
- strbuf_strip_suffix(&sb, "\n");
- remote_key = xstrfmt("remote.%s.url", sb.buf);
+ remote_key = xstrfmt("remote.%s.url", default_remote);
+ free(default_remote);
- strbuf_reset(&sb);
submodule_to_gitdir(&sb, path);
strbuf_addstr(&sb, "/config");
@@ -1630,6 +1635,7 @@ struct module_clone_data {
const char *name;
const char *url;
const char *depth;
+ struct list_objects_filter_options *filter_options;
struct string_list reference;
unsigned int quiet: 1;
unsigned int progress: 1;
@@ -1637,7 +1643,10 @@ struct module_clone_data {
unsigned int require_init: 1;
int single_branch;
};
-#define MODULE_CLONE_DATA_INIT { .reference = STRING_LIST_INIT_NODUP, .single_branch = -1 }
+#define MODULE_CLONE_DATA_INIT { \
+ .reference = STRING_LIST_INIT_NODUP, \
+ .single_branch = -1, \
+}
struct submodule_alternate_setup {
const char *submodule_name;
@@ -1796,6 +1805,10 @@ static int clone_submodule(struct module_clone_data *clone_data)
strvec_push(&cp.args, "--dissociate");
if (sm_gitdir && *sm_gitdir)
strvec_pushl(&cp.args, "--separate-git-dir", sm_gitdir, NULL);
+ if (clone_data->filter_options && clone_data->filter_options->choice)
+ strvec_pushf(&cp.args, "--filter=%s",
+ expand_list_objects_filter_spec(
+ clone_data->filter_options));
if (clone_data->single_branch >= 0)
strvec_push(&cp.args, clone_data->single_branch ?
"--single-branch" :
@@ -1852,6 +1865,7 @@ static int module_clone(int argc, const char **argv, const char *prefix)
{
int dissociate = 0, quiet = 0, progress = 0, require_init = 0;
struct module_clone_data clone_data = MODULE_CLONE_DATA_INIT;
+ struct list_objects_filter_options filter_options;
struct option module_clone_options[] = {
OPT_STRING(0, "prefix", &clone_data.prefix,
@@ -1874,24 +1888,26 @@ static int module_clone(int argc, const char **argv, const char *prefix)
OPT_STRING(0, "depth", &clone_data.depth,
N_("string"),
N_("depth for shallow clones")),
- OPT__QUIET(&quiet, "Suppress output for cloning a submodule"),
+ OPT__QUIET(&quiet, "suppress output for cloning a submodule"),
OPT_BOOL(0, "progress", &progress,
N_("force cloning progress")),
OPT_BOOL(0, "require-init", &require_init,
N_("disallow cloning into non-empty directory")),
OPT_BOOL(0, "single-branch", &clone_data.single_branch,
N_("clone only one branch, HEAD or --branch")),
+ OPT_PARSE_LIST_OBJECTS_FILTER(&filter_options),
OPT_END()
};
const char *const git_submodule_helper_usage[] = {
N_("git submodule--helper clone [--prefix=<path>] [--quiet] "
"[--reference <repository>] [--name <name>] [--depth <depth>] "
- "[--single-branch] "
+ "[--single-branch] [--filter <filter-spec>] "
"--url <url> --path <path>"),
NULL
};
+ memset(&filter_options, 0, sizeof(filter_options));
argc = parse_options(argc, argv, prefix, module_clone_options,
git_submodule_helper_usage, 0);
@@ -1899,12 +1915,14 @@ static int module_clone(int argc, const char **argv, const char *prefix)
clone_data.quiet = !!quiet;
clone_data.progress = !!progress;
clone_data.require_init = !!require_init;
+ clone_data.filter_options = &filter_options;
if (argc || !clone_data.url || !clone_data.path || !*(clone_data.path))
usage_with_options(git_submodule_helper_usage,
module_clone_options);
clone_submodule(&clone_data);
+ list_objects_filter_release(&filter_options);
return 0;
}
@@ -1945,29 +1963,6 @@ static void determine_submodule_update_strategy(struct repository *r,
free(key);
}
-static int module_update_module_mode(int argc, const char **argv, const char *prefix)
-{
- const char *path, *update = NULL;
- int just_cloned;
- struct submodule_update_strategy update_strategy = { .type = SM_UPDATE_CHECKOUT };
-
- if (argc < 3 || argc > 4)
- die("submodule--helper update-module-clone expects <just-cloned> <path> [<update>]");
-
- just_cloned = git_config_int("just_cloned", argv[1]);
- path = argv[2];
-
- if (argc == 4)
- update = argv[3];
-
- determine_submodule_update_strategy(the_repository,
- just_cloned, path, update,
- &update_strategy);
- fputs(submodule_strategy_to_string(&update_strategy), stdout);
-
- return 0;
-}
-
struct update_clone_data {
const struct submodule *sub;
struct object_id oid;
@@ -1975,27 +1970,13 @@ struct update_clone_data {
};
struct submodule_update_clone {
- /* index into 'list', the list of submodules to look into for cloning */
+ /* index into 'update_data.list', the list of submodules to look into for cloning */
int current;
- struct module_list list;
- unsigned warn_if_uninitialized : 1;
-
- /* update parameter passed via commandline */
- struct submodule_update_strategy update;
/* configuration parameters which are passed on to the children */
- int progress;
- int quiet;
- int recommend_shallow;
- struct string_list references;
- int dissociate;
- unsigned require_init;
- const char *depth;
- const char *recursive_prefix;
- const char *prefix;
- int single_branch;
+ struct update_data *update_data;
- /* to be consumed by git-submodule.sh */
+ /* to be consumed by update_submodule() */
struct update_clone_data *update_clone;
int update_clone_nr; int update_clone_alloc;
@@ -2005,32 +1986,48 @@ struct submodule_update_clone {
/* failed clones to be retried again */
const struct cache_entry **failed_clones;
int failed_clones_nr, failed_clones_alloc;
-
- int max_jobs;
};
-#define SUBMODULE_UPDATE_CLONE_INIT { \
- .list = MODULE_LIST_INIT, \
- .update = SUBMODULE_UPDATE_STRATEGY_INIT, \
- .recommend_shallow = -1, \
- .references = STRING_LIST_INIT_DUP, \
- .single_branch = -1, \
- .max_jobs = 1, \
-}
+#define SUBMODULE_UPDATE_CLONE_INIT { 0 }
struct update_data {
+ const char *prefix;
const char *recursive_prefix;
- const char *sm_path;
const char *displaypath;
- struct object_id oid;
+ const char *update_default;
struct object_id suboid;
+ struct string_list references;
struct submodule_update_strategy update_strategy;
+ struct list_objects_filter_options *filter_options;
+ struct module_list list;
int depth;
- unsigned int force: 1;
- unsigned int quiet: 1;
- unsigned int nofetch: 1;
- unsigned int just_cloned: 1;
+ int max_jobs;
+ int single_branch;
+ int recommend_shallow;
+ unsigned int require_init;
+ unsigned int force;
+ unsigned int quiet;
+ unsigned int nofetch;
+ unsigned int remote;
+ unsigned int progress;
+ unsigned int dissociate;
+ unsigned int init;
+ unsigned int warn_if_uninitialized;
+ unsigned int recursive;
+
+ /* copied over from update_clone_data */
+ struct object_id oid;
+ unsigned int just_cloned;
+ const char *sm_path;
};
-#define UPDATE_DATA_INIT { .update_strategy = SUBMODULE_UPDATE_STRATEGY_INIT }
+#define UPDATE_DATA_INIT { \
+ .update_strategy = SUBMODULE_UPDATE_STRATEGY_INIT, \
+ .list = MODULE_LIST_INIT, \
+ .recommend_shallow = -1, \
+ .references = STRING_LIST_INIT_DUP, \
+ .single_branch = -1, \
+ .max_jobs = 1, \
+ .warn_if_uninitialized = 1, \
+}
static void next_submodule_warn_missing(struct submodule_update_clone *suc,
struct strbuf *out, const char *displaypath)
@@ -2039,7 +2036,7 @@ static void next_submodule_warn_missing(struct submodule_update_clone *suc,
* Only mention uninitialized submodules when their
* paths have been specified.
*/
- if (suc->warn_if_uninitialized) {
+ if (suc->update_data->warn_if_uninitialized) {
strbuf_addf(out,
_("Submodule path '%s' not initialized"),
displaypath);
@@ -2071,8 +2068,8 @@ static int prepare_to_clone_next_submodule(const struct cache_entry *ce,
int need_free_url = 0;
if (ce_stage(ce)) {
- if (suc->recursive_prefix)
- strbuf_addf(&sb, "%s/%s", suc->recursive_prefix, ce->name);
+ if (suc->update_data->recursive_prefix)
+ strbuf_addf(&sb, "%s/%s", suc->update_data->recursive_prefix, ce->name);
else
strbuf_addstr(&sb, ce->name);
strbuf_addf(out, _("Skipping unmerged submodule %s"), sb.buf);
@@ -2082,8 +2079,8 @@ static int prepare_to_clone_next_submodule(const struct cache_entry *ce,
sub = submodule_from_path(the_repository, null_oid(), ce->name);
- if (suc->recursive_prefix)
- displaypath = relative_path(suc->recursive_prefix,
+ if (suc->update_data->recursive_prefix)
+ displaypath = relative_path(suc->update_data->recursive_prefix,
ce->name, &displaypath_sb);
else
displaypath = ce->name;
@@ -2101,8 +2098,8 @@ static int prepare_to_clone_next_submodule(const struct cache_entry *ce,
}
free(key);
- if (suc->update.type == SM_UPDATE_NONE
- || (suc->update.type == SM_UPDATE_UNSPECIFIED
+ if (suc->update_data->update_strategy.type == SM_UPDATE_NONE
+ || (suc->update_data->update_strategy.type == SM_UPDATE_UNSPECIFIED
&& update_type == SM_UPDATE_NONE)) {
strbuf_addf(out, _("Skipping submodule '%s'"), displaypath);
strbuf_addch(out, '\n');
@@ -2146,30 +2143,33 @@ static int prepare_to_clone_next_submodule(const struct cache_entry *ce,
child->err = -1;
strvec_push(&child->args, "submodule--helper");
strvec_push(&child->args, "clone");
- if (suc->progress)
+ if (suc->update_data->progress)
strvec_push(&child->args, "--progress");
- if (suc->quiet)
+ if (suc->update_data->quiet)
strvec_push(&child->args, "--quiet");
- if (suc->prefix)
- strvec_pushl(&child->args, "--prefix", suc->prefix, NULL);
- if (suc->recommend_shallow && sub->recommend_shallow == 1)
+ if (suc->update_data->prefix)
+ strvec_pushl(&child->args, "--prefix", suc->update_data->prefix, NULL);
+ if (suc->update_data->recommend_shallow && sub->recommend_shallow == 1)
strvec_push(&child->args, "--depth=1");
- if (suc->require_init)
+ else if (suc->update_data->depth)
+ strvec_pushf(&child->args, "--depth=%d", suc->update_data->depth);
+ if (suc->update_data->filter_options && suc->update_data->filter_options->choice)
+ strvec_pushf(&child->args, "--filter=%s",
+ expand_list_objects_filter_spec(suc->update_data->filter_options));
+ if (suc->update_data->require_init)
strvec_push(&child->args, "--require-init");
strvec_pushl(&child->args, "--path", sub->path, NULL);
strvec_pushl(&child->args, "--name", sub->name, NULL);
strvec_pushl(&child->args, "--url", url, NULL);
- if (suc->references.nr) {
+ if (suc->update_data->references.nr) {
struct string_list_item *item;
- for_each_string_list_item(item, &suc->references)
+ for_each_string_list_item(item, &suc->update_data->references)
strvec_pushl(&child->args, "--reference", item->string, NULL);
}
- if (suc->dissociate)
+ if (suc->update_data->dissociate)
strvec_push(&child->args, "--dissociate");
- if (suc->depth)
- strvec_push(&child->args, suc->depth);
- if (suc->single_branch >= 0)
- strvec_push(&child->args, suc->single_branch ?
+ if (suc->update_data->single_branch >= 0)
+ strvec_push(&child->args, suc->update_data->single_branch ?
"--single-branch" :
"--no-single-branch");
@@ -2191,8 +2191,8 @@ static int update_clone_get_next_task(struct child_process *child,
const struct cache_entry *ce;
int index;
- for (; suc->current < suc->list.nr; suc->current++) {
- ce = suc->list.entries[suc->current];
+ for (; suc->current < suc->update_data->list.nr; suc->current++) {
+ ce = suc->update_data->list.entries[suc->current];
if (prepare_to_clone_next_submodule(ce, child, suc, err)) {
int *p = xmalloc(sizeof(*p));
*p = suc->current;
@@ -2207,7 +2207,7 @@ static int update_clone_get_next_task(struct child_process *child,
* stragglers again, which we can imagine as an extension of the
* entry list.
*/
- index = suc->current - suc->list.nr;
+ index = suc->current - suc->update_data->list.nr;
if (index < suc->failed_clones_nr) {
int *p;
ce = suc->failed_clones[index];
@@ -2252,8 +2252,8 @@ static int update_clone_task_finished(int result,
if (!result)
return 0;
- if (idx < suc->list.nr) {
- ce = suc->list.entries[idx];
+ if (idx < suc->update_data->list.nr) {
+ ce = suc->update_data->list.entries[idx];
strbuf_addf(err, _("Failed to clone '%s'. Retry scheduled"),
ce->name);
strbuf_addch(err, '\n');
@@ -2263,7 +2263,7 @@ static int update_clone_task_finished(int result,
suc->failed_clones[suc->failed_clones_nr++] = ce;
return 0;
} else {
- idx -= suc->list.nr;
+ idx -= suc->update_data->list.nr;
ce = suc->failed_clones[idx];
strbuf_addf(err, _("Failed to clone '%s' a second time, aborting"),
ce->name);
@@ -2327,83 +2327,76 @@ static int fetch_in_submodule(const char *module_path, int depth, int quiet, str
static int run_update_command(struct update_data *ud, int subforce)
{
- struct strvec args = STRVEC_INIT;
- struct strvec child_env = STRVEC_INIT;
+ struct child_process cp = CHILD_PROCESS_INIT;
char *oid = oid_to_hex(&ud->oid);
int must_die_on_failure = 0;
- int git_cmd;
switch (ud->update_strategy.type) {
case SM_UPDATE_CHECKOUT:
- git_cmd = 1;
- strvec_pushl(&args, "checkout", "-q", NULL);
+ cp.git_cmd = 1;
+ strvec_pushl(&cp.args, "checkout", "-q", NULL);
if (subforce)
- strvec_push(&args, "-f");
+ strvec_push(&cp.args, "-f");
break;
case SM_UPDATE_REBASE:
- git_cmd = 1;
- strvec_push(&args, "rebase");
+ cp.git_cmd = 1;
+ strvec_push(&cp.args, "rebase");
if (ud->quiet)
- strvec_push(&args, "--quiet");
+ strvec_push(&cp.args, "--quiet");
must_die_on_failure = 1;
break;
case SM_UPDATE_MERGE:
- git_cmd = 1;
- strvec_push(&args, "merge");
+ cp.git_cmd = 1;
+ strvec_push(&cp.args, "merge");
if (ud->quiet)
- strvec_push(&args, "--quiet");
+ strvec_push(&cp.args, "--quiet");
must_die_on_failure = 1;
break;
case SM_UPDATE_COMMAND:
- git_cmd = 0;
- strvec_push(&args, ud->update_strategy.command);
+ cp.use_shell = 1;
+ strvec_push(&cp.args, ud->update_strategy.command);
must_die_on_failure = 1;
break;
default:
BUG("unexpected update strategy type: %s",
submodule_strategy_to_string(&ud->update_strategy));
}
- strvec_push(&args, oid);
+ strvec_push(&cp.args, oid);
- prepare_submodule_repo_env(&child_env);
- if (run_command_v_opt_cd_env(args.v, git_cmd ? RUN_GIT_CMD : RUN_USING_SHELL,
- ud->sm_path, child_env.v)) {
+ cp.dir = xstrdup(ud->sm_path);
+ prepare_submodule_repo_env(&cp.env_array);
+ if (run_command(&cp)) {
switch (ud->update_strategy.type) {
case SM_UPDATE_CHECKOUT:
- printf(_("Unable to checkout '%s' in submodule path '%s'"),
- oid, ud->displaypath);
+ die_message(_("Unable to checkout '%s' in submodule path '%s'"),
+ oid, ud->displaypath);
break;
case SM_UPDATE_REBASE:
- printf(_("Unable to rebase '%s' in submodule path '%s'"),
- oid, ud->displaypath);
+ die_message(_("Unable to rebase '%s' in submodule path '%s'"),
+ oid, ud->displaypath);
break;
case SM_UPDATE_MERGE:
- printf(_("Unable to merge '%s' in submodule path '%s'"),
- oid, ud->displaypath);
+ die_message(_("Unable to merge '%s' in submodule path '%s'"),
+ oid, ud->displaypath);
break;
case SM_UPDATE_COMMAND:
- printf(_("Execution of '%s %s' failed in submodule path '%s'"),
- ud->update_strategy.command, oid, ud->displaypath);
+ die_message(_("Execution of '%s %s' failed in submodule path '%s'"),
+ ud->update_strategy.command, oid, ud->displaypath);
break;
default:
BUG("unexpected update strategy type: %s",
submodule_strategy_to_string(&ud->update_strategy));
}
- /*
- * NEEDSWORK: We are currently printing to stdout with error
- * return so that the shell caller handles the error output
- * properly. Once we start handling the error messages within
- * C, we should use die() instead.
- */
if (must_die_on_failure)
- return 2;
- /*
- * This signifies to the caller in shell that the command
- * failed without dying
- */
+ exit(128);
+
+ /* the command failed, but update must continue */
return 1;
}
+ if (ud->quiet)
+ return 0;
+
switch (ud->update_strategy.type) {
case SM_UPDATE_CHECKOUT:
printf(_("Submodule path '%s': checked out '%s'\n"),
@@ -2429,7 +2422,7 @@ static int run_update_command(struct update_data *ud, int subforce)
return 0;
}
-static int do_run_update_procedure(struct update_data *ud)
+static int run_update_procedure(struct update_data *ud)
{
int subforce = is_null_oid(&ud->suboid) || ud->force;
@@ -2459,21 +2452,211 @@ static int do_run_update_procedure(struct update_data *ud)
return run_update_command(ud, subforce);
}
-static void update_submodule(struct update_clone_data *ucd)
+static const char *remote_submodule_branch(const char *path)
{
- fprintf(stdout, "dummy %s %d\t%s\n",
- oid_to_hex(&ucd->oid),
- ucd->just_cloned,
- ucd->sub->path);
+ const struct submodule *sub;
+ const char *branch = NULL;
+ char *key;
+
+ sub = submodule_from_path(the_repository, null_oid(), path);
+ if (!sub)
+ return NULL;
+
+ key = xstrfmt("submodule.%s.branch", sub->name);
+ if (repo_config_get_string_tmp(the_repository, key, &branch))
+ branch = sub->branch;
+ free(key);
+
+ if (!branch)
+ return "HEAD";
+
+ if (!strcmp(branch, ".")) {
+ const char *refname = resolve_ref_unsafe("HEAD", 0, NULL, NULL);
+
+ if (!refname)
+ die(_("No such ref: %s"), "HEAD");
+
+ /* detached HEAD */
+ if (!strcmp(refname, "HEAD"))
+ die(_("Submodule (%s) branch configured to inherit "
+ "branch from superproject, but the superproject "
+ "is not on any branch"), sub->name);
+
+ if (!skip_prefix(refname, "refs/heads/", &refname))
+ die(_("Expecting a full ref name, got %s"), refname);
+ return refname;
+ }
+
+ return branch;
}
-static int update_submodules(struct submodule_update_clone *suc)
+static void ensure_core_worktree(const char *path)
{
- int i;
+ const char *cw;
+ struct repository subrepo;
+
+ if (repo_submodule_init(&subrepo, the_repository, path, null_oid()))
+ die(_("could not get a repository handle for submodule '%s'"), path);
+
+ if (!repo_config_get_string_tmp(&subrepo, "core.worktree", &cw)) {
+ char *cfg_file, *abs_path;
+ const char *rel_path;
+ struct strbuf sb = STRBUF_INIT;
+
+ cfg_file = repo_git_path(&subrepo, "config");
+
+ abs_path = absolute_pathdup(path);
+ rel_path = relative_path(abs_path, subrepo.gitdir, &sb);
+
+ git_config_set_in_file(cfg_file, "core.worktree", rel_path);
+
+ free(cfg_file);
+ free(abs_path);
+ strbuf_release(&sb);
+ }
+}
+
+static void update_data_to_args(struct update_data *update_data, struct strvec *args)
+{
+ strvec_pushl(args, "submodule--helper", "update", "--recursive", NULL);
+ strvec_pushf(args, "--jobs=%d", update_data->max_jobs);
+ if (update_data->recursive_prefix)
+ strvec_pushl(args, "--recursive-prefix",
+ update_data->recursive_prefix, NULL);
+ if (update_data->quiet)
+ strvec_push(args, "--quiet");
+ if (update_data->force)
+ strvec_push(args, "--force");
+ if (update_data->init)
+ strvec_push(args, "--init");
+ if (update_data->remote)
+ strvec_push(args, "--remote");
+ if (update_data->nofetch)
+ strvec_push(args, "--no-fetch");
+ if (update_data->dissociate)
+ strvec_push(args, "--dissociate");
+ if (update_data->progress)
+ strvec_push(args, "--progress");
+ if (update_data->require_init)
+ strvec_push(args, "--require-init");
+ if (update_data->depth)
+ strvec_pushf(args, "--depth=%d", update_data->depth);
+ if (update_data->update_default)
+ strvec_pushl(args, "--update", update_data->update_default, NULL);
+ if (update_data->references.nr) {
+ struct string_list_item *item;
+ for_each_string_list_item(item, &update_data->references)
+ strvec_pushl(args, "--reference", item->string, NULL);
+ }
+ if (update_data->filter_options && update_data->filter_options->choice)
+ strvec_pushf(args, "--filter=%s",
+ expand_list_objects_filter_spec(
+ update_data->filter_options));
+ if (update_data->recommend_shallow == 0)
+ strvec_push(args, "--no-recommend-shallow");
+ else if (update_data->recommend_shallow == 1)
+ strvec_push(args, "--recommend-shallow");
+ if (update_data->single_branch >= 0)
+ strvec_push(args, update_data->single_branch ?
+ "--single-branch" :
+ "--no-single-branch");
+}
+
+static int update_submodule(struct update_data *update_data)
+{
+ char *prefixed_path;
+
+ ensure_core_worktree(update_data->sm_path);
+
+ if (update_data->recursive_prefix)
+ prefixed_path = xstrfmt("%s%s", update_data->recursive_prefix,
+ update_data->sm_path);
+ else
+ prefixed_path = xstrdup(update_data->sm_path);
+
+ update_data->displaypath = get_submodule_displaypath(prefixed_path,
+ update_data->prefix);
+ free(prefixed_path);
+
+ determine_submodule_update_strategy(the_repository, update_data->just_cloned,
+ update_data->sm_path, update_data->update_default,
+ &update_data->update_strategy);
+
+ if (update_data->just_cloned)
+ oidcpy(&update_data->suboid, null_oid());
+ else if (resolve_gitlink_ref(update_data->sm_path, "HEAD", &update_data->suboid))
+ die(_("Unable to find current revision in submodule path '%s'"),
+ update_data->displaypath);
+
+ if (update_data->remote) {
+ char *remote_name = get_default_remote_submodule(update_data->sm_path);
+ const char *branch = remote_submodule_branch(update_data->sm_path);
+ char *remote_ref = xstrfmt("refs/remotes/%s/%s", remote_name, branch);
+
+ if (!update_data->nofetch) {
+ if (fetch_in_submodule(update_data->sm_path, update_data->depth,
+ 0, NULL))
+ die(_("Unable to fetch in submodule path '%s'"),
+ update_data->sm_path);
+ }
+
+ if (resolve_gitlink_ref(update_data->sm_path, remote_ref, &update_data->oid))
+ die(_("Unable to find %s revision in submodule path '%s'"),
+ remote_ref, update_data->sm_path);
+
+ free(remote_ref);
+ }
+
+ if (!oideq(&update_data->oid, &update_data->suboid) || update_data->force)
+ if (run_update_procedure(update_data))
+ return 1;
+
+ if (update_data->recursive) {
+ struct child_process cp = CHILD_PROCESS_INIT;
+ struct update_data next = *update_data;
+ int res;
+
+ if (update_data->recursive_prefix)
+ prefixed_path = xstrfmt("%s%s/", update_data->recursive_prefix,
+ update_data->sm_path);
+ else
+ prefixed_path = xstrfmt("%s/", update_data->sm_path);
+
+ next.recursive_prefix = get_submodule_displaypath(prefixed_path,
+ update_data->prefix);
+ next.prefix = NULL;
+ oidcpy(&next.oid, null_oid());
+ oidcpy(&next.suboid, null_oid());
+
+ cp.dir = update_data->sm_path;
+ cp.git_cmd = 1;
+ prepare_submodule_repo_env(&cp.env_array);
+ update_data_to_args(&next, &cp.args);
- run_processes_parallel_tr2(suc->max_jobs, update_clone_get_next_task,
+ /* die() if child process die()'d */
+ res = run_command(&cp);
+ if (!res)
+ return 0;
+ die_message(_("Failed to recurse into submodule path '%s'"),
+ update_data->displaypath);
+ if (res == 128)
+ exit(res);
+ else if (res)
+ return 1;
+ }
+
+ return 0;
+}
+
+static int update_submodules(struct update_data *update_data)
+{
+ int i, res = 0;
+ struct submodule_update_clone suc = SUBMODULE_UPDATE_CLONE_INIT;
+
+ suc.update_data = update_data;
+ run_processes_parallel_tr2(suc.update_data->max_jobs, update_clone_get_next_task,
update_clone_start_failure,
- update_clone_task_finished, suc, "submodule",
+ update_clone_task_finished, &suc, "submodule",
"parallel/update");
/*
@@ -2484,209 +2667,139 @@ static int update_submodules(struct submodule_update_clone *suc)
* checkout involve more straightforward sequential I/O.
* - the listener can avoid doing any work if fetching failed.
*/
- if (suc->quickstop)
- return 1;
+ if (suc.quickstop) {
+ res = 1;
+ goto cleanup;
+ }
- for (i = 0; i < suc->update_clone_nr; i++)
- update_submodule(&suc->update_clone[i]);
+ for (i = 0; i < suc.update_clone_nr; i++) {
+ struct update_clone_data ucd = suc.update_clone[i];
- return 0;
+ oidcpy(&update_data->oid, &ucd.oid);
+ update_data->just_cloned = ucd.just_cloned;
+ update_data->sm_path = ucd.sub->path;
+
+ if (update_submodule(update_data))
+ res = 1;
+ }
+
+cleanup:
+ string_list_clear(&update_data->references, 0);
+ return res;
}
-static int update_clone(int argc, const char **argv, const char *prefix)
+static int module_update(int argc, const char **argv, const char *prefix)
{
- const char *update = NULL;
struct pathspec pathspec;
- struct submodule_update_clone suc = SUBMODULE_UPDATE_CLONE_INIT;
+ struct update_data opt = UPDATE_DATA_INIT;
+ struct list_objects_filter_options filter_options;
+ int ret;
- struct option module_update_clone_options[] = {
- OPT_STRING(0, "prefix", &prefix,
+ struct option module_update_options[] = {
+ OPT__FORCE(&opt.force, N_("force checkout updates"), 0),
+ OPT_BOOL(0, "init", &opt.init,
+ N_("initialize uninitialized submodules before update")),
+ OPT_BOOL(0, "remote", &opt.remote,
+ N_("use SHA-1 of submodule's remote tracking branch")),
+ OPT_BOOL(0, "recursive", &opt.recursive,
+ N_("traverse submodules recursively")),
+ OPT_BOOL('N', "no-fetch", &opt.nofetch,
+ N_("don't fetch new objects from the remote site")),
+ OPT_STRING(0, "prefix", &opt.prefix,
N_("path"),
N_("path into the working tree")),
- OPT_STRING(0, "recursive-prefix", &suc.recursive_prefix,
+ OPT_STRING(0, "recursive-prefix", &opt.recursive_prefix,
N_("path"),
N_("path into the working tree, across nested "
"submodule boundaries")),
- OPT_STRING(0, "update", &update,
+ OPT_STRING(0, "update", &opt.update_default,
N_("string"),
N_("rebase, merge, checkout or none")),
- OPT_STRING_LIST(0, "reference", &suc.references, N_("repo"),
+ OPT_STRING_LIST(0, "reference", &opt.references, N_("repo"),
N_("reference repository")),
- OPT_BOOL(0, "dissociate", &suc.dissociate,
+ OPT_BOOL(0, "dissociate", &opt.dissociate,
N_("use --reference only while cloning")),
- OPT_STRING(0, "depth", &suc.depth, "<depth>",
+ OPT_INTEGER(0, "depth", &opt.depth,
N_("create a shallow clone truncated to the "
"specified number of revisions")),
- OPT_INTEGER('j', "jobs", &suc.max_jobs,
+ OPT_INTEGER('j', "jobs", &opt.max_jobs,
N_("parallel jobs")),
- OPT_BOOL(0, "recommend-shallow", &suc.recommend_shallow,
+ OPT_BOOL(0, "recommend-shallow", &opt.recommend_shallow,
N_("whether the initial clone should follow the shallow recommendation")),
- OPT__QUIET(&suc.quiet, N_("don't print cloning progress")),
- OPT_BOOL(0, "progress", &suc.progress,
+ OPT__QUIET(&opt.quiet, N_("don't print cloning progress")),
+ OPT_BOOL(0, "progress", &opt.progress,
N_("force cloning progress")),
- OPT_BOOL(0, "require-init", &suc.require_init,
+ OPT_BOOL(0, "require-init", &opt.require_init,
N_("disallow cloning into non-empty directory")),
- OPT_BOOL(0, "single-branch", &suc.single_branch,
+ OPT_BOOL(0, "single-branch", &opt.single_branch,
N_("clone only one branch, HEAD or --branch")),
+ OPT_PARSE_LIST_OBJECTS_FILTER(&filter_options),
OPT_END()
};
const char *const git_submodule_helper_usage[] = {
- N_("git submodule--helper update-clone [--prefix=<path>] [<path>...]"),
+ N_("git submodule [--quiet] update"
+ " [--init [--filter=<filter-spec>]] [--remote]"
+ " [-N|--no-fetch] [-f|--force]"
+ " [--checkout|--merge|--rebase]"
+ " [--[no-]recommend-shallow] [--reference <repository>]"
+ " [--recursive] [--[no-]single-branch] [--] [<path>...]"),
NULL
};
- suc.prefix = prefix;
- update_clone_config_from_gitmodules(&suc.max_jobs);
- git_config(git_update_clone_config, &suc.max_jobs);
+ update_clone_config_from_gitmodules(&opt.max_jobs);
+ git_config(git_update_clone_config, &opt.max_jobs);
- argc = parse_options(argc, argv, prefix, module_update_clone_options,
+ memset(&filter_options, 0, sizeof(filter_options));
+ argc = parse_options(argc, argv, prefix, module_update_options,
git_submodule_helper_usage, 0);
- if (update)
- if (parse_submodule_update_strategy(update, &suc.update) < 0)
+ if (filter_options.choice && !opt.init) {
+ usage_with_options(git_submodule_helper_usage,
+ module_update_options);
+ }
+
+ opt.filter_options = &filter_options;
+
+ if (opt.update_default)
+ if (parse_submodule_update_strategy(opt.update_default,
+ &opt.update_strategy) < 0)
die(_("bad value for update parameter"));
- if (module_list_compute(argc, argv, prefix, &pathspec, &suc.list) < 0)
+ if (module_list_compute(argc, argv, prefix, &pathspec, &opt.list) < 0) {
+ list_objects_filter_release(&filter_options);
return 1;
+ }
if (pathspec.nr)
- suc.warn_if_uninitialized = 1;
-
- return update_submodules(&suc);
-}
-
-static int run_update_procedure(int argc, const char **argv, const char *prefix)
-{
- int force = 0, quiet = 0, nofetch = 0, just_cloned = 0;
- char *prefixed_path, *update = NULL;
- struct update_data update_data = UPDATE_DATA_INIT;
-
- struct option options[] = {
- OPT__QUIET(&quiet, N_("suppress output for update by rebase or merge")),
- OPT__FORCE(&force, N_("force checkout updates"), 0),
- OPT_BOOL('N', "no-fetch", &nofetch,
- N_("don't fetch new objects from the remote site")),
- OPT_BOOL(0, "just-cloned", &just_cloned,
- N_("overrides update mode in case the repository is a fresh clone")),
- OPT_INTEGER(0, "depth", &update_data.depth, N_("depth for shallow fetch")),
- OPT_STRING(0, "prefix", &prefix,
- N_("path"),
- N_("path into the working tree")),
- OPT_STRING(0, "update", &update,
- N_("string"),
- N_("rebase, merge, checkout or none")),
- OPT_STRING(0, "recursive-prefix", &update_data.recursive_prefix, N_("path"),
- N_("path into the working tree, across nested "
- "submodule boundaries")),
- OPT_CALLBACK_F(0, "oid", &update_data.oid, N_("sha1"),
- N_("SHA1 expected by superproject"), PARSE_OPT_NONEG,
- parse_opt_object_id),
- OPT_CALLBACK_F(0, "suboid", &update_data.suboid, N_("subsha1"),
- N_("SHA1 of submodule's HEAD"), PARSE_OPT_NONEG,
- parse_opt_object_id),
- OPT_END()
- };
-
- const char *const usage[] = {
- N_("git submodule--helper run-update-procedure [<options>] <path>"),
- NULL
- };
-
- argc = parse_options(argc, argv, prefix, options, usage, 0);
-
- if (argc != 1)
- usage_with_options(usage, options);
-
- update_data.force = !!force;
- update_data.quiet = !!quiet;
- update_data.nofetch = !!nofetch;
- update_data.just_cloned = !!just_cloned;
- update_data.sm_path = argv[0];
-
- if (update_data.recursive_prefix)
- prefixed_path = xstrfmt("%s%s", update_data.recursive_prefix, update_data.sm_path);
- else
- prefixed_path = xstrdup(update_data.sm_path);
-
- update_data.displaypath = get_submodule_displaypath(prefixed_path, prefix);
-
- determine_submodule_update_strategy(the_repository, update_data.just_cloned,
- update_data.sm_path, update,
- &update_data.update_strategy);
-
- free(prefixed_path);
+ opt.warn_if_uninitialized = 1;
- if (!oideq(&update_data.oid, &update_data.suboid) || update_data.force)
- return do_run_update_procedure(&update_data);
+ if (opt.init) {
+ struct module_list list = MODULE_LIST_INIT;
+ struct init_cb info = INIT_CB_INIT;
- return 3;
-}
-
-static int resolve_relative_path(int argc, const char **argv, const char *prefix)
-{
- struct strbuf sb = STRBUF_INIT;
- if (argc != 3)
- die("submodule--helper relative-path takes exactly 2 arguments, got %d", argc);
-
- printf("%s", relative_path(argv[1], argv[2], &sb));
- strbuf_release(&sb);
- return 0;
-}
-
-static const char *remote_submodule_branch(const char *path)
-{
- const struct submodule *sub;
- const char *branch = NULL;
- char *key;
-
- sub = submodule_from_path(the_repository, null_oid(), path);
- if (!sub)
- return NULL;
-
- key = xstrfmt("submodule.%s.branch", sub->name);
- if (repo_config_get_string_tmp(the_repository, key, &branch))
- branch = sub->branch;
- free(key);
-
- if (!branch)
- return "HEAD";
-
- if (!strcmp(branch, ".")) {
- const char *refname = resolve_ref_unsafe("HEAD", 0, NULL, NULL);
+ if (module_list_compute(argc, argv, opt.prefix,
+ &pathspec, &list) < 0)
+ return 1;
- if (!refname)
- die(_("No such ref: %s"), "HEAD");
+ /*
+ * If there are no path args and submodule.active is set then,
+ * by default, only initialize 'active' modules.
+ */
+ if (!argc && git_config_get_value_multi("submodule.active"))
+ module_list_active(&list);
- /* detached HEAD */
- if (!strcmp(refname, "HEAD"))
- die(_("Submodule (%s) branch configured to inherit "
- "branch from superproject, but the superproject "
- "is not on any branch"), sub->name);
+ info.prefix = opt.prefix;
+ info.superprefix = opt.recursive_prefix;
+ if (opt.quiet)
+ info.flags |= OPT_QUIET;
- if (!skip_prefix(refname, "refs/heads/", &refname))
- die(_("Expecting a full ref name, got %s"), refname);
- return refname;
+ for_each_listed_submodule(&list, init_submodule_cb, &info);
}
- return branch;
-}
-
-static int resolve_remote_submodule_branch(int argc, const char **argv,
- const char *prefix)
-{
- const char *ret;
- struct strbuf sb = STRBUF_INIT;
- if (argc != 2)
- die("submodule--helper remote-branch takes exactly one arguments, got %d", argc);
-
- ret = remote_submodule_branch(argv[1]);
- if (!ret)
- die("submodule %s doesn't exist", argv[1]);
-
- printf("%s", ret);
- strbuf_release(&sb);
- return 0;
+ ret = update_submodules(&opt);
+ list_objects_filter_release(&filter_options);
+ return ret;
}
static int push_check(int argc, const char **argv, const char *prefix)
@@ -2766,40 +2879,6 @@ static int push_check(int argc, const char **argv, const char *prefix)
return 0;
}
-static int ensure_core_worktree(int argc, const char **argv, const char *prefix)
-{
- const char *path;
- const char *cw;
- struct repository subrepo;
-
- if (argc != 2)
- BUG("submodule--helper ensure-core-worktree <path>");
-
- path = argv[1];
-
- if (repo_submodule_init(&subrepo, the_repository, path, null_oid()))
- die(_("could not get a repository handle for submodule '%s'"), path);
-
- if (!repo_config_get_string_tmp(&subrepo, "core.worktree", &cw)) {
- char *cfg_file, *abs_path;
- const char *rel_path;
- struct strbuf sb = STRBUF_INIT;
-
- cfg_file = repo_git_path(&subrepo, "config");
-
- abs_path = absolute_pathdup(path);
- rel_path = relative_path(abs_path, subrepo.gitdir, &sb);
-
- git_config_set_in_file(cfg_file, "core.worktree", rel_path);
-
- free(cfg_file);
- free(abs_path);
- strbuf_release(&sb);
- }
-
- return 0;
-}
-
static int absorb_git_dirs(int argc, const char **argv, const char *prefix)
{
int i;
@@ -2883,7 +2962,7 @@ static int module_config(int argc, const char **argv, const char *prefix)
const char *const git_submodule_helper_usage[] = {
N_("git submodule--helper config <name> [<value>]"),
N_("git submodule--helper config --unset <name>"),
- N_("git submodule--helper config --check-writeable"),
+ "git submodule--helper config --check-writeable",
NULL
};
@@ -2984,6 +3063,44 @@ static int module_set_branch(int argc, const char **argv, const char *prefix)
return !!ret;
}
+static int module_create_branch(int argc, const char **argv, const char *prefix)
+{
+ enum branch_track track;
+ int quiet = 0, force = 0, reflog = 0, dry_run = 0;
+
+ struct option options[] = {
+ OPT__QUIET(&quiet, N_("print only error messages")),
+ OPT__FORCE(&force, N_("force creation"), 0),
+ OPT_BOOL(0, "create-reflog", &reflog,
+ N_("create the branch's reflog")),
+ OPT_CALLBACK_F('t', "track", &track, "(direct|inherit)",
+ N_("set branch tracking configuration"),
+ PARSE_OPT_OPTARG,
+ parse_opt_tracking_mode),
+ OPT__DRY_RUN(&dry_run,
+ N_("show whether the branch would be created")),
+ OPT_END()
+ };
+ const char *const usage[] = {
+ N_("git submodule--helper create-branch [-f|--force] [--create-reflog] [-q|--quiet] [-t|--track] [-n|--dry-run] <name> <start_oid> <start_name>"),
+ NULL
+ };
+
+ git_config(git_default_config, NULL);
+ track = git_branch_track;
+ argc = parse_options(argc, argv, prefix, options, usage, 0);
+
+ if (argc != 3)
+ usage_with_options(usage, options);
+
+ if (!quiet && !dry_run)
+ printf_ln(_("creating branch '%s'"), argv[0]);
+
+ create_branches_recursively(the_repository, argv[0], argv[1], argv[2],
+ force, reflog, quiet, track, dry_run);
+ return 0;
+}
+
struct add_data {
const char *prefix;
const char *branch;
@@ -3248,6 +3365,7 @@ static int module_add(int argc, const char **argv, const char *prefix)
{
int force = 0, quiet = 0, progress = 0, dissociate = 0;
struct add_data add_data = ADD_DATA_INIT;
+ char *to_free = NULL;
struct option options[] = {
OPT_STRING('b', "branch", &add_data.branch, N_("branch"),
@@ -3299,7 +3417,8 @@ static int module_add(int argc, const char **argv, const char *prefix)
"of the working tree"));
/* dereference source url relative to parent's url */
- add_data.realrepo = resolve_relative_url(add_data.repo, NULL, 1);
+ to_free = resolve_relative_url(add_data.repo, NULL, 1);
+ add_data.realrepo = to_free;
} else if (is_dir_sep(add_data.repo[0]) || strchr(add_data.repo, ':')) {
add_data.realrepo = add_data.repo;
} else {
@@ -3352,6 +3471,7 @@ static int module_add(int argc, const char **argv, const char *prefix)
}
configure_added_submodule(&add_data);
free(add_data.sm_path);
+ free(to_free);
return 0;
}
@@ -3369,20 +3489,14 @@ static struct cmd_struct commands[] = {
{"name", module_name, 0},
{"clone", module_clone, 0},
{"add", module_add, SUPPORT_SUPER_PREFIX},
- {"update-module-mode", module_update_module_mode, 0},
- {"update-clone", update_clone, 0},
- {"run-update-procedure", run_update_procedure, 0},
- {"ensure-core-worktree", ensure_core_worktree, 0},
- {"relative-path", resolve_relative_path, 0},
+ {"update", module_update, 0},
{"resolve-relative-url-test", resolve_relative_url_test, 0},
{"foreach", module_foreach, SUPPORT_SUPER_PREFIX},
{"init", module_init, SUPPORT_SUPER_PREFIX},
{"status", module_status, SUPPORT_SUPER_PREFIX},
- {"print-default-remote", print_default_remote, 0},
{"sync", module_sync, SUPPORT_SUPER_PREFIX},
{"deinit", module_deinit, 0},
{"summary", module_summary, SUPPORT_SUPER_PREFIX},
- {"remote-branch", resolve_remote_submodule_branch, 0},
{"push-check", push_check, 0},
{"absorb-git-dirs", absorb_git_dirs, SUPPORT_SUPER_PREFIX},
{"is-active", is_active, 0},
@@ -3390,6 +3504,7 @@ static struct cmd_struct commands[] = {
{"config", module_config, 0},
{"set-url", module_set_url, 0},
{"set-branch", module_set_branch, 0},
+ {"create-branch", module_create_branch, 0},
};
int cmd_submodule__helper(int argc, const char **argv, const char *prefix)
diff --git a/builtin/tag.c b/builtin/tag.c
index 134b3f1..e5a8f85 100644
--- a/builtin/tag.c
+++ b/builtin/tag.c
@@ -20,6 +20,7 @@
#include "oid-array.h"
#include "column.h"
#include "ref-filter.h"
+#include "date.h"
static const char * const git_tag_usage[] = {
N_("git tag [-a | -s | -u <key-id>] [-f] [-m <msg> | -F <file>]\n"
@@ -238,7 +239,7 @@ static int build_tag_object(struct strbuf *buf, int sign, struct object_id *resu
{
if (sign && do_sign(buf) < 0)
return error(_("unable to sign the tag"));
- if (write_object_file(buf->buf, buf->len, tag_type, result) < 0)
+ if (write_object_file(buf->buf, buf->len, OBJ_TAG, result) < 0)
return error(_("unable to write tag file"));
return 0;
}
diff --git a/builtin/unpack-objects.c b/builtin/unpack-objects.c
index 4a94662..dbeb068 100644
--- a/builtin/unpack-objects.c
+++ b/builtin/unpack-objects.c
@@ -177,7 +177,7 @@ static void write_cached_object(struct object *obj, struct obj_buffer *obj_buf)
struct object_id oid;
if (write_object_file(obj_buf->buffer, obj_buf->size,
- type_name(obj->type), &oid) < 0)
+ obj->type, &oid) < 0)
die("failed to write object %s", oid_to_hex(&obj->oid));
obj->flags |= FLAG_WRITTEN;
}
@@ -243,7 +243,7 @@ static void write_object(unsigned nr, enum object_type type,
void *buf, unsigned long size)
{
if (!strict) {
- if (write_object_file(buf, size, type_name(type),
+ if (write_object_file(buf, size, type,
&obj_list[nr].oid) < 0)
die("failed to write object");
added_object(nr, type, buf, size);
@@ -251,7 +251,7 @@ static void write_object(unsigned nr, enum object_type type,
obj_list[nr].obj = NULL;
} else if (type == OBJ_BLOB) {
struct blob *blob;
- if (write_object_file(buf, size, type_name(type),
+ if (write_object_file(buf, size, type,
&obj_list[nr].oid) < 0)
die("failed to write object");
added_object(nr, type, buf, size);
@@ -266,7 +266,7 @@ static void write_object(unsigned nr, enum object_type type,
} else {
struct object *obj;
int eaten;
- hash_object_file(the_hash_algo, buf, size, type_name(type),
+ hash_object_file(the_hash_algo, buf, size, type,
&obj_list[nr].oid);
added_object(nr, type, buf, size);
obj = parse_object_buffer(the_repository, &obj_list[nr].oid,
diff --git a/builtin/update-index.c b/builtin/update-index.c
index 187203e..876112a 100644
--- a/builtin/update-index.c
+++ b/builtin/update-index.c
@@ -606,7 +606,7 @@ static struct cache_entry *read_one_ent(const char *which,
error("%s: not in %s branch.", path, which);
return NULL;
}
- if (mode == S_IFDIR) {
+ if (!the_index.sparse_index && mode == S_IFDIR) {
if (which)
error("%s: not a blob in %s branch.", path, which);
return NULL;
@@ -743,8 +743,6 @@ static int do_reupdate(int ac, const char **av,
*/
has_head = 0;
redo:
- /* TODO: audit for interaction with sparse-index. */
- ensure_full_index(&the_index);
for (pos = 0; pos < active_nr; pos++) {
const struct cache_entry *ce = active_cache[pos];
struct cache_entry *old = NULL;
@@ -761,6 +759,16 @@ static int do_reupdate(int ac, const char **av,
discard_cache_entry(old);
continue; /* unchanged */
}
+
+ /* At this point, we know the contents of the sparse directory are
+ * modified with respect to HEAD, so we expand the index and restart
+ * to process each path individually
+ */
+ if (S_ISSPARSEDIR(ce->ce_mode)) {
+ ensure_full_index(&the_index);
+ goto redo;
+ }
+
/* Be careful. The working tree may not have the
* path anymore, in which case, under 'allow_remove',
* or worse yet 'allow_replace', active_nr may decrease.
@@ -787,6 +795,17 @@ static int refresh(struct refresh_params *o, unsigned int flag)
setup_work_tree();
read_cache();
*o->has_errors |= refresh_cache(o->flags | flag);
+ if (has_racy_timestamp(&the_index)) {
+ /*
+ * Even if nothing else has changed, updating the file
+ * increases the chance that racy timestamps become
+ * non-racy, helping future run-time performance.
+ * We do that even in case of "errors" returned by
+ * refresh_cache() as these are no actual errors.
+ * cmd_status() does the same.
+ */
+ active_cache_changed |= SOMETHING_CHANGED;
+ }
return 0;
}
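Note: the refresh() hunk above rewrites the index whenever any entry still carries a racy timestamp, so raciness tends to disappear on its own over repeated runs. has_racy_timestamp() is declared in cache.h later in this patch; a plausible implementation (a sketch, not necessarily the committed body) simply scans the cache entries:

    int has_racy_timestamp(struct index_state *istate)
    {
        int i;

        for (i = 0; i < istate->cache_nr; i++) {
            struct cache_entry *ce = istate->cache[i];

            if (is_racy_timestamp(istate, ce))
                return 1;
        }
        return 0;
    }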
@@ -1077,6 +1096,9 @@ int cmd_update_index(int argc, const char **argv, const char *prefix)
git_config(git_default_config, NULL);
+ prepare_repo_settings(r);
+ the_repository->settings.command_requires_full_index = 0;
+
/* we will diagnose later if it turns out that we need to update it */
newfd = hold_locked_index(&lock_file, 0);
if (newfd < 0)
@@ -1214,14 +1236,17 @@ int cmd_update_index(int argc, const char **argv, const char *prefix)
}
if (fsmonitor > 0) {
- if (git_config_get_fsmonitor() == 0)
+ enum fsmonitor_mode fsm_mode = fsm_settings__get_mode(r);
+ if (fsm_mode == FSMONITOR_MODE_DISABLED) {
warning(_("core.fsmonitor is unset; "
"set it if you really want to "
"enable fsmonitor"));
+ }
add_fsmonitor(&the_index);
report(_("fsmonitor enabled"));
} else if (!fsmonitor) {
- if (git_config_get_fsmonitor() == 1)
+ enum fsmonitor_mode fsm_mode = fsm_settings__get_mode(r);
+ if (fsm_mode > FSMONITOR_MODE_DISABLED)
warning(_("core.fsmonitor is set; "
"remove it if you really want to "
"disable fsmonitor"));
diff --git a/builtin/update-server-info.c b/builtin/update-server-info.c
index 4321a34..880fffe 100644
--- a/builtin/update-server-info.c
+++ b/builtin/update-server-info.c
@@ -4,7 +4,7 @@
#include "parse-options.h"
static const char * const update_server_info_usage[] = {
- N_("git update-server-info [--force]"),
+ "git update-server-info [--force]",
NULL
};
diff --git a/builtin/worktree.c b/builtin/worktree.c
index 2838254..8b32cd1 100644
--- a/builtin/worktree.c
+++ b/builtin/worktree.c
@@ -22,6 +22,7 @@ static const char * const worktree_usage[] = {
N_("git worktree move <worktree> <new-path>"),
N_("git worktree prune [<options>]"),
N_("git worktree remove [<options>] <worktree>"),
+ N_("git worktree repair [<path>...]"),
N_("git worktree unlock <path>"),
NULL
};
@@ -236,6 +237,74 @@ static void check_candidate_path(const char *path,
die(_("'%s' is a missing but already registered worktree;\nuse '%s -f' to override, or 'prune' or 'remove' to clear"), path, cmd);
}
+static void copy_sparse_checkout(const char *worktree_git_dir)
+{
+ char *from_file = git_pathdup("info/sparse-checkout");
+ char *to_file = xstrfmt("%s/info/sparse-checkout", worktree_git_dir);
+
+ if (file_exists(from_file)) {
+ if (safe_create_leading_directories(to_file) ||
+ copy_file(to_file, from_file, 0666))
+ error(_("failed to copy '%s' to '%s'; sparse-checkout may not work correctly"),
+ from_file, to_file);
+ }
+
+ free(from_file);
+ free(to_file);
+}
+
+static void copy_filtered_worktree_config(const char *worktree_git_dir)
+{
+ char *from_file = git_pathdup("config.worktree");
+ char *to_file = xstrfmt("%s/config.worktree", worktree_git_dir);
+
+ if (file_exists(from_file)) {
+ struct config_set cs = { { 0 } };
+ const char *core_worktree;
+ int bare;
+
+ if (safe_create_leading_directories(to_file) ||
+ copy_file(to_file, from_file, 0666)) {
+ error(_("failed to copy worktree config from '%s' to '%s'"),
+ from_file, to_file);
+ goto worktree_copy_cleanup;
+ }
+
+ git_configset_init(&cs);
+ git_configset_add_file(&cs, from_file);
+
+ if (!git_configset_get_bool(&cs, "core.bare", &bare) &&
+ bare &&
+ git_config_set_multivar_in_file_gently(
+ to_file, "core.bare", NULL, "true", 0))
+ error(_("failed to unset '%s' in '%s'"),
+ "core.bare", to_file);
+ if (!git_configset_get_value(&cs, "core.worktree", &core_worktree) &&
+ git_config_set_in_file_gently(to_file,
+ "core.worktree", NULL))
+ error(_("failed to unset '%s' in '%s'"),
+ "core.worktree", to_file);
+
+ git_configset_clear(&cs);
+ }
+
+worktree_copy_cleanup:
+ free(from_file);
+ free(to_file);
+}
+
+static int checkout_worktree(const struct add_opts *opts,
+ struct strvec *child_env)
+{
+ struct child_process cp = CHILD_PROCESS_INIT;
+ cp.git_cmd = 1;
+ strvec_pushl(&cp.args, "reset", "--hard", "--no-recurse-submodules", NULL);
+ if (opts->quiet)
+ strvec_push(&cp.args, "--quiet");
+ strvec_pushv(&cp.env_array, child_env->v);
+ return run_command(&cp);
+}
+
static int add_worktree(const char *path, const char *refname,
const struct add_opts *opts)
{
@@ -335,6 +404,21 @@ static int add_worktree(const char *path, const char *refname,
strbuf_addf(&sb, "%s/commondir", sb_repo.buf);
write_file(sb.buf, "../..");
+ /*
+ * If the current worktree has sparse-checkout enabled, then copy
+ * the sparse-checkout patterns from the current worktree.
+ */
+ if (core_apply_sparse_checkout)
+ copy_sparse_checkout(sb_repo.buf);
+
+ /*
+ * If we are using worktree config, then copy all current config
+ * values from the current worktree into the new one, that way the
+ * new worktree behaves the same as this one.
+ */
+ if (repository_format_worktree_config)
+ copy_filtered_worktree_config(sb_repo.buf);
+
strvec_pushf(&child_env, "%s=%s", GIT_DIR_ENVIRONMENT, sb_git.buf);
strvec_pushf(&child_env, "%s=%s", GIT_WORK_TREE_ENVIRONMENT, path);
cp.git_cmd = 1;
@@ -354,17 +438,9 @@ static int add_worktree(const char *path, const char *refname,
if (ret)
goto done;
- if (opts->checkout) {
- struct child_process cp = CHILD_PROCESS_INIT;
- cp.git_cmd = 1;
- strvec_pushl(&cp.args, "reset", "--hard", "--no-recurse-submodules", NULL);
- if (opts->quiet)
- strvec_push(&cp.args, "--quiet");
- strvec_pushv(&cp.env_array, child_env.v);
- ret = run_command(&cp);
- if (ret)
- goto done;
- }
+ if (opts->checkout &&
+ (ret = checkout_worktree(opts, &child_env)))
+ goto done;
is_junk = 0;
FREE_AND_NULL(junk_work_tree);
@@ -382,21 +458,17 @@ done:
* is_junk is cleared, but do return appropriate code when hook fails.
*/
if (!ret && opts->checkout) {
- const char *hook = find_hook("post-checkout");
- if (hook) {
- const char *env[] = { "GIT_DIR", "GIT_WORK_TREE", NULL };
- struct child_process cp = CHILD_PROCESS_INIT;
- cp.no_stdin = 1;
- cp.stdout_to_stderr = 1;
- cp.dir = path;
- strvec_pushv(&cp.env_array, env);
- cp.trace2_hook_name = "post-checkout";
- strvec_pushl(&cp.args, absolute_path(hook),
- oid_to_hex(null_oid()),
- oid_to_hex(&commit->object.oid),
- "1", NULL);
- ret = run_command(&cp);
- }
+ struct run_hooks_opt opt = RUN_HOOKS_OPT_INIT;
+
+ strvec_pushl(&opt.env, "GIT_DIR", "GIT_WORK_TREE", NULL);
+ strvec_pushl(&opt.args,
+ oid_to_hex(null_oid()),
+ oid_to_hex(&commit->object.oid),
+ "1",
+ NULL);
+ opt.dir = path;
+
+ ret = run_hooks_opt("post-checkout", &opt);
}
strvec_clear(&child_env);
@@ -579,35 +651,37 @@ static int add(int ac, const char **av, const char *prefix)
return add_worktree(path, branch, &opts);
}
-static void show_worktree_porcelain(struct worktree *wt)
+static void show_worktree_porcelain(struct worktree *wt, int line_terminator)
{
const char *reason;
- printf("worktree %s\n", wt->path);
+ printf("worktree %s%c", wt->path, line_terminator);
if (wt->is_bare)
- printf("bare\n");
+ printf("bare%c", line_terminator);
else {
- printf("HEAD %s\n", oid_to_hex(&wt->head_oid));
+ printf("HEAD %s%c", oid_to_hex(&wt->head_oid), line_terminator);
if (wt->is_detached)
- printf("detached\n");
+ printf("detached%c", line_terminator);
else if (wt->head_ref)
- printf("branch %s\n", wt->head_ref);
+ printf("branch %s%c", wt->head_ref, line_terminator);
}
reason = worktree_lock_reason(wt);
- if (reason && *reason) {
- struct strbuf sb = STRBUF_INIT;
- quote_c_style(reason, &sb, NULL, 0);
- printf("locked %s\n", sb.buf);
- strbuf_release(&sb);
- } else if (reason)
- printf("locked\n");
+ if (reason) {
+ fputs("locked", stdout);
+ if (*reason) {
+ fputc(' ', stdout);
+ write_name_quoted(reason, stdout, line_terminator);
+ } else {
+ fputc(line_terminator, stdout);
+ }
+ }
reason = worktree_prune_reason(wt, expire);
if (reason)
- printf("prunable %s\n", reason);
+ printf("prunable %s%c", reason, line_terminator);
- printf("\n");
+ fputc(line_terminator, stdout);
}
static void show_worktree(struct worktree *wt, int path_maxlen, int abbrev_len)
@@ -685,12 +759,15 @@ static void pathsort(struct worktree **wt)
static int list(int ac, const char **av, const char *prefix)
{
int porcelain = 0;
+ int line_terminator = '\n';
struct option options[] = {
OPT_BOOL(0, "porcelain", &porcelain, N_("machine-readable output")),
OPT__VERBOSE(&verbose, N_("show extended annotations and reasons, if available")),
OPT_EXPIRY_DATE(0, "expire", &expire,
N_("add 'prunable' annotation to worktrees older than <time>")),
+ OPT_SET_INT('z', NULL, &line_terminator,
+ N_("terminate records with a NUL character"), '\0'),
OPT_END()
};
@@ -700,6 +777,8 @@ static int list(int ac, const char **av, const char *prefix)
usage_with_options(worktree_usage, options);
else if (verbose && porcelain)
die(_("options '%s' and '%s' cannot be used together"), "--verbose", "--porcelain");
+ else if (!line_terminator && !porcelain)
+ die(_("the option '%s' requires '%s'"), "-z", "--porcelain");
else {
struct worktree **worktrees = get_worktrees();
int path_maxlen = 0, abbrev = DEFAULT_ABBREV, i;
@@ -712,7 +791,8 @@ static int list(int ac, const char **av, const char *prefix)
for (i = 0; worktrees[i]; i++) {
if (porcelain)
- show_worktree_porcelain(worktrees[i]);
+ show_worktree_porcelain(worktrees[i],
+ line_terminator);
else
show_worktree(worktrees[i], path_maxlen, abbrev);
}
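Note: the new `-z` mode makes `git worktree list --porcelain -z` emit NUL-terminated attribute records, and write_name_quoted() leaves the 'locked' reason unquoted when the terminator is NUL, so reasons may legitimately contain newlines. A minimal consumer sketch, assuming the attribute layout produced by show_worktree_porcelain() above; the printf labels are illustrative:

    struct strbuf field = STRBUF_INIT;

    while (strbuf_getline_nul(&field, stdin) != EOF) {
        const char *value;

        if (!field.len)
            continue;                       /* blank field ends one worktree record */
        if (skip_prefix(field.buf, "worktree ", &value))
            printf("path:   %s\n", value);
        else if (skip_prefix(field.buf, "locked ", &value))
            printf("locked: %s\n", value);  /* may span multiple lines */
    }
    strbuf_release(&field);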
diff --git a/bulk-checkin.c b/bulk-checkin.c
index 8785b2a..6d6c371 100644
--- a/bulk-checkin.c
+++ b/bulk-checkin.c
@@ -53,9 +53,10 @@ static void finish_bulk_checkin(struct bulk_checkin_state *state)
unlink(state->pack_tmp_name);
goto clear_exit;
} else if (state->nr_written == 1) {
- finalize_hashfile(state->f, hash, CSUM_HASH_IN_STREAM | CSUM_FSYNC | CSUM_CLOSE);
+ finalize_hashfile(state->f, hash, FSYNC_COMPONENT_PACK,
+ CSUM_HASH_IN_STREAM | CSUM_FSYNC | CSUM_CLOSE);
} else {
- int fd = finalize_hashfile(state->f, hash, 0);
+ int fd = finalize_hashfile(state->f, hash, FSYNC_COMPONENT_PACK, 0);
fixup_pack_header_footer(fd, hash, state->pack_tmp_name,
state->nr_written, hash,
state->offset);
@@ -220,8 +221,8 @@ static int deflate_to_pack(struct bulk_checkin_state *state,
if (seekback == (off_t) -1)
return error("cannot find the current offset");
- header_len = xsnprintf((char *)obuf, sizeof(obuf), "%s %" PRIuMAX,
- type_name(type), (uintmax_t)size) + 1;
+ header_len = format_object_header((char *)obuf, sizeof(obuf),
+ type, size);
the_hash_algo->init_fn(&ctx);
the_hash_algo->update_fn(&ctx, obuf, header_len);
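Note: deflate_to_pack() now builds the loose-object header through format_object_header(), the shared helper that replaces hand-rolled xsnprintf() calls in this series. The header is the usual "<type-name> <decimal-size>" prefix, NUL included, that gets hashed ahead of the object payload. A small sketch, assuming the helper behaves exactly like the xsnprintf() + 1 it replaces:

    char hdr[32];
    unsigned long payload_size = 1234;
    int hdrlen;

    hdrlen = format_object_header(hdr, sizeof(hdr), OBJ_BLOB, payload_size);
    /* hdr now holds "blob 1234" followed by a NUL; hdrlen includes that NUL */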
diff --git a/bundle.c b/bundle.c
index a0bb687..d50cfb5 100644
--- a/bundle.c
+++ b/bundle.c
@@ -11,7 +11,7 @@
#include "run-command.h"
#include "refs.h"
#include "strvec.h"
-
+#include "list-objects-filter-options.h"
static const char v2_bundle_signature[] = "# v2 git bundle\n";
static const char v3_bundle_signature[] = "# v3 git bundle\n";
@@ -33,6 +33,7 @@ void bundle_header_release(struct bundle_header *header)
{
string_list_clear(&header->prerequisites, 1);
string_list_clear(&header->references, 1);
+ list_objects_filter_release(&header->filter);
}
static int parse_capability(struct bundle_header *header, const char *capability)
@@ -45,6 +46,10 @@ static int parse_capability(struct bundle_header *header, const char *capability
header->hash_algo = &hash_algos[algo];
return 0;
}
+ if (skip_prefix(capability, "filter=", &arg)) {
+ parse_list_objects_filter(&header->filter, arg);
+ return 0;
+ }
return error(_("unknown capability '%s'"), capability);
}
@@ -220,6 +225,8 @@ int verify_bundle(struct repository *r,
req_nr = revs.pending.nr;
setup_revisions(2, argv, &revs, NULL);
+ list_objects_filter_copy(&revs.filter, &header->filter);
+
if (prepare_revision_walk(&revs))
die(_("revision walk setup failed"));
@@ -255,20 +262,27 @@ int verify_bundle(struct repository *r,
r = &header->references;
printf_ln(Q_("The bundle contains this ref:",
- "The bundle contains these %d refs:",
+ "The bundle contains these %"PRIuMAX" refs:",
r->nr),
- r->nr);
+ (uintmax_t)r->nr);
list_refs(r, 0, NULL);
+
r = &header->prerequisites;
if (!r->nr) {
printf_ln(_("The bundle records a complete history."));
} else {
printf_ln(Q_("The bundle requires this ref:",
- "The bundle requires these %d refs:",
+ "The bundle requires these %"PRIuMAX" refs:",
r->nr),
- r->nr);
+ (uintmax_t)r->nr);
list_refs(r, 0, NULL);
}
+
+ printf_ln("The bundle uses this hash algorithm: %s",
+ header->hash_algo->name);
+ if (header->filter.choice)
+ printf_ln("The bundle uses this filter: %s",
+ list_objects_filter_spec(&header->filter));
}
return ret;
}
@@ -319,6 +333,9 @@ static int write_pack_data(int bundle_fd, struct rev_info *revs, struct strvec *
"--stdout", "--thin", "--delta-base-offset",
NULL);
strvec_pushv(&pack_objects.args, pack_options->v);
+ if (revs->filter.choice)
+ strvec_pushf(&pack_objects.args, "--filter=%s",
+ list_objects_filter_spec(&revs->filter));
pack_objects.in = -1;
pack_objects.out = bundle_fd;
pack_objects.git_cmd = 1;
@@ -486,10 +503,37 @@ int create_bundle(struct repository *r, const char *path,
int bundle_to_stdout;
int ref_count = 0;
struct rev_info revs, revs_copy;
- int min_version = the_hash_algo == &hash_algos[GIT_HASH_SHA1] ? 2 : 3;
+ int min_version = 2;
struct bundle_prerequisites_info bpi;
int i;
+ /* init revs to list objects for pack-objects later */
+ save_commit_buffer = 0;
+ repo_init_revisions(r, &revs, NULL);
+
+ /*
+ * Pre-initialize the '--objects' flag so we can parse a
+ * --filter option successfully.
+ */
+ revs.tree_objects = revs.blob_objects = 1;
+
+ argc = setup_revisions(argc, argv, &revs, NULL);
+
+ /*
+ * Reasons to require version 3:
+ *
+ * 1. @object-format is required because our hash algorithm is not
+ * SHA1.
+ * 2. @filter is required because we parsed an object filter.
+ */
+ if (the_hash_algo != &hash_algos[GIT_HASH_SHA1] || revs.filter.choice)
+ min_version = 3;
+
+ if (argc > 1) {
+ error(_("unrecognized argument: %s"), argv[1]);
+ goto err;
+ }
+
bundle_to_stdout = !strcmp(path, "-");
if (bundle_to_stdout)
bundle_fd = 1;
@@ -512,17 +556,14 @@ int create_bundle(struct repository *r, const char *path,
write_or_die(bundle_fd, capability, strlen(capability));
write_or_die(bundle_fd, the_hash_algo->name, strlen(the_hash_algo->name));
write_or_die(bundle_fd, "\n", 1);
- }
- /* init revs to list objects for pack-objects later */
- save_commit_buffer = 0;
- repo_init_revisions(r, &revs, NULL);
-
- argc = setup_revisions(argc, argv, &revs, NULL);
-
- if (argc > 1) {
- error(_("unrecognized argument: %s"), argv[1]);
- goto err;
+ if (revs.filter.choice) {
+ const char *value = expand_list_objects_filter_spec(&revs.filter);
+ capability = "@filter=";
+ write_or_die(bundle_fd, capability, strlen(capability));
+ write_or_die(bundle_fd, value, strlen(value));
+ write_or_die(bundle_fd, "\n", 1);
+ }
}
/* save revs.pending in revs_copy for later use */
@@ -544,6 +585,12 @@ int create_bundle(struct repository *r, const char *path,
die("revision walk setup failed");
bpi.fd = bundle_fd;
bpi.pending = &revs_copy.pending;
+
+ /*
+ * Remove any object walking here. We only care about commits and
+ * tags here. The revs_copy has the right instances of these values.
+ */
+ revs.blob_objects = revs.tree_objects = 0;
traverse_commit_list(&revs, write_bundle_prerequisites, NULL, &bpi);
object_array_remove_duplicates(&revs_copy.pending);
@@ -574,6 +621,10 @@ int unbundle(struct repository *r, struct bundle_header *header,
struct child_process ip = CHILD_PROCESS_INIT;
strvec_pushl(&ip.args, "index-pack", "--fix-thin", "--stdin", NULL);
+ /* If there is a filter, then we need to create the promisor pack. */
+ if (header->filter.choice)
+ strvec_push(&ip.args, "--promisor=from-bundle");
+
if (extra_index_pack_args) {
strvec_pushv(&ip.args, extra_index_pack_args->v);
strvec_clear(extra_index_pack_args);
diff --git a/bundle.h b/bundle.h
index 06009fe..7fef210 100644
--- a/bundle.h
+++ b/bundle.h
@@ -4,12 +4,14 @@
#include "strvec.h"
#include "cache.h"
#include "string-list.h"
+#include "list-objects-filter-options.h"
struct bundle_header {
unsigned version;
struct string_list prerequisites;
struct string_list references;
const struct git_hash_algo *hash_algo;
+ struct list_objects_filter_options filter;
};
#define BUNDLE_HEADER_INIT \
diff --git a/cache-tree.c b/cache-tree.c
index 65ca993..6752f69 100644
--- a/cache-tree.c
+++ b/cache-tree.c
@@ -432,15 +432,15 @@ static int update_one(struct cache_tree *it,
if (repair) {
struct object_id oid;
hash_object_file(the_hash_algo, buffer.buf, buffer.len,
- tree_type, &oid);
+ OBJ_TREE, &oid);
if (has_object_file_with_flags(&oid, OBJECT_INFO_SKIP_FETCH_OBJECT))
oidcpy(&it->oid, &oid);
else
to_invalidate = 1;
} else if (dryrun) {
hash_object_file(the_hash_algo, buffer.buf, buffer.len,
- tree_type, &it->oid);
- } else if (write_object_file_flags(buffer.buf, buffer.len, tree_type,
+ OBJ_TREE, &it->oid);
+ } else if (write_object_file_flags(buffer.buf, buffer.len, OBJ_TREE,
&it->oid, flags & WRITE_TREE_SILENT
? HASH_SILENT : 0)) {
strbuf_release(&buffer);
@@ -948,7 +948,7 @@ static int verify_one(struct repository *r,
strbuf_addf(&tree_buf, "%o %.*s%c", mode, entlen, name, '\0');
strbuf_add(&tree_buf, oid->hash, r->hash_algo->rawsz);
}
- hash_object_file(r->hash_algo, tree_buf.buf, tree_buf.len, tree_type,
+ hash_object_file(r->hash_algo, tree_buf.buf, tree_buf.len, OBJ_TREE,
&new_oid);
if (!oideq(&new_oid, &it->oid))
BUG("cache-tree for path %.*s does not match. "
diff --git a/cache.h b/cache.h
index 281f00a..6226f6a 100644
--- a/cache.h
+++ b/cache.h
@@ -18,7 +18,6 @@
#include "repository.h"
#include "mem-pool.h"
-#include <zlib.h>
typedef struct git_zstream {
z_stream z;
unsigned long avail_in;
@@ -889,6 +888,7 @@ void *read_blob_data_from_index(struct index_state *, const char *, unsigned lon
#define CE_MATCH_IGNORE_FSMONITOR 0X20
int is_racy_timestamp(const struct index_state *istate,
const struct cache_entry *ce);
+int has_racy_timestamp(struct index_state *istate);
int ie_match_stat(struct index_state *, const struct cache_entry *, struct stat *, unsigned int);
int ie_modified(struct index_state *, const struct cache_entry *, struct stat *, unsigned int);
@@ -993,16 +993,65 @@ void reset_shared_repository(void);
extern int read_replace_refs;
extern char *git_replace_ref_base;
+/*
+ * These values are used to help identify parts of a repository to fsync.
+ * FSYNC_COMPONENT_NONE identifies data that will not be a persistent part of the
+ * repository and so shouldn't be fsynced.
+ */
+enum fsync_component {
+ FSYNC_COMPONENT_NONE,
+ FSYNC_COMPONENT_LOOSE_OBJECT = 1 << 0,
+ FSYNC_COMPONENT_PACK = 1 << 1,
+ FSYNC_COMPONENT_PACK_METADATA = 1 << 2,
+ FSYNC_COMPONENT_COMMIT_GRAPH = 1 << 3,
+ FSYNC_COMPONENT_INDEX = 1 << 4,
+ FSYNC_COMPONENT_REFERENCE = 1 << 5,
+};
+
+#define FSYNC_COMPONENTS_OBJECTS (FSYNC_COMPONENT_LOOSE_OBJECT | \
+ FSYNC_COMPONENT_PACK)
+
+#define FSYNC_COMPONENTS_DERIVED_METADATA (FSYNC_COMPONENT_PACK_METADATA | \
+ FSYNC_COMPONENT_COMMIT_GRAPH)
+
+#define FSYNC_COMPONENTS_DEFAULT ((FSYNC_COMPONENTS_OBJECTS | \
+ FSYNC_COMPONENTS_DERIVED_METADATA) & \
+ ~FSYNC_COMPONENT_LOOSE_OBJECT)
+
+#define FSYNC_COMPONENTS_COMMITTED (FSYNC_COMPONENTS_OBJECTS | \
+ FSYNC_COMPONENT_REFERENCE)
+
+#define FSYNC_COMPONENTS_ADDED (FSYNC_COMPONENTS_COMMITTED | \
+ FSYNC_COMPONENT_INDEX)
+
+#define FSYNC_COMPONENTS_ALL (FSYNC_COMPONENT_LOOSE_OBJECT | \
+ FSYNC_COMPONENT_PACK | \
+ FSYNC_COMPONENT_PACK_METADATA | \
+ FSYNC_COMPONENT_COMMIT_GRAPH | \
+ FSYNC_COMPONENT_INDEX | \
+ FSYNC_COMPONENT_REFERENCE)
+
+/*
+ * A bitmask indicating which components of the repo should be fsynced.
+ */
+extern enum fsync_component fsync_components;
extern int fsync_object_files;
extern int use_fsync;
+
+enum fsync_method {
+ FSYNC_METHOD_FSYNC,
+ FSYNC_METHOD_WRITEOUT_ONLY
+};
+
+extern enum fsync_method fsync_method;
extern int core_preload_index;
extern int precomposed_unicode;
extern int protect_hfs;
extern int protect_ntfs;
-extern const char *core_fsmonitor;
extern int core_apply_sparse_checkout;
extern int core_sparse_checkout_cone;
+extern int sparse_expect_files_outside_of_patterns;
/*
* Returns the boolean value of $GIT_OPTIONAL_LOCKS (or the default value).
@@ -1319,9 +1368,23 @@ enum unpack_loose_header_result unpack_loose_header(git_zstream *stream,
struct object_info;
int parse_loose_header(const char *hdr, struct object_info *oi);
+/**
+ * With in-core object data in "buf", rehash it to make sure the
+ * object name actually matches "oid" to detect object corruption.
+ *
+ * A negative value indicates an error, usually that the OID is not
+ * what we expected, but it might also indicate another error.
+ */
int check_object_signature(struct repository *r, const struct object_id *oid,
- void *buf, unsigned long size, const char *type,
- struct object_id *real_oidp);
+ void *map, unsigned long size,
+ enum object_type type);
+
+/**
+ * A streaming version of check_object_signature().
+ * Try reading the object named with "oid" using
+ * the streaming interface and rehash it to do the same.
+ */
+int stream_object_signature(struct repository *r, const struct object_id *oid);
int finalize_object_file(const char *tmpfile, const char *filename);
@@ -1375,6 +1438,7 @@ struct object_context {
#define GET_OID_FOLLOW_SYMLINKS 0100
#define GET_OID_RECORD_PATH 0200
#define GET_OID_ONLY_TO_DIE 04000
+#define GET_OID_REQUIRE_PATH 010000
#define GET_OID_DISAMBIGUATORS \
(GET_OID_COMMIT | GET_OID_COMMITTISH | \
@@ -1547,7 +1611,7 @@ int cache_name_stage_compare(const char *name1, int len1, int stage1, const char
void *read_object_with_reference(struct repository *r,
const struct object_id *oid,
- const char *required_type,
+ enum object_type required_type,
unsigned long *size,
struct object_id *oid_ret);
@@ -1557,48 +1621,6 @@ struct object *repo_peel_to_type(struct repository *r,
#define peel_to_type(name, namelen, obj, type) \
repo_peel_to_type(the_repository, name, namelen, obj, type)
-enum date_mode_type {
- DATE_NORMAL = 0,
- DATE_HUMAN,
- DATE_RELATIVE,
- DATE_SHORT,
- DATE_ISO8601,
- DATE_ISO8601_STRICT,
- DATE_RFC2822,
- DATE_STRFTIME,
- DATE_RAW,
- DATE_UNIX
-};
-
-struct date_mode {
- enum date_mode_type type;
- const char *strftime_fmt;
- int local;
-};
-
-/*
- * Convenience helper for passing a constant type, like:
- *
- * show_date(t, tz, DATE_MODE(NORMAL));
- */
-#define DATE_MODE(t) date_mode_from_type(DATE_##t)
-struct date_mode *date_mode_from_type(enum date_mode_type type);
-
-const char *show_date(timestamp_t time, int timezone, const struct date_mode *mode);
-void show_date_relative(timestamp_t time, struct strbuf *timebuf);
-void show_date_human(timestamp_t time, int tz, const struct timeval *now,
- struct strbuf *timebuf);
-int parse_date(const char *date, struct strbuf *out);
-int parse_date_basic(const char *date, timestamp_t *timestamp, int *offset);
-int parse_expiry_date(const char *date, timestamp_t *timestamp);
-void datestamp(struct strbuf *out);
-#define approxidate(s) approxidate_careful((s), NULL)
-timestamp_t approxidate_careful(const char *, int *);
-timestamp_t approxidate_relative(const char *date);
-void parse_date_format(const char *format, struct date_mode *mode);
-int date_overflows(timestamp_t date);
-time_t tm_to_time_t(const struct tm *tm);
-
#define IDENT_STRICT 1
#define IDENT_NO_DATE 2
#define IDENT_NO_NAME 4
@@ -1645,14 +1667,6 @@ struct ident_split {
int split_ident_line(struct ident_split *, const char *, int);
/*
- * Like show_date, but pull the timestamp and tz parameters from
- * the ident_split. It will also sanity-check the values and produce
- * a well-known sentinel date if they appear bogus.
- */
-const char *show_ident_date(const struct ident_split *id,
- const struct date_mode *mode);
-
-/*
* Compare split idents for equality or strict ordering. Note that we
* compare only the ident part of the line, ignoring any timestamp.
*
@@ -1749,6 +1763,8 @@ int copy_file_with_time(const char *dst, const char *src, int mode);
void write_or_die(int fd, const void *buf, size_t count);
void fsync_or_die(int fd, const char *);
+int fsync_component(enum fsync_component component, int fd);
+void fsync_component_or_die(enum fsync_component component, int fd, const char *msg);
ssize_t read_in_full(int fd, void *buf, size_t count);
ssize_t write_in_full(int fd, const void *buf, size_t count);
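Note: fsync behaviour is now expressed as a bitmask of repository components (fsync_components) plus a flushing strategy (fsync_method), with the aggregate macros spelling out presets such as the default of packs and derived metadata but not loose objects. A hedged sketch of a call site, assuming fsync_component_or_die() is a no-op unless the named component bit is enabled and that it honours fsync_method when it does flush:

    /* after writing a new pack file to `fd`, before renaming it into place */
    fsync_component_or_die(FSYNC_COMPONENT_PACK, fd, "pack file");

    /* callers can also test the mask directly when deciding what to do */
    if (fsync_components & FSYNC_COMPONENT_LOOSE_OBJECT)
        trace_printf("loose objects will be fsynced too\n");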
diff --git a/ci/lib.sh b/ci/lib.sh
index 9d28ab5..cbc2f8f 100755
--- a/ci/lib.sh
+++ b/ci/lib.sh
@@ -197,7 +197,6 @@ esac
case "$jobname" in
linux32)
CC=gcc
- MAKEFLAGS="$MAKEFLAGS NO_UNCOMPRESS2=1"
;;
linux-musl)
CC=gcc
diff --git a/command-list.txt b/command-list.txt
index 675c28f..9bd6f3c 100644
--- a/command-list.txt
+++ b/command-list.txt
@@ -103,6 +103,7 @@ git-grep mainporcelain info
git-gui mainporcelain
git-hash-object plumbingmanipulators
git-help ancillaryinterrogators complete
+git-hook purehelpers
git-http-backend synchingrepositories
git-http-fetch synchelpers
git-http-push synchelpers
diff --git a/commit-graph.c b/commit-graph.c
index 265c010..441b360 100644
--- a/commit-graph.c
+++ b/commit-graph.c
@@ -39,8 +39,8 @@ void git_test_write_commit_graph_or_die(void)
#define GRAPH_CHUNKID_OIDFANOUT 0x4f494446 /* "OIDF" */
#define GRAPH_CHUNKID_OIDLOOKUP 0x4f49444c /* "OIDL" */
#define GRAPH_CHUNKID_DATA 0x43444154 /* "CDAT" */
-#define GRAPH_CHUNKID_GENERATION_DATA 0x47444154 /* "GDAT" */
-#define GRAPH_CHUNKID_GENERATION_DATA_OVERFLOW 0x47444f56 /* "GDOV" */
+#define GRAPH_CHUNKID_GENERATION_DATA 0x47444132 /* "GDA2" */
+#define GRAPH_CHUNKID_GENERATION_DATA_OVERFLOW 0x47444f32 /* "GDO2" */
#define GRAPH_CHUNKID_EXTRAEDGES 0x45444745 /* "EDGE" */
#define GRAPH_CHUNKID_BLOOMINDEXES 0x42494458 /* "BIDX" */
#define GRAPH_CHUNKID_BLOOMDATA 0x42444154 /* "BDAT" */
@@ -407,6 +407,9 @@ struct commit_graph *parse_commit_graph(struct repository *r,
&graph->chunk_generation_data);
pair_chunk(cf, GRAPH_CHUNKID_GENERATION_DATA_OVERFLOW,
&graph->chunk_generation_data_overflow);
+
+ if (graph->chunk_generation_data)
+ graph->read_generation_data = 1;
}
if (r->settings.commit_graph_read_changed_paths) {
@@ -803,7 +806,7 @@ static void fill_commit_graph_info(struct commit *item, struct commit_graph *g,
die(_("commit-graph requires overflow generation data but has none"));
offset_pos = offset ^ CORRECTED_COMMIT_DATE_OFFSET_OVERFLOW;
- graph_data->generation = get_be64(g->chunk_generation_data_overflow + 8 * offset_pos);
+ graph_data->generation = item->date + get_be64(g->chunk_generation_data_overflow + 8 * offset_pos);
} else
graph_data->generation = item->date + offset;
} else
@@ -1556,12 +1559,16 @@ static void compute_generation_numbers(struct write_commit_graph_context *ctx)
if (current->date && current->date > max_corrected_commit_date)
max_corrected_commit_date = current->date - 1;
commit_graph_data_at(current)->generation = max_corrected_commit_date + 1;
-
- if (commit_graph_data_at(current)->generation - current->date > GENERATION_NUMBER_V2_OFFSET_MAX)
- ctx->num_generation_data_overflows++;
}
}
}
+
+ for (i = 0; i < ctx->commits.nr; i++) {
+ struct commit *c = ctx->commits.list[i];
+ timestamp_t offset = commit_graph_data_at(c)->generation - c->date;
+ if (offset > GENERATION_NUMBER_V2_OFFSET_MAX)
+ ctx->num_generation_data_overflows++;
+ }
stop_progress(&ctx->progress);
}
@@ -1679,21 +1686,22 @@ int write_commit_graph_reachable(struct object_directory *odb,
}
static int fill_oids_from_packs(struct write_commit_graph_context *ctx,
- struct string_list *pack_indexes)
+ const struct string_list *pack_indexes)
{
uint32_t i;
struct strbuf progress_title = STRBUF_INIT;
struct strbuf packname = STRBUF_INIT;
int dirlen;
+ int ret = 0;
strbuf_addf(&packname, "%s/pack/", ctx->odb->path);
dirlen = packname.len;
if (ctx->report_progress) {
strbuf_addf(&progress_title,
- Q_("Finding commits for commit graph in %d pack",
- "Finding commits for commit graph in %d packs",
+ Q_("Finding commits for commit graph in %"PRIuMAX" pack",
+ "Finding commits for commit graph in %"PRIuMAX" packs",
pack_indexes->nr),
- pack_indexes->nr);
+ (uintmax_t)pack_indexes->nr);
ctx->progress = start_delayed_progress(progress_title.buf, 0);
ctx->progress_done = 0;
}
@@ -1703,12 +1711,12 @@ static int fill_oids_from_packs(struct write_commit_graph_context *ctx,
strbuf_addstr(&packname, pack_indexes->items[i].string);
p = add_packed_git(packname.buf, packname.len, 1);
if (!p) {
- error(_("error adding pack %s"), packname.buf);
- return -1;
+ ret = error(_("error adding pack %s"), packname.buf);
+ goto cleanup;
}
if (open_pack_index(p)) {
- error(_("error opening index for %s"), packname.buf);
- return -1;
+ ret = error(_("error opening index for %s"), packname.buf);
+ goto cleanup;
}
for_each_object_in_pack(p, add_packed_commits, ctx,
FOR_EACH_OBJECT_PACK_ORDER);
@@ -1716,11 +1724,12 @@ static int fill_oids_from_packs(struct write_commit_graph_context *ctx,
free(p);
}
+cleanup:
stop_progress(&ctx->progress);
strbuf_release(&progress_title);
strbuf_release(&packname);
- return 0;
+ return ret;
}
static int fill_oids_from_commits(struct write_commit_graph_context *ctx,
@@ -1852,6 +1861,7 @@ static int write_commit_graph_file(struct write_commit_graph_context *ctx)
hold_lock_file_for_update_mode(&lk, lock_name,
LOCK_DIE_ON_ERROR, 0444);
+ free(lock_name);
fd = git_mkstemp_mode(ctx->graph_name, 0444);
if (fd < 0) {
@@ -1942,7 +1952,8 @@ static int write_commit_graph_file(struct write_commit_graph_context *ctx)
}
close_commit_graph(ctx->r->objects);
- finalize_hashfile(f, file_hash, CSUM_HASH_IN_STREAM | CSUM_FSYNC);
+ finalize_hashfile(f, file_hash, FSYNC_COMPONENT_COMMIT_GRAPH,
+ CSUM_HASH_IN_STREAM | CSUM_FSYNC);
free_chunkfile(cf);
if (ctx->split) {
@@ -1976,6 +1987,7 @@ static int write_commit_graph_file(struct write_commit_graph_context *ctx)
} else {
char *graph_name = get_commit_graph_filename(ctx->odb);
unlink(graph_name);
+ free(graph_name);
}
ctx->commit_graph_hash_after[ctx->num_commit_graphs_after - 1] = xstrdup(hash_to_hex(file_hash));
@@ -2259,7 +2271,7 @@ out:
}
int write_commit_graph(struct object_directory *odb,
- struct string_list *pack_indexes,
+ const struct string_list *const pack_indexes,
struct oidset *commits,
enum commit_graph_write_flags flags,
const struct commit_graph_opts *opts)
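Note: the chunk IDs move to "GDA2"/"GDO2" so that existing readers, which computed generation data incorrectly, simply ignore the renamed chunks, and fill_commit_graph_info() gains the missing `item->date +` in the overflow path: a corrected commit date is always the committer date plus a stored offset, with offsets that do not fit in 31 bits spilling into the overflow chunk. A compact restatement of the decode step (names follow the code above; `lex_index` is the commit's position in the graph):

    timestamp_t generation;
    timestamp_t offset = get_be32(g->chunk_generation_data +
                                  sizeof(uint32_t) * lex_index);

    if (offset & CORRECTED_COMMIT_DATE_OFFSET_OVERFLOW) {
        uint64_t pos = offset ^ CORRECTED_COMMIT_DATE_OFFSET_OVERFLOW;

        generation = item->date +
                     get_be64(g->chunk_generation_data_overflow + 8 * pos);
    } else {
        generation = item->date + offset;
    }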
diff --git a/commit-graph.h b/commit-graph.h
index 04a94e1..2e3ac35 100644
--- a/commit-graph.h
+++ b/commit-graph.h
@@ -142,7 +142,7 @@ int write_commit_graph_reachable(struct object_directory *odb,
enum commit_graph_write_flags flags,
const struct commit_graph_opts *opts);
int write_commit_graph(struct object_directory *odb,
- struct string_list *pack_indexes,
+ const struct string_list *pack_indexes,
struct oidset *commits,
enum commit_graph_write_flags flags,
const struct commit_graph_opts *opts);
diff --git a/commit.c b/commit.c
index a348f08..59b6c3e 100644
--- a/commit.c
+++ b/commit.c
@@ -21,6 +21,7 @@
#include "commit-reach.h"
#include "run-command.h"
#include "shallow.h"
+#include "hook.h"
static struct commit_extra_header *read_commit_extra_header_lines(const char *buf, size_t len, const char **);
@@ -248,6 +249,16 @@ int for_each_commit_graft(each_commit_graft_fn fn, void *cb_data)
return ret;
}
+void reset_commit_grafts(struct repository *r)
+{
+ int i;
+
+ for (i = 0; i < r->parsed_objects->grafts_nr; i++)
+ free(r->parsed_objects->grafts[i]);
+ r->parsed_objects->grafts_nr = 0;
+ r->parsed_objects->commit_graft_prepared = 0;
+}
+
struct commit_buffer {
void *buffer;
unsigned long size;
@@ -1567,7 +1578,7 @@ int commit_tree_extended(const char *msg, size_t msg_len,
goto out;
}
- result = write_object_file(buffer.buf, buffer.len, commit_type, ret);
+ result = write_object_file(buffer.buf, buffer.len, OBJ_COMMIT, ret);
out:
strbuf_release(&buffer);
return result;
@@ -1631,12 +1642,20 @@ struct commit_list **commit_list_append(struct commit *commit,
return &new_commit->next;
}
-const char *find_commit_header(const char *msg, const char *key, size_t *out_len)
+const char *find_header_mem(const char *msg, size_t len,
+ const char *key, size_t *out_len)
{
int key_len = strlen(key);
const char *line = msg;
- while (line) {
+ /*
+ * NEEDSWORK: It's possible for strchrnul() to scan beyond the range
+ * given by len. However, current callers are safe because they compute
+ * len by scanning a NUL-terminated block of memory starting at msg.
+ * Nonetheless, it would be better to ensure the function does not look
+ * at msg beyond the len provided by the caller.
+ */
+ while (line && line < msg + len) {
const char *eol = strchrnul(line, '\n');
if (line == eol)
@@ -1653,6 +1672,10 @@ const char *find_commit_header(const char *msg, const char *key, size_t *out_len
return NULL;
}
+const char *find_commit_header(const char *msg, const char *key, size_t *out_len)
+{
+ return find_header_mem(msg, strlen(msg), key, out_len);
+}
/*
* Inspect the given string and determine the true "end" of the log message, in
* order to find where to put a new Signed-off-by trailer. Ignored are
@@ -1700,24 +1723,25 @@ size_t ignore_non_trailer(const char *buf, size_t len)
}
int run_commit_hook(int editor_is_used, const char *index_file,
- const char *name, ...)
+ int *invoked_hook, const char *name, ...)
{
- struct strvec hook_env = STRVEC_INIT;
+ struct run_hooks_opt opt = RUN_HOOKS_OPT_INIT;
va_list args;
- int ret;
+ const char *arg;
- strvec_pushf(&hook_env, "GIT_INDEX_FILE=%s", index_file);
+ strvec_pushf(&opt.env, "GIT_INDEX_FILE=%s", index_file);
/*
* Let the hook know that no editor will be launched.
*/
if (!editor_is_used)
- strvec_push(&hook_env, "GIT_EDITOR=:");
+ strvec_push(&opt.env, "GIT_EDITOR=:");
va_start(args, name);
- ret = run_hook_ve(hook_env.v, name, args);
+ while ((arg = va_arg(args, const char *)))
+ strvec_push(&opt.args, arg);
va_end(args);
- strvec_clear(&hook_env);
- return ret;
+ opt.invoked_hook = invoked_hook;
+ return run_hooks_opt(name, &opt);
}
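Note: find_header_mem() is the length-bounded core of find_commit_header(); the extra `len` lets callers search buffers that are not NUL-terminated exactly where the interesting data ends (the NEEDSWORK comment records that strchrnul() may still peek past `len`). A small usage sketch against an in-core commit buffer; the wrapper function and variable names are illustrative:

    static void show_commit_encoding(const char *buf, size_t len)
    {
        size_t val_len;
        const char *val = find_header_mem(buf, len, "encoding", &val_len);

        if (val)
            printf("commit encoding: %.*s\n", (int)val_len, val);
    }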
diff --git a/commit.h b/commit.h
index 3ea3276..21e4d25 100644
--- a/commit.h
+++ b/commit.h
@@ -249,6 +249,7 @@ int commit_graft_pos(struct repository *r, const struct object_id *oid);
int register_commit_graft(struct repository *r, struct commit_graft *, int);
void prepare_commit_graft(struct repository *r);
struct commit_graft *lookup_commit_graft(struct repository *r, const struct object_id *oid);
+void reset_commit_grafts(struct repository *r);
struct commit *get_fork_point(const char *refname, struct commit *commit);
@@ -290,12 +291,17 @@ void free_commit_extra_headers(struct commit_extra_header *extra);
/*
* Search the commit object contents given by "msg" for the header "key".
+ * Reads up to "len" bytes of "msg".
* Returns a pointer to the start of the header contents, or NULL. The length
* of the header, up to the first newline, is returned via out_len.
*
* Note that some headers (like mergetag) may be multi-line. It is the caller's
* responsibility to parse further in this case!
*/
+const char *find_header_mem(const char *msg, size_t len,
+ const char *key,
+ size_t *out_len);
+
const char *find_commit_header(const char *msg, const char *key,
size_t *out_len);
@@ -364,7 +370,8 @@ int compare_commits_by_commit_date(const void *a_, const void *b_, void *unused)
int compare_commits_by_gen_then_commit_date(const void *a_, const void *b_, void *unused);
LAST_ARG_MUST_BE_NULL
-int run_commit_hook(int editor_is_used, const char *index_file, const char *name, ...);
+int run_commit_hook(int editor_is_used, const char *index_file,
+ int *invoked_hook, const char *name, ...);
/* Sign a commit or tag buffer, storing the result in a header. */
int sign_with_header(struct strbuf *buf, const char *keyid);
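Note: run_commit_hook() now goes through the unified hook API and reports, via the new `invoked_hook` out-parameter, whether a hook actually ran, which lets callers skip work (such as re-reading a message file) when nothing was invoked. A hedged call-site sketch; `use_editor`, `index_file` and `msg_file` are illustrative locals, and the trailing NULL is required by LAST_ARG_MUST_BE_NULL:

    int invoked_hook;

    if (run_commit_hook(use_editor, index_file, &invoked_hook,
                        "prepare-commit-msg", msg_file, "message", NULL))
        return error(_("prepare-commit-msg hook failed"));
    if (invoked_hook) {
        /* the hook ran and may have rewritten msg_file; re-read it */
    }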
diff --git a/compat/fsmonitor/fsm-darwin-gcc.h b/compat/fsmonitor/fsm-darwin-gcc.h
new file mode 100644
index 0000000..1c75c3d
--- /dev/null
+++ b/compat/fsmonitor/fsm-darwin-gcc.h
@@ -0,0 +1,92 @@
+#ifndef FSM_DARWIN_GCC_H
+#define FSM_DARWIN_GCC_H
+
+#ifndef __clang__
+/*
+ * It is possible to #include CoreFoundation/CoreFoundation.h when compiling
+ * with clang, but not with GCC as of time of writing.
+ *
+ * See https://gcc.gnu.org/bugzilla/show_bug.cgi?id=93082 for details.
+ */
+typedef unsigned int FSEventStreamCreateFlags;
+#define kFSEventStreamEventFlagNone 0x00000000
+#define kFSEventStreamEventFlagMustScanSubDirs 0x00000001
+#define kFSEventStreamEventFlagUserDropped 0x00000002
+#define kFSEventStreamEventFlagKernelDropped 0x00000004
+#define kFSEventStreamEventFlagEventIdsWrapped 0x00000008
+#define kFSEventStreamEventFlagHistoryDone 0x00000010
+#define kFSEventStreamEventFlagRootChanged 0x00000020
+#define kFSEventStreamEventFlagMount 0x00000040
+#define kFSEventStreamEventFlagUnmount 0x00000080
+#define kFSEventStreamEventFlagItemCreated 0x00000100
+#define kFSEventStreamEventFlagItemRemoved 0x00000200
+#define kFSEventStreamEventFlagItemInodeMetaMod 0x00000400
+#define kFSEventStreamEventFlagItemRenamed 0x00000800
+#define kFSEventStreamEventFlagItemModified 0x00001000
+#define kFSEventStreamEventFlagItemFinderInfoMod 0x00002000
+#define kFSEventStreamEventFlagItemChangeOwner 0x00004000
+#define kFSEventStreamEventFlagItemXattrMod 0x00008000
+#define kFSEventStreamEventFlagItemIsFile 0x00010000
+#define kFSEventStreamEventFlagItemIsDir 0x00020000
+#define kFSEventStreamEventFlagItemIsSymlink 0x00040000
+#define kFSEventStreamEventFlagOwnEvent 0x00080000
+#define kFSEventStreamEventFlagItemIsHardlink 0x00100000
+#define kFSEventStreamEventFlagItemIsLastHardlink 0x00200000
+#define kFSEventStreamEventFlagItemCloned 0x00400000
+
+typedef struct __FSEventStream *FSEventStreamRef;
+typedef const FSEventStreamRef ConstFSEventStreamRef;
+
+typedef unsigned int CFStringEncoding;
+#define kCFStringEncodingUTF8 0x08000100
+
+typedef const struct __CFString *CFStringRef;
+typedef const struct __CFArray *CFArrayRef;
+typedef const struct __CFRunLoop *CFRunLoopRef;
+
+struct FSEventStreamContext {
+ long long version;
+ void *cb_data, *retain, *release, *copy_description;
+};
+
+typedef struct FSEventStreamContext FSEventStreamContext;
+typedef unsigned int FSEventStreamEventFlags;
+#define kFSEventStreamCreateFlagNoDefer 0x02
+#define kFSEventStreamCreateFlagWatchRoot 0x04
+#define kFSEventStreamCreateFlagFileEvents 0x10
+
+typedef unsigned long long FSEventStreamEventId;
+#define kFSEventStreamEventIdSinceNow 0xFFFFFFFFFFFFFFFFULL
+
+typedef void (*FSEventStreamCallback)(ConstFSEventStreamRef streamRef,
+ void *context,
+ __SIZE_TYPE__ num_of_events,
+ void *event_paths,
+ const FSEventStreamEventFlags event_flags[],
+ const FSEventStreamEventId event_ids[]);
+typedef double CFTimeInterval;
+FSEventStreamRef FSEventStreamCreate(void *allocator,
+ FSEventStreamCallback callback,
+ FSEventStreamContext *context,
+ CFArrayRef paths_to_watch,
+ FSEventStreamEventId since_when,
+ CFTimeInterval latency,
+ FSEventStreamCreateFlags flags);
+CFStringRef CFStringCreateWithCString(void *allocator, const char *string,
+ CFStringEncoding encoding);
+CFArrayRef CFArrayCreate(void *allocator, const void **items, long long count,
+ void *callbacks);
+void CFRunLoopRun(void);
+void CFRunLoopStop(CFRunLoopRef run_loop);
+CFRunLoopRef CFRunLoopGetCurrent(void);
+extern CFStringRef kCFRunLoopDefaultMode;
+void FSEventStreamScheduleWithRunLoop(FSEventStreamRef stream,
+ CFRunLoopRef run_loop,
+ CFStringRef run_loop_mode);
+unsigned char FSEventStreamStart(FSEventStreamRef stream);
+void FSEventStreamStop(FSEventStreamRef stream);
+void FSEventStreamInvalidate(FSEventStreamRef stream);
+void FSEventStreamRelease(FSEventStreamRef stream);
+
+#endif /* !clang */
+#endif /* FSM_DARWIN_GCC_H */
diff --git a/compat/fsmonitor/fsm-listen-darwin.c b/compat/fsmonitor/fsm-listen-darwin.c
new file mode 100644
index 0000000..0741fe8
--- /dev/null
+++ b/compat/fsmonitor/fsm-listen-darwin.c
@@ -0,0 +1,427 @@
+#ifndef __clang__
+#include "fsm-darwin-gcc.h"
+#else
+#include <CoreFoundation/CoreFoundation.h>
+#include <CoreServices/CoreServices.h>
+
+#ifndef AVAILABLE_MAC_OS_X_VERSION_10_13_AND_LATER
+/*
+ * This enum value was added in 10.13 to:
+ *
+ * /Applications/Xcode.app/Contents/Developer/Platforms/ \
+ * MacOSX.platform/Developer/SDKs/MacOSX.sdk/System/ \
+ * Library/Frameworks/CoreServices.framework/Frameworks/ \
+ * FSEvents.framework/Versions/Current/Headers/FSEvents.h
+ *
+ * If we're compiling against an older SDK, this symbol won't be
+ * present. Silently define it here so that we don't have to ifdef
+ * the logging or masking below. This should be harmless since older
+ * versions of macOS won't ever emit this FS event anyway.
+ */
+#define kFSEventStreamEventFlagItemCloned 0x00400000
+#endif
+#endif
+
+#include "cache.h"
+#include "fsmonitor.h"
+#include "fsm-listen.h"
+#include "fsmonitor--daemon.h"
+
+struct fsmonitor_daemon_backend_data
+{
+ CFStringRef cfsr_worktree_path;
+ CFStringRef cfsr_gitdir_path;
+
+ CFArrayRef cfar_paths_to_watch;
+ int nr_paths_watching;
+
+ FSEventStreamRef stream;
+
+ CFRunLoopRef rl;
+
+ enum shutdown_style {
+ SHUTDOWN_EVENT = 0,
+ FORCE_SHUTDOWN,
+ FORCE_ERROR_STOP,
+ } shutdown_style;
+
+ unsigned int stream_scheduled:1;
+ unsigned int stream_started:1;
+};
+
+static void log_flags_set(const char *path, const FSEventStreamEventFlags flag)
+{
+ struct strbuf msg = STRBUF_INIT;
+
+ if (flag & kFSEventStreamEventFlagMustScanSubDirs)
+ strbuf_addstr(&msg, "MustScanSubDirs|");
+ if (flag & kFSEventStreamEventFlagUserDropped)
+ strbuf_addstr(&msg, "UserDropped|");
+ if (flag & kFSEventStreamEventFlagKernelDropped)
+ strbuf_addstr(&msg, "KernelDropped|");
+ if (flag & kFSEventStreamEventFlagEventIdsWrapped)
+ strbuf_addstr(&msg, "EventIdsWrapped|");
+ if (flag & kFSEventStreamEventFlagHistoryDone)
+ strbuf_addstr(&msg, "HistoryDone|");
+ if (flag & kFSEventStreamEventFlagRootChanged)
+ strbuf_addstr(&msg, "RootChanged|");
+ if (flag & kFSEventStreamEventFlagMount)
+ strbuf_addstr(&msg, "Mount|");
+ if (flag & kFSEventStreamEventFlagUnmount)
+ strbuf_addstr(&msg, "Unmount|");
+ if (flag & kFSEventStreamEventFlagItemChangeOwner)
+ strbuf_addstr(&msg, "ItemChangeOwner|");
+ if (flag & kFSEventStreamEventFlagItemCreated)
+ strbuf_addstr(&msg, "ItemCreated|");
+ if (flag & kFSEventStreamEventFlagItemFinderInfoMod)
+ strbuf_addstr(&msg, "ItemFinderInfoMod|");
+ if (flag & kFSEventStreamEventFlagItemInodeMetaMod)
+ strbuf_addstr(&msg, "ItemInodeMetaMod|");
+ if (flag & kFSEventStreamEventFlagItemIsDir)
+ strbuf_addstr(&msg, "ItemIsDir|");
+ if (flag & kFSEventStreamEventFlagItemIsFile)
+ strbuf_addstr(&msg, "ItemIsFile|");
+ if (flag & kFSEventStreamEventFlagItemIsHardlink)
+ strbuf_addstr(&msg, "ItemIsHardlink|");
+ if (flag & kFSEventStreamEventFlagItemIsLastHardlink)
+ strbuf_addstr(&msg, "ItemIsLastHardlink|");
+ if (flag & kFSEventStreamEventFlagItemIsSymlink)
+ strbuf_addstr(&msg, "ItemIsSymlink|");
+ if (flag & kFSEventStreamEventFlagItemModified)
+ strbuf_addstr(&msg, "ItemModified|");
+ if (flag & kFSEventStreamEventFlagItemRemoved)
+ strbuf_addstr(&msg, "ItemRemoved|");
+ if (flag & kFSEventStreamEventFlagItemRenamed)
+ strbuf_addstr(&msg, "ItemRenamed|");
+ if (flag & kFSEventStreamEventFlagItemXattrMod)
+ strbuf_addstr(&msg, "ItemXattrMod|");
+ if (flag & kFSEventStreamEventFlagOwnEvent)
+ strbuf_addstr(&msg, "OwnEvent|");
+ if (flag & kFSEventStreamEventFlagItemCloned)
+ strbuf_addstr(&msg, "ItemCloned|");
+
+ trace_printf_key(&trace_fsmonitor, "fsevent: '%s', flags=%u %s",
+ path, flag, msg.buf);
+
+ strbuf_release(&msg);
+}
+
+static int ef_is_root_delete(const FSEventStreamEventFlags ef)
+{
+ return (ef & kFSEventStreamEventFlagItemIsDir &&
+ ef & kFSEventStreamEventFlagItemRemoved);
+}
+
+static int ef_is_root_renamed(const FSEventStreamEventFlags ef)
+{
+ return (ef & kFSEventStreamEventFlagItemIsDir &&
+ ef & kFSEventStreamEventFlagItemRenamed);
+}
+
+static int ef_is_dropped(const FSEventStreamEventFlags ef)
+{
+ return (ef & kFSEventStreamEventFlagMustScanSubDirs ||
+ ef & kFSEventStreamEventFlagKernelDropped ||
+ ef & kFSEventStreamEventFlagUserDropped);
+}
+
+static void fsevent_callback(ConstFSEventStreamRef streamRef,
+ void *ctx,
+ size_t num_of_events,
+ void *event_paths,
+ const FSEventStreamEventFlags event_flags[],
+ const FSEventStreamEventId event_ids[])
+{
+ struct fsmonitor_daemon_state *state = ctx;
+ struct fsmonitor_daemon_backend_data *data = state->backend_data;
+ char **paths = (char **)event_paths;
+ struct fsmonitor_batch *batch = NULL;
+ struct string_list cookie_list = STRING_LIST_INIT_DUP;
+ const char *path_k;
+ const char *slash;
+ int k;
+ struct strbuf tmp = STRBUF_INIT;
+
+ /*
+ * Build a list of all filesystem changes into a private/local
+ * list and without holding any locks.
+ */
+ for (k = 0; k < num_of_events; k++) {
+ /*
+ * On Mac, we receive an array of absolute paths.
+ */
+ path_k = paths[k];
+
+ /*
+ * If you want to debug FSEvents, log them to GIT_TRACE_FSMONITOR.
+ * Please don't log them to Trace2.
+ *
+ * trace_printf_key(&trace_fsmonitor, "Path: '%s'", path_k);
+ */
+
+ /*
+ * If event[k] is marked as dropped, we assume that we have
+ * lost sync with the filesystem and should flush our cached
+ * data. We need to:
+ *
+ * [1] Abort/wake any client threads waiting for a cookie and
+ * flush the cached state data (the current token), and
+ * create a new token.
+ *
+ * [2] Discard the batch that we were locally building (since
+ * they are conceptually relative to the just flushed
+ * token).
+ */
+ if (ef_is_dropped(event_flags[k])) {
+ if (trace_pass_fl(&trace_fsmonitor))
+ log_flags_set(path_k, event_flags[k]);
+
+ fsmonitor_force_resync(state);
+ fsmonitor_batch__free_list(batch);
+ string_list_clear(&cookie_list, 0);
+
+ /*
+ * We assume that any events that we received
+ * in this callback after this dropped event
+ * may still be valid, so we continue rather
+ * than break. (And just in case there is a
+ * delete of ".git" hiding in there.)
+ */
+ continue;
+ }
+
+ switch (fsmonitor_classify_path_absolute(state, path_k)) {
+
+ case IS_INSIDE_DOT_GIT_WITH_COOKIE_PREFIX:
+ case IS_INSIDE_GITDIR_WITH_COOKIE_PREFIX:
+ /* special case cookie files within .git or gitdir */
+
+ /* Use just the filename of the cookie file. */
+ slash = find_last_dir_sep(path_k);
+ string_list_append(&cookie_list,
+ slash ? slash + 1 : path_k);
+ break;
+
+ case IS_INSIDE_DOT_GIT:
+ case IS_INSIDE_GITDIR:
+ /* ignore all other paths inside of .git or gitdir */
+ break;
+
+ case IS_DOT_GIT:
+ case IS_GITDIR:
+ /*
+ * If .git directory is deleted or renamed away,
+ * we have to quit.
+ */
+ if (ef_is_root_delete(event_flags[k])) {
+ trace_printf_key(&trace_fsmonitor,
+ "event: gitdir removed");
+ goto force_shutdown;
+ }
+ if (ef_is_root_renamed(event_flags[k])) {
+ trace_printf_key(&trace_fsmonitor,
+ "event: gitdir renamed");
+ goto force_shutdown;
+ }
+ break;
+
+ case IS_WORKDIR_PATH:
+ /* try to queue normal pathnames */
+
+ if (trace_pass_fl(&trace_fsmonitor))
+ log_flags_set(path_k, event_flags[k]);
+
+ /*
+ * Because of the implicit "binning" (the
+ * kernel calls us at a given frequency) and
+ * de-duping (the kernel is free to combine
+ * multiple events for a given pathname), an
+ * individual fsevent could be marked as both
+ * a file and directory. Add it to the queue
+ * with both spellings so that the client will
+ * know how much to invalidate/refresh.
+ */
+
+ if (event_flags[k] & kFSEventStreamEventFlagItemIsFile) {
+ const char *rel = path_k +
+ state->path_worktree_watch.len + 1;
+
+ if (!batch)
+ batch = fsmonitor_batch__new();
+ fsmonitor_batch__add_path(batch, rel);
+ }
+
+ if (event_flags[k] & kFSEventStreamEventFlagItemIsDir) {
+ const char *rel = path_k +
+ state->path_worktree_watch.len + 1;
+
+ strbuf_reset(&tmp);
+ strbuf_addstr(&tmp, rel);
+ strbuf_addch(&tmp, '/');
+
+ if (!batch)
+ batch = fsmonitor_batch__new();
+ fsmonitor_batch__add_path(batch, tmp.buf);
+ }
+
+ break;
+
+ case IS_OUTSIDE_CONE:
+ default:
+ trace_printf_key(&trace_fsmonitor,
+ "ignoring '%s'", path_k);
+ break;
+ }
+ }
+
+ fsmonitor_publish(state, batch, &cookie_list);
+ string_list_clear(&cookie_list, 0);
+ strbuf_release(&tmp);
+ return;
+
+force_shutdown:
+ fsmonitor_batch__free_list(batch);
+ string_list_clear(&cookie_list, 0);
+
+ data->shutdown_style = FORCE_SHUTDOWN;
+ CFRunLoopStop(data->rl);
+ strbuf_release(&tmp);
+ return;
+}
+
+/*
+ * In the call to `FSEventStreamCreate()` to setup our watch, the
+ * `latency` argument determines the frequency of calls to our callback
+ * with new FS events. Too slow and events get dropped; too fast and
+ * we burn CPU unnecessarily. Since it is rather obscure, I don't
+ * think this needs to be a config setting. I've done extensive
+ * testing on my systems and chosen the value below. It gives good
+ * results and I've not seen any dropped events.
+ *
+ * With a latency of 0.1, I was seeing lots of dropped events during
+ * the "touch 100000" files test within t/perf/p7519, but with a
+ * latency of 0.001 I did not see any dropped events. So I'm going
+ * to assume that this is the "correct" value.
+ *
+ * https://developer.apple.com/documentation/coreservices/1443980-fseventstreamcreate
+ */
+
+int fsm_listen__ctor(struct fsmonitor_daemon_state *state)
+{
+ FSEventStreamCreateFlags flags = kFSEventStreamCreateFlagNoDefer |
+ kFSEventStreamCreateFlagWatchRoot |
+ kFSEventStreamCreateFlagFileEvents;
+ FSEventStreamContext ctx = {
+ 0,
+ state,
+ NULL,
+ NULL,
+ NULL
+ };
+ struct fsmonitor_daemon_backend_data *data;
+ const void *dir_array[2];
+
+ CALLOC_ARRAY(data, 1);
+ state->backend_data = data;
+
+ data->cfsr_worktree_path = CFStringCreateWithCString(
+ NULL, state->path_worktree_watch.buf, kCFStringEncodingUTF8);
+ dir_array[data->nr_paths_watching++] = data->cfsr_worktree_path;
+
+ if (state->nr_paths_watching > 1) {
+ data->cfsr_gitdir_path = CFStringCreateWithCString(
+ NULL, state->path_gitdir_watch.buf,
+ kCFStringEncodingUTF8);
+ dir_array[data->nr_paths_watching++] = data->cfsr_gitdir_path;
+ }
+
+ data->cfar_paths_to_watch = CFArrayCreate(NULL, dir_array,
+ data->nr_paths_watching,
+ NULL);
+ data->stream = FSEventStreamCreate(NULL, fsevent_callback, &ctx,
+ data->cfar_paths_to_watch,
+ kFSEventStreamEventIdSinceNow,
+ 0.001, flags);
+ if (data->stream == NULL)
+ goto failed;
+
+ /*
+ * `data->rl` needs to be set inside the listener thread.
+ */
+
+ return 0;
+
+failed:
+ error(_("Unable to create FSEventStream."));
+
+ FREE_AND_NULL(state->backend_data);
+ return -1;
+}
+
+void fsm_listen__dtor(struct fsmonitor_daemon_state *state)
+{
+ struct fsmonitor_daemon_backend_data *data;
+
+ if (!state || !state->backend_data)
+ return;
+
+ data = state->backend_data;
+
+ if (data->stream) {
+ if (data->stream_started)
+ FSEventStreamStop(data->stream);
+ if (data->stream_scheduled)
+ FSEventStreamInvalidate(data->stream);
+ FSEventStreamRelease(data->stream);
+ }
+
+ FREE_AND_NULL(state->backend_data);
+}
+
+void fsm_listen__stop_async(struct fsmonitor_daemon_state *state)
+{
+ struct fsmonitor_daemon_backend_data *data;
+
+ data = state->backend_data;
+ data->shutdown_style = SHUTDOWN_EVENT;
+
+ CFRunLoopStop(data->rl);
+}
+
+void fsm_listen__loop(struct fsmonitor_daemon_state *state)
+{
+ struct fsmonitor_daemon_backend_data *data;
+
+ data = state->backend_data;
+
+ data->rl = CFRunLoopGetCurrent();
+
+ FSEventStreamScheduleWithRunLoop(data->stream, data->rl, kCFRunLoopDefaultMode);
+ data->stream_scheduled = 1;
+
+ if (!FSEventStreamStart(data->stream)) {
+ error(_("Failed to start the FSEventStream"));
+ goto force_error_stop_without_loop;
+ }
+ data->stream_started = 1;
+
+ CFRunLoopRun();
+
+ switch (data->shutdown_style) {
+ case FORCE_ERROR_STOP:
+ state->error_code = -1;
+ /* fall thru */
+ case FORCE_SHUTDOWN:
+ ipc_server_stop_async(state->ipc_server_data);
+ /* fall thru */
+ case SHUTDOWN_EVENT:
+ default:
+ break;
+ }
+ return;
+
+force_error_stop_without_loop:
+ state->error_code = -1;
+ ipc_server_stop_async(state->ipc_server_data);
+ return;
+}
diff --git a/compat/fsmonitor/fsm-listen-win32.c b/compat/fsmonitor/fsm-listen-win32.c
new file mode 100644
index 0000000..5b928ab
--- /dev/null
+++ b/compat/fsmonitor/fsm-listen-win32.c
@@ -0,0 +1,586 @@
+#include "cache.h"
+#include "config.h"
+#include "fsmonitor.h"
+#include "fsm-listen.h"
+#include "fsmonitor--daemon.h"
+
+/*
+ * The documentation of ReadDirectoryChangesW() states that the maximum
+ * buffer size is 64K when the monitored directory is remote.
+ *
+ * Larger buffers may be used when the monitored directory is local and
+ * will help us receive events faster from the kernel and avoid dropped
+ * events.
+ *
+ * So we try to use a very large buffer and silently fallback to 64K if
+ * we get an error.
+ */
+#define MAX_RDCW_BUF_FALLBACK (65536)
+#define MAX_RDCW_BUF (65536 * 8)
+
+struct one_watch
+{
+ char buffer[MAX_RDCW_BUF];
+ DWORD buf_len;
+ DWORD count;
+
+ struct strbuf path;
+ HANDLE hDir;
+ HANDLE hEvent;
+ OVERLAPPED overlapped;
+
+ /*
+ * Is there an active ReadDirectoryChangesW() call pending. If so, we
+ * need to later call GetOverlappedResult() and possibly CancelIoEx().
+ */
+ BOOL is_active;
+};
+
+struct fsmonitor_daemon_backend_data
+{
+ struct one_watch *watch_worktree;
+ struct one_watch *watch_gitdir;
+
+ HANDLE hEventShutdown;
+
+ HANDLE hListener[3]; /* we don't own these handles */
+#define LISTENER_SHUTDOWN 0
+#define LISTENER_HAVE_DATA_WORKTREE 1
+#define LISTENER_HAVE_DATA_GITDIR 2
+ int nr_listener_handles;
+};
+
+/*
+ * Convert the WCHAR path from the notification into UTF8 and
+ * then normalize it.
+ */
+static int normalize_path_in_utf8(FILE_NOTIFY_INFORMATION *info,
+ struct strbuf *normalized_path)
+{
+ int reserve;
+ int len = 0;
+
+ strbuf_reset(normalized_path);
+ if (!info->FileNameLength)
+ goto normalize;
+
+ /*
+ * Pre-reserve enough space in the UTF8 buffer for
+ * each Unicode WCHAR character to be mapped into a
+ * sequence of 2 UTF8 characters. That should let us
+ * avoid ERROR_INSUFFICIENT_BUFFER 99.9+% of the time.
+ */
+ reserve = info->FileNameLength + 1;
+ strbuf_grow(normalized_path, reserve);
+
+ for (;;) {
+ len = WideCharToMultiByte(CP_UTF8, 0, info->FileName,
+ info->FileNameLength / sizeof(WCHAR),
+ normalized_path->buf,
+ strbuf_avail(normalized_path) - 1,
+ NULL, NULL);
+ if (len > 0)
+ goto normalize;
+ if (GetLastError() != ERROR_INSUFFICIENT_BUFFER) {
+ error(_("[GLE %ld] could not convert path to UTF-8: '%.*ls'"),
+ GetLastError(),
+ (int)(info->FileNameLength / sizeof(WCHAR)),
+ info->FileName);
+ return -1;
+ }
+
+ strbuf_grow(normalized_path,
+ strbuf_avail(normalized_path) + reserve);
+ }
+
+normalize:
+ strbuf_setlen(normalized_path, len);
+ return strbuf_normalize_path(normalized_path);
+}
+
+void fsm_listen__stop_async(struct fsmonitor_daemon_state *state)
+{
+ SetEvent(state->backend_data->hListener[LISTENER_SHUTDOWN]);
+}
+
+static struct one_watch *create_watch(struct fsmonitor_daemon_state *state,
+ const char *path)
+{
+ struct one_watch *watch = NULL;
+ DWORD desired_access = FILE_LIST_DIRECTORY;
+ DWORD share_mode =
+ FILE_SHARE_WRITE | FILE_SHARE_READ | FILE_SHARE_DELETE;
+ HANDLE hDir;
+ wchar_t wpath[MAX_PATH];
+
+ if (xutftowcs_path(wpath, path) < 0) {
+ error(_("could not convert to wide characters: '%s'"), path);
+ return NULL;
+ }
+
+ hDir = CreateFileW(wpath,
+ desired_access, share_mode, NULL, OPEN_EXISTING,
+ FILE_FLAG_BACKUP_SEMANTICS | FILE_FLAG_OVERLAPPED,
+ NULL);
+ if (hDir == INVALID_HANDLE_VALUE) {
+ error(_("[GLE %ld] could not watch '%s'"),
+ GetLastError(), path);
+ return NULL;
+ }
+
+ CALLOC_ARRAY(watch, 1);
+
+ watch->buf_len = sizeof(watch->buffer); /* assume full MAX_RDCW_BUF */
+
+ strbuf_init(&watch->path, 0);
+ strbuf_addstr(&watch->path, path);
+
+ watch->hDir = hDir;
+ watch->hEvent = CreateEvent(NULL, TRUE, FALSE, NULL);
+
+ return watch;
+}
+
+static void destroy_watch(struct one_watch *watch)
+{
+ if (!watch)
+ return;
+
+ strbuf_release(&watch->path);
+ if (watch->hDir != INVALID_HANDLE_VALUE)
+ CloseHandle(watch->hDir);
+ if (watch->hEvent != INVALID_HANDLE_VALUE)
+ CloseHandle(watch->hEvent);
+
+ free(watch);
+}
+
+static int start_rdcw_watch(struct fsmonitor_daemon_backend_data *data,
+ struct one_watch *watch)
+{
+ DWORD dwNotifyFilter =
+ FILE_NOTIFY_CHANGE_FILE_NAME |
+ FILE_NOTIFY_CHANGE_DIR_NAME |
+ FILE_NOTIFY_CHANGE_ATTRIBUTES |
+ FILE_NOTIFY_CHANGE_SIZE |
+ FILE_NOTIFY_CHANGE_LAST_WRITE |
+ FILE_NOTIFY_CHANGE_CREATION;
+
+ ResetEvent(watch->hEvent);
+
+ memset(&watch->overlapped, 0, sizeof(watch->overlapped));
+ watch->overlapped.hEvent = watch->hEvent;
+
+ /*
+ * Queue an async call using Overlapped IO. This returns immediately.
+ * Our event handle will be signalled when the real result is available.
+ *
+ * The return value here just means that we successfully queued it.
+ * We won't know if the Read...() actually produces data until later.
+ */
+ watch->is_active = ReadDirectoryChangesW(
+ watch->hDir, watch->buffer, watch->buf_len, TRUE,
+ dwNotifyFilter, &watch->count, &watch->overlapped, NULL);
+
+ if (watch->is_active)
+ return 0;
+
+	error(_("ReadDirectoryChangesW failed on '%s' [GLE %ld]"),
+ watch->path.buf, GetLastError());
+ return -1;
+}
+
+static int recv_rdcw_watch(struct one_watch *watch)
+{
+ DWORD gle;
+
+ watch->is_active = FALSE;
+
+ /*
+ * The overlapped result is ready. If the Read...() was successful
+ * we finally receive the actual result into our buffer.
+ */
+ if (GetOverlappedResult(watch->hDir, &watch->overlapped, &watch->count,
+ TRUE))
+ return 0;
+
+ gle = GetLastError();
+ if (gle == ERROR_INVALID_PARAMETER &&
+ /*
+ * The kernel throws an invalid parameter error when our
+ * buffer is too big and we are pointed at a remote
+ * directory (and possibly for other reasons). Quietly
+		 * shrink the buffer and try again.
+ *
+ * See note about MAX_RDCW_BUF at the top.
+ */
+ watch->buf_len > MAX_RDCW_BUF_FALLBACK) {
+ watch->buf_len = MAX_RDCW_BUF_FALLBACK;
+ return -2;
+ }
+
+ /*
+ * NEEDSWORK: If an external <gitdir> is deleted, the above
+ * returns an error. I'm not sure that there's anything that
+ * we can do here other than failing -- the <worktree>/.git
+ * link file would be broken anyway. We might try to check
+ * for that and return a better error message, but I'm not
+ * sure it is worth it.
+ */
+
+ error(_("GetOverlappedResult failed on '%s' [GLE %ld]"),
+ watch->path.buf, gle);
+ return -1;
+}
+
+static void cancel_rdcw_watch(struct one_watch *watch)
+{
+ DWORD count;
+
+ if (!watch || !watch->is_active)
+ return;
+
+ /*
+ * The calls to ReadDirectoryChangesW() and GetOverlappedResult()
+ * form a "pair" (my term) where we queue an IO and promise to
+ * hang around and wait for the kernel to give us the result.
+ *
+ * If for some reason after we queue the IO, we have to quit
+ * or otherwise not stick around for the second half, we must
+ * tell the kernel to abort the IO. This prevents the kernel
+ * from writing to our buffer and/or signalling our event
+ * after we free them.
+ *
+ * (Ask me how much fun it was to track that one down).
+ */
+ CancelIoEx(watch->hDir, &watch->overlapped);
+ GetOverlappedResult(watch->hDir, &watch->overlapped, &count, TRUE);
+ watch->is_active = FALSE;
+}
+
+/*
+ * Process filesystem events that happen anywhere (recursively) under the
+ * <worktree> root directory. For a normal working directory, this includes
+ * both version controlled files and the contents of the .git/ directory.
+ *
+ * If <worktree>/.git is a file, then we only see events for the file
+ * itself.
+ */
+static int process_worktree_events(struct fsmonitor_daemon_state *state)
+{
+ struct fsmonitor_daemon_backend_data *data = state->backend_data;
+ struct one_watch *watch = data->watch_worktree;
+ struct strbuf path = STRBUF_INIT;
+ struct string_list cookie_list = STRING_LIST_INIT_DUP;
+ struct fsmonitor_batch *batch = NULL;
+ const char *p = watch->buffer;
+
+ /*
+ * If the kernel gets more events than will fit in the kernel
+ * buffer associated with our RDCW handle, it drops them and
+ * returns a count of zero.
+ *
+ * Yes, the call returns WITHOUT error and with length zero.
+ * This is the documented behavior. (My testing has confirmed
+ * that it also sets the last error to ERROR_NOTIFY_ENUM_DIR,
+ * but we do not rely on that since the function did not
+ * return an error and it is not documented.)
+ *
+ * (The "overflow" case is not ambiguous with the "no data" case
+ * because we did an INFINITE wait.)
+ *
+ * This means we have a gap in coverage. Tell the daemon layer
+ * to resync.
+ */
+ if (!watch->count) {
+ trace2_data_string("fsmonitor", NULL, "fsm-listen/kernel",
+ "overflow");
+ fsmonitor_force_resync(state);
+ return LISTENER_HAVE_DATA_WORKTREE;
+ }
+
+ /*
+ * On Windows, `info` contains an "array" of paths that are
+ * relative to the root of whichever directory handle received
+ * the event.
+ */
+ for (;;) {
+ FILE_NOTIFY_INFORMATION *info = (void *)p;
+ const char *slash;
+ enum fsmonitor_path_type t;
+
+ strbuf_reset(&path);
+ if (normalize_path_in_utf8(info, &path) == -1)
+ goto skip_this_path;
+
+ t = fsmonitor_classify_path_workdir_relative(path.buf);
+
+ switch (t) {
+ case IS_INSIDE_DOT_GIT_WITH_COOKIE_PREFIX:
+ /* special case cookie files within .git */
+
+ /* Use just the filename of the cookie file. */
+ slash = find_last_dir_sep(path.buf);
+ string_list_append(&cookie_list,
+ slash ? slash + 1 : path.buf);
+ break;
+
+ case IS_INSIDE_DOT_GIT:
+ /* ignore everything inside of "<worktree>/.git/" */
+ break;
+
+ case IS_DOT_GIT:
+ /* "<worktree>/.git" was deleted (or renamed away) */
+ if ((info->Action == FILE_ACTION_REMOVED) ||
+ (info->Action == FILE_ACTION_RENAMED_OLD_NAME)) {
+ trace2_data_string("fsmonitor", NULL,
+ "fsm-listen/dotgit",
+ "removed");
+ goto force_shutdown;
+ }
+ break;
+
+ case IS_WORKDIR_PATH:
+ /* queue normal pathname */
+ if (!batch)
+ batch = fsmonitor_batch__new();
+ fsmonitor_batch__add_path(batch, path.buf);
+ break;
+
+ case IS_GITDIR:
+ case IS_INSIDE_GITDIR:
+ case IS_INSIDE_GITDIR_WITH_COOKIE_PREFIX:
+ default:
+ BUG("unexpected path classification '%d' for '%s'",
+ t, path.buf);
+ }
+
+skip_this_path:
+ if (!info->NextEntryOffset)
+ break;
+ p += info->NextEntryOffset;
+ }
+
+ fsmonitor_publish(state, batch, &cookie_list);
+ batch = NULL;
+ string_list_clear(&cookie_list, 0);
+ strbuf_release(&path);
+ return LISTENER_HAVE_DATA_WORKTREE;
+
+force_shutdown:
+ fsmonitor_batch__free_list(batch);
+ string_list_clear(&cookie_list, 0);
+ strbuf_release(&path);
+ return LISTENER_SHUTDOWN;
+}
+
+/*
+ * Process filesystem events that happened anywhere (recursively) under the
+ * external <gitdir> (such as non-primary worktrees or submodules).
+ * We only care about cookie files that our client threads created here.
+ *
+ * Note that we DO NOT get filesystem events on the external <gitdir>
+ * itself (it is not inside something that we are watching). In particular,
+ * we do not get an event if the external <gitdir> is deleted.
+ */
+static int process_gitdir_events(struct fsmonitor_daemon_state *state)
+{
+ struct fsmonitor_daemon_backend_data *data = state->backend_data;
+ struct one_watch *watch = data->watch_gitdir;
+ struct strbuf path = STRBUF_INIT;
+ struct string_list cookie_list = STRING_LIST_INIT_DUP;
+ const char *p = watch->buffer;
+
+ if (!watch->count) {
+ trace2_data_string("fsmonitor", NULL, "fsm-listen/kernel",
+ "overflow");
+ fsmonitor_force_resync(state);
+ return LISTENER_HAVE_DATA_GITDIR;
+ }
+
+ for (;;) {
+ FILE_NOTIFY_INFORMATION *info = (void *)p;
+ const char *slash;
+ enum fsmonitor_path_type t;
+
+ strbuf_reset(&path);
+ if (normalize_path_in_utf8(info, &path) == -1)
+ goto skip_this_path;
+
+ t = fsmonitor_classify_path_gitdir_relative(path.buf);
+
+ switch (t) {
+ case IS_INSIDE_GITDIR_WITH_COOKIE_PREFIX:
+ /* special case cookie files within gitdir */
+
+ /* Use just the filename of the cookie file. */
+ slash = find_last_dir_sep(path.buf);
+ string_list_append(&cookie_list,
+ slash ? slash + 1 : path.buf);
+ break;
+
+ case IS_INSIDE_GITDIR:
+ goto skip_this_path;
+
+ default:
+ BUG("unexpected path classification '%d' for '%s'",
+ t, path.buf);
+ }
+
+skip_this_path:
+ if (!info->NextEntryOffset)
+ break;
+ p += info->NextEntryOffset;
+ }
+
+ fsmonitor_publish(state, NULL, &cookie_list);
+ string_list_clear(&cookie_list, 0);
+ strbuf_release(&path);
+ return LISTENER_HAVE_DATA_GITDIR;
+}
+
+void fsm_listen__loop(struct fsmonitor_daemon_state *state)
+{
+ struct fsmonitor_daemon_backend_data *data = state->backend_data;
+ DWORD dwWait;
+ int result;
+
+ state->error_code = 0;
+
+ if (start_rdcw_watch(data, data->watch_worktree) == -1)
+ goto force_error_stop;
+
+ if (data->watch_gitdir &&
+ start_rdcw_watch(data, data->watch_gitdir) == -1)
+ goto force_error_stop;
+
+ for (;;) {
+ dwWait = WaitForMultipleObjects(data->nr_listener_handles,
+ data->hListener,
+ FALSE, INFINITE);
+
+ if (dwWait == WAIT_OBJECT_0 + LISTENER_HAVE_DATA_WORKTREE) {
+ result = recv_rdcw_watch(data->watch_worktree);
+ if (result == -1) {
+ /* hard error */
+ goto force_error_stop;
+ }
+ if (result == -2) {
+ /* retryable error */
+ if (start_rdcw_watch(data, data->watch_worktree) == -1)
+ goto force_error_stop;
+ continue;
+ }
+
+ /* have data */
+ if (process_worktree_events(state) == LISTENER_SHUTDOWN)
+ goto force_shutdown;
+ if (start_rdcw_watch(data, data->watch_worktree) == -1)
+ goto force_error_stop;
+ continue;
+ }
+
+ if (dwWait == WAIT_OBJECT_0 + LISTENER_HAVE_DATA_GITDIR) {
+ result = recv_rdcw_watch(data->watch_gitdir);
+ if (result == -1) {
+ /* hard error */
+ goto force_error_stop;
+ }
+ if (result == -2) {
+ /* retryable error */
+ if (start_rdcw_watch(data, data->watch_gitdir) == -1)
+ goto force_error_stop;
+ continue;
+ }
+
+ /* have data */
+ if (process_gitdir_events(state) == LISTENER_SHUTDOWN)
+ goto force_shutdown;
+ if (start_rdcw_watch(data, data->watch_gitdir) == -1)
+ goto force_error_stop;
+ continue;
+ }
+
+ if (dwWait == WAIT_OBJECT_0 + LISTENER_SHUTDOWN)
+ goto clean_shutdown;
+
+ error(_("could not read directory changes [GLE %ld]"),
+ GetLastError());
+ goto force_error_stop;
+ }
+
+force_error_stop:
+ state->error_code = -1;
+
+force_shutdown:
+ /*
+	 * Tell the IPC thread pool to stop (which completes the await
+	 * in the main thread, which will in turn signal this thread
+	 * if we are still alive).
+ */
+ ipc_server_stop_async(state->ipc_server_data);
+
+clean_shutdown:
+ cancel_rdcw_watch(data->watch_worktree);
+ cancel_rdcw_watch(data->watch_gitdir);
+}
+
+int fsm_listen__ctor(struct fsmonitor_daemon_state *state)
+{
+ struct fsmonitor_daemon_backend_data *data;
+
+ CALLOC_ARRAY(data, 1);
+
+ data->hEventShutdown = CreateEvent(NULL, TRUE, FALSE, NULL);
+
+ data->watch_worktree = create_watch(state,
+ state->path_worktree_watch.buf);
+ if (!data->watch_worktree)
+ goto failed;
+
+ if (state->nr_paths_watching > 1) {
+ data->watch_gitdir = create_watch(state,
+ state->path_gitdir_watch.buf);
+ if (!data->watch_gitdir)
+ goto failed;
+ }
+
+ data->hListener[LISTENER_SHUTDOWN] = data->hEventShutdown;
+ data->nr_listener_handles++;
+
+ data->hListener[LISTENER_HAVE_DATA_WORKTREE] =
+ data->watch_worktree->hEvent;
+ data->nr_listener_handles++;
+
+ if (data->watch_gitdir) {
+ data->hListener[LISTENER_HAVE_DATA_GITDIR] =
+ data->watch_gitdir->hEvent;
+ data->nr_listener_handles++;
+ }
+
+ state->backend_data = data;
+ return 0;
+
+failed:
+ CloseHandle(data->hEventShutdown);
+ destroy_watch(data->watch_worktree);
+ destroy_watch(data->watch_gitdir);
+
+ return -1;
+}
+
+void fsm_listen__dtor(struct fsmonitor_daemon_state *state)
+{
+ struct fsmonitor_daemon_backend_data *data;
+
+ if (!state || !state->backend_data)
+ return;
+
+ data = state->backend_data;
+
+ CloseHandle(data->hEventShutdown);
+ destroy_watch(data->watch_worktree);
+ destroy_watch(data->watch_gitdir);
+
+ FREE_AND_NULL(state->backend_data);
+}
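The listener above splits the ReadDirectoryChangesW() dance across start_rdcw_watch(), recv_rdcw_watch() and cancel_rdcw_watch(). Condensed into a single helper, the overlapped pattern it relies on looks roughly like the sketch below; this is illustrative only (not part of the patch), with error handling and the buffer-size fallback omitted.

/*
 * Illustrative sketch: queue an overlapped read, wait for the event,
 * then collect the result.
 */
static int watch_one_round(HANDLE hDir, char *buf, DWORD buf_len)
{
	OVERLAPPED ov = { 0 };
	DWORD count = 0;
	int ret = -1;

	ov.hEvent = CreateEvent(NULL, TRUE, FALSE, NULL);

	/* Queue the async read; the event is signalled when data is ready. */
	if (!ReadDirectoryChangesW(hDir, buf, buf_len, TRUE,
				   FILE_NOTIFY_CHANGE_FILE_NAME |
				   FILE_NOTIFY_CHANGE_LAST_WRITE,
				   &count, &ov, NULL))
		goto done;

	WaitForSingleObject(ov.hEvent, INFINITE);

	/*
	 * Collect the result; a successful call with count == 0 means the
	 * kernel buffer overflowed and events were dropped.
	 */
	if (GetOverlappedResult(hDir, &ov, &count, TRUE))
		ret = (int)count;

done:
	CloseHandle(ov.hEvent);
	return ret;
}

The real code keeps the OVERLAPPED and event handle alive across calls so that fsm_listen__loop() can multiplex the worktree watch, the gitdir watch and the shutdown event with a single WaitForMultipleObjects().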
diff --git a/compat/fsmonitor/fsm-listen.h b/compat/fsmonitor/fsm-listen.h
new file mode 100644
index 0000000..f053934
--- /dev/null
+++ b/compat/fsmonitor/fsm-listen.h
@@ -0,0 +1,49 @@
+#ifndef FSM_LISTEN_H
+#define FSM_LISTEN_H
+
+/* This needs to be implemented by each backend */
+
+#ifdef HAVE_FSMONITOR_DAEMON_BACKEND
+
+struct fsmonitor_daemon_state;
+
+/*
+ * Initialize platform-specific data for the fsmonitor listener thread.
+ * This will be called from the main thread PRIOR to starting the
+ * fsmonitor_fs_listener thread.
+ *
+ * Returns 0 if successful.
+ * Returns -1 otherwise.
+ */
+int fsm_listen__ctor(struct fsmonitor_daemon_state *state);
+
+/*
+ * Cleanup platform-specific data for the fsmonitor listener thread.
+ * This will be called from the main thread AFTER joining the listener.
+ */
+void fsm_listen__dtor(struct fsmonitor_daemon_state *state);
+
+/*
+ * The main body of the platform-specific event loop to watch for
+ * filesystem events. This will run in the fsmonitor_fs_listen thread.
+ *
+ * It should call `ipc_server_stop_async()` if the listener thread
+ * prematurely terminates (because of a filesystem error or if it
+ * detects that the .git directory has been deleted). (It should NOT
+ * do so if the listener thread receives a normal shutdown signal from
+ * the IPC layer.)
+ *
+ * It should set `state->error_code` to -1 if the daemon should exit
+ * with an error.
+ */
+void fsm_listen__loop(struct fsmonitor_daemon_state *state);
+
+/*
+ * Gently request that the fsmonitor listener thread shut down.
+ * It does not wait for it to stop. The caller should do a JOIN
+ * to wait for it.
+ */
+void fsm_listen__stop_async(struct fsmonitor_daemon_state *state);
+
+#endif /* HAVE_FSMONITOR_DAEMON_BACKEND */
+#endif /* FSM_LISTEN_H */
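The header spells out the threading contract but not the call sequence. Below is a hedged sketch of how a daemon is expected to drive these entry points; the pthread plumbing and the run_listener() wrapper are hypothetical and for illustration only, the real wiring lives elsewhere in this series.

#include <pthread.h>
#include "fsmonitor--daemon.h"
#include "fsm-listen.h"

static void *listener_thread_proc(void *arg)
{
	struct fsmonitor_daemon_state *state = arg;

	fsm_listen__loop(state);	/* runs until asked to stop */
	return NULL;
}

/* Illustrative only (not part of this patch). */
static int run_listener(struct fsmonitor_daemon_state *state)
{
	pthread_t listener;

	if (fsm_listen__ctor(state) < 0)
		return -1;

	pthread_create(&listener, NULL, listener_thread_proc, state);

	/* ... serve IPC clients until a shutdown is requested ... */

	fsm_listen__stop_async(state);	/* request shutdown, do not wait */
	pthread_join(listener, NULL);	/* the JOIN mentioned above */
	fsm_listen__dtor(state);

	return state->error_code;
}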
diff --git a/compat/mingw.c b/compat/mingw.c
index 41fc163..6fe80fd 100644
--- a/compat/mingw.c
+++ b/compat/mingw.c
@@ -962,9 +962,11 @@ static inline void time_t_to_filetime(time_t t, FILETIME *ft)
int mingw_utime (const char *file_name, const struct utimbuf *times)
{
FILETIME mft, aft;
- int fh, rc;
+ int rc;
DWORD attrs;
wchar_t wfilename[MAX_PATH];
+ HANDLE osfilehandle;
+
if (xutftowcs_path(wfilename, file_name) < 0)
return -1;
@@ -976,7 +978,17 @@ int mingw_utime (const char *file_name, const struct utimbuf *times)
SetFileAttributesW(wfilename, attrs & ~FILE_ATTRIBUTE_READONLY);
}
- if ((fh = _wopen(wfilename, O_RDWR | O_BINARY)) < 0) {
+ osfilehandle = CreateFileW(wfilename,
+ FILE_WRITE_ATTRIBUTES,
+ 0 /*FileShare.None*/,
+ NULL,
+ OPEN_EXISTING,
+ (attrs != INVALID_FILE_ATTRIBUTES &&
+ (attrs & FILE_ATTRIBUTE_DIRECTORY)) ?
+ FILE_FLAG_BACKUP_SEMANTICS : 0,
+ NULL);
+ if (osfilehandle == INVALID_HANDLE_VALUE) {
+ errno = err_win_to_posix(GetLastError());
rc = -1;
goto revert_attrs;
}
@@ -988,12 +1000,15 @@ int mingw_utime (const char *file_name, const struct utimbuf *times)
GetSystemTimeAsFileTime(&mft);
aft = mft;
}
- if (!SetFileTime((HANDLE)_get_osfhandle(fh), NULL, &aft, &mft)) {
+
+ if (!SetFileTime(osfilehandle, NULL, &aft, &mft)) {
errno = EINVAL;
rc = -1;
} else
rc = 0;
- close(fh);
+
+ if (osfilehandle != INVALID_HANDLE_VALUE)
+ CloseHandle(osfilehandle);
revert_attrs:
if (attrs != INVALID_FILE_ATTRIBUTES &&
diff --git a/compat/mingw.h b/compat/mingw.h
index ffa53a4..494cc8d 100644
--- a/compat/mingw.h
+++ b/compat/mingw.h
@@ -329,6 +329,9 @@ int mingw_getpagesize(void);
#define getpagesize mingw_getpagesize
#endif
+int win32_fsync_no_flush(int fd);
+#define fsync_no_flush win32_fsync_no_flush
+
struct rlimit {
unsigned int rlim_cur;
};
diff --git a/compat/qsort_s.c b/compat/qsort_s.c
index 52d1f0a..0f7ff30 100644
--- a/compat/qsort_s.c
+++ b/compat/qsort_s.c
@@ -49,21 +49,15 @@ int git_qsort_s(void *b, size_t n, size_t s,
int (*cmp)(const void *, const void *, void *), void *ctx)
{
const size_t size = st_mult(n, s);
- char buf[1024];
+ char *tmp;
if (!n)
return 0;
if (!b || !cmp)
return -1;
- if (size < sizeof(buf)) {
- /* The temporary array fits on the small on-stack buffer. */
- msort_with_tmp(b, n, s, cmp, buf, ctx);
- } else {
- /* It's somewhat large, so malloc it. */
- char *tmp = xmalloc(size);
- msort_with_tmp(b, n, s, cmp, tmp, ctx);
- free(tmp);
- }
+ tmp = xmalloc(size);
+ msort_with_tmp(b, n, s, cmp, tmp, ctx);
+ free(tmp);
return 0;
}
diff --git a/compat/terminal.c b/compat/terminal.c
index 5b903e7..7db330c 100644
--- a/compat/terminal.c
+++ b/compat/terminal.c
@@ -1,4 +1,4 @@
-#include "git-compat-util.h"
+#include "cache.h"
#include "compat/terminal.h"
#include "sigchain.h"
#include "strbuf.h"
@@ -11,7 +11,7 @@
static void restore_term_on_signal(int sig)
{
restore_term();
- sigchain_pop(sig);
+ /* restore_term calls sigchain_pop_common */
raise(sig);
}
@@ -20,55 +20,227 @@ static void restore_term_on_signal(int sig)
#define INPUT_PATH "/dev/tty"
#define OUTPUT_PATH "/dev/tty"
+static volatile sig_atomic_t term_fd_needs_closing;
static int term_fd = -1;
static struct termios old_term;
+static const char *background_resume_msg;
+static const char *restore_error_msg;
+static volatile sig_atomic_t ttou_received;
+
+/* async safe error function for use by signal handlers. */
+static void write_err(const char *msg)
+{
+ write_in_full(2, "error: ", strlen("error: "));
+ write_in_full(2, msg, strlen(msg));
+ write_in_full(2, "\n", 1);
+}
+
+static void print_background_resume_msg(int signo)
+{
+ int saved_errno = errno;
+ sigset_t mask;
+ struct sigaction old_sa;
+ struct sigaction sa = { .sa_handler = SIG_DFL };
+
+ ttou_received = 1;
+ write_err(background_resume_msg);
+ sigaction(signo, &sa, &old_sa);
+ raise(signo);
+ sigemptyset(&mask);
+ sigaddset(&mask, signo);
+ sigprocmask(SIG_UNBLOCK, &mask, NULL);
+ /* Stopped here */
+ sigprocmask(SIG_BLOCK, &mask, NULL);
+ sigaction(signo, &old_sa, NULL);
+ errno = saved_errno;
+}
+
+static void restore_terminal_on_suspend(int signo)
+{
+ int saved_errno = errno;
+ int res;
+ struct termios t;
+ sigset_t mask;
+ struct sigaction old_sa;
+ struct sigaction sa = { .sa_handler = SIG_DFL };
+ int can_restore = 1;
+
+ if (tcgetattr(term_fd, &t) < 0)
+ can_restore = 0;
+
+ if (tcsetattr(term_fd, TCSAFLUSH, &old_term) < 0)
+ write_err(restore_error_msg);
+
+ sigaction(signo, &sa, &old_sa);
+ raise(signo);
+ sigemptyset(&mask);
+ sigaddset(&mask, signo);
+ sigprocmask(SIG_UNBLOCK, &mask, NULL);
+ /* Stopped here */
+ sigprocmask(SIG_BLOCK, &mask, NULL);
+ sigaction(signo, &old_sa, NULL);
+ if (!can_restore) {
+ write_err(restore_error_msg);
+ goto out;
+ }
+ /*
+ * If we resume in the background then we receive SIGTTOU when calling
+ * tcsetattr() below. Set up a handler to print an error message in that
+ * case.
+ */
+ sigemptyset(&mask);
+ sigaddset(&mask, SIGTTOU);
+ sa.sa_mask = old_sa.sa_mask;
+ sa.sa_handler = print_background_resume_msg;
+ sa.sa_flags = SA_RESTART;
+ sigaction(SIGTTOU, &sa, &old_sa);
+ again:
+ ttou_received = 0;
+ sigprocmask(SIG_UNBLOCK, &mask, NULL);
+ res = tcsetattr(term_fd, TCSAFLUSH, &t);
+ sigprocmask(SIG_BLOCK, &mask, NULL);
+ if (ttou_received)
+ goto again;
+ else if (res < 0)
+ write_err(restore_error_msg);
+ sigaction(SIGTTOU, &old_sa, NULL);
+ out:
+ errno = saved_errno;
+}
+
+static void reset_job_signals(void)
+{
+ if (restore_error_msg) {
+ signal(SIGTTIN, SIG_DFL);
+ signal(SIGTTOU, SIG_DFL);
+ signal(SIGTSTP, SIG_DFL);
+ restore_error_msg = NULL;
+ background_resume_msg = NULL;
+ }
+}
+
+static void close_term_fd(void)
+{
+ if (term_fd_needs_closing)
+ close(term_fd);
+ term_fd_needs_closing = 0;
+ term_fd = -1;
+}
+
void restore_term(void)
{
if (term_fd < 0)
return;
tcsetattr(term_fd, TCSAFLUSH, &old_term);
- close(term_fd);
- term_fd = -1;
+ close_term_fd();
+ sigchain_pop_common();
+ reset_job_signals();
}
-int save_term(int full_duplex)
+int save_term(enum save_term_flags flags)
{
+ struct sigaction sa;
+
if (term_fd < 0)
- term_fd = open("/dev/tty", O_RDWR);
+ term_fd = ((flags & SAVE_TERM_STDIN)
+ ? 0
+ : open("/dev/tty", O_RDWR));
+ if (term_fd < 0)
+ return -1;
+ term_fd_needs_closing = !(flags & SAVE_TERM_STDIN);
+ if (tcgetattr(term_fd, &old_term) < 0) {
+ close_term_fd();
+ return -1;
+ }
+ sigchain_push_common(restore_term_on_signal);
+ /*
+ * If job control is disabled then the shell will have set the
+ * disposition of SIGTSTP to SIG_IGN.
+ */
+ sigaction(SIGTSTP, NULL, &sa);
+ if (sa.sa_handler == SIG_IGN)
+ return 0;
+
+ /* avoid calling gettext() from signal handler */
+ background_resume_msg = _("cannot resume in the background, please use 'fg' to resume");
+ restore_error_msg = _("cannot restore terminal settings");
+ sa.sa_handler = restore_terminal_on_suspend;
+ sa.sa_flags = SA_RESTART;
+ sigemptyset(&sa.sa_mask);
+ sigaddset(&sa.sa_mask, SIGTSTP);
+ sigaddset(&sa.sa_mask, SIGTTIN);
+ sigaddset(&sa.sa_mask, SIGTTOU);
+ sigaction(SIGTSTP, &sa, NULL);
+ sigaction(SIGTTIN, &sa, NULL);
+ sigaction(SIGTTOU, &sa, NULL);
- return (term_fd < 0) ? -1 : tcgetattr(term_fd, &old_term);
+ return 0;
}
-static int disable_bits(tcflag_t bits)
+static int disable_bits(enum save_term_flags flags, tcflag_t bits)
{
struct termios t;
- if (save_term(0) < 0)
- goto error;
+ if (save_term(flags) < 0)
+ return -1;
t = old_term;
- sigchain_push_common(restore_term_on_signal);
t.c_lflag &= ~bits;
+ if (bits & ICANON) {
+ t.c_cc[VMIN] = 1;
+ t.c_cc[VTIME] = 0;
+ }
if (!tcsetattr(term_fd, TCSAFLUSH, &t))
return 0;
-error:
- close(term_fd);
- term_fd = -1;
+ sigchain_pop_common();
+ reset_job_signals();
+ close_term_fd();
return -1;
}
-static int disable_echo(void)
+static int disable_echo(enum save_term_flags flags)
{
- return disable_bits(ECHO);
+ return disable_bits(flags, ECHO);
}
-static int enable_non_canonical(void)
+static int enable_non_canonical(enum save_term_flags flags)
{
- return disable_bits(ICANON | ECHO);
+ return disable_bits(flags, ICANON | ECHO);
+}
+
+/*
+ * On macOS it is not possible to use poll() with a terminal, so use select()
+ * instead.
+ */
+static int getchar_with_timeout(int timeout)
+{
+ struct timeval tv, *tvp = NULL;
+ fd_set readfds;
+ int res;
+
+ again:
+ if (timeout >= 0) {
+ tv.tv_sec = timeout / 1000;
+ tv.tv_usec = (timeout % 1000) * 1000;
+ tvp = &tv;
+ }
+
+ FD_ZERO(&readfds);
+ FD_SET(0, &readfds);
+ res = select(1, &readfds, NULL, NULL, tvp);
+ if (!res)
+ return EOF;
+ if (res < 0) {
+ if (errno == EINTR)
+ goto again;
+ else
+ return EOF;
+ }
+ return getchar();
}
#elif defined(GIT_WINDOWS_NATIVE)
@@ -100,6 +272,8 @@ void restore_term(void)
return;
}
+ sigchain_pop_common();
+
if (hconin == INVALID_HANDLE_VALUE)
return;
@@ -114,7 +288,7 @@ void restore_term(void)
hconin = hconout = INVALID_HANDLE_VALUE;
}
-int save_term(int full_duplex)
+int save_term(enum save_term_flags flags)
{
hconin = CreateFileA("CONIN$", GENERIC_READ | GENERIC_WRITE,
FILE_SHARE_READ, NULL, OPEN_EXISTING,
@@ -122,7 +296,7 @@ int save_term(int full_duplex)
if (hconin == INVALID_HANDLE_VALUE)
return -1;
- if (full_duplex) {
+ if (flags & SAVE_TERM_DUPLEX) {
hconout = CreateFileA("CONOUT$", GENERIC_READ | GENERIC_WRITE,
FILE_SHARE_WRITE, NULL, OPEN_EXISTING,
FILE_ATTRIBUTE_NORMAL, NULL);
@@ -134,6 +308,7 @@ int save_term(int full_duplex)
GetConsoleMode(hconin, &cmode_in);
use_stty = 0;
+ sigchain_push_common(restore_term_on_signal);
return 0;
error:
CloseHandle(hconin);
@@ -141,7 +316,7 @@ error:
return -1;
}
-static int disable_bits(DWORD bits)
+static int disable_bits(enum save_term_flags flags, DWORD bits)
{
if (use_stty) {
struct child_process cp = CHILD_PROCESS_INIT;
@@ -150,7 +325,11 @@ static int disable_bits(DWORD bits)
if (bits & ENABLE_LINE_INPUT) {
string_list_append(&stty_restore, "icanon");
- strvec_push(&cp.args, "-icanon");
+ /*
+ * POSIX allows VMIN and VTIME to overlap with VEOF and
+			 * VEOL - let's hope that is not the case on Windows.
+ */
+ strvec_pushl(&cp.args, "-icanon", "min", "1", "time", "0", NULL);
}
if (bits & ENABLE_ECHO_INPUT) {
@@ -174,27 +353,28 @@ static int disable_bits(DWORD bits)
use_stty = 0;
}
- if (save_term(0) < 0)
+ if (save_term(flags) < 0)
return -1;
- sigchain_push_common(restore_term_on_signal);
if (!SetConsoleMode(hconin, cmode_in & ~bits)) {
CloseHandle(hconin);
hconin = INVALID_HANDLE_VALUE;
+ sigchain_pop_common();
return -1;
}
return 0;
}
-static int disable_echo(void)
+static int disable_echo(enum save_term_flags flags)
{
- return disable_bits(ENABLE_ECHO_INPUT);
+ return disable_bits(flags, ENABLE_ECHO_INPUT);
}
-static int enable_non_canonical(void)
+static int enable_non_canonical(enum save_term_flags flags)
{
- return disable_bits(ENABLE_ECHO_INPUT | ENABLE_LINE_INPUT | ENABLE_PROCESSED_INPUT);
+ return disable_bits(flags,
+ ENABLE_ECHO_INPUT | ENABLE_LINE_INPUT | ENABLE_PROCESSED_INPUT);
}
/*
@@ -228,6 +408,16 @@ static int mingw_getchar(void)
}
#define getchar mingw_getchar
+static int getchar_with_timeout(int timeout)
+{
+ struct pollfd pfd = { .fd = 0, .events = POLLIN };
+
+ if (poll(&pfd, 1, timeout) < 1)
+ return EOF;
+
+ return getchar();
+}
+
#endif
#ifndef FORCE_TEXT
@@ -250,7 +440,7 @@ char *git_terminal_prompt(const char *prompt, int echo)
return NULL;
}
- if (!echo && disable_echo()) {
+ if (!echo && disable_echo(0)) {
fclose(input_fh);
fclose(output_fh);
return NULL;
@@ -344,7 +534,7 @@ int read_key_without_echo(struct strbuf *buf)
static int warning_displayed;
int ch;
- if (warning_displayed || enable_non_canonical() < 0) {
+ if (warning_displayed || enable_non_canonical(SAVE_TERM_STDIN) < 0) {
if (!warning_displayed) {
warning("reading single keystrokes not supported on "
"this platform; reading line instead");
@@ -378,14 +568,9 @@ int read_key_without_echo(struct strbuf *buf)
* half a second when we know that the sequence is complete.
*/
while (!is_known_escape_sequence(buf->buf)) {
- struct pollfd pfd = { .fd = 0, .events = POLLIN };
-
- if (poll(&pfd, 1, 500) < 1)
- break;
-
- ch = getchar();
+ ch = getchar_with_timeout(500);
if (ch == EOF)
- return 0;
+ break;
strbuf_addch(buf, ch);
}
}
@@ -396,10 +581,10 @@ int read_key_without_echo(struct strbuf *buf)
#else
-int save_term(int full_duplex)
+int save_term(enum save_term_flags flags)
{
- /* full_duplex == 1, but no support available */
- return -full_duplex;
+ /* no duplex support available */
+ return -!!(flags & SAVE_TERM_DUPLEX);
}
void restore_term(void)
diff --git a/compat/terminal.h b/compat/terminal.h
index e1770c5..79ed00c 100644
--- a/compat/terminal.h
+++ b/compat/terminal.h
@@ -1,7 +1,22 @@
#ifndef COMPAT_TERMINAL_H
#define COMPAT_TERMINAL_H
-int save_term(int full_duplex);
+enum save_term_flags {
+ /* Save input and output settings */
+ SAVE_TERM_DUPLEX = 1 << 0,
+ /* Save stdin rather than /dev/tty (fails if stdin is not a terminal) */
+ SAVE_TERM_STDIN = 1 << 1,
+};
+
+/*
+ * Save the terminal attributes so they can be restored later by a
+ * call to restore_term(). Note that every successful call to
+ * save_term() must be matched by a call to restore_term() even if the
+ * attributes have not been changed. Returns 0 on success, -1 on
+ * failure.
+ */
+int save_term(enum save_term_flags flags);
+/* Restore the terminal attributes that were saved with save_term() */
void restore_term(void);
char *git_terminal_prompt(const char *prompt, int echo);
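A minimal sketch of the pairing rule documented above, assuming a caller that wants unechoed input from stdin; the wrapper function is hypothetical, while read_key_without_echo() in compat/terminal.c exercises the same SAVE_TERM_STDIN flag via enable_non_canonical().

#include "git-compat-util.h"
#include "compat/terminal.h"

static int with_saved_terminal(void)
{
	/* fails if stdin is not a terminal */
	if (save_term(SAVE_TERM_STDIN) < 0)
		return -1;

	/* ... adjust the terminal mode and read keystrokes here ... */

	/* every successful save_term() must be matched by restore_term() */
	restore_term();
	return 0;
}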
diff --git a/compat/win32/flush.c b/compat/win32/flush.c
new file mode 100644
index 0000000..291f90e
--- /dev/null
+++ b/compat/win32/flush.c
@@ -0,0 +1,28 @@
+#include "git-compat-util.h"
+#include <winternl.h>
+#include "lazyload.h"
+
+int win32_fsync_no_flush(int fd)
+{
+ IO_STATUS_BLOCK io_status;
+
+#define FLUSH_FLAGS_FILE_DATA_ONLY 1
+
+ DECLARE_PROC_ADDR(ntdll.dll, NTSTATUS, NTAPI, NtFlushBuffersFileEx,
+ HANDLE FileHandle, ULONG Flags, PVOID Parameters, ULONG ParameterSize,
+ PIO_STATUS_BLOCK IoStatusBlock);
+
+ if (!INIT_PROC_ADDR(NtFlushBuffersFileEx)) {
+ errno = ENOSYS;
+ return -1;
+ }
+
+ memset(&io_status, 0, sizeof(io_status));
+ if (NtFlushBuffersFileEx((HANDLE)_get_osfhandle(fd), FLUSH_FLAGS_FILE_DATA_ONLY,
+ NULL, 0, &io_status)) {
+ errno = EINVAL;
+ return -1;
+ }
+
+ return 0;
+}
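This is the Windows half of the "writeout-only" sync strategy; compat/mingw.h above maps fsync_no_flush to it. A hedged sketch of how a caller might dispatch on the method parsed from core.fsyncMethod follows; sync_one_fd() is hypothetical, while fsync_method, FSYNC_METHOD_WRITEOUT_ONLY and fsync_no_flush() come from this series.

/* Illustrative only. */
static int sync_one_fd(int fd)
{
	if (fsync_method == FSYNC_METHOD_WRITEOUT_ONLY) {
		/* push dirty pages toward the device without a full flush */
		if (!fsync_no_flush(fd))
			return 0;
		/* e.g. ENOSYS when NtFlushBuffersFileEx is not available */
	}
	return fsync(fd);	/* full flush */
}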
diff --git a/compat/winansi.c b/compat/winansi.c
index 4fceecf..3abe8dd 100644
--- a/compat/winansi.c
+++ b/compat/winansi.c
@@ -3,6 +3,7 @@
*/
#undef NOGDI
+
#include "../git-compat-util.h"
#include <wingdi.h>
#include <winreg.h>
diff --git a/compat/zlib-uncompress2.c b/compat/zlib-uncompress2.c
index 722610b..77a1b08 100644
--- a/compat/zlib-uncompress2.c
+++ b/compat/zlib-uncompress2.c
@@ -1,3 +1,6 @@
+#include "git-compat-util.h"
+
+#if ZLIB_VERNUM < 0x1290
/* taken from zlib's uncompr.c
commit cacf7f1d4e3d44d871b605da3b647f07d718623f
@@ -8,16 +11,11 @@
*/
-#include "../reftable/system.h"
-#define z_const
-
/*
* Copyright (C) 1995-2003, 2010, 2014, 2016 Jean-loup Gailly, Mark Adler
* For conditions of distribution and use, see copyright notice in zlib.h
*/
-#include <zlib.h>
-
/* clang-format off */
/* ===========================================================================
@@ -93,3 +91,6 @@ int ZEXPORT uncompress2 (
err == Z_BUF_ERROR && left + stream.avail_out ? Z_DATA_ERROR :
err;
}
+#else
+static void *dummy_variable = &dummy_variable;
+#endif
diff --git a/config.c b/config.c
index 2bffa8d..a5e11aa 100644
--- a/config.c
+++ b/config.c
@@ -6,6 +6,7 @@
*
*/
#include "cache.h"
+#include "date.h"
#include "branch.h"
#include "config.h"
#include "environment.h"
@@ -21,6 +22,7 @@
#include "dir.h"
#include "color.h"
#include "refs.h"
+#include "worktree.h"
struct config_source {
struct config_source *prev;
@@ -120,6 +122,22 @@ static long config_buf_ftell(struct config_source *conf)
return conf->u.buf.pos;
}
+struct config_include_data {
+ int depth;
+ config_fn_t fn;
+ void *data;
+ const struct config_options *opts;
+ struct git_config_source *config_source;
+
+ /*
+ * All remote URLs discovered when reading all config files.
+ */
+ struct string_list *remote_urls;
+};
+#define CONFIG_INCLUDE_INIT { 0 }
+
+static int git_config_include(const char *var, const char *value, void *data);
+
#define MAX_INCLUDE_DEPTH 10
static const char include_depth_advice[] = N_(
"exceeded maximum include depth (%d) while including\n"
@@ -294,9 +312,92 @@ static int include_by_branch(const char *cond, size_t cond_len)
return ret;
}
-static int include_condition_is_true(const struct config_options *opts,
+static int add_remote_url(const char *var, const char *value, void *data)
+{
+ struct string_list *remote_urls = data;
+ const char *remote_name;
+ size_t remote_name_len;
+ const char *key;
+
+ if (!parse_config_key(var, "remote", &remote_name, &remote_name_len,
+ &key) &&
+ remote_name &&
+ !strcmp(key, "url"))
+ string_list_append(remote_urls, value);
+ return 0;
+}
+
+static void populate_remote_urls(struct config_include_data *inc)
+{
+ struct config_options opts;
+
+ struct config_source *store_cf = cf;
+ struct key_value_info *store_kvi = current_config_kvi;
+ enum config_scope store_scope = current_parsing_scope;
+
+ opts = *inc->opts;
+ opts.unconditional_remote_url = 1;
+
+ cf = NULL;
+ current_config_kvi = NULL;
+ current_parsing_scope = 0;
+
+ inc->remote_urls = xmalloc(sizeof(*inc->remote_urls));
+ string_list_init_dup(inc->remote_urls);
+ config_with_options(add_remote_url, inc->remote_urls, inc->config_source, &opts);
+
+ cf = store_cf;
+ current_config_kvi = store_kvi;
+ current_parsing_scope = store_scope;
+}
+
+static int forbid_remote_url(const char *var, const char *value, void *data)
+{
+ const char *remote_name;
+ size_t remote_name_len;
+ const char *key;
+
+ if (!parse_config_key(var, "remote", &remote_name, &remote_name_len,
+ &key) &&
+ remote_name &&
+ !strcmp(key, "url"))
+		die(_("remote URLs cannot be configured in a file directly or indirectly included by includeIf.hasconfig:remote.*.url"));
+ return 0;
+}
+
+static int at_least_one_url_matches_glob(const char *glob, int glob_len,
+ struct string_list *remote_urls)
+{
+ struct strbuf pattern = STRBUF_INIT;
+ struct string_list_item *url_item;
+ int found = 0;
+
+ strbuf_add(&pattern, glob, glob_len);
+ for_each_string_list_item(url_item, remote_urls) {
+ if (!wildmatch(pattern.buf, url_item->string, WM_PATHNAME)) {
+ found = 1;
+ break;
+ }
+ }
+ strbuf_release(&pattern);
+ return found;
+}
+
+static int include_by_remote_url(struct config_include_data *inc,
+ const char *cond, size_t cond_len)
+{
+ if (inc->opts->unconditional_remote_url)
+ return 1;
+ if (!inc->remote_urls)
+ populate_remote_urls(inc);
+ return at_least_one_url_matches_glob(cond, cond_len,
+ inc->remote_urls);
+}
+
+static int include_condition_is_true(struct config_include_data *inc,
const char *cond, size_t cond_len)
{
+ const struct config_options *opts = inc->opts;
if (skip_prefix_mem(cond, cond_len, "gitdir:", &cond, &cond_len))
return include_by_gitdir(opts, cond, cond_len, 0);
@@ -304,12 +405,15 @@ static int include_condition_is_true(const struct config_options *opts,
return include_by_gitdir(opts, cond, cond_len, 1);
else if (skip_prefix_mem(cond, cond_len, "onbranch:", &cond, &cond_len))
return include_by_branch(cond, cond_len);
+ else if (skip_prefix_mem(cond, cond_len, "hasconfig:remote.*.url:", &cond,
+ &cond_len))
+ return include_by_remote_url(inc, cond, cond_len);
/* unknown conditionals are always false */
return 0;
}
-int git_config_include(const char *var, const char *value, void *data)
+static int git_config_include(const char *var, const char *value, void *data)
{
struct config_include_data *inc = data;
const char *cond, *key;
@@ -328,9 +432,15 @@ int git_config_include(const char *var, const char *value, void *data)
ret = handle_path_include(value, inc);
if (!parse_config_key(var, "includeif", &cond, &cond_len, &key) &&
- (cond && include_condition_is_true(inc->opts, cond, cond_len)) &&
- !strcmp(key, "path"))
+ cond && include_condition_is_true(inc, cond, cond_len) &&
+ !strcmp(key, "path")) {
+ config_fn_t old_fn = inc->fn;
+
+ if (inc->opts->unconditional_remote_url)
+ inc->fn = forbid_remote_url;
ret = handle_path_include(value, inc);
+ inc->fn = old_fn;
+ }
return ret;
}
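In config-file terms, the new "hasconfig:remote.*.url:" condition makes an include fire only when at least one configured remote URL matches the given wildmatch pattern. An illustrative snippet (URL and path are placeholders):

	[includeIf "hasconfig:remote.*.url:https://example.com/**"]
		path = extra.inc

Per forbid_remote_url() above, a file pulled in this way may not itself define any remote.*.url keys.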
@@ -1213,6 +1323,80 @@ static int git_parse_maybe_bool_text(const char *value)
return -1;
}
+static const struct fsync_component_name {
+ const char *name;
+ enum fsync_component component_bits;
+} fsync_component_names[] = {
+ { "loose-object", FSYNC_COMPONENT_LOOSE_OBJECT },
+ { "pack", FSYNC_COMPONENT_PACK },
+ { "pack-metadata", FSYNC_COMPONENT_PACK_METADATA },
+ { "commit-graph", FSYNC_COMPONENT_COMMIT_GRAPH },
+ { "index", FSYNC_COMPONENT_INDEX },
+ { "objects", FSYNC_COMPONENTS_OBJECTS },
+ { "reference", FSYNC_COMPONENT_REFERENCE },
+ { "derived-metadata", FSYNC_COMPONENTS_DERIVED_METADATA },
+ { "committed", FSYNC_COMPONENTS_COMMITTED },
+ { "added", FSYNC_COMPONENTS_ADDED },
+ { "all", FSYNC_COMPONENTS_ALL },
+};
+
+static enum fsync_component parse_fsync_components(const char *var, const char *string)
+{
+ enum fsync_component current = FSYNC_COMPONENTS_DEFAULT;
+ enum fsync_component positive = 0, negative = 0;
+
+ while (string) {
+ int i;
+ size_t len;
+ const char *ep;
+ int negated = 0;
+ int found = 0;
+
+ string = string + strspn(string, ", \t\n\r");
+ ep = strchrnul(string, ',');
+ len = ep - string;
+ if (!strcmp(string, "none")) {
+ current = FSYNC_COMPONENT_NONE;
+ goto next_name;
+ }
+
+ if (*string == '-') {
+ negated = 1;
+ string++;
+ len--;
+ if (!len)
+ warning(_("invalid value for variable %s"), var);
+ }
+
+ if (!len)
+ break;
+
+ for (i = 0; i < ARRAY_SIZE(fsync_component_names); ++i) {
+ const struct fsync_component_name *n = &fsync_component_names[i];
+
+ if (strncmp(n->name, string, len))
+ continue;
+
+ found = 1;
+ if (negated)
+ negative |= n->component_bits;
+ else
+ positive |= n->component_bits;
+ }
+
+ if (!found) {
+ char *component = xstrndup(string, len);
+ warning(_("ignoring unknown core.fsync component '%s'"), component);
+ free(component);
+ }
+
+next_name:
+ string = ep;
+ }
+
+ return (current & ~negative) | positive;
+}
+
int git_parse_maybe_bool(const char *value)
{
int v = git_parse_maybe_bool_text(value);
@@ -1490,7 +1674,28 @@ static int git_default_core_config(const char *var, const char *value, void *cb)
return 0;
}
+ if (!strcmp(var, "core.fsync")) {
+ if (!value)
+ return config_error_nonbool(var);
+ fsync_components = parse_fsync_components(var, value);
+ return 0;
+ }
+
+ if (!strcmp(var, "core.fsyncmethod")) {
+ if (!value)
+ return config_error_nonbool(var);
+ if (!strcmp(value, "fsync"))
+ fsync_method = FSYNC_METHOD_FSYNC;
+ else if (!strcmp(value, "writeout-only"))
+ fsync_method = FSYNC_METHOD_WRITEOUT_ONLY;
+ else
+ warning(_("ignoring unknown core.fsyncMethod value '%s'"), value);
+
+ }
+
if (!strcmp(var, "core.fsyncobjectfiles")) {
+ if (fsync_object_files < 0)
+ warning(_("core.fsyncObjectFiles is deprecated; use core.fsync instead"));
fsync_object_files = git_config_bool(var, value);
return 0;
}
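The names accepted by core.fsync are exactly the entries of fsync_component_names above; a leading '-' removes a component from the set, "none" clears it, and parsing starts from FSYNC_COMPONENTS_DEFAULT. An illustrative configuration (values chosen for the example only):

	[core]
		fsync = committed
		fsyncMethod = writeout-only

Here everything in the "committed" aggregate is synced, using the write-out-only method where the platform supports it.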
@@ -1544,6 +1749,17 @@ static int git_default_core_config(const char *var, const char *value, void *cb)
return platform_core_config(var, value, cb);
}
+static int git_default_sparse_config(const char *var, const char *value)
+{
+ if (!strcmp(var, "sparse.expectfilesoutsideofpatterns")) {
+ sparse_expect_files_outside_of_patterns = git_config_bool(var, value);
+ return 0;
+ }
+
+ /* Add other config variables here and to Documentation/config/sparse.txt. */
+ return 0;
+}
+
static int git_default_i18n_config(const char *var, const char *value)
{
if (!strcmp(var, "i18n.commitencoding"))
@@ -1675,6 +1891,9 @@ int git_default_config(const char *var, const char *value, void *cb)
return 0;
}
+ if (starts_with(var, "sparse."))
+ return git_default_sparse_config(var, value);
+
/* Add other config variables here and to Documentation/config.txt. */
return 0;
}
@@ -1929,11 +2148,13 @@ int config_with_options(config_fn_t fn, void *data,
const struct config_options *opts)
{
struct config_include_data inc = CONFIG_INCLUDE_INIT;
+ int ret;
if (opts->respect_includes) {
inc.fn = fn;
inc.data = data;
inc.opts = opts;
+ inc.config_source = config_source;
fn = git_config_include;
data = &inc;
}
@@ -1946,17 +2167,23 @@ int config_with_options(config_fn_t fn, void *data,
* regular lookup sequence.
*/
if (config_source && config_source->use_stdin) {
- return git_config_from_stdin(fn, data);
+ ret = git_config_from_stdin(fn, data);
} else if (config_source && config_source->file) {
- return git_config_from_file(fn, config_source->file, data);
+ ret = git_config_from_file(fn, config_source->file, data);
} else if (config_source && config_source->blob) {
struct repository *repo = config_source->repo ?
config_source->repo : the_repository;
- return git_config_from_blob_ref(fn, repo, config_source->blob,
+ ret = git_config_from_blob_ref(fn, repo, config_source->blob,
data);
+ } else {
+ ret = do_git_config_sequence(opts, fn, data);
}
- return do_git_config_sequence(opts, fn, data);
+ if (inc.remote_urls) {
+ string_list_clear(inc.remote_urls, 0);
+ FREE_AND_NULL(inc.remote_urls);
+ }
+ return ret;
}
static void configset_iter(struct config_set *cs, config_fn_t fn, void *data)
@@ -2178,8 +2405,8 @@ int git_configset_get_string(struct config_set *cs, const char *key, char **dest
return 1;
}
-int git_configset_get_string_tmp(struct config_set *cs, const char *key,
- const char **dest)
+static int git_configset_get_string_tmp(struct config_set *cs, const char *key,
+ const char **dest)
{
const char *value;
if (!git_configset_get_value(cs, key, &value)) {
@@ -2508,20 +2735,6 @@ int git_config_get_max_percent_split_change(void)
return -1; /* default value */
}
-int git_config_get_fsmonitor(void)
-{
- if (git_config_get_pathname("core.fsmonitor", &core_fsmonitor))
- core_fsmonitor = getenv("GIT_TEST_FSMONITOR");
-
- if (core_fsmonitor && !*core_fsmonitor)
- core_fsmonitor = NULL;
-
- if (core_fsmonitor)
- return 1;
-
- return 0;
-}
-
int git_config_get_index_threads(int *dest)
{
int is_bool, val;
@@ -2884,6 +3097,20 @@ int git_config_set_gently(const char *key, const char *value)
return git_config_set_multivar_gently(key, value, NULL, 0);
}
+int repo_config_set_worktree_gently(struct repository *r,
+ const char *key, const char *value)
+{
+	/* Only use worktree-specific config if it is already enabled. */
+ if (repository_format_worktree_config) {
+ char *file = repo_git_path(r, "config.worktree");
+ int ret = git_config_set_multivar_in_file_gently(
+ file, key, value, NULL, 0);
+ free(file);
+ return ret;
+ }
+ return repo_config_set_multivar_gently(r, key, value, NULL, 0);
+}
+
void git_config_set(const char *key, const char *value)
{
git_config_set_multivar(key, value, NULL, 0);
@@ -3181,14 +3408,28 @@ void git_config_set_multivar_in_file(const char *config_filename,
int git_config_set_multivar_gently(const char *key, const char *value,
const char *value_pattern, unsigned flags)
{
- return git_config_set_multivar_in_file_gently(NULL, key, value, value_pattern,
- flags);
+ return repo_config_set_multivar_gently(the_repository, key, value,
+ value_pattern, flags);
+}
+
+int repo_config_set_multivar_gently(struct repository *r, const char *key,
+ const char *value,
+ const char *value_pattern, unsigned flags)
+{
+ char *file = repo_git_path(r, "config");
+ int res = git_config_set_multivar_in_file_gently(file,
+ key, value,
+ value_pattern,
+ flags);
+ free(file);
+ return res;
}
void git_config_set_multivar(const char *key, const char *value,
const char *value_pattern, unsigned flags)
{
- git_config_set_multivar_in_file(NULL, key, value, value_pattern,
+ git_config_set_multivar_in_file(git_path("config"),
+ key, value, value_pattern,
flags);
}
diff --git a/config.h b/config.h
index f119de0..7654f61 100644
--- a/config.h
+++ b/config.h
@@ -89,6 +89,15 @@ struct config_options {
unsigned int ignore_worktree : 1;
unsigned int ignore_cmdline : 1;
unsigned int system_gently : 1;
+
+ /*
+	 * For internal use. Include all includeIf.hasconfig:remote.*.url paths without
+ * checking if the repo has that remote URL, and when doing so, verify
+ * that files included in this way do not configure any remote URLs
+ * themselves.
+ */
+ unsigned int unconditional_remote_url : 1;
+
const char *commondir;
const char *git_dir;
config_parser_event_fn_t event_fn;
@@ -126,6 +135,8 @@ int git_default_config(const char *, const char *, void *);
/**
* Read a specific file in git-config format.
* This function takes the same callback and data parameters as `git_config`.
+ *
+ * Unlike git_config(), this function does not respect includes.
*/
int git_config_from_file(config_fn_t fn, const char *, void *);
@@ -158,6 +169,8 @@ void read_very_early_config(config_fn_t cb, void *data);
* will first feed the user-wide one to the callback, and then the
* repo-specific one; by overwriting, the higher-priority repo-specific
* value is left at the end).
+ *
+ * Unlike git_config_from_file(), this function respects includes.
*/
void git_config(config_fn_t fn, void *);
@@ -254,6 +267,13 @@ void git_config_set_in_file(const char *, const char *, const char *);
int git_config_set_gently(const char *, const char *);
/**
+ * Write a config value that should apply to the current worktree. If
+ * extensions.worktreeConfig is enabled, then the write will happen in the
+ * current worktree's config. Otherwise, write to the common config file.
+ */
+int repo_config_set_worktree_gently(struct repository *, const char *, const char *);
+
+/**
* write config values to `.git/config`, takes a key/value pair as parameter.
*/
void git_config_set(const char *, const char *);
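The worktree-aware setter writes to config.worktree only when extensions.worktreeConfig is already enabled and otherwise falls back to the shared config file. A one-line illustrative use (the key and value are placeholders):

	/* Illustrative only: record a setting for the current worktree. */
	repo_config_set_worktree_gently(the_repository, "core.sparseCheckout", "true");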
@@ -281,6 +301,7 @@ int git_config_parse_key(const char *, char **, size_t *);
int git_config_set_multivar_gently(const char *, const char *, const char *, unsigned);
void git_config_set_multivar(const char *, const char *, const char *, unsigned);
+int repo_config_set_multivar_gently(struct repository *, const char *, const char *, const char *, unsigned);
int git_config_set_multivar_in_file_gently(const char *, const char *, const char *, const char *, unsigned);
/**
@@ -338,39 +359,6 @@ const char *current_config_origin_type(void);
const char *current_config_name(void);
int current_config_line(void);
-/**
- * Include Directives
- * ------------------
- *
- * By default, the config parser does not respect include directives.
- * However, a caller can use the special `git_config_include` wrapper
- * callback to support them. To do so, you simply wrap your "real" callback
- * function and data pointer in a `struct config_include_data`, and pass
- * the wrapper to the regular config-reading functions. For example:
- *
- * -------------------------------------------
- * int read_file_with_include(const char *file, config_fn_t fn, void *data)
- * {
- * struct config_include_data inc = CONFIG_INCLUDE_INIT;
- * inc.fn = fn;
- * inc.data = data;
- * return git_config_from_file(git_config_include, file, &inc);
- * }
- * -------------------------------------------
- *
- * `git_config` respects includes automatically. The lower-level
- * `git_config_from_file` does not.
- *
- */
-struct config_include_data {
- int depth;
- config_fn_t fn;
- void *data;
- const struct config_options *opts;
-};
-#define CONFIG_INCLUDE_INIT { 0 }
-int git_config_include(const char *name, const char *value, void *data);
-
/*
* Match and parse a config key of the form:
*
@@ -486,7 +474,6 @@ void git_configset_clear(struct config_set *cs);
int git_configset_get_value(struct config_set *cs, const char *key, const char **dest);
int git_configset_get_string(struct config_set *cs, const char *key, char **dest);
-int git_configset_get_string_tmp(struct config_set *cs, const char *key, const char **dest);
int git_configset_get_int(struct config_set *cs, const char *key, int *dest);
int git_configset_get_ulong(struct config_set *cs, const char *key, unsigned long *dest);
int git_configset_get_bool(struct config_set *cs, const char *key, int *dest);
@@ -610,7 +597,6 @@ int git_config_get_pathname(const char *key, const char **dest);
int git_config_get_index_threads(int *dest);
int git_config_get_split_index(void);
int git_config_get_max_percent_split_change(void);
-int git_config_get_fsmonitor(void);
/* This dies if the configured or default date is in the future */
int git_config_get_expiry(const char *key, const char **output);
diff --git a/config.mak.uname b/config.mak.uname
index c48db45..259d151 100644
--- a/config.mak.uname
+++ b/config.mak.uname
@@ -57,6 +57,7 @@ ifeq ($(uname_S),Linux)
HAVE_CLOCK_MONOTONIC = YesPlease
# -lrt is needed for clock_gettime on glibc <= 2.16
NEEDS_LIBRT = YesPlease
+ HAVE_SYNC_FILE_RANGE = YesPlease
HAVE_GETDELIM = YesPlease
FREAD_READS_DIRECTORIES = UnfortunatelyYes
BASIC_CFLAGS += -DHAVE_SYSINFO
@@ -66,7 +67,6 @@ ifeq ($(uname_S),Linux)
# centos7/rhel7 provides gcc 4.8.5 and zlib 1.2.7.
ifneq ($(findstring .el7.,$(uname_R)),)
BASIC_CFLAGS += -std=c99
- NO_UNCOMPRESS2 = YesPlease
endif
endif
ifeq ($(uname_S),GNU/kFreeBSD)
@@ -146,6 +146,7 @@ ifeq ($(uname_S),Darwin)
HAVE_BSD_SYSCTL = YesPlease
FREAD_READS_DIRECTORIES = UnfortunatelyYes
HAVE_NS_GET_EXECUTABLE_PATH = YesPlease
+ CSPRNG_METHOD = arc4random
# Workaround for `gettext` being keg-only and not even being linked via
# `brew link --force gettext`, should be obsolete as of
@@ -157,6 +158,16 @@ ifeq ($(uname_S),Darwin)
MSGFMT = /usr/local/opt/gettext/bin/msgfmt
endif
endif
+
+	# The builtin FSMonitor on macOS builds upon Simple-IPC. Both require
+ # Unix domain sockets and PThreads.
+ ifndef NO_PTHREADS
+ ifndef NO_UNIX_SOCKETS
+ FSMONITOR_DAEMON_BACKEND = darwin
+ endif
+ endif
+
+ BASIC_LDFLAGS += -framework CoreServices
endif
ifeq ($(uname_S),SunOS)
NEEDS_SOCKET = YesPlease
@@ -261,15 +272,12 @@ ifeq ($(uname_S),FreeBSD)
HAVE_PATHS_H = YesPlease
HAVE_BSD_SYSCTL = YesPlease
HAVE_BSD_KERN_PROC_SYSCTL = YesPlease
+ CSPRNG_METHOD = arc4random
PAGER_ENV = LESS=FRX LV=-c MORE=FRX
FREAD_READS_DIRECTORIES = UnfortunatelyYes
FILENO_IS_A_MACRO = UnfortunatelyYes
endif
ifeq ($(uname_S),OpenBSD)
- # Versions < 7.0 need compatibility layer
- ifeq ($(shell expr "$(uname_R)" : "[1-6]\."),2)
- NO_UNCOMPRESS2 = UnfortunatelyYes
- endif
NO_STRCASESTR = YesPlease
NO_MEMMEM = YesPlease
USE_ST_TIMESPEC = YesPlease
@@ -279,6 +287,7 @@ ifeq ($(uname_S),OpenBSD)
HAVE_PATHS_H = YesPlease
HAVE_BSD_SYSCTL = YesPlease
HAVE_BSD_KERN_PROC_SYSCTL = YesPlease
+ CSPRNG_METHOD = arc4random
PROCFS_EXECUTABLE_PATH = /proc/curproc/file
FREAD_READS_DIRECTORIES = UnfortunatelyYes
FILENO_IS_A_MACRO = UnfortunatelyYes
@@ -290,6 +299,7 @@ ifeq ($(uname_S),MirBSD)
NEEDS_LIBICONV = YesPlease
HAVE_PATHS_H = YesPlease
HAVE_BSD_SYSCTL = YesPlease
+ CSPRNG_METHOD = arc4random
endif
ifeq ($(uname_S),NetBSD)
ifeq ($(shell expr "$(uname_R)" : '[01]\.'),2)
@@ -301,6 +311,7 @@ ifeq ($(uname_S),NetBSD)
HAVE_PATHS_H = YesPlease
HAVE_BSD_SYSCTL = YesPlease
HAVE_BSD_KERN_PROC_SYSCTL = YesPlease
+ CSPRNG_METHOD = arc4random
PROCFS_EXECUTABLE_PATH = /proc/curproc/exe
endif
ifeq ($(uname_S),AIX)
@@ -430,10 +441,16 @@ ifeq ($(uname_S),Windows)
NO_STRTOUMAX = YesPlease
NO_MKDTEMP = YesPlease
NO_INTTYPES_H = YesPlease
+ CSPRNG_METHOD = rtlgenrandom
# VS2015 with UCRT claims that snprintf and friends are C99 compliant,
# so we don't need this:
#
# SNPRINTF_RETURNS_BOGUS = YesPlease
+
+ # The builtin FSMonitor requires Named Pipes and Threads on Windows.
+ # These are always available, so we do not have to conditionally
+ # support it.
+ FSMONITOR_DAEMON_BACKEND = win32
NO_SVN_TESTS = YesPlease
RUNTIME_PREFIX = YesPlease
HAVE_WPGMPTR = YesWeDo
@@ -462,6 +479,7 @@ endif
CFLAGS =
BASIC_CFLAGS = -nologo -I. -Icompat/vcbuild/include -DWIN32 -D_CONSOLE -DHAVE_STRING_H -D_CRT_SECURE_NO_WARNINGS -D_CRT_NONSTDC_NO_DEPRECATE
COMPAT_OBJS = compat/msvc.o compat/winansi.o \
+ compat/win32/flush.o \
compat/win32/path-utils.o \
compat/win32/pthread.o compat/win32/syslog.o \
compat/win32/trace2_win32_process_info.o \
@@ -525,7 +543,6 @@ ifeq ($(uname_S),Interix)
endif
endif
ifeq ($(uname_S),Minix)
- NO_UNCOMPRESS2 = YesPlease
NO_IPV6 = YesPlease
NO_ST_BLOCKS_IN_STRUCT_STAT = YesPlease
NO_NSEC = YesPlease
@@ -581,7 +598,6 @@ ifeq ($(uname_S),NONSTOP_KERNEL)
NO_SETENV = YesPlease
NO_UNSETENV = YesPlease
NO_MKDTEMP = YesPlease
- NO_UNCOMPRESS2 = YesPlease
# Currently libiconv-1.9.1.
OLD_ICONV = UnfortunatelyYes
NO_REGEX = NeedsStartEnd
@@ -599,6 +615,7 @@ ifeq ($(uname_S),NONSTOP_KERNEL)
NO_MMAP = YesPlease
NO_POLL = YesPlease
NO_INTPTR_T = UnfortunatelyYes
+ CSPRNG_METHOD = openssl
SANE_TOOL_PATH = /usr/coreutils/bin:/usr/local/bin
SHELL_PATH = /usr/coreutils/bin/bash
endif
@@ -619,6 +636,11 @@ ifeq ($(uname_S),MINGW)
NO_STRTOUMAX = YesPlease
NO_MKDTEMP = YesPlease
NO_SVN_TESTS = YesPlease
+
+ # The builtin FSMonitor requires Named Pipes and Threads on Windows.
+ # These are always available, so we do not have to conditionally
+ # support it.
+ FSMONITOR_DAEMON_BACKEND = win32
RUNTIME_PREFIX = YesPlease
HAVE_WPGMPTR = YesWeDo
NO_ST_BLOCKS_IN_STRUCT_STAT = YesPlease
@@ -634,11 +656,13 @@ ifeq ($(uname_S),MINGW)
NO_POSIX_GOODIES = UnfortunatelyYes
DEFAULT_HELP_FORMAT = html
HAVE_PLATFORM_PROCINFO = YesPlease
+ CSPRNG_METHOD = rtlgenrandom
BASIC_LDFLAGS += -municode
COMPAT_CFLAGS += -DNOGDI -Icompat -Icompat/win32
COMPAT_CFLAGS += -DSTRIP_EXTENSION=\".exe\"
COMPAT_OBJS += compat/mingw.o compat/winansi.o \
compat/win32/trace2_win32_process_info.o \
+ compat/win32/flush.o \
compat/win32/path-utils.o \
compat/win32/pthread.o compat/win32/syslog.o \
compat/win32/dirent.o
@@ -726,7 +750,6 @@ vcxproj:
git diff-index --cached --quiet HEAD --
# Make .vcxproj files and add them
- unset QUIET_GEN QUIET_BUILT_IN; \
perl contrib/buildsystems/generate -g Vcxproj
git add -f git.sln {*,*/lib,t/helper/*}/*.vcxproj
diff --git a/configure.ac b/configure.ac
index d60d494..316a31d 100644
--- a/configure.ac
+++ b/configure.ac
@@ -664,22 +664,9 @@ AC_LINK_IFELSE([ZLIBTEST_SRC],
NO_DEFLATE_BOUND=yes])
LIBS="$old_LIBS"
-AC_DEFUN([ZLIBTEST_UNCOMPRESS2_SRC], [
-AC_LANG_PROGRAM([#include <zlib.h>],
- [uncompress2(NULL,NULL,NULL,NULL);])])
-AC_MSG_CHECKING([for uncompress2 in -lz])
-old_LIBS="$LIBS"
-LIBS="$LIBS -lz"
-AC_LINK_IFELSE([ZLIBTEST_UNCOMPRESS2_SRC],
- [AC_MSG_RESULT([yes])],
- [AC_MSG_RESULT([no])
- NO_UNCOMPRESS2=yes])
-LIBS="$old_LIBS"
-
GIT_UNSTASH_FLAGS($ZLIB_PATH)
GIT_CONF_SUBST([NO_DEFLATE_BOUND])
-GIT_CONF_SUBST([NO_UNCOMPRESS2])
#
# Define NEEDS_SOCKET if linking with libc is not enough (SunOS,
@@ -1095,6 +1082,14 @@ AC_COMPILE_IFELSE([CLOCK_MONOTONIC_SRC],
[AC_MSG_RESULT([no])
HAVE_CLOCK_MONOTONIC=])
GIT_CONF_SUBST([HAVE_CLOCK_MONOTONIC])
+
+#
+# Define HAVE_SYNC_FILE_RANGE=YesPlease if sync_file_range is available.
+GIT_CHECK_FUNC(sync_file_range,
+ [HAVE_SYNC_FILE_RANGE=YesPlease],
+ [HAVE_SYNC_FILE_RANGE=])
+GIT_CONF_SUBST([HAVE_SYNC_FILE_RANGE])
+
#
# Define NO_SETITIMER if you don't have setitimer.
GIT_CHECK_FUNC(setitimer,
diff --git a/connect.c b/connect.c
index eaf7d6d..afc79a6 100644
--- a/connect.c
+++ b/connect.c
@@ -379,7 +379,7 @@ struct ref **get_remote_heads(struct packet_reader *reader,
/* Returns 1 when a valid ref has been added to `list`, 0 otherwise */
static int process_ref_v2(struct packet_reader *reader, struct ref ***list,
- char **unborn_head_target)
+ const char **unborn_head_target)
{
int ret = 1;
int i = 0;
@@ -483,7 +483,7 @@ struct ref **get_remote_refs(int fd_out, struct packet_reader *reader,
const char *hash_name;
struct strvec *ref_prefixes = transport_options ?
&transport_options->ref_prefixes : NULL;
- char **unborn_head_target = transport_options ?
+ const char **unborn_head_target = transport_options ?
&transport_options->unborn_head_target : NULL;
*list = NULL;
diff --git a/contrib/buildsystems/CMakeLists.txt b/contrib/buildsystems/CMakeLists.txt
index 5100f56..185f56f 100644
--- a/contrib/buildsystems/CMakeLists.txt
+++ b/contrib/buildsystems/CMakeLists.txt
@@ -260,11 +260,19 @@ if(CMAKE_SYSTEM_NAME STREQUAL "Windows")
_CONSOLE DETECT_MSYS_TTY STRIP_EXTENSION=".exe" NO_SYMLINK_HEAD UNRELIABLE_FSTAT
NOGDI OBJECT_CREATION_MODE=1 __USE_MINGW_ANSI_STDIO=0
USE_NED_ALLOCATOR OVERRIDE_STRDUP MMAP_PREVENTS_DELETE USE_WIN32_MMAP
- UNICODE _UNICODE HAVE_WPGMPTR ENSURE_MSYSTEM_IS_SET)
- list(APPEND compat_SOURCES compat/mingw.c compat/winansi.c compat/win32/path-utils.c
- compat/win32/pthread.c compat/win32mmap.c compat/win32/syslog.c
- compat/win32/trace2_win32_process_info.c compat/win32/dirent.c
- compat/nedmalloc/nedmalloc.c compat/strdup.c)
+ UNICODE _UNICODE HAVE_WPGMPTR ENSURE_MSYSTEM_IS_SET HAVE_RTLGENRANDOM)
+ list(APPEND compat_SOURCES
+ compat/mingw.c
+ compat/winansi.c
+ compat/win32/flush.c
+ compat/win32/path-utils.c
+ compat/win32/pthread.c
+ compat/win32mmap.c
+ compat/win32/syslog.c
+ compat/win32/trace2_win32_process_info.c
+ compat/win32/dirent.c
+ compat/nedmalloc/nedmalloc.c
+ compat/strdup.c)
set(NO_UNIX_SOCKETS 1)
elseif(CMAKE_SYSTEM_NAME STREQUAL "Linux")
@@ -285,6 +293,16 @@ else()
endif()
endif()
+if(SUPPORTS_SIMPLE_IPC)
+ if(CMAKE_SYSTEM_NAME STREQUAL "Windows")
+ add_compile_definitions(HAVE_FSMONITOR_DAEMON_BACKEND)
+ list(APPEND compat_SOURCES compat/fsmonitor/fsm-listen-win32.c)
+ elseif(CMAKE_SYSTEM_NAME STREQUAL "Darwin")
+ add_compile_definitions(HAVE_FSMONITOR_DAEMON_BACKEND)
+ list(APPEND compat_SOURCES compat/fsmonitor/fsm-listen-darwin.c)
+ endif()
+endif()
+
set(EXE_EXTENSION ${CMAKE_EXECUTABLE_SUFFIX})
#header checks
diff --git a/contrib/coccinelle/strbuf.cocci b/contrib/coccinelle/strbuf.cocci
index d9ada69..0970d98 100644
--- a/contrib/coccinelle/strbuf.cocci
+++ b/contrib/coccinelle/strbuf.cocci
@@ -15,7 +15,7 @@ constant fmt !~ "%";
@@
expression E;
struct strbuf SB;
-format F =~ "s";
+format F =~ "^s$";
@@
- strbuf_addf(E, "%@F@", SB.buf);
+ strbuf_addbuf(E, &SB);
@@ -23,7 +23,7 @@ format F =~ "s";
@@
expression E;
struct strbuf *SBP;
-format F =~ "s";
+format F =~ "^s$";
@@
- strbuf_addf(E, "%@F@", SBP->buf);
+ strbuf_addbuf(E, SBP);
@@ -44,7 +44,7 @@ struct strbuf *SBP;
@@
expression E1, E2;
-format F =~ "s";
+format F =~ "^s$";
@@
- strbuf_addf(E1, "%@F@", E2);
+ strbuf_addstr(E1, E2);
diff --git a/contrib/completion/git-completion.bash b/contrib/completion/git-completion.bash
index 377d6c5..ba5c395 100644
--- a/contrib/completion/git-completion.bash
+++ b/contrib/completion/git-completion.bash
@@ -49,6 +49,11 @@
# and git-switch completion (e.g., completing "foo" when "origin/foo"
# exists).
#
+# GIT_COMPLETION_SHOW_ALL_COMMANDS
+#
+# When set to "1" suggest all commands, including plumbing commands
+# which are hidden by default (e.g. "cat-file" on "git ca<TAB>").
+#
# GIT_COMPLETION_SHOW_ALL
#
# When set to "1" suggest all options, including options which are
@@ -2885,6 +2890,10 @@ _git_restore ()
--*)
__gitcomp_builtin restore
;;
+ *)
+ if __git rev-parse --verify --quiet HEAD >/dev/null; then
+ __git_complete_index_file "--modified"
+ fi
esac
}
@@ -2986,9 +2995,37 @@ _git_show_branch ()
__git_complete_revlist
}
+__gitcomp_directories ()
+{
+ local _tmp_dir _tmp_completions _found=0
+
+ # Get the directory of the current token; this differs from dirname
+	# in that it keeps up to the final trailing slash. If no slash is found,
+ # that's fine too.
+ [[ "$cur" =~ .*/ ]]
+ _tmp_dir=$BASH_REMATCH
+
+ # Find possible directory completions, adding trailing '/' characters,
+ # de-quoting, and handling unusual characters.
+ while IFS= read -r -d $'\0' c ; do
+ # If there are directory completions, find ones that start
+ # with "$cur", the current token, and put those in COMPREPLY
+ if [[ $c == "$cur"* ]]; then
+ COMPREPLY+=("$c/")
+ _found=1
+ fi
+ done < <(git ls-tree -z -d --name-only HEAD $_tmp_dir)
+
+ if [[ $_found == 0 ]] && [[ "$cur" =~ /$ ]]; then
+ # No possible further completions any deeper, so assume we're at
+ # a leaf directory and just consider it complete
+ __gitcomp_direct_append "$cur "
+ fi
+}
+
_git_sparse_checkout ()
{
- local subcommands="list init set disable"
+ local subcommands="list init set disable add reapply"
local subcommand="$(__git_find_on_cmdline "$subcommands")"
if [ -z "$subcommand" ]; then
__gitcomp "$subcommands"
@@ -2996,14 +3033,14 @@ _git_sparse_checkout ()
fi
case "$subcommand,$cur" in
- init,--*)
- __gitcomp "--cone"
- ;;
- set,--*)
- __gitcomp "--stdin"
- ;;
- *)
+ *,--*)
+ __gitcomp_builtin sparse-checkout_$subcommand "" "--"
;;
+ set,*|add,*)
+ if [ "$(__git config core.sparseCheckoutCone)" == "true" ] ||
+ [ -n "$(__git_find_on_cmdline --cone)" ]; then
+ __gitcomp_directories
+ fi
esac
}
@@ -3455,7 +3492,13 @@ __git_main ()
then
__gitcomp "$GIT_TESTING_PORCELAIN_COMMAND_LIST"
else
- __gitcomp "$(__git --list-cmds=list-mainporcelain,others,nohelpers,alias,list-complete,config)"
+ local list_cmds=list-mainporcelain,others,nohelpers,alias,list-complete,config
+
+ if test "${GIT_COMPLETION_SHOW_ALL_COMMANDS-}" = "1"
+ then
+ list_cmds=builtins,$list_cmds
+ fi
+ __gitcomp "$(__git --list-cmds=$list_cmds)"
fi
;;
esac
diff --git a/contrib/completion/git-prompt.sh b/contrib/completion/git-prompt.sh
index db7c006..87b2b91 100644
--- a/contrib/completion/git-prompt.sh
+++ b/contrib/completion/git-prompt.sh
@@ -66,6 +66,11 @@
# git always compare HEAD to @{upstream}
# svn always compare HEAD to your SVN upstream
#
+# By default, __git_ps1 will compare HEAD to your SVN upstream if it can
+# find one, or @{upstream} otherwise. Once you have set
+# GIT_PS1_SHOWUPSTREAM, you can override it on a per-repository basis by
+# setting the bash.showUpstream config variable.
+#
# You can change the separator between the branch name and the above
# state symbols by setting GIT_PS1_STATESEPARATOR. The default separator
# is SP.
@@ -79,11 +84,6 @@
# single '?' character by setting GIT_PS1_COMPRESSSPARSESTATE, or omitted
# by setting GIT_PS1_OMITSPARSESTATE.
#
-# By default, __git_ps1 will compare HEAD to your SVN upstream if it can
-# find one, or @{upstream} otherwise. Once you have set
-# GIT_PS1_SHOWUPSTREAM, you can override it on a per-repository basis by
-# setting the bash.showUpstream config variable.
-#
# If you would like to see more information about the identity of
# commits checked out as a detached HEAD, set GIT_PS1_DESCRIBE_STYLE
# to one of these values:
@@ -115,7 +115,7 @@ __git_ps1_show_upstream ()
{
local key value
local svn_remote svn_url_pattern count n
- local upstream=git legacy="" verbose="" name=""
+ local upstream_type=git legacy="" verbose="" name=""
svn_remote=()
# get some config options from git-config
@@ -132,7 +132,7 @@ __git_ps1_show_upstream ()
svn-remote.*.url)
svn_remote[$((${#svn_remote[@]} + 1))]="$value"
svn_url_pattern="$svn_url_pattern\\|$value"
- upstream=svn+git # default upstream is SVN if available, else git
+ upstream_type=svn+git # default upstream type is SVN if available, else git
;;
esac
done <<< "$output"
@@ -141,16 +141,16 @@ __git_ps1_show_upstream ()
local option
for option in ${GIT_PS1_SHOWUPSTREAM}; do
case "$option" in
- git|svn) upstream="$option" ;;
+ git|svn) upstream_type="$option" ;;
verbose) verbose=1 ;;
legacy) legacy=1 ;;
name) name=1 ;;
esac
done
- # Find our upstream
- case "$upstream" in
- git) upstream="@{upstream}" ;;
+ # Find our upstream type
+ case "$upstream_type" in
+ git) upstream_type="@{upstream}" ;;
svn*)
# get the upstream from the "git-svn-id: ..." in a commit message
# (git-svn uses essentially the same procedure internally)
@@ -167,12 +167,12 @@ __git_ps1_show_upstream ()
if [[ -z "$svn_upstream" ]]; then
# default branch name for checkouts with no layout:
- upstream=${GIT_SVN_ID:-git-svn}
+ upstream_type=${GIT_SVN_ID:-git-svn}
else
- upstream=${svn_upstream#/}
+ upstream_type=${svn_upstream#/}
fi
- elif [[ "svn+git" = "$upstream" ]]; then
- upstream="@{upstream}"
+ elif [[ "svn+git" = "$upstream_type" ]]; then
+ upstream_type="@{upstream}"
fi
;;
esac
@@ -180,11 +180,11 @@ __git_ps1_show_upstream ()
# Find how many commits we are ahead/behind our upstream
if [[ -z "$legacy" ]]; then
count="$(git rev-list --count --left-right \
- "$upstream"...HEAD 2>/dev/null)"
+ "$upstream_type"...HEAD 2>/dev/null)"
else
# produce equivalent output to --count for older versions of git
local commits
- if commits="$(git rev-list --left-right "$upstream"...HEAD 2>/dev/null)"
+ if commits="$(git rev-list --left-right "$upstream_type"...HEAD 2>/dev/null)"
then
local commit behind=0 ahead=0
for commit in $commits
@@ -214,26 +214,26 @@ __git_ps1_show_upstream ()
*) # diverged from upstream
p="<>" ;;
esac
- else
+ else # verbose, set upstream instead of p
case "$count" in
"") # no upstream
- p="" ;;
+ upstream="" ;;
"0 0") # equal to upstream
- p=" u=" ;;
+ upstream="|u=" ;;
"0 "*) # ahead of upstream
- p=" u+${count#0 }" ;;
+ upstream="|u+${count#0 }" ;;
*" 0") # behind upstream
- p=" u-${count% 0}" ;;
+ upstream="|u-${count% 0}" ;;
*) # diverged from upstream
- p=" u+${count#* }-${count% *}" ;;
+ upstream="|u+${count#* }-${count% *}" ;;
esac
if [[ -n "$count" && -n "$name" ]]; then
__git_ps1_upstream_name=$(git rev-parse \
- --abbrev-ref "$upstream" 2>/dev/null)
+ --abbrev-ref "$upstream_type" 2>/dev/null)
if [ $pcmode = yes ] && [ $ps1_expanded = yes ]; then
- p="$p \${__git_ps1_upstream_name}"
+ upstream="$upstream \${__git_ps1_upstream_name}"
else
- p="$p ${__git_ps1_upstream_name}"
+ upstream="$upstream ${__git_ps1_upstream_name}"
# not needed anymore; keep user's
# environment clean
unset __git_ps1_upstream_name
@@ -512,7 +512,8 @@ __git_ps1 ()
local u=""
local h=""
local c=""
- local p=""
+ local p="" # short version of upstream state indicator
+ local upstream="" # verbose version of upstream state indicator
if [ "true" = "$inside_gitdir" ]; then
if [ "true" = "$bare_repo" ]; then
@@ -568,8 +569,8 @@ __git_ps1 ()
b="\${__git_ps1_branch_name}"
fi
- local f="$h$w$i$s$u"
- local gitstring="$c$b${f:+$z$f}${sparse}$r$p"
+ local f="$h$w$i$s$u$p"
+ local gitstring="$c$b${f:+$z$f}${sparse}$r${upstream}"
if [ $pcmode = yes ]; then
if [ "${__git_printf_supports_v-}" != yes ]; then
diff --git a/contrib/rerere-train.sh b/contrib/rerere-train.sh
index 75125d6..26b724c 100755
--- a/contrib/rerere-train.sh
+++ b/contrib/rerere-train.sh
@@ -86,7 +86,7 @@ do
fi
if test -s "$GIT_DIR/MERGE_RR"
then
- git show -s --pretty=format:"Learning from %h %s" "$commit"
+ git --no-pager show -s --format="Learning from %h %s" "$commit"
git rerere
git checkout -q $commit -- .
git rerere
diff --git a/contrib/scalar/Makefile b/contrib/scalar/Makefile
index 231b1ee..37f283f 100644
--- a/contrib/scalar/Makefile
+++ b/contrib/scalar/Makefile
@@ -1,18 +1,8 @@
-QUIET_SUBDIR0 = +$(MAKE) -C # space to separate -C and subdir
-QUIET_SUBDIR1 =
-
-ifneq ($(findstring s,$(MAKEFLAGS)),s)
-ifndef V
- QUIET_GEN = @echo ' ' GEN $@;
- QUIET_SUBDIR0 = +@subdir=
- QUIET_SUBDIR1 = ;$(NO_SUBDIR) echo ' ' SUBDIR $$subdir; \
- $(MAKE) $(PRINT_DIR) -C $$subdir
-else
- export V
-endif
-endif
-
-all:
+# The default target of this Makefile is...
+all::
+
+# Import tree-wide shared Makefile behavior and libraries
+include ../../shared.mak
include ../../config.mak.uname
-include ../../config.mak.autogen
@@ -21,7 +11,7 @@ include ../../config.mak.uname
TARGETS = scalar$(X) scalar.o
GITLIBS = ../../common-main.o ../../libgit.a ../../xdiff/lib.a
-all: scalar$(X) ../../bin-wrappers/scalar
+all:: scalar$(X) ../../bin-wrappers/scalar
$(GITLIBS):
$(QUIET_SUBDIR0)../.. $(QUIET_SUBDIR1) $(subst ../../,,$@)
diff --git a/contrib/scalar/scalar.c b/contrib/scalar/scalar.c
index 1ce9c2b..58ca0e5 100644
--- a/contrib/scalar/scalar.c
+++ b/contrib/scalar/scalar.c
@@ -152,7 +152,6 @@ static int set_recommended_config(int reconfigure)
{ "pack.useBitmaps", "false", 1 },
{ "pack.useSparse", "true", 1 },
{ "receive.autoGC", "false", 1 },
- { "reset.quiet", "true", 1 },
{ "feature.manyFiles", "false", 1 },
{ "feature.experimental", "false", 1 },
{ "fetch.unpackLimit", "1", 1 },
@@ -808,6 +807,25 @@ int cmd_main(int argc, const char **argv)
struct strbuf scalar_usage = STRBUF_INIT;
int i;
+ while (argc > 1 && *argv[1] == '-') {
+ if (!strcmp(argv[1], "-C")) {
+ if (argc < 3)
+ die(_("-C requires a <directory>"));
+ if (chdir(argv[2]) < 0)
+ die_errno(_("could not change to '%s'"),
+ argv[2]);
+ argc -= 2;
+ argv += 2;
+ } else if (!strcmp(argv[1], "-c")) {
+ if (argc < 3)
+ die(_("-c requires a <key>=<value> argument"));
+ git_config_push_parameter(argv[2]);
+ argc -= 2;
+ argv += 2;
+ } else
+ break;
+ }
+
if (argc > 1) {
argv++;
argc--;
@@ -818,7 +836,8 @@ int cmd_main(int argc, const char **argv)
}
strbuf_addstr(&scalar_usage,
- N_("scalar <command> [<options>]\n\nCommands:\n"));
+ N_("scalar [-C <directory>] [-c <key>=<value>] "
+ "<command> [<options>]\n\nCommands:\n"));
for (i = 0; builtins[i].name; i++)
strbuf_addf(&scalar_usage, "\t%s\n", builtins[i].name);
diff --git a/contrib/scalar/scalar.txt b/contrib/scalar/scalar.txt
index f416d63..cf4e5b8 100644
--- a/contrib/scalar/scalar.txt
+++ b/contrib/scalar/scalar.txt
@@ -36,6 +36,16 @@ The `scalar` command implements various subcommands, and different options
depending on the subcommand. With the exception of `clone`, `list` and
`reconfigure --all`, all subcommands expect to be run in an enlistment.
+The following options can be specified _before_ the subcommand:
+
+-C <directory>::
+ Before running the subcommand, change the working directory. This
+ option imitates the same option of linkgit:git[1].
+
+-c <key>=<value>::
+ For the duration of running the specified subcommand, configure this
+ setting. This option imitates the same option of linkgit:git[1].
+
COMMANDS
--------
diff --git a/contrib/scalar/t/Makefile b/contrib/scalar/t/Makefile
index 6170672..01e82e5 100644
--- a/contrib/scalar/t/Makefile
+++ b/contrib/scalar/t/Makefile
@@ -1,3 +1,6 @@
+# Import tree-wide shared Makefile behavior and libraries
+include ../../../shared.mak
+
# Run scalar tests
#
# Copyright (c) 2005,2021 Junio C Hamano, Johannes Schindelin
diff --git a/contrib/scalar/t/t9099-scalar.sh b/contrib/scalar/t/t9099-scalar.sh
index 2e1502a..8978156 100755
--- a/contrib/scalar/t/t9099-scalar.sh
+++ b/contrib/scalar/t/t9099-scalar.sh
@@ -85,4 +85,12 @@ test_expect_success 'scalar delete with enlistment' '
test_path_is_missing cloned
'
+test_expect_success 'scalar supports -c/-C' '
+ test_when_finished "scalar delete sub" &&
+ git init sub &&
+ scalar -C sub -c status.aheadBehind=bogus register &&
+ test -z "$(git -C sub config --local status.aheadBehind)" &&
+ test true = "$(git -C sub config core.preloadIndex)"
+'
+
test_done
diff --git a/contrib/subtree/git-subtree.sh b/contrib/subtree/git-subtree.sh
index 71f1fd9..1af1d96 100755
--- a/contrib/subtree/git-subtree.sh
+++ b/contrib/subtree/git-subtree.sh
@@ -975,10 +975,10 @@ cmd_merge () {
if test -n "$arg_addmerge_message"
then
- git merge -Xsubtree="$arg_prefix" \
+ git merge --no-ff -Xsubtree="$arg_prefix" \
--message="$arg_addmerge_message" "$rev"
else
- git merge -Xsubtree="$arg_prefix" $rev
+ git merge --no-ff -Xsubtree="$arg_prefix" $rev
fi
}
diff --git a/convert.c b/convert.c
index df7186b..8e39731 100644
--- a/convert.c
+++ b/convert.c
@@ -1159,7 +1159,7 @@ static int ident_to_worktree(const char *src, size_t len,
/* are we "faking" in place editing ? */
if (src == buf->buf)
to_free = strbuf_detach(buf, NULL);
- hash_object_file(the_hash_algo, src, len, "blob", &oid);
+ hash_object_file(the_hash_algo, src, len, OBJ_BLOB, &oid);
strbuf_grow(buf, len + cnt * (the_hash_algo->hexsz + 3));
for (;;) {
@@ -1574,12 +1574,12 @@ static void null_free_fn(struct stream_filter *filter)
}
static struct stream_filter_vtbl null_vtbl = {
- null_filter_fn,
- null_free_fn,
+ .filter = null_filter_fn,
+ .free = null_free_fn,
};
static struct stream_filter null_filter_singleton = {
- &null_vtbl,
+ .vtbl = &null_vtbl,
};
int is_null_stream_filter(struct stream_filter *filter)
@@ -1683,8 +1683,8 @@ static void lf_to_crlf_free_fn(struct stream_filter *filter)
}
static struct stream_filter_vtbl lf_to_crlf_vtbl = {
- lf_to_crlf_filter_fn,
- lf_to_crlf_free_fn,
+ .filter = lf_to_crlf_filter_fn,
+ .free = lf_to_crlf_free_fn,
};
static struct stream_filter *lf_to_crlf_filter(void)
@@ -1779,8 +1779,8 @@ static void cascade_free_fn(struct stream_filter *filter)
}
static struct stream_filter_vtbl cascade_vtbl = {
- cascade_filter_fn,
- cascade_free_fn,
+ .filter = cascade_filter_fn,
+ .free = cascade_free_fn,
};
static struct stream_filter *cascade_filter(struct stream_filter *one,
@@ -1931,8 +1931,8 @@ static void ident_free_fn(struct stream_filter *filter)
}
static struct stream_filter_vtbl ident_vtbl = {
- ident_filter_fn,
- ident_free_fn,
+ .filter = ident_filter_fn,
+ .free = ident_free_fn,
};
static struct stream_filter *ident_filter(const struct object_id *oid)
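The convert.c hunks above replace positional initialization of the static stream_filter vtbls with designated initializers, so each callback is bound to its member by name. Below is a minimal standalone sketch of the difference; it uses a simplified stand-in struct rather than the real stream_filter_vtbl, and the names are purely illustrative:

    #include <stddef.h>

    struct example_filter;

    /* Simplified stand-in for the vtbl shape used in convert.c. */
    struct example_vtbl {
            int  (*filter)(struct example_filter *, const char *in, size_t len);
            void (*free)(struct example_filter *);
    };

    static int  noop_filter(struct example_filter *f, const char *in, size_t len) { return 0; }
    static void noop_free(struct example_filter *f) { }

    /* Positional form: silently misbinds callbacks if members are reordered. */
    static struct example_vtbl positional = { noop_filter, noop_free };

    /* Designated form, as adopted above: each callback is named explicitly. */
    static struct example_vtbl designated = {
            .filter = noop_filter,
            .free = noop_free,
    };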
diff --git a/credential.c b/credential.c
index e7240f3..f6389a5 100644
--- a/credential.c
+++ b/credential.c
@@ -130,6 +130,7 @@ static void credential_apply_config(struct credential *c)
git_config(urlmatch_config_entry, &config);
string_list_clear(&config.vars, 1);
free(normalized_url);
+ urlmatch_config_release(&config);
strbuf_release(&url);
c->configured = 1;
diff --git a/csum-file.c b/csum-file.c
index 26e8a6d..59ef339 100644
--- a/csum-file.c
+++ b/csum-file.c
@@ -58,7 +58,8 @@ static void free_hashfile(struct hashfile *f)
free(f);
}
-int finalize_hashfile(struct hashfile *f, unsigned char *result, unsigned int flags)
+int finalize_hashfile(struct hashfile *f, unsigned char *result,
+ enum fsync_component component, unsigned int flags)
{
int fd;
@@ -69,7 +70,7 @@ int finalize_hashfile(struct hashfile *f, unsigned char *result, unsigned int fl
if (flags & CSUM_HASH_IN_STREAM)
flush(f, f->buffer, the_hash_algo->rawsz);
if (flags & CSUM_FSYNC)
- fsync_or_die(f->fd, f->name);
+ fsync_component_or_die(component, f->fd, f->name);
if (flags & CSUM_CLOSE) {
if (close(f->fd))
die_errno("%s: sha1 file error on close", f->name);
diff --git a/csum-file.h b/csum-file.h
index 291215b..0d29f52 100644
--- a/csum-file.h
+++ b/csum-file.h
@@ -1,6 +1,7 @@
#ifndef CSUM_FILE_H
#define CSUM_FILE_H
+#include "cache.h"
#include "hash.h"
struct progress;
@@ -38,7 +39,7 @@ int hashfile_truncate(struct hashfile *, struct hashfile_checkpoint *);
struct hashfile *hashfd(int fd, const char *name);
struct hashfile *hashfd_check(const char *name);
struct hashfile *hashfd_throughput(int fd, const char *name, struct progress *tp);
-int finalize_hashfile(struct hashfile *, unsigned char *, unsigned int);
+int finalize_hashfile(struct hashfile *, unsigned char *, enum fsync_component, unsigned int);
void hashwrite(struct hashfile *, const void *, unsigned int);
void hashflush(struct hashfile *f);
void crc32_begin(struct hashfile *);
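finalize_hashfile() now takes an enum fsync_component in addition to the CSUM_* flags, so each caller declares what kind of file it is finishing and the fsync configuration can decide whether CSUM_FSYNC actually flushes. A hedged sketch of an adapted call site follows; the wrapper function and the choice of FSYNC_COMPONENT_PACK are illustrative rather than copied from any one caller:

    #include "cache.h"      /* enum fsync_component */
    #include "csum-file.h"

    static void finish_pack_hashfile(struct hashfile *f, unsigned char *hash)
    {
            /*
             * Old form: finalize_hashfile(f, hash, flags), where the flags
             * alone decided whether to fsync. New form: the caller also
             * names the component, so the core.fsync settings are consulted.
             */
            finalize_hashfile(f, hash, FSYNC_COMPONENT_PACK,
                              CSUM_HASH_IN_STREAM | CSUM_FSYNC | CSUM_CLOSE);
    }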
diff --git a/date.c b/date.c
index 84bb445..68a260c 100644
--- a/date.c
+++ b/date.c
@@ -5,6 +5,7 @@
*/
#include "cache.h"
+#include "date.h"
/*
* This is like mktime, but without normalization of tm_wday and tm_yday.
@@ -205,11 +206,10 @@ void show_date_relative(timestamp_t time, struct strbuf *timebuf)
struct date_mode *date_mode_from_type(enum date_mode_type type)
{
- static struct date_mode mode;
+ static struct date_mode mode = DATE_MODE_INIT;
if (type == DATE_STRFTIME)
BUG("cannot create anonymous strftime date_mode struct");
mode.type = type;
- mode.local = 0;
return &mode;
}
@@ -993,6 +993,11 @@ void parse_date_format(const char *format, struct date_mode *mode)
die("unknown date format %s", format);
}
+void date_mode_release(struct date_mode *mode)
+{
+ free((char *)mode->strftime_fmt);
+}
+
void datestamp(struct strbuf *out)
{
time_t now;
diff --git a/date.h b/date.h
new file mode 100644
index 0000000..5d4eaba
--- /dev/null
+++ b/date.h
@@ -0,0 +1,74 @@
+#ifndef DATE_H
+#define DATE_H
+
+/**
+ * The date mode type. This has DATE_NORMAL at an explicit "= 0" to
+ * accommodate a memset([...], 0, [...]) initialization when "struct
+ * date_mode" is used as an embedded struct member, as in the case of
+ * e.g. "struct pretty_print_context" and "struct rev_info".
+ */
+enum date_mode_type {
+ DATE_NORMAL = 0,
+ DATE_HUMAN,
+ DATE_RELATIVE,
+ DATE_SHORT,
+ DATE_ISO8601,
+ DATE_ISO8601_STRICT,
+ DATE_RFC2822,
+ DATE_STRFTIME,
+ DATE_RAW,
+ DATE_UNIX
+};
+
+struct date_mode {
+ enum date_mode_type type;
+ const char *strftime_fmt;
+ int local;
+};
+
+#define DATE_MODE_INIT { \
+ .type = DATE_NORMAL, \
+}
+
+/**
+ * Convenience helper for passing a constant type, like:
+ *
+ * show_date(t, tz, DATE_MODE(NORMAL));
+ */
+#define DATE_MODE(t) date_mode_from_type(DATE_##t)
+struct date_mode *date_mode_from_type(enum date_mode_type type);
+
+/**
+ * Format <'time', 'timezone'> into static memory according to 'mode'
+ * and return it. The mode is an initialized "struct date_mode"
+ * (usually from the DATE_MODE() macro).
+ */
+const char *show_date(timestamp_t time, int timezone, const struct date_mode *mode);
+
+/**
+ * Parse a date format for later use with show_date().
+ *
+ * When the "date_mode_type" is DATE_STRFTIME the "strftime_fmt"
+ * member of "struct date_mode" will be a malloc()'d format string to
+ * be used with strbuf_addftime(), in which case you'll need to call
+ * date_mode_release() later.
+ */
+void parse_date_format(const char *format, struct date_mode *mode);
+
+/**
+ * Release a "struct date_mode", currently only required if
+ * parse_date_format() has parsed a "DATE_STRFTIME" format.
+ */
+void date_mode_release(struct date_mode *mode);
+
+void show_date_relative(timestamp_t time, struct strbuf *timebuf);
+int parse_date(const char *date, struct strbuf *out);
+int parse_date_basic(const char *date, timestamp_t *timestamp, int *offset);
+int parse_expiry_date(const char *date, timestamp_t *timestamp);
+void datestamp(struct strbuf *out);
+#define approxidate(s) approxidate_careful((s), NULL)
+timestamp_t approxidate_careful(const char *, int *);
+timestamp_t approxidate_relative(const char *date);
+int date_overflows(timestamp_t date);
+time_t tm_to_time_t(const struct tm *tm);
+#endif
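The new date.h header spells out the ownership rule for date modes: constant modes obtained via DATE_MODE() live in static storage, while parse_date_format() may allocate strftime_fmt, which the caller must hand back to date_mode_release(). A minimal sketch of that round trip, assuming the usual cache.h environment:

    #include "cache.h"
    #include "date.h"

    static void show_both_ways(timestamp_t t, int tz)
    {
            struct date_mode mode = DATE_MODE_INIT;

            /* Constant modes come from static storage and need no cleanup. */
            printf("%s\n", show_date(t, tz, DATE_MODE(ISO8601)));

            /* "format:" selects DATE_STRFTIME and malloc()s strftime_fmt... */
            parse_date_format("format:%Y-%m-%d %H:%M", &mode);
            printf("%s\n", show_date(t, tz, &mode));

            /* ...so release it, as documented above. */
            date_mode_release(&mode);
    }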
diff --git a/diff-merges.c b/diff-merges.c
index 5060ccd..7f64156 100644
--- a/diff-merges.c
+++ b/diff-merges.c
@@ -17,12 +17,14 @@ static void suppress(struct rev_info *revs)
revs->combined_all_paths = 0;
revs->merges_imply_patch = 0;
revs->merges_need_diff = 0;
+ revs->remerge_diff = 0;
}
static void set_separate(struct rev_info *revs)
{
suppress(revs);
revs->separate_merges = 1;
+ revs->simplify_history = 0;
}
static void set_first_parent(struct rev_info *revs)
@@ -45,6 +47,13 @@ static void set_dense_combined(struct rev_info *revs)
revs->dense_combined_merges = 1;
}
+static void set_remerge_diff(struct rev_info *revs)
+{
+ suppress(revs);
+ revs->remerge_diff = 1;
+ revs->simplify_history = 0;
+}
+
static diff_merges_setup_func_t func_by_opt(const char *optarg)
{
if (!strcmp(optarg, "off") || !strcmp(optarg, "none"))
@@ -57,6 +66,8 @@ static diff_merges_setup_func_t func_by_opt(const char *optarg)
return set_combined;
else if (!strcmp(optarg, "cc") || !strcmp(optarg, "dense-combined"))
return set_dense_combined;
+ else if (!strcmp(optarg, "r") || !strcmp(optarg, "remerge"))
+ return set_remerge_diff;
else if (!strcmp(optarg, "m") || !strcmp(optarg, "on"))
return set_to_default;
return NULL;
@@ -67,7 +78,7 @@ static void set_diff_merges(struct rev_info *revs, const char *optarg)
diff_merges_setup_func_t func = func_by_opt(optarg);
if (!func)
- die(_("unknown value for --diff-merges: %s"), optarg);
+ die(_("invalid value for '%s': '%s'"), "--diff-merges", optarg);
func(revs);
@@ -110,6 +121,9 @@ int diff_merges_parse_opts(struct rev_info *revs, const char **argv)
} else if (!strcmp(arg, "--cc")) {
set_dense_combined(revs);
revs->merges_imply_patch = 1;
+ } else if (!strcmp(arg, "--remerge-diff")) {
+ set_remerge_diff(revs);
+ revs->merges_imply_patch = 1;
} else if (!strcmp(arg, "--no-diff-merges")) {
suppress(revs);
} else if (!strcmp(arg, "--combined-all-paths")) {
diff --git a/diff.c b/diff.c
index c862771..ef71599 100644
--- a/diff.c
+++ b/diff.c
@@ -28,6 +28,7 @@
#include "help.h"
#include "promisor-remote.h"
#include "dir.h"
+#include "strmap.h"
#ifdef NO_FAST_WORKING_DIRECTORY
#define FAST_WORKING_DIRECTORY 0
@@ -799,6 +800,14 @@ static void append_emitted_diff_symbol(struct diff_options *o,
f->line = e->line ? xmemdupz(e->line, e->len) : NULL;
}
+static void free_emitted_diff_symbols(struct emitted_diff_symbols *e)
+{
+ if (!e)
+ return;
+ free(e->buf);
+ free(e);
+}
+
struct moved_entry {
const struct emitted_diff_symbol *es;
struct moved_entry *next_line;
@@ -2226,7 +2235,7 @@ static void free_diff_words_data(struct emit_callback *ecbdata)
{
if (ecbdata->diff_words) {
diff_words_flush(ecbdata);
- free (ecbdata->diff_words->opt->emitted_symbols);
+ free_emitted_diff_symbols(ecbdata->diff_words->opt->emitted_symbols);
free (ecbdata->diff_words->opt);
free (ecbdata->diff_words->minus.text.ptr);
free (ecbdata->diff_words->minus.orig);
@@ -3353,6 +3362,31 @@ struct userdiff_driver *get_textconv(struct repository *r,
return userdiff_get_textconv(r, one->driver);
}
+static struct strbuf *additional_headers(struct diff_options *o,
+ const char *path)
+{
+ if (!o->additional_path_headers)
+ return NULL;
+ return strmap_get(o->additional_path_headers, path);
+}
+
+static void add_formatted_headers(struct strbuf *msg,
+ struct strbuf *more_headers,
+ const char *line_prefix,
+ const char *meta,
+ const char *reset)
+{
+ char *next, *newline;
+
+ for (next = more_headers->buf; *next; next = newline) {
+ newline = strchrnul(next, '\n');
+ strbuf_addf(msg, "%s%s%.*s%s\n", line_prefix, meta,
+ (int)(newline - next), next, reset);
+ if (*newline)
+ newline++;
+ }
+}
+
static void builtin_diff(const char *name_a,
const char *name_b,
struct diff_filespec *one,
@@ -3411,6 +3445,17 @@ static void builtin_diff(const char *name_a,
b_two = quote_two(b_prefix, name_b + (*name_b == '/'));
lbl[0] = DIFF_FILE_VALID(one) ? a_one : "/dev/null";
lbl[1] = DIFF_FILE_VALID(two) ? b_two : "/dev/null";
+ if (!DIFF_FILE_VALID(one) && !DIFF_FILE_VALID(two)) {
+ /*
+ * We should only reach this point for pairs from
+ * create_filepairs_for_header_only_notifications(). For
+ * these, we should avoid the "/dev/null" special casing
+ * above, meaning we avoid showing such pairs as either
+ * "new file" or "deleted file" below.
+ */
+ lbl[0] = a_one;
+ lbl[1] = b_two;
+ }
strbuf_addf(&header, "%s%sdiff --git %s %s%s\n", line_prefix, meta, a_one, b_two, reset);
if (lbl[0][0] == '/') {
/* /dev/null */
@@ -4275,6 +4320,7 @@ static void fill_metainfo(struct strbuf *msg,
const char *set = diff_get_color(use_color, DIFF_METAINFO);
const char *reset = diff_get_color(use_color, DIFF_RESET);
const char *line_prefix = diff_line_prefix(o);
+ struct strbuf *more_headers = NULL;
*must_show_header = 1;
strbuf_init(msg, PATH_MAX * 2 + 300);
@@ -4311,6 +4357,11 @@ static void fill_metainfo(struct strbuf *msg,
default:
*must_show_header = 0;
}
+ if ((more_headers = additional_headers(o, name))) {
+ add_formatted_headers(msg, more_headers,
+ line_prefix, set, reset);
+ *must_show_header = 1;
+ }
if (one && two && !oideq(&one->oid, &two->oid)) {
const unsigned hexsz = the_hash_algo->hexsz;
int abbrev = o->abbrev ? o->abbrev : DEFAULT_ABBREV;
@@ -4570,6 +4621,43 @@ void repo_diff_setup(struct repository *r, struct diff_options *options)
prep_parse_options(options);
}
+static const char diff_status_letters[] = {
+ DIFF_STATUS_ADDED,
+ DIFF_STATUS_COPIED,
+ DIFF_STATUS_DELETED,
+ DIFF_STATUS_MODIFIED,
+ DIFF_STATUS_RENAMED,
+ DIFF_STATUS_TYPE_CHANGED,
+ DIFF_STATUS_UNKNOWN,
+ DIFF_STATUS_UNMERGED,
+ DIFF_STATUS_FILTER_AON,
+ DIFF_STATUS_FILTER_BROKEN,
+ '\0',
+};
+
+static unsigned int filter_bit['Z' + 1];
+
+static void prepare_filter_bits(void)
+{
+ int i;
+
+ if (!filter_bit[DIFF_STATUS_ADDED]) {
+ for (i = 0; diff_status_letters[i]; i++)
+ filter_bit[(int) diff_status_letters[i]] = (1 << i);
+ }
+}
+
+static unsigned filter_bit_tst(char status, const struct diff_options *opt)
+{
+ return opt->filter & filter_bit[(int) status];
+}
+
+unsigned diff_filter_bit(char status)
+{
+ prepare_filter_bits();
+ return filter_bit[(int) status];
+}
+
void diff_setup_done(struct diff_options *options)
{
unsigned check_mask = DIFF_FORMAT_NAME |
@@ -4683,6 +4771,12 @@ void diff_setup_done(struct diff_options *options)
if (!options->use_color || external_diff())
options->color_moved = 0;
+ if (options->filter_not) {
+ if (!options->filter)
+ options->filter = ~filter_bit[DIFF_STATUS_FILTER_AON];
+ options->filter &= ~options->filter_not;
+ }
+
FREE_AND_NULL(options->parseopts);
}
@@ -4774,43 +4868,6 @@ static int parse_dirstat_opt(struct diff_options *options, const char *params)
return 1;
}
-static const char diff_status_letters[] = {
- DIFF_STATUS_ADDED,
- DIFF_STATUS_COPIED,
- DIFF_STATUS_DELETED,
- DIFF_STATUS_MODIFIED,
- DIFF_STATUS_RENAMED,
- DIFF_STATUS_TYPE_CHANGED,
- DIFF_STATUS_UNKNOWN,
- DIFF_STATUS_UNMERGED,
- DIFF_STATUS_FILTER_AON,
- DIFF_STATUS_FILTER_BROKEN,
- '\0',
-};
-
-static unsigned int filter_bit['Z' + 1];
-
-static void prepare_filter_bits(void)
-{
- int i;
-
- if (!filter_bit[DIFF_STATUS_ADDED]) {
- for (i = 0; diff_status_letters[i]; i++)
- filter_bit[(int) diff_status_letters[i]] = (1 << i);
- }
-}
-
-static unsigned filter_bit_tst(char status, const struct diff_options *opt)
-{
- return opt->filter & filter_bit[(int) status];
-}
-
-unsigned diff_filter_bit(char status)
-{
- prepare_filter_bits();
- return filter_bit[(int) status];
-}
-
static int diff_opt_diff_filter(const struct option *option,
const char *optarg, int unset)
{
@@ -4820,21 +4877,6 @@ static int diff_opt_diff_filter(const struct option *option,
BUG_ON_OPT_NEG(unset);
prepare_filter_bits();
- /*
- * If there is a negation e.g. 'd' in the input, and we haven't
- * initialized the filter field with another --diff-filter, start
- * from full set of bits, except for AON.
- */
- if (!opt->filter) {
- for (i = 0; (optch = optarg[i]) != '\0'; i++) {
- if (optch < 'a' || 'z' < optch)
- continue;
- opt->filter = (1 << (ARRAY_SIZE(diff_status_letters) - 1)) - 1;
- opt->filter &= ~filter_bit[DIFF_STATUS_FILTER_AON];
- break;
- }
- }
-
for (i = 0; (optch = optarg[i]) != '\0'; i++) {
unsigned int bit;
int negate;
@@ -4851,7 +4893,7 @@ static int diff_opt_diff_filter(const struct option *option,
return error(_("unknown change class '%c' in --diff-filter=%s"),
optarg[i], optarg);
if (negate)
- opt->filter &= ~bit;
+ opt->filter_not |= bit;
else
opt->filter |= bit;
}
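With this change a negated (lowercase) --diff-filter class no longer clears bits in opt->filter immediately; it accumulates in the new filter_not mask, and diff_setup_done() (see the earlier hunk) subtracts it from the positive set, or from "everything except all-or-none" when no positive class was given, so option order no longer matters. A standalone sketch of that combining step, with made-up bit names that mirror the order of diff_status_letters:

    #include <stdio.h>

    #define BIT_DELETED     (1u << 2)   /* 'D' is third in diff_status_letters */
    #define BIT_MODIFIED    (1u << 3)
    #define BIT_ALL_OR_NONE (1u << 8)   /* stand-in for DIFF_STATUS_FILTER_AON */

    int main(void)
    {
            unsigned filter = 0, filter_not = 0;

            /* Parsing "--diff-filter=d": only the negation is recorded. */
            filter_not |= BIT_DELETED;

            /* Later, the equivalent of the diff_setup_done() hunk above: */
            if (filter_not) {
                    if (!filter)
                            filter = ~BIT_ALL_OR_NONE;  /* everything except AON */
                    filter &= ~filter_not;
            }

            printf("deleted shown?  %s\n", (filter & BIT_DELETED) ? "yes" : "no");
            printf("modified shown? %s\n", (filter & BIT_MODIFIED) ? "yes" : "no");
            return 0;
    }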
@@ -5596,7 +5638,7 @@ static void prep_parse_options(struct diff_options *options)
N_("select files by diff type"),
PARSE_OPT_NONEG, diff_opt_diff_filter),
{ OPTION_CALLBACK, 0, "output", options, N_("<file>"),
- N_("Output to a specific file"),
+ N_("output to a specific file"),
PARSE_OPT_NONEG, NULL, 0, diff_opt_output },
OPT_END()
@@ -5803,12 +5845,27 @@ int diff_unmodified_pair(struct diff_filepair *p)
static void diff_flush_patch(struct diff_filepair *p, struct diff_options *o)
{
- if (diff_unmodified_pair(p))
+ int include_conflict_headers =
+ (additional_headers(o, p->one->path) &&
+ (!o->filter || filter_bit_tst(DIFF_STATUS_UNMERGED, o)));
+
+ /*
+ * Check if we can return early without showing a diff. Note that
+ * diff_filepair only stores {oid, path, mode, is_valid}
+ * information for each path, and thus diff_unmodified_pair() only
+ * considers those bits of info. However, we do not want pairs
+ * created by create_filepairs_for_header_only_notifications()
+ * (which always look like unmodified pairs) to be ignored, so
+	 * return early only if p is unmodified AND we don't want to
+ * include_conflict_headers.
+ */
+ if (diff_unmodified_pair(p) && !include_conflict_headers)
return;
+ /* Actually, we can also return early to avoid showing tree diffs */
if ((DIFF_FILE_VALID(p->one) && S_ISDIR(p->one->mode)) ||
(DIFF_FILE_VALID(p->two) && S_ISDIR(p->two->mode)))
- return; /* no tree diffs in patch format */
+ return;
run_diff(p, o);
}
@@ -5839,10 +5896,17 @@ static void diff_flush_checkdiff(struct diff_filepair *p,
run_checkdiff(p, o);
}
-int diff_queue_is_empty(void)
+int diff_queue_is_empty(struct diff_options *o)
{
struct diff_queue_struct *q = &diff_queued_diff;
int i;
+ int include_conflict_headers =
+ (o->additional_path_headers &&
+ (!o->filter || filter_bit_tst(DIFF_STATUS_UNMERGED, o)));
+
+ if (include_conflict_headers)
+ return 0;
+
for (i = 0; i < q->nr; i++)
if (!diff_unmodified_pair(q->queue[i]))
return 0;
@@ -6276,6 +6340,54 @@ void diff_warn_rename_limit(const char *varname, int needed, int degraded_cc)
warning(_(rename_limit_advice), varname, needed);
}
+static void create_filepairs_for_header_only_notifications(struct diff_options *o)
+{
+ struct strset present;
+ struct diff_queue_struct *q = &diff_queued_diff;
+ struct hashmap_iter iter;
+ struct strmap_entry *e;
+ int i;
+
+ strset_init_with_options(&present, /*pool*/ NULL, /*strdup*/ 0);
+
+ /*
+ * Find out which paths exist in diff_queued_diff, preferring
+ * one->path for any pair that has multiple paths.
+ */
+ for (i = 0; i < q->nr; i++) {
+ struct diff_filepair *p = q->queue[i];
+ char *path = p->one->path ? p->one->path : p->two->path;
+
+ if (strmap_contains(o->additional_path_headers, path))
+ strset_add(&present, path);
+ }
+
+ /*
+ * Loop over paths in additional_path_headers; for each NOT already
+ * in diff_queued_diff, create a synthetic filepair and insert that
+ * into diff_queued_diff.
+ */
+ strmap_for_each_entry(o->additional_path_headers, &iter, e) {
+ if (!strset_contains(&present, e->key)) {
+ struct diff_filespec *one, *two;
+ struct diff_filepair *p;
+
+ one = alloc_filespec(e->key);
+ two = alloc_filespec(e->key);
+ fill_filespec(one, null_oid(), 0, 0);
+ fill_filespec(two, null_oid(), 0, 0);
+ p = diff_queue(q, one, two);
+ p->status = DIFF_STATUS_MODIFIED;
+ }
+ }
+
+ /* Re-sort the filepairs */
+ diffcore_fix_diff_index();
+
+ /* Cleanup */
+ strset_clear(&present);
+}
+
static void diff_flush_patch_all_file_pairs(struct diff_options *o)
{
int i;
@@ -6288,6 +6400,9 @@ static void diff_flush_patch_all_file_pairs(struct diff_options *o)
if (o->color_moved)
o->emitted_symbols = &esm;
+ if (o->additional_path_headers)
+ create_filepairs_for_header_only_notifications(o);
+
for (i = 0; i < q->nr; i++) {
struct diff_filepair *p = q->queue[i];
if (check_pair_status(p))
@@ -6345,6 +6460,8 @@ void diff_free(struct diff_options *options)
diff_free_file(options);
diff_free_ignore_regex(options);
+ clear_pathspec(&options->pathspec);
+ FREE_AND_NULL(options->parseopts);
}
void diff_flush(struct diff_options *options)
@@ -6358,7 +6475,7 @@ void diff_flush(struct diff_options *options)
* Order: raw, stat, summary, patch
* or: name/name-status/checkdiff (other bits clear)
*/
- if (!q->nr)
+ if (!q->nr && !options->additional_path_headers)
goto free_queue;
if (output_format & (DIFF_FORMAT_RAW |
diff --git a/diff.h b/diff.h
index 8ba85c5..8ae18e5 100644
--- a/diff.h
+++ b/diff.h
@@ -283,7 +283,7 @@ struct diff_options {
struct diff_flags flags;
/* diff-filter bits */
- unsigned int filter;
+ unsigned int filter, filter_not;
int use_color;
@@ -395,6 +395,7 @@ struct diff_options {
struct repository *repo;
struct option *parseopts;
+ struct strmap *additional_path_headers;
int no_free;
};
@@ -593,7 +594,7 @@ void diffcore_fix_diff_index(void);
" show all files diff when -S is used and hit is found.\n" \
" -a --text treat all files as text.\n"
-int diff_queue_is_empty(void);
+int diff_queue_is_empty(struct diff_options *o);
void diff_flush(struct diff_options*);
void diff_free(struct diff_options*);
void diff_warn_rename_limit(const char *varname, int needed, int degraded_cc);
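The additional_path_headers member added to struct diff_options above is a strmap from pathname to a strbuf of preformatted header lines; diff.c then emits those headers (and synthesizes header-only filepairs) even when the path has no content change. A hedged sketch of how a caller might populate it, assuming the strmap/strbuf APIs; the helper, path, and header text are purely illustrative:

    #include "cache.h"
    #include "diff.h"
    #include "strmap.h"

    static void add_header_only_notice(struct diff_options *opts,
                                       const char *path, const char *notice)
    {
            struct strbuf *headers = xmalloc(sizeof(*headers));

            strbuf_init(headers, 0);
            strbuf_addf(headers, "%s\n", notice);

            if (!opts->additional_path_headers) {
                    opts->additional_path_headers = xmalloc(sizeof(struct strmap));
                    strmap_init(opts->additional_path_headers);
            }
            /* diff.c looks the strbuf up by path via additional_headers(). */
            strmap_put(opts->additional_path_headers, path, headers);
    }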
diff --git a/diffcore-rename.c b/diffcore-rename.c
index bebd4ed..c0422d9 100644
--- a/diffcore-rename.c
+++ b/diffcore-rename.c
@@ -261,7 +261,7 @@ static unsigned int hash_filespec(struct repository *r,
if (diff_populate_filespec(r, filespec, NULL))
return 0;
hash_object_file(r->hash_algo, filespec->data, filespec->size,
- "blob", &filespec->oid);
+ OBJ_BLOB, &filespec->oid);
}
return oidhash(&filespec->oid);
}
diff --git a/dir.c b/dir.c
index d91295f..f2b0f24 100644
--- a/dir.c
+++ b/dir.c
@@ -1113,7 +1113,7 @@ static int add_patterns(const char *fname, const char *base, int baselen,
&istate->cache[pos]->oid);
else
hash_object_file(the_hash_algo, buf, size,
- "blob", &oid_stat->oid);
+ OBJ_BLOB, &oid_stat->oid);
fill_stat_data(&oid_stat->stat, &st);
oid_stat->valid = 1;
}
@@ -1463,10 +1463,11 @@ static int path_in_sparse_checkout_1(const char *path,
const char *end, *slash;
/*
- * We default to accepting a path if there are no patterns or
- * they are of the wrong type.
+ * We default to accepting a path if the path is empty, there are no
+ * patterns, or the patterns are of the wrong type.
*/
- if (init_sparse_checkout_patterns(istate) ||
+ if (!*path ||
+ init_sparse_checkout_patterns(istate) ||
(require_cone_mode &&
!istate->sparse_checkout_patterns->use_cone_patterns))
return 1;
@@ -2781,7 +2782,8 @@ void remove_untracked_cache(struct index_state *istate)
static struct untracked_cache_dir *validate_untracked_cache(struct dir_struct *dir,
int base_len,
- const struct pathspec *pathspec)
+ const struct pathspec *pathspec,
+ struct index_state *istate)
{
struct untracked_cache_dir *root;
static int untracked_cache_disabled = -1;
@@ -2845,8 +2847,11 @@ static struct untracked_cache_dir *validate_untracked_cache(struct dir_struct *d
return NULL;
}
- if (!dir->untracked->root)
+ if (!dir->untracked->root) {
+ /* Untracked cache existed but is not initialized; fix that */
FLEX_ALLOC_STR(dir->untracked->root, name, "");
+ istate->cache_changed |= UNTRACKED_CHANGED;
+ }
/* Validate $GIT_DIR/info/exclude and core.excludesfile */
root = dir->untracked->root;
@@ -2916,7 +2921,7 @@ int read_directory(struct dir_struct *dir, struct index_state *istate,
return dir->nr;
}
- untracked = validate_untracked_cache(dir, len, pathspec);
+ untracked = validate_untracked_cache(dir, len, pathspec, istate);
if (!untracked)
/*
* make sure untracked cache code path is disabled,
@@ -2936,7 +2941,9 @@ int read_directory(struct dir_struct *dir, struct index_state *istate,
if (force_untracked_cache < 0)
force_untracked_cache =
- git_env_bool("GIT_FORCE_UNTRACKED_CACHE", 0);
+ git_env_bool("GIT_FORCE_UNTRACKED_CACHE", -1);
+ if (force_untracked_cache < 0)
+ force_untracked_cache = (istate->repo->settings.core_untracked_cache == UNTRACKED_CACHE_WRITE);
if (force_untracked_cache &&
dir->untracked == istate->untracked &&
(dir->untracked->dir_opened ||
diff --git a/environment.c b/environment.c
index fd0501e..5bff1b3 100644
--- a/environment.c
+++ b/environment.c
@@ -42,8 +42,10 @@ const char *git_attributes_file;
const char *git_hooks_path;
int zlib_compression_level = Z_BEST_SPEED;
int pack_compression_level = Z_DEFAULT_COMPRESSION;
-int fsync_object_files;
+int fsync_object_files = -1;
int use_fsync = -1;
+enum fsync_method fsync_method = FSYNC_METHOD_DEFAULT;
+enum fsync_component fsync_components = FSYNC_COMPONENTS_DEFAULT;
size_t packed_git_window_size = DEFAULT_PACKED_GIT_WINDOW_SIZE;
size_t packed_git_limit = DEFAULT_PACKED_GIT_LIMIT;
size_t delta_base_cache_limit = 96 * 1024 * 1024;
@@ -70,6 +72,7 @@ char *notes_ref_name;
int grafts_replace_parents = 1;
int core_apply_sparse_checkout;
int core_sparse_checkout_cone;
+int sparse_expect_files_outside_of_patterns;
int merge_log_config = -1;
int precomposed_unicode = -1; /* see probe_utf8_pathname_composition() */
unsigned long pack_size_limit_cfg;
@@ -84,7 +87,6 @@ int protect_hfs = PROTECT_HFS_DEFAULT;
#define PROTECT_NTFS_DEFAULT 1
#endif
int protect_ntfs = PROTECT_NTFS_DEFAULT;
-const char *core_fsmonitor;
/*
* The character that begins a commented line in user-editable file
diff --git a/fetch-negotiator.c b/fetch-negotiator.c
index 2733902..be38336 100644
--- a/fetch-negotiator.c
+++ b/fetch-negotiator.c
@@ -18,8 +18,13 @@ void fetch_negotiator_init(struct repository *r,
noop_negotiator_init(negotiator);
return;
- case FETCH_NEGOTIATION_DEFAULT:
+ case FETCH_NEGOTIATION_CONSECUTIVE:
default_negotiator_init(negotiator);
return;
}
}
+
+void fetch_negotiator_init_noop(struct fetch_negotiator *negotiator)
+{
+ noop_negotiator_init(negotiator);
+}
diff --git a/fetch-negotiator.h b/fetch-negotiator.h
index ea78868..e348905 100644
--- a/fetch-negotiator.h
+++ b/fetch-negotiator.h
@@ -53,7 +53,15 @@ struct fetch_negotiator {
void *data;
};
+/*
+ * Initialize a negotiator based on the repository settings.
+ */
void fetch_negotiator_init(struct repository *r,
struct fetch_negotiator *negotiator);
+/*
+ * Initialize a noop negotiator.
+ */
+void fetch_negotiator_init_noop(struct fetch_negotiator *negotiator);
+
#endif
diff --git a/fetch-pack.c b/fetch-pack.c
index dd6ec44..4e1e88e 100644
--- a/fetch-pack.c
+++ b/fetch-pack.c
@@ -312,19 +312,21 @@ static int find_common(struct fetch_negotiator *negotiator,
const char *remote_hex;
struct object *o;
- /*
- * If that object is complete (i.e. it is an ancestor of a
- * local ref), we tell them we have it but do not have to
- * tell them about its ancestors, which they already know
- * about.
- *
- * We use lookup_object here because we are only
- * interested in the case we *know* the object is
- * reachable and we have already scanned it.
- */
- if (((o = lookup_object(the_repository, remote)) != NULL) &&
- (o->flags & COMPLETE)) {
- continue;
+ if (!args->refetch) {
+ /*
+ * If that object is complete (i.e. it is an ancestor of a
+ * local ref), we tell them we have it but do not have to
+ * tell them about its ancestors, which they already know
+ * about.
+ *
+ * We use lookup_object here because we are only
+ * interested in the case we *know* the object is
+ * reachable and we have already scanned it.
+ */
+ if (((o = lookup_object(the_repository, remote)) != NULL) &&
+ (o->flags & COMPLETE)) {
+ continue;
+ }
}
remote_hex = oid_to_hex(remote);
@@ -692,30 +694,37 @@ static void mark_complete_and_common_ref(struct fetch_negotiator *negotiator,
int old_save_commit_buffer = save_commit_buffer;
timestamp_t cutoff = 0;
+ if (args->refetch)
+ return;
+
save_commit_buffer = 0;
trace2_region_enter("fetch-pack", "parse_remote_refs_and_find_cutoff", NULL);
for (ref = *refs; ref; ref = ref->next) {
- struct object *o;
+ struct commit *commit;
+
+ commit = lookup_commit_in_graph(the_repository, &ref->old_oid);
+ if (!commit) {
+ struct object *o;
- if (!has_object_file_with_flags(&ref->old_oid,
+ if (!has_object_file_with_flags(&ref->old_oid,
OBJECT_INFO_QUICK |
- OBJECT_INFO_SKIP_FETCH_OBJECT))
- continue;
- o = parse_object(the_repository, &ref->old_oid);
- if (!o)
- continue;
+ OBJECT_INFO_SKIP_FETCH_OBJECT))
+ continue;
+ o = parse_object(the_repository, &ref->old_oid);
+ if (!o || o->type != OBJ_COMMIT)
+ continue;
+
+ commit = (struct commit *)o;
+ }
/*
* We already have it -- which may mean that we were
* in sync with the other side at some time after
* that (it is OK if we guess wrong here).
*/
- if (o->type == OBJ_COMMIT) {
- struct commit *commit = (struct commit *)o;
- if (!cutoff || cutoff < commit->date)
- cutoff = commit->date;
- }
+ if (!cutoff || cutoff < commit->date)
+ cutoff = commit->date;
}
trace2_region_leave("fetch-pack", "parse_remote_refs_and_find_cutoff", NULL);
@@ -1024,7 +1033,11 @@ static struct ref *do_fetch_pack(struct fetch_pack_args *args,
struct fetch_negotiator *negotiator;
negotiator = &negotiator_alloc;
- fetch_negotiator_init(r, negotiator);
+ if (args->refetch) {
+ fetch_negotiator_init_noop(negotiator);
+ } else {
+ fetch_negotiator_init(r, negotiator);
+ }
sort_ref_list(&ref, ref_compare_name);
QSORT(sought, nr_sought, cmp_ref_by_name);
@@ -1117,7 +1130,7 @@ static struct ref *do_fetch_pack(struct fetch_pack_args *args,
mark_complete_and_common_ref(negotiator, args, &ref);
filter_refs(args, &ref, sought, nr_sought);
- if (everything_local(args, &ref)) {
+ if (!args->refetch && everything_local(args, &ref)) {
packet_flush(fd[1]);
goto all_done;
}
@@ -1415,9 +1428,17 @@ static int process_ack(struct fetch_negotiator *negotiator,
* otherwise.
*/
if (*received_ready && reader->status != PACKET_READ_DELIM)
- die(_("expected packfile to be sent after 'ready'"));
+ /*
+ * TRANSLATORS: The parameter will be 'ready', a protocol
+ * keyword.
+ */
+ die(_("expected packfile to be sent after '%s'"), "ready");
if (!*received_ready && reader->status != PACKET_READ_FLUSH)
- die(_("expected no other sections to be sent after no 'ready'"));
+ /*
+ * TRANSLATORS: The parameter will be 'ready', a protocol
+ * keyword.
+ */
+ die(_("expected no other sections to be sent after no '%s'"), "ready");
return 0;
}
@@ -1575,7 +1596,10 @@ static struct ref *do_fetch_pack_v2(struct fetch_pack_args *args,
struct strvec index_pack_args = STRVEC_INIT;
negotiator = &negotiator_alloc;
- fetch_negotiator_init(r, negotiator);
+ if (args->refetch)
+ fetch_negotiator_init_noop(negotiator);
+ else
+ fetch_negotiator_init(r, negotiator);
packet_reader_init(&reader, fd[0], NULL, 0,
PACKET_READ_CHOMP_NEWLINE |
@@ -1601,7 +1625,7 @@ static struct ref *do_fetch_pack_v2(struct fetch_pack_args *args,
/* Filter 'ref' by 'sought' and those that aren't local */
mark_complete_and_common_ref(negotiator, args, &ref);
filter_refs(args, &ref, sought, nr_sought);
- if (everything_local(args, &ref))
+ if (!args->refetch && everything_local(args, &ref))
state = FETCH_DONE;
else
state = FETCH_SEND_REQUEST;
diff --git a/fetch-pack.h b/fetch-pack.h
index 7f94a2a..8c7752f 100644
--- a/fetch-pack.h
+++ b/fetch-pack.h
@@ -42,6 +42,7 @@ struct fetch_pack_args {
unsigned update_shallow:1;
unsigned reject_shallow_remote:1;
unsigned deepen:1;
+ unsigned refetch:1;
/*
* Indicate that the remote of this request is a promisor remote. The
diff --git a/fsmonitor--daemon.h b/fsmonitor--daemon.h
new file mode 100644
index 0000000..bd09fff
--- /dev/null
+++ b/fsmonitor--daemon.h
@@ -0,0 +1,166 @@
+#ifndef FSMONITOR_DAEMON_H
+#define FSMONITOR_DAEMON_H
+
+#ifdef HAVE_FSMONITOR_DAEMON_BACKEND
+
+#include "cache.h"
+#include "dir.h"
+#include "run-command.h"
+#include "simple-ipc.h"
+#include "thread-utils.h"
+
+struct fsmonitor_batch;
+struct fsmonitor_token_data;
+
+/*
+ * Create a new batch of path(s). The returned batch is considered
+ * private and not linked into the fsmonitor daemon state. The caller
+ * should fill this batch with one or more paths and then publish it.
+ */
+struct fsmonitor_batch *fsmonitor_batch__new(void);
+
+/*
+ * Free the list of batches starting with this one.
+ */
+void fsmonitor_batch__free_list(struct fsmonitor_batch *batch);
+
+/*
+ * Add this path to this batch of modified files.
+ *
+ * The batch should be private and NOT (yet) linked into the fsmonitor
+ * daemon state and therefore not yet visible to worker threads and so
+ * no locking is required.
+ */
+void fsmonitor_batch__add_path(struct fsmonitor_batch *batch, const char *path);
+
+struct fsmonitor_daemon_backend_data; /* opaque platform-specific data */
+
+struct fsmonitor_daemon_state {
+ pthread_t listener_thread;
+ pthread_mutex_t main_lock;
+
+ struct strbuf path_worktree_watch;
+ struct strbuf path_gitdir_watch;
+ int nr_paths_watching;
+
+ struct fsmonitor_token_data *current_token_data;
+
+ struct strbuf path_cookie_prefix;
+ pthread_cond_t cookies_cond;
+ int cookie_seq;
+ struct hashmap cookies;
+
+ int error_code;
+ struct fsmonitor_daemon_backend_data *backend_data;
+
+ struct ipc_server_data *ipc_server_data;
+};
+
+/*
+ * Pathname classifications.
+ *
+ * The daemon classifies the pathnames that it receives from file
+ * system notification events into the following categories and uses
+ * that to decide whether clients are told about them. (And to watch
+ * for file system synchronization events.)
+ *
+ * The daemon only collects and reports on the set of modified paths
+ * within the working directory (proper).
+ *
+ * The client should only care about paths within the working
+ * directory proper (inside the working directory, excluding ".git"
+ * itself and everything under ".git/").  That is, the client has read
+ * the index and is
+ * asking for a list of any paths in the working directory that have
+ * been modified since the last token. The client does not care about
+ * file system changes within the ".git/" directory (such as new loose
+ * objects or packfiles). So the client will only receive paths that
+ * are classified as IS_WORKDIR_PATH.
+ *
+ * Note that ".git" is usually a directory and is therefore inside
+ * the cone of the FS watch that we have on the working directory root,
+ * so we will also get FS events for disk activity on and within ".git/"
+ * that we need to respond to or filter from the client.
+ *
+ * But Git also allows ".git" to be a *file* that points to a GITDIR
+ * outside of the working directory. When this happens, we need to
+ * create FS watches on both the working directory root *and* on the
+ * (external) GITDIR root. (The latter is required because we put
+ * cookie files inside it and use them to sync with the FS event
+ * stream.)
+ *
+ * Note that in the context of this discussion, I'm using "GITDIR"
+ * to only mean an external GITDIR referenced by a ".git" file.
+ *
+ * The platform FS event backends will receive watch-specific
+ * relative paths (except for those OS's that always emit absolute
+ * paths). We use the following enum and routines to classify each
+ * path so that we know how to handle it. There is a slight asymmetry
+ * here because ".git/" is inside the working directory and the
+ * (external) GITDIR is not, and therefore how we handle events may
+ * vary slightly, so I have different enums for "IS...DOT_GIT..." and
+ * "IS...GITDIR...".
+ *
+ * The daemon uses the IS_DOT_GIT and IS_GITDIR internally to mean the
+ * exact ".git" file/directory or GITDIR directory. If the daemon
+ * receives a delete event for either of these paths, it will
+ * automatically shutdown, for example.
+ *
+ * Note that the daemon DOES NOT explicitly watch nor special case the
+ * index. The daemon does not read the index nor have any internal
+ * index-relative state, so there are no "IS...INDEX..." enum values.
+ */
+enum fsmonitor_path_type {
+ IS_WORKDIR_PATH = 0,
+
+ IS_DOT_GIT,
+ IS_INSIDE_DOT_GIT,
+ IS_INSIDE_DOT_GIT_WITH_COOKIE_PREFIX,
+
+ IS_GITDIR,
+ IS_INSIDE_GITDIR,
+ IS_INSIDE_GITDIR_WITH_COOKIE_PREFIX,
+
+ IS_OUTSIDE_CONE,
+};
+
+/*
+ * Classify a pathname relative to the root of the working directory.
+ */
+enum fsmonitor_path_type fsmonitor_classify_path_workdir_relative(
+ const char *relative_path);
+
+/*
+ * Classify a pathname relative to a <gitdir> that is external to the
+ * worktree directory.
+ */
+enum fsmonitor_path_type fsmonitor_classify_path_gitdir_relative(
+ const char *relative_path);
+
+/*
+ * Classify an absolute pathname received from a filesystem event.
+ */
+enum fsmonitor_path_type fsmonitor_classify_path_absolute(
+ struct fsmonitor_daemon_state *state,
+ const char *path);
+
+/*
+ * Prepend this batch of path(s) onto the list of batches associated
+ * with the current token. This makes the batch visible to worker threads.
+ *
+ * The caller no longer owns the batch and must not free it.
+ *
+ * Wake up the client threads waiting on these cookies.
+ */
+void fsmonitor_publish(struct fsmonitor_daemon_state *state,
+ struct fsmonitor_batch *batch,
+ const struct string_list *cookie_names);
+
+/*
+ * If the platform-specific layer loses sync with the filesystem,
+ * it should call this to invalidate cached data and abort waiting
+ * threads.
+ */
+void fsmonitor_force_resync(struct fsmonitor_daemon_state *state);
+
+#endif /* HAVE_FSMONITOR_DAEMON_BACKEND */
+#endif /* FSMONITOR_DAEMON_H */
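The long classification comment above reduces to a simple contract: only IS_WORKDIR_PATH events are ever reported to clients, while the ".git"/GITDIR classifications drive internal behaviour such as cookie handling and automatic shutdown. A hedged sketch of how a platform backend might use it; handle_backend_event() is hypothetical, and only the enum values and declarations come from the header above:

    #include "fsmonitor--daemon.h"

    #ifdef HAVE_FSMONITOR_DAEMON_BACKEND
    static void handle_backend_event(struct fsmonitor_daemon_state *state,
                                     struct fsmonitor_batch *batch,
                                     const char *rel_path)
    {
            switch (fsmonitor_classify_path_workdir_relative(rel_path)) {
            case IS_WORKDIR_PATH:
                    /* Only working-directory paths are reported to clients. */
                    fsmonitor_batch__add_path(batch, rel_path);
                    break;
            case IS_DOT_GIT:
                    /* e.g. deletion of ".git" itself should shut the daemon down. */
                    break;
            default:
                    /* ".git/" internals, cookie files, etc. are filtered out or
                     * handled specially rather than sent to clients. */
                    break;
            }
    }
    #endif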
diff --git a/fsmonitor-ipc.c b/fsmonitor-ipc.c
new file mode 100644
index 0000000..789e739
--- /dev/null
+++ b/fsmonitor-ipc.c
@@ -0,0 +1,171 @@
+#include "cache.h"
+#include "fsmonitor.h"
+#include "simple-ipc.h"
+#include "fsmonitor-ipc.h"
+#include "run-command.h"
+#include "strbuf.h"
+#include "trace2.h"
+
+#ifndef HAVE_FSMONITOR_DAEMON_BACKEND
+
+/*
+ * A trivial implementation of the fsmonitor_ipc__ API for unsupported
+ * platforms.
+ */
+
+int fsmonitor_ipc__is_supported(void)
+{
+ return 0;
+}
+
+const char *fsmonitor_ipc__get_path(void)
+{
+ return NULL;
+}
+
+enum ipc_active_state fsmonitor_ipc__get_state(void)
+{
+ return IPC_STATE__OTHER_ERROR;
+}
+
+int fsmonitor_ipc__send_query(const char *since_token,
+ struct strbuf *answer)
+{
+ return -1;
+}
+
+int fsmonitor_ipc__send_command(const char *command,
+ struct strbuf *answer)
+{
+ return -1;
+}
+
+#else
+
+int fsmonitor_ipc__is_supported(void)
+{
+ return 1;
+}
+
+GIT_PATH_FUNC(fsmonitor_ipc__get_path, "fsmonitor--daemon.ipc")
+
+enum ipc_active_state fsmonitor_ipc__get_state(void)
+{
+ return ipc_get_active_state(fsmonitor_ipc__get_path());
+}
+
+static int spawn_daemon(void)
+{
+ const char *args[] = { "fsmonitor--daemon", "start", NULL };
+
+ return run_command_v_opt_tr2(args, RUN_COMMAND_NO_STDIN | RUN_GIT_CMD,
+ "fsmonitor");
+}
+
+int fsmonitor_ipc__send_query(const char *since_token,
+ struct strbuf *answer)
+{
+ int ret = -1;
+ int tried_to_spawn = 0;
+ enum ipc_active_state state = IPC_STATE__OTHER_ERROR;
+ struct ipc_client_connection *connection = NULL;
+ struct ipc_client_connect_options options
+ = IPC_CLIENT_CONNECT_OPTIONS_INIT;
+ const char *tok = since_token ? since_token : "";
+ size_t tok_len = since_token ? strlen(since_token) : 0;
+
+ options.wait_if_busy = 1;
+ options.wait_if_not_found = 0;
+
+ trace2_region_enter("fsm_client", "query", NULL);
+ trace2_data_string("fsm_client", NULL, "query/command", tok);
+
+try_again:
+ state = ipc_client_try_connect(fsmonitor_ipc__get_path(), &options,
+ &connection);
+
+ switch (state) {
+ case IPC_STATE__LISTENING:
+ ret = ipc_client_send_command_to_connection(
+ connection, tok, tok_len, answer);
+ ipc_client_close_connection(connection);
+
+ trace2_data_intmax("fsm_client", NULL,
+ "query/response-length", answer->len);
+ goto done;
+
+ case IPC_STATE__NOT_LISTENING:
+ case IPC_STATE__PATH_NOT_FOUND:
+ if (tried_to_spawn)
+ goto done;
+
+ tried_to_spawn++;
+ if (spawn_daemon())
+ goto done;
+
+ /*
+ * Try again, but this time give the daemon a chance to
+ * actually create the pipe/socket.
+ *
+	 * Granted, the daemon just started, so it cannot possibly have
+	 * any FS events cached yet; we'll always get a trivial answer.
+ * BUT the answer should include a new token that can serve
+ * as the basis for subsequent requests.
+ */
+ options.wait_if_not_found = 1;
+ goto try_again;
+
+ case IPC_STATE__INVALID_PATH:
+ ret = error(_("fsmonitor_ipc__send_query: invalid path '%s'"),
+ fsmonitor_ipc__get_path());
+ goto done;
+
+ case IPC_STATE__OTHER_ERROR:
+ default:
+ ret = error(_("fsmonitor_ipc__send_query: unspecified error on '%s'"),
+ fsmonitor_ipc__get_path());
+ goto done;
+ }
+
+done:
+ trace2_region_leave("fsm_client", "query", NULL);
+
+ return ret;
+}
+
+int fsmonitor_ipc__send_command(const char *command,
+ struct strbuf *answer)
+{
+ struct ipc_client_connection *connection = NULL;
+ struct ipc_client_connect_options options
+ = IPC_CLIENT_CONNECT_OPTIONS_INIT;
+ int ret;
+ enum ipc_active_state state;
+ const char *c = command ? command : "";
+ size_t c_len = command ? strlen(command) : 0;
+
+ strbuf_reset(answer);
+
+ options.wait_if_busy = 1;
+ options.wait_if_not_found = 0;
+
+ state = ipc_client_try_connect(fsmonitor_ipc__get_path(), &options,
+ &connection);
+ if (state != IPC_STATE__LISTENING) {
+ die(_("fsmonitor--daemon is not running"));
+ return -1;
+ }
+
+ ret = ipc_client_send_command_to_connection(connection, c, c_len,
+ answer);
+ ipc_client_close_connection(connection);
+
+ if (ret == -1) {
+ die(_("could not send '%s' command to fsmonitor--daemon"), c);
+ return -1;
+ }
+
+ return 0;
+}
+
+#endif
diff --git a/fsmonitor-ipc.h b/fsmonitor-ipc.h
new file mode 100644
index 0000000..b6a7067
--- /dev/null
+++ b/fsmonitor-ipc.h
@@ -0,0 +1,48 @@
+#ifndef FSMONITOR_IPC_H
+#define FSMONITOR_IPC_H
+
+#include "simple-ipc.h"
+
+/*
+ * Returns true if built-in file system monitor daemon is defined
+ * for this platform.
+ */
+int fsmonitor_ipc__is_supported(void);
+
+/*
+ * Returns the pathname to the IPC named pipe or Unix domain socket
+ * where a `git-fsmonitor--daemon` process will listen. This is a
+ * per-worktree value.
+ *
+ * Returns NULL if the daemon is not supported on this platform.
+ */
+const char *fsmonitor_ipc__get_path(void);
+
+/*
+ * Try to determine whether there is a `git-fsmonitor--daemon` process
+ * listening on the IPC pipe/socket.
+ */
+enum ipc_active_state fsmonitor_ipc__get_state(void);
+
+/*
+ * Connect to a `git-fsmonitor--daemon` process via simple-ipc
+ * and ask for the set of changed files since the given token.
+ *
+ * Spawn a daemon process in the background if necessary.
+ *
+ * Returns -1 on error; 0 on success.
+ */
+int fsmonitor_ipc__send_query(const char *since_token,
+ struct strbuf *answer);
+
+/*
+ * Connect to a `git-fsmonitor--daemon` process via simple-ipc and
+ * send a command verb. If no daemon is available, we DO NOT try to
+ * start one.
+ *
+ * Returns -1 on error; 0 on success.
+ */
+int fsmonitor_ipc__send_command(const char *command,
+ struct strbuf *answer);
+
+#endif /* FSMONITOR_IPC_H */
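A client of this IPC layer checks for platform support and then sends whatever token it saved from the previous response; an empty or NULL token asks the daemon for a fresh baseline, and fsmonitor_ipc__send_query() will spawn the daemon on demand as described above. A minimal caller sketch using only the functions declared in this header (the wrapper itself is illustrative):

    #include "cache.h"
    #include "fsmonitor-ipc.h"

    static int query_builtin_fsmonitor(const char *last_token)
    {
            struct strbuf answer = STRBUF_INIT;
            int ret;

            if (!fsmonitor_ipc__is_supported())
                    return -1;  /* built without a daemon backend */

            /* Returns 0 on success and fills `answer` with the daemon's reply. */
            ret = fsmonitor_ipc__send_query(last_token, &answer);
            if (!ret)
                    trace_printf("fsmonitor daemon replied with %u bytes",
                                 (unsigned int)answer.len);

            strbuf_release(&answer);
            return ret;
    }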
diff --git a/fsmonitor-settings.c b/fsmonitor-settings.c
new file mode 100644
index 0000000..757d230
--- /dev/null
+++ b/fsmonitor-settings.c
@@ -0,0 +1,114 @@
+#include "cache.h"
+#include "config.h"
+#include "repository.h"
+#include "fsmonitor-settings.h"
+
+/*
+ * We keep this structure definition private and have getters
+ * for all fields so that we can lazy load it as needed.
+ */
+struct fsmonitor_settings {
+ enum fsmonitor_mode mode;
+ char *hook_path;
+};
+
+static void lookup_fsmonitor_settings(struct repository *r)
+{
+ struct fsmonitor_settings *s;
+ const char *const_str;
+ int bool_value;
+
+ if (r->settings.fsmonitor)
+ return;
+
+ CALLOC_ARRAY(s, 1);
+ s->mode = FSMONITOR_MODE_DISABLED;
+
+ r->settings.fsmonitor = s;
+
+ /*
+ * Overload the existing "core.fsmonitor" config setting (which
+ * has historically been either unset or a hook pathname) to
+ * now allow a boolean value to enable the builtin FSMonitor
+ * or to turn everything off. (This does imply that you can't
+ * use a hook script named "true" or "false", but that's OK.)
+ */
+ switch (repo_config_get_maybe_bool(r, "core.fsmonitor", &bool_value)) {
+
+ case 0: /* config value was set to <bool> */
+ if (bool_value)
+ fsm_settings__set_ipc(r);
+ return;
+
+ case 1: /* config value was unset */
+ const_str = getenv("GIT_TEST_FSMONITOR");
+ break;
+
+ case -1: /* config value set to an arbitrary string */
+ if (repo_config_get_pathname(r, "core.fsmonitor", &const_str))
+ return; /* should not happen */
+ break;
+
+ default: /* should not happen */
+ return;
+ }
+
+ if (!const_str || !*const_str)
+ return;
+
+ fsm_settings__set_hook(r, const_str);
+}
+
+enum fsmonitor_mode fsm_settings__get_mode(struct repository *r)
+{
+ if (!r)
+ r = the_repository;
+
+ lookup_fsmonitor_settings(r);
+
+ return r->settings.fsmonitor->mode;
+}
+
+const char *fsm_settings__get_hook_path(struct repository *r)
+{
+ if (!r)
+ r = the_repository;
+
+ lookup_fsmonitor_settings(r);
+
+ return r->settings.fsmonitor->hook_path;
+}
+
+void fsm_settings__set_ipc(struct repository *r)
+{
+ if (!r)
+ r = the_repository;
+
+ lookup_fsmonitor_settings(r);
+
+ r->settings.fsmonitor->mode = FSMONITOR_MODE_IPC;
+ FREE_AND_NULL(r->settings.fsmonitor->hook_path);
+}
+
+void fsm_settings__set_hook(struct repository *r, const char *path)
+{
+ if (!r)
+ r = the_repository;
+
+ lookup_fsmonitor_settings(r);
+
+ r->settings.fsmonitor->mode = FSMONITOR_MODE_HOOK;
+ FREE_AND_NULL(r->settings.fsmonitor->hook_path);
+ r->settings.fsmonitor->hook_path = strdup(path);
+}
+
+void fsm_settings__set_disabled(struct repository *r)
+{
+ if (!r)
+ r = the_repository;
+
+ lookup_fsmonitor_settings(r);
+
+ r->settings.fsmonitor->mode = FSMONITOR_MODE_DISABLED;
+ FREE_AND_NULL(r->settings.fsmonitor->hook_path);
+}
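[Editor's note: a minimal sketch, not part of this patch, of how a consumer might use the lazily loaded settings above; the helper names are hypothetical, only the fsm_settings__* getters are assumed.]

#include "cache.h"
#include "repository.h"
#include "fsmonitor-settings.h"

/*
 * Hypothetical sketch: callers no longer read core_fsmonitor directly;
 * they branch on the lazily loaded mode instead.
 */
static const char *active_hook_or_null(struct repository *r)
{
	if (fsm_settings__get_mode(r) != FSMONITOR_MODE_HOOK)
		return NULL;	/* disabled, or using the builtin daemon */
	return fsm_settings__get_hook_path(r);
}

static int fsmonitor_enabled(struct repository *r)
{
	/* HOOK and IPC both sort above DISABLED in the enum */
	return fsm_settings__get_mode(r) > FSMONITOR_MODE_DISABLED;
}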
diff --git a/fsmonitor-settings.h b/fsmonitor-settings.h
new file mode 100644
index 0000000..a4c5d7b
--- /dev/null
+++ b/fsmonitor-settings.h
@@ -0,0 +1,21 @@
+#ifndef FSMONITOR_SETTINGS_H
+#define FSMONITOR_SETTINGS_H
+
+struct repository;
+
+enum fsmonitor_mode {
+ FSMONITOR_MODE_DISABLED = 0,
+ FSMONITOR_MODE_HOOK = 1, /* core.fsmonitor=<hook_path> */
+ FSMONITOR_MODE_IPC = 2, /* core.fsmonitor=<true> */
+};
+
+void fsm_settings__set_ipc(struct repository *r);
+void fsm_settings__set_hook(struct repository *r, const char *path);
+void fsm_settings__set_disabled(struct repository *r);
+
+enum fsmonitor_mode fsm_settings__get_mode(struct repository *r);
+const char *fsm_settings__get_hook_path(struct repository *r);
+
+struct fsmonitor_settings;
+
+#endif /* FSMONITOR_SETTINGS_H */
diff --git a/fsmonitor.c b/fsmonitor.c
index ab9bfc6..292a674 100644
--- a/fsmonitor.c
+++ b/fsmonitor.c
@@ -3,6 +3,7 @@
#include "dir.h"
#include "ewah/ewok.h"
#include "fsmonitor.h"
+#include "fsmonitor-ipc.h"
#include "run-command.h"
#include "strbuf.h"
@@ -148,15 +149,18 @@ void write_fsmonitor_extension(struct strbuf *sb, struct index_state *istate)
/*
* Call the query-fsmonitor hook passing the last update token of the saved results.
*/
-static int query_fsmonitor(int version, const char *last_update, struct strbuf *query_result)
+static int query_fsmonitor_hook(struct repository *r,
+ int version,
+ const char *last_update,
+ struct strbuf *query_result)
{
struct child_process cp = CHILD_PROCESS_INIT;
int result;
- if (!core_fsmonitor)
+ if (fsm_settings__get_mode(r) != FSMONITOR_MODE_HOOK)
return -1;
- strvec_push(&cp.args, core_fsmonitor);
+ strvec_push(&cp.args, fsm_settings__get_hook_path(r));
strvec_pushf(&cp.args, "%d", version);
strvec_pushf(&cp.args, "%s", last_update);
cp.use_shell = 1;
@@ -168,29 +172,15 @@ static int query_fsmonitor(int version, const char *last_update, struct strbuf *
if (result)
trace2_data_intmax("fsm_hook", NULL, "query/failed", result);
- else {
+ else
trace2_data_intmax("fsm_hook", NULL, "query/response-length",
query_result->len);
- if (fsmonitor_is_trivial_response(query_result))
- trace2_data_intmax("fsm_hook", NULL,
- "query/trivial-response", 1);
- }
-
trace2_region_leave("fsm_hook", "query", NULL);
return result;
}
-int fsmonitor_is_trivial_response(const struct strbuf *query_result)
-{
- static char trivial_response[3] = { '\0', '/', '\0' };
-
- return query_result->len >= 3 &&
- !memcmp(trivial_response,
- &query_result->buf[query_result->len - 3], 3);
-}
-
static void fsmonitor_refresh_callback(struct index_state *istate, char *name)
{
int i, len = strlen(name);
@@ -229,6 +219,43 @@ static void fsmonitor_refresh_callback(struct index_state *istate, char *name)
untracked_cache_invalidate_path(istate, name, 0);
}
+/*
+ * The number of pathnames that we need to receive from FSMonitor
+ * before we force the index to be updated.
+ *
+ * Note that any pathname within the set of received paths MAY cause
+ * cache-entry or istate flag bits to be updated and thus cause the
+ * index to be updated on disk.
+ *
+ * However, the response may contain many paths (such as ignored
+ * paths) that will not update any flag bits and thus will not force
+ * the index to be updated. (This is fine and normal.) It also means
+ * that the token will not be updated in the FSMonitor index
+ * extension. So the next Git command will find the same token in the
+ * index, make the same token-relative request, and receive the same
+ * response (plus any newly changed paths). If this response is large
+ * (and continues to grow), performance could be impacted.
+ *
+ * For example, if the user runs a build and it writes 100K object
+ * files but doesn't modify any source files, the index would not need
+ * to be updated. The FSMonitor response (after the build and
+ * relative to a pre-build token) might be 5MB. Each subsequent Git
+ * command will receive that same 100K/5MB response until something
+ * causes the index to be updated. And `refresh_fsmonitor()` will
+ * have to iterate over those 100K paths each time.
+ *
+ * Performance could be improved if we optionally force update the
+ * index after a very large response and get an updated token into
+ * the FSMonitor index extension. This should allow subsequent
+ * commands to get smaller and more current responses.
+ *
+ * The value chosen here does not need to be precise. The index
+ * will be updated automatically the first time the user touches
+ * a tracked file and causes a command like `git status` to
+ * update an mtime and/or set a flag bit.
+ */
+static int fsmonitor_force_update_threshold = 100;
+
void refresh_fsmonitor(struct index_state *istate)
{
struct strbuf query_result = STRBUF_INIT;
@@ -238,17 +265,62 @@ void refresh_fsmonitor(struct index_state *istate)
struct strbuf last_update_token = STRBUF_INIT;
char *buf;
unsigned int i;
+ int is_trivial = 0;
+ struct repository *r = istate->repo ? istate->repo : the_repository;
+ enum fsmonitor_mode fsm_mode = fsm_settings__get_mode(r);
- if (!core_fsmonitor || istate->fsmonitor_has_run_once)
+ if (fsm_mode <= FSMONITOR_MODE_DISABLED ||
+ istate->fsmonitor_has_run_once)
return;
- hook_version = fsmonitor_hook_version();
-
istate->fsmonitor_has_run_once = 1;
trace_printf_key(&trace_fsmonitor, "refresh fsmonitor");
+
+ if (fsm_mode == FSMONITOR_MODE_IPC) {
+ query_success = !fsmonitor_ipc__send_query(
+ istate->fsmonitor_last_update ?
+ istate->fsmonitor_last_update : "builtin:fake",
+ &query_result);
+ if (query_success) {
+ /*
+ * The response contains a series of nul terminated
+ * strings. The first is the new token.
+ *
+ * Use `char *buf` as an interlude to trick the CI
+ * static analysis to let us use `strbuf_addstr()`
+ * here (and only copy the token) rather than
+ * `strbuf_addbuf()`.
+ */
+ buf = query_result.buf;
+ strbuf_addstr(&last_update_token, buf);
+ bol = last_update_token.len + 1;
+ is_trivial = query_result.buf[bol] == '/';
+ if (is_trivial)
+ trace2_data_intmax("fsm_client", NULL,
+ "query/trivial-response", 1);
+ } else {
+ /*
+ * The builtin daemon is not available on this
+ * platform -OR- we failed to get a response.
+ *
+ * Generate a fake token (rather than a V1
+ * timestamp) for the index extension. (If
+ * they switch back to the hook API, we don't
+ * want ambiguous state.)
+ */
+ strbuf_addstr(&last_update_token, "builtin:fake");
+ }
+
+ goto apply_results;
+ }
+
+ assert(fsm_mode == FSMONITOR_MODE_HOOK);
+
+ hook_version = fsmonitor_hook_version();
+
/*
- * This could be racy so save the date/time now and query_fsmonitor
+ * This could be racy so save the date/time now and query_fsmonitor_hook
* should be inclusive to ensure we don't miss potential changes.
*/
last_update = getnanotime();
@@ -256,13 +328,14 @@ void refresh_fsmonitor(struct index_state *istate)
strbuf_addf(&last_update_token, "%"PRIu64"", last_update);
/*
- * If we have a last update token, call query_fsmonitor for the set of
+ * If we have a last update token, call query_fsmonitor_hook for the set of
* changes since that token, else assume everything is possibly dirty
* and check it all.
*/
if (istate->fsmonitor_last_update) {
if (hook_version == -1 || hook_version == HOOK_INTERFACE_VERSION2) {
- query_success = !query_fsmonitor(HOOK_INTERFACE_VERSION2,
+ query_success = !query_fsmonitor_hook(
+ r, HOOK_INTERFACE_VERSION2,
istate->fsmonitor_last_update, &query_result);
if (query_success) {
@@ -283,6 +356,7 @@ void refresh_fsmonitor(struct index_state *istate)
query_success = 0;
} else {
bol = last_update_token.len + 1;
+ is_trivial = query_result.buf[bol] == '/';
}
} else if (hook_version < 0) {
hook_version = HOOK_INTERFACE_VERSION1;
@@ -292,37 +366,83 @@ void refresh_fsmonitor(struct index_state *istate)
}
if (hook_version == HOOK_INTERFACE_VERSION1) {
- query_success = !query_fsmonitor(HOOK_INTERFACE_VERSION1,
+ query_success = !query_fsmonitor_hook(
+ r, HOOK_INTERFACE_VERSION1,
istate->fsmonitor_last_update, &query_result);
+ if (query_success)
+ is_trivial = query_result.buf[0] == '/';
}
- trace_performance_since(last_update, "fsmonitor process '%s'", core_fsmonitor);
- trace_printf_key(&trace_fsmonitor, "fsmonitor process '%s' returned %s",
- core_fsmonitor, query_success ? "success" : "failure");
+ if (is_trivial)
+ trace2_data_intmax("fsm_hook", NULL,
+ "query/trivial-response", 1);
+
+ trace_performance_since(last_update, "fsmonitor process '%s'",
+ fsm_settings__get_hook_path(r));
+ trace_printf_key(&trace_fsmonitor,
+ "fsmonitor process '%s' returned %s",
+ fsm_settings__get_hook_path(r),
+ query_success ? "success" : "failure");
}
- /* a fsmonitor process can return '/' to indicate all entries are invalid */
- if (query_success && query_result.buf[bol] != '/') {
- /* Mark all entries returned by the monitor as dirty */
+apply_results:
+ /*
+ * The response from FSMonitor (excluding the header token) is
+ * either:
+ *
+ * [a] a (possibly empty) list of NUL delimited relative
+ * pathnames of changed paths. This list can contain
+ * files and directories. Directories have a trailing
+ * slash.
+ *
+ * [b] a single '/' to indicate the provider had no
+ * information and that we should consider everything
+ * invalid. We call this a trivial response.
+ */
+ trace2_region_enter("fsmonitor", "apply_results", istate->repo);
+
+ if (query_success && !is_trivial) {
+ /*
+ * Mark all pathnames returned by the monitor as dirty.
+ *
+ * This updates both the cache-entries and the untracked-cache.
+ */
+ int count = 0;
+
buf = query_result.buf;
for (i = bol; i < query_result.len; i++) {
if (buf[i] != '\0')
continue;
fsmonitor_refresh_callback(istate, buf + bol);
bol = i + 1;
+ count++;
}
- if (bol < query_result.len)
+ if (bol < query_result.len) {
fsmonitor_refresh_callback(istate, buf + bol);
+ count++;
+ }
/* Now mark the untracked cache for fsmonitor usage */
if (istate->untracked)
istate->untracked->use_fsmonitor = 1;
- } else {
- /* We only want to run the post index changed hook if we've actually changed entries, so keep track
- * if we actually changed entries or not */
+ if (count > fsmonitor_force_update_threshold)
+ istate->cache_changed |= FSMONITOR_CHANGED;
+
+ trace2_data_intmax("fsmonitor", istate->repo, "apply_count",
+ count);
+
+ } else {
+ /*
+ * We failed to get a response or received a trivial response,
+ * so invalidate everything.
+ *
+ * We only want to run the post index changed hook if
+ * we've actually changed entries, so keep track if we
+ * actually changed entries or not.
+ */
int is_cache_changed = 0;
- /* Mark all entries invalid */
+
for (i = 0; i < istate->cache_nr; i++) {
if (istate->cache[i]->ce_flags & CE_FSMONITOR_VALID) {
is_cache_changed = 1;
@@ -330,13 +450,18 @@ void refresh_fsmonitor(struct index_state *istate)
}
}
- /* If we're going to check every file, ensure we save the results */
+ /*
+ * If we're going to check every file, ensure we save
+ * the results.
+ */
if (is_cache_changed)
istate->cache_changed |= FSMONITOR_CHANGED;
if (istate->untracked)
istate->untracked->use_fsmonitor = 0;
}
+ trace2_region_leave("fsmonitor", "apply_results", istate->repo);
+
strbuf_release(&query_result);
/* Now that we've updated istate, save the last_update_token */
@@ -411,7 +536,8 @@ void remove_fsmonitor(struct index_state *istate)
void tweak_fsmonitor(struct index_state *istate)
{
unsigned int i;
- int fsmonitor_enabled = git_config_get_fsmonitor();
+ int fsmonitor_enabled = (fsm_settings__get_mode(istate->repo)
+ > FSMONITOR_MODE_DISABLED);
if (istate->fsmonitor_dirty) {
if (fsmonitor_enabled) {
@@ -431,16 +557,8 @@ void tweak_fsmonitor(struct index_state *istate)
istate->fsmonitor_dirty = NULL;
}
- switch (fsmonitor_enabled) {
- case -1: /* keep: do nothing */
- break;
- case 0: /* false */
- remove_fsmonitor(istate);
- break;
- case 1: /* true */
+ if (fsmonitor_enabled)
add_fsmonitor(istate);
- break;
- default: /* unknown value: do nothing */
- break;
- }
+ else
+ remove_fsmonitor(istate);
}
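[Editor's note: a compact sketch, not from the patch, of the token-prefixed (V2 hook / IPC) response layout that the apply_results logic above consumes: a NUL-terminated token, then either NUL-delimited pathnames or a lone '/' trivial marker. Only strbuf is assumed; the count stands in for real per-path handling.]

#include "cache.h"
#include "strbuf.h"

/* Walk "<token>NUL<path>NUL<path>NUL..." or "<token>NUL/" (trivial). */
static int walk_fsmonitor_response(struct strbuf *res)
{
	size_t bol = strlen(res->buf) + 1;	/* skip the leading token */
	size_t i;
	int count = 0;

	if (bol < res->len && res->buf[bol] == '/')
		return -1;			/* trivial: invalidate everything */

	for (i = bol; i < res->len; i++) {
		if (res->buf[i])
			continue;
		/* res->buf + bol is one changed pathname */
		count++;
		bol = i + 1;
	}
	if (bol < res->len)
		count++;			/* final, unterminated entry */
	return count;
}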
diff --git a/fsmonitor.h b/fsmonitor.h
index f20d726..3f41f65 100644
--- a/fsmonitor.h
+++ b/fsmonitor.h
@@ -3,6 +3,7 @@
#include "cache.h"
#include "dir.h"
+#include "fsmonitor-settings.h"
extern struct trace_key trace_fsmonitor;
@@ -57,7 +58,10 @@ int fsmonitor_is_trivial_response(const struct strbuf *query_result);
*/
static inline int is_fsmonitor_refreshed(const struct index_state *istate)
{
- return !core_fsmonitor || istate->fsmonitor_has_run_once;
+ enum fsmonitor_mode fsm_mode = fsm_settings__get_mode(istate->repo);
+
+ return fsm_mode <= FSMONITOR_MODE_DISABLED ||
+ istate->fsmonitor_has_run_once;
}
/*
@@ -67,7 +71,10 @@ static inline int is_fsmonitor_refreshed(const struct index_state *istate)
*/
static inline void mark_fsmonitor_valid(struct index_state *istate, struct cache_entry *ce)
{
- if (core_fsmonitor && !(ce->ce_flags & CE_FSMONITOR_VALID)) {
+ enum fsmonitor_mode fsm_mode = fsm_settings__get_mode(istate->repo);
+
+ if (fsm_mode > FSMONITOR_MODE_DISABLED &&
+ !(ce->ce_flags & CE_FSMONITOR_VALID)) {
istate->cache_changed = 1;
ce->ce_flags |= CE_FSMONITOR_VALID;
trace_printf_key(&trace_fsmonitor, "mark_fsmonitor_clean '%s'", ce->name);
@@ -83,7 +90,9 @@ static inline void mark_fsmonitor_valid(struct index_state *istate, struct cache
*/
static inline void mark_fsmonitor_invalid(struct index_state *istate, struct cache_entry *ce)
{
- if (core_fsmonitor) {
+ enum fsmonitor_mode fsm_mode = fsm_settings__get_mode(istate->repo);
+
+ if (fsm_mode > FSMONITOR_MODE_DISABLED) {
ce->ce_flags &= ~CE_FSMONITOR_VALID;
untracked_cache_invalidate_path(istate, ce->name, 1);
trace_printf_key(&trace_fsmonitor, "mark_fsmonitor_invalid '%s'", ce->name);
diff --git a/git-compat-util.h b/git-compat-util.h
index e30a374..58fd813 100644
--- a/git-compat-util.h
+++ b/git-compat-util.h
@@ -267,6 +267,12 @@
#else
#include <stdint.h>
#endif
+#ifdef HAVE_ARC4RANDOM_LIBBSD
+#include <bsd/stdlib.h>
+#endif
+#ifdef HAVE_GETRANDOM
+#include <sys/random.h>
+#endif
#ifdef NO_INTPTR_T
/*
* On I16LP32, ILP32 and LP64 "long" is the safe bet, however
@@ -531,12 +537,14 @@ void warning_errno(const char *err, ...) __attribute__((format (printf, 1, 2)));
#include <openssl/x509v3.h>
#endif /* NO_OPENSSL */
+#ifdef HAVE_OPENSSL_CSPRNG
+#include <openssl/rand.h>
+#endif
+
/*
* Let callers be aware of the constant return value; this can help
* gcc with -Wuninitialized analysis. We restrict this trick to gcc, though,
- * because some compilers may not support variadic macros. Since we're only
- * trying to help gcc, anyway, it's OK; other compilers will fall back to
- * using the function as usual.
+ * because other compilers may be confused by this.
*/
#if defined(__GNUC__)
static inline int const_error(void)
@@ -1258,25 +1266,42 @@ static inline int regexec_buf(const regex_t *preg, const char *buf, size_t size,
#endif
#endif
-/*
- * This is always defined as a first step towards making the use of variadic
- * macros unconditional. If it causes compilation problems on your platform,
- * please report it to the Git mailing list at git@vger.kernel.org.
- */
-#define HAVE_VARIADIC_MACROS 1
-
/* usage.c: only to be used for testing BUG() implementation (see test-tool) */
extern int BUG_exit_code;
-#ifdef HAVE_VARIADIC_MACROS
__attribute__((format (printf, 3, 4))) NORETURN
void BUG_fl(const char *file, int line, const char *fmt, ...);
#define BUG(...) BUG_fl(__FILE__, __LINE__, __VA_ARGS__)
+
+#ifdef __APPLE__
+#define FSYNC_METHOD_DEFAULT FSYNC_METHOD_WRITEOUT_ONLY
#else
-__attribute__((format (printf, 1, 2))) NORETURN
-void BUG(const char *fmt, ...);
+#define FSYNC_METHOD_DEFAULT FSYNC_METHOD_FSYNC
#endif
+enum fsync_action {
+ FSYNC_WRITEOUT_ONLY,
+ FSYNC_HARDWARE_FLUSH
+};
+
+/*
+ * Issues an fsync against the specified file according to the specified mode.
+ *
+ * FSYNC_WRITEOUT_ONLY attempts to use interfaces available on some operating
+ * systems to flush the OS cache without issuing a flush command to the storage
+ * controller. If those interfaces are unavailable, the function fails with
+ * ENOSYS.
+ *
+ * FSYNC_HARDWARE_FLUSH does an OS writeout and hardware flush to ensure that
+ * changes are durable. It is not expected to fail.
+ */
+int git_fsync(int fd, enum fsync_action action);
+
+/*
+ * Writes out trace statistics for fsync using the trace2 API.
+ */
+void trace_git_fsync_stats(void);
+
/*
* Preserves errno, prints a message, but gives no warning for ENOENT.
* Returns 0 on success, which includes trying to unlink an object that does
@@ -1398,6 +1423,18 @@ void unleak_memory(const void *ptr, size_t len);
#define UNLEAK(var) do {} while (0)
#endif
+#define z_const
+#include <zlib.h>
+
+#if ZLIB_VERNUM < 0x1290
+/*
+ * This is uncompress2, which is only available in zlib >= 1.2.9
+ * (released as of early 2017). See compat/zlib-uncompress2.c.
+ */
+int uncompress2(Bytef *dest, uLongf *destLen, const Bytef *source,
+ uLong *sourceLen);
+#endif
+
/*
* This include must come after system headers, since it introduces macros that
* replace system names.
@@ -1444,4 +1481,11 @@ static inline void *container_of_or_null_offset(void *ptr, size_t offset)
void sleep_millisec(int millisec);
+/*
+ * Generate len bytes from the system cryptographically secure PRNG.
+ * Returns 0 on success and -1 on error, setting errno. The inability to
+ * satisfy the full request is an error.
+ */
+int csprng_bytes(void *buf, size_t len);
+
#endif
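[Editor's note: a hedged usage sketch, not from this patch, for the two helpers declared above: try the cheap writeout first and escalate only when the platform lacks it (the header says the writeout-only mode fails with ENOSYS in that case), and draw random bytes with full-or-error semantics. The function names are hypothetical.]

#include "git-compat-util.h"

/* Hypothetical: flush `fd` as cheaply as the platform allows. */
static int flush_loose_object(int fd)
{
	if (!git_fsync(fd, FSYNC_WRITEOUT_ONLY))
		return 0;
	if (errno != ENOSYS)
		return -1;
	/* No writeout-only interface here; fall back to a full flush. */
	return git_fsync(fd, FSYNC_HARDWARE_FLUSH);
}

/* Hypothetical: produce an 8-hex-digit random suffix for a temp name. */
static int random_suffix(char *out, size_t outlen)
{
	unsigned char raw[4];

	if (csprng_bytes(raw, sizeof(raw)) < 0)
		return -1;	/* short reads are reported as errors */
	return snprintf(out, outlen, "%02x%02x%02x%02x",
			raw[0], raw[1], raw[2], raw[3]) < 0 ? -1 : 0;
}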
diff --git a/git-p4.py b/git-p4.py
index cb37545..a9b1f90 100755
--- a/git-p4.py
+++ b/git-p4.py
@@ -108,10 +108,7 @@ def p4_build_cmd(cmd):
# Provide a way to not pass this option by setting git-p4.retries to 0
real_cmd += ["-r", str(retries)]
- if not isinstance(cmd, list):
- real_cmd = ' '.join(real_cmd) + ' ' + cmd
- else:
- real_cmd += cmd
+ real_cmd += cmd
# now check that we can actually talk to the server
global p4_access_checked
@@ -223,153 +220,93 @@ def decode_path(path):
def run_git_hook(cmd, param=[]):
"""Execute a hook if the hook exists."""
+ args = ['git', 'hook', 'run', '--ignore-missing', cmd]
+ if param:
+ args.append("--")
+ for p in param:
+ args.append(p)
+ return subprocess.call(args) == 0
+
+def write_pipe(c, stdin, *k, **kw):
if verbose:
- sys.stderr.write("Looking for hook: %s\n" % cmd)
- sys.stderr.flush()
-
- hooks_path = gitConfig("core.hooksPath")
- if len(hooks_path) <= 0:
- hooks_path = os.path.join(os.environ["GIT_DIR"], "hooks")
-
- if not isinstance(param, list):
- param=[param]
-
- # resolve hook file name, OS depdenent
- hook_file = os.path.join(hooks_path, cmd)
- if platform.system() == 'Windows':
- if not os.path.isfile(hook_file):
- # look for the file with an extension
- files = glob.glob(hook_file + ".*")
- if not files:
- return True
- files.sort()
- hook_file = files.pop()
- while hook_file.upper().endswith(".SAMPLE"):
- # The file is a sample hook. We don't want it
- if len(files) > 0:
- hook_file = files.pop()
- else:
- return True
-
- if not os.path.isfile(hook_file) or not os.access(hook_file, os.X_OK):
- return True
-
- return run_hook_command(hook_file, param) == 0
-
-def run_hook_command(cmd, param):
- """Executes a git hook command
- cmd = the command line file to be executed. This can be
- a file that is run by OS association.
-
- param = a list of parameters to pass to the cmd command
-
- On windows, the extension is checked to see if it should
- be run with the Git for Windows Bash shell. If there
- is no file extension, the file is deemed a bash shell
- and will be handed off to sh.exe. Otherwise, Windows
- will be called with the shell to handle the file assocation.
-
- For non Windows operating systems, the file is called
- as an executable.
- """
- cli = [cmd] + param
- use_shell = False
- if platform.system() == 'Windows':
- (root,ext) = os.path.splitext(cmd)
- if ext == "":
- exe_path = os.environ.get("EXEPATH")
- if exe_path is None:
- exe_path = ""
- else:
- exe_path = os.path.join(exe_path, "bin")
- cli = [os.path.join(exe_path, "SH.EXE")] + cli
- else:
- use_shell = True
- return subprocess.call(cli, shell=use_shell)
-
-
-def write_pipe(c, stdin):
- if verbose:
- sys.stderr.write('Writing pipe: %s\n' % str(c))
+ sys.stderr.write('Writing pipe: {}\n'.format(' '.join(c)))
- expand = not isinstance(c, list)
- p = subprocess.Popen(c, stdin=subprocess.PIPE, shell=expand)
+ p = subprocess.Popen(c, stdin=subprocess.PIPE, *k, **kw)
pipe = p.stdin
val = pipe.write(stdin)
pipe.close()
if p.wait():
- die('Command failed: %s' % str(c))
+ die('Command failed: {}'.format(' '.join(c)))
return val
-def p4_write_pipe(c, stdin):
+def p4_write_pipe(c, stdin, *k, **kw):
real_cmd = p4_build_cmd(c)
if bytes is not str and isinstance(stdin, str):
stdin = encode_text_stream(stdin)
- return write_pipe(real_cmd, stdin)
+ return write_pipe(real_cmd, stdin, *k, **kw)
-def read_pipe_full(c):
+def read_pipe_full(c, *k, **kw):
""" Read output from command. Returns a tuple
of the return status, stdout text and stderr
text.
"""
if verbose:
- sys.stderr.write('Reading pipe: %s\n' % str(c))
+ sys.stderr.write('Reading pipe: {}\n'.format(' '.join(c)))
- expand = not isinstance(c, list)
- p = subprocess.Popen(c, stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=expand)
+ p = subprocess.Popen(
+ c, stdout=subprocess.PIPE, stderr=subprocess.PIPE, *k, **kw)
(out, err) = p.communicate()
return (p.returncode, out, decode_text_stream(err))
-def read_pipe(c, ignore_error=False, raw=False):
+def read_pipe(c, ignore_error=False, raw=False, *k, **kw):
""" Read output from command. Returns the output text on
success. On failure, terminates execution, unless
ignore_error is True, when it returns an empty string.
If raw is True, do not attempt to decode output text.
"""
- (retcode, out, err) = read_pipe_full(c)
+ (retcode, out, err) = read_pipe_full(c, *k, **kw)
if retcode != 0:
if ignore_error:
out = ""
else:
- die('Command failed: %s\nError: %s' % (str(c), err))
+ die('Command failed: {}\nError: {}'.format(' '.join(c), err))
if not raw:
out = decode_text_stream(out)
return out
-def read_pipe_text(c):
+def read_pipe_text(c, *k, **kw):
""" Read output from a command with trailing whitespace stripped.
On error, returns None.
"""
- (retcode, out, err) = read_pipe_full(c)
+ (retcode, out, err) = read_pipe_full(c, *k, **kw)
if retcode != 0:
return None
else:
return decode_text_stream(out).rstrip()
-def p4_read_pipe(c, ignore_error=False, raw=False):
+def p4_read_pipe(c, ignore_error=False, raw=False, *k, **kw):
real_cmd = p4_build_cmd(c)
- return read_pipe(real_cmd, ignore_error, raw=raw)
+ return read_pipe(real_cmd, ignore_error, raw=raw, *k, **kw)
-def read_pipe_lines(c, raw=False):
+def read_pipe_lines(c, raw=False, *k, **kw):
if verbose:
- sys.stderr.write('Reading pipe: %s\n' % str(c))
+ sys.stderr.write('Reading pipe: {}\n'.format(' '.join(c)))
- expand = not isinstance(c, list)
- p = subprocess.Popen(c, stdout=subprocess.PIPE, shell=expand)
+ p = subprocess.Popen(c, stdout=subprocess.PIPE, *k, **kw)
pipe = p.stdout
lines = pipe.readlines()
if not raw:
lines = [decode_text_stream(line) for line in lines]
if pipe.close() or p.wait():
- die('Command failed: %s' % str(c))
+ die('Command failed: {}'.format(' '.join(c)))
return lines
-def p4_read_pipe_lines(c):
+def p4_read_pipe_lines(c, *k, **kw):
"""Specifically invoke p4 on the command supplied. """
real_cmd = p4_build_cmd(c)
- return read_pipe_lines(real_cmd)
+ return read_pipe_lines(real_cmd, *k, **kw)
def p4_has_command(cmd):
"""Ask p4 for help on this command. If it returns an error, the
@@ -400,23 +337,22 @@ def p4_has_move_command():
# assume it failed because @... was invalid changelist
return True
-def system(cmd, ignore_error=False):
- expand = not isinstance(cmd, list)
+def system(cmd, ignore_error=False, *k, **kw):
if verbose:
- sys.stderr.write("executing %s\n" % str(cmd))
- retcode = subprocess.call(cmd, shell=expand)
+ sys.stderr.write("executing {}\n".format(
+ ' '.join(cmd) if isinstance(cmd, list) else cmd))
+ retcode = subprocess.call(cmd, *k, **kw)
if retcode and not ignore_error:
- raise CalledProcessError(retcode, cmd)
+ raise subprocess.CalledProcessError(retcode, cmd)
return retcode
-def p4_system(cmd):
+def p4_system(cmd, *k, **kw):
"""Specifically invoke p4 as the system command. """
real_cmd = p4_build_cmd(cmd)
- expand = not isinstance(real_cmd, list)
- retcode = subprocess.call(real_cmd, shell=expand)
+ retcode = subprocess.call(real_cmd, *k, **kw)
if retcode:
- raise CalledProcessError(retcode, real_cmd)
+ raise subprocess.CalledProcessError(retcode, real_cmd)
def die_bad_access(s):
die("failure accessing depot: {0}".format(s.rstrip()))
@@ -735,18 +671,11 @@ def isModeExecChanged(src_mode, dst_mode):
return isModeExec(src_mode) != isModeExec(dst_mode)
def p4CmdList(cmd, stdin=None, stdin_mode='w+b', cb=None, skip_info=False,
- errors_as_exceptions=False):
-
- if not isinstance(cmd, list):
- cmd = "-G " + cmd
- expand = True
- else:
- cmd = ["-G"] + cmd
- expand = False
+ errors_as_exceptions=False, *k, **kw):
- cmd = p4_build_cmd(cmd)
+ cmd = p4_build_cmd(["-G"] + cmd)
if verbose:
- sys.stderr.write("Opening pipe: %s\n" % str(cmd))
+ sys.stderr.write("Opening pipe: {}\n".format(' '.join(cmd)))
# Use a temporary file to avoid deadlocks without
# subprocess.communicate(), which would put another copy
@@ -763,10 +692,8 @@ def p4CmdList(cmd, stdin=None, stdin_mode='w+b', cb=None, skip_info=False,
stdin_file.flush()
stdin_file.seek(0)
- p4 = subprocess.Popen(cmd,
- shell=expand,
- stdin=stdin_file,
- stdout=subprocess.PIPE)
+ p4 = subprocess.Popen(
+ cmd, stdin=stdin_file, stdout=subprocess.PIPE, *k, **kw)
result = []
try:
@@ -819,8 +746,8 @@ def p4CmdList(cmd, stdin=None, stdin_mode='w+b', cb=None, skip_info=False,
return result
-def p4Cmd(cmd):
- list = p4CmdList(cmd)
+def p4Cmd(cmd, *k, **kw):
+ list = p4CmdList(cmd, *k, **kw)
result = {}
for entry in list:
result.update(entry)
@@ -869,7 +796,7 @@ def isValidGitDir(path):
return git_dir(path) != None
def parseRevision(ref):
- return read_pipe("git rev-parse %s" % ref).strip()
+ return read_pipe(["git", "rev-parse", ref]).strip()
def branchExists(ref):
rev = read_pipe(["git", "rev-parse", "-q", "--verify", ref],
@@ -975,11 +902,11 @@ def p4BranchesInGit(branchesAreInRemotes=True):
branches = {}
- cmdline = "git rev-parse --symbolic "
+ cmdline = ["git", "rev-parse", "--symbolic"]
if branchesAreInRemotes:
- cmdline += "--remotes"
+ cmdline.append("--remotes")
else:
- cmdline += "--branches"
+ cmdline.append("--branches")
for line in read_pipe_lines(cmdline):
line = line.strip()
@@ -1044,7 +971,7 @@ def createOrUpdateBranchesFromOrigin(localRefPrefix = "refs/remotes/p4/", silent
originPrefix = "origin/p4/"
- for line in read_pipe_lines("git rev-parse --symbolic --remotes"):
+ for line in read_pipe_lines(["git", "rev-parse", "--symbolic", "--remotes"]):
line = line.strip()
if (not line.startswith(originPrefix)) or line.endswith("HEAD"):
continue
@@ -1082,7 +1009,7 @@ def createOrUpdateBranchesFromOrigin(localRefPrefix = "refs/remotes/p4/", silent
remoteHead, ','.join(settings['depot-paths'])))
if update:
- system("git update-ref %s %s" % (remoteHead, originHead))
+ system(["git", "update-ref", remoteHead, originHead])
def originP4BranchesExist():
return gitBranchExists("origin") or gitBranchExists("origin/p4") or gitBranchExists("origin/p4/master")
@@ -1196,7 +1123,7 @@ def getClientSpec():
"""Look at the p4 client spec, create a View() object that contains
all the mappings, and return it."""
- specList = p4CmdList("client -o")
+ specList = p4CmdList(["client", "-o"])
if len(specList) != 1:
die('Output from "client -o" is %d lines, expecting 1' %
len(specList))
@@ -1225,7 +1152,7 @@ def getClientSpec():
def getClientRoot():
"""Grab the client directory."""
- output = p4CmdList("client -o")
+ output = p4CmdList(["client", "-o"])
if len(output) != 1:
die('Output from "client -o" is %d lines, expecting 1' % len(output))
@@ -1480,7 +1407,7 @@ class P4UserMap:
if self.myP4UserId:
return self.myP4UserId
- results = p4CmdList("user -o")
+ results = p4CmdList(["user", "-o"])
for r in results:
if 'User' in r:
self.myP4UserId = r['User']
@@ -1505,7 +1432,7 @@ class P4UserMap:
self.users = {}
self.emails = {}
- for output in p4CmdList("users"):
+ for output in p4CmdList(["users"]):
if "User" not in output:
continue
self.users[output["User"]] = output["FullName"] + " <" + output["Email"] + ">"
@@ -1629,7 +1556,7 @@ class P4Submit(Command, P4UserMap):
die("Large file system not supported for git-p4 submit command. Please remove it from config.")
def check(self):
- if len(p4CmdList("opened ...")) > 0:
+ if len(p4CmdList(["opened", "..."])) > 0:
die("You have files opened with perforce! Close them before starting the sync.")
def separate_jobs_from_description(self, message):
@@ -1733,7 +1660,7 @@ class P4Submit(Command, P4UserMap):
# then gets used to patch up the username in the change. If the same
# client spec is being used by multiple processes then this might go
# wrong.
- results = p4CmdList("client -o") # find the current client
+ results = p4CmdList(["client", "-o"]) # find the current client
client = None
for r in results:
if 'Client' in r:
@@ -1749,7 +1676,7 @@ class P4Submit(Command, P4UserMap):
def modifyChangelistUser(self, changelist, newUser):
# fixup the user field of a changelist after it has been submitted.
- changes = p4CmdList("change -o %s" % changelist)
+ changes = p4CmdList(["change", "-o", changelist])
if len(changes) != 1:
die("Bad output from p4 change modifying %s to user %s" %
(changelist, newUser))
@@ -1760,7 +1687,7 @@ class P4Submit(Command, P4UserMap):
# p4 does not understand format version 3 and above
input = marshal.dumps(c, 2)
- result = p4CmdList("change -f -i", stdin=input)
+ result = p4CmdList(["change", "-f", "-i"], stdin=input)
for r in result:
if 'code' in r:
if r['code'] == 'error':
@@ -1866,7 +1793,7 @@ class P4Submit(Command, P4UserMap):
if "P4EDITOR" in os.environ and (os.environ.get("P4EDITOR") != ""):
editor = os.environ.get("P4EDITOR")
else:
- editor = read_pipe("git var GIT_EDITOR").strip()
+ editor = read_pipe(["git", "var", "GIT_EDITOR"]).strip()
system(["sh", "-c", ('%s "$@"' % editor), editor, template_file])
# If the file was not saved, prompt to see if this patch should
@@ -1924,7 +1851,8 @@ class P4Submit(Command, P4UserMap):
(p4User, gitEmail) = self.p4UserForCommit(id)
- diff = read_pipe_lines("git diff-tree -r %s \"%s^\" \"%s\"" % (self.diffOpts, id, id))
+ diff = read_pipe_lines(
+ ["git", "diff-tree", "-r"] + self.diffOpts + ["{}^".format(id), id])
filesToAdd = set()
filesToChangeType = set()
filesToDelete = set()
@@ -2060,7 +1988,7 @@ class P4Submit(Command, P4UserMap):
#
# Apply the patch for real, and do add/delete/+x handling.
#
- system(applyPatchCmd)
+ system(applyPatchCmd, shell=True)
for f in filesToChangeType:
p4_edit(f, "-t", "auto")
@@ -2410,17 +2338,17 @@ class P4Submit(Command, P4UserMap):
#
if self.detectRenames:
# command-line -M arg
- self.diffOpts = "-M"
+ self.diffOpts = ["-M"]
else:
# If not explicitly set check the config variable
detectRenames = gitConfig("git-p4.detectRenames")
if detectRenames.lower() == "false" or detectRenames == "":
- self.diffOpts = ""
+ self.diffOpts = []
elif detectRenames.lower() == "true":
- self.diffOpts = "-M"
+ self.diffOpts = ["-M"]
else:
- self.diffOpts = "-M%s" % detectRenames
+ self.diffOpts = ["-M{}".format(detectRenames)]
# no command-line arg for -C or --find-copies-harder, just
# config variables
@@ -2428,12 +2356,12 @@ class P4Submit(Command, P4UserMap):
if detectCopies.lower() == "false" or detectCopies == "":
pass
elif detectCopies.lower() == "true":
- self.diffOpts += " -C"
+ self.diffOpts.append("-C")
else:
- self.diffOpts += " -C%s" % detectCopies
+ self.diffOpts.append("-C{}".format(detectCopies))
if gitConfigBool("git-p4.detectCopiesHarder"):
- self.diffOpts += " --find-copies-harder"
+ self.diffOpts.append("--find-copies-harder")
num_shelves = len(self.update_shelve)
if num_shelves > 0 and num_shelves != len(commits):
@@ -3381,12 +3309,9 @@ class P4Sync(Command, P4UserMap):
lostAndFoundBranches = set()
user = gitConfig("git-p4.branchUser")
- if len(user) > 0:
- command = "branches -u %s" % user
- else:
- command = "branches"
- for info in p4CmdList(command):
+ for info in p4CmdList(
+ ["branches"] + (["-u", user] if len(user) > 0 else [])):
details = p4Cmd(["branch", "-o", info["branch"]])
viewIdx = 0
while "View%s" % viewIdx in details:
@@ -3477,7 +3402,8 @@ class P4Sync(Command, P4UserMap):
while True:
if self.verbose:
print("trying: earliest %s latest %s" % (earliestCommit, latestCommit))
- next = read_pipe("git rev-list --bisect %s %s" % (latestCommit, earliestCommit)).strip()
+ next = read_pipe(["git", "rev-list", "--bisect",
+ latestCommit, earliestCommit]).strip()
if len(next) == 0:
if self.verbose:
print("argh")
@@ -3633,7 +3559,7 @@ class P4Sync(Command, P4UserMap):
if self.hasOrigin:
if not self.silent:
print('Syncing with origin first, using "git fetch origin"')
- system("git fetch origin")
+ system(["git", "fetch", "origin"])
def importHeadRevision(self, revision):
print("Doing initial import of %s from revision %s into %s" % (' '.join(self.depotPaths), revision, self.branch))
@@ -3800,8 +3726,8 @@ class P4Sync(Command, P4UserMap):
if len(self.branch) == 0:
self.branch = self.refPrefix + "master"
if gitBranchExists("refs/heads/p4") and self.importIntoRemotes:
- system("git update-ref %s refs/heads/p4" % self.branch)
- system("git branch -D p4")
+ system(["git", "update-ref", self.branch, "refs/heads/p4"])
+ system(["git", "branch", "-D", "p4"])
# accept either the command-line option, or the configuration variable
if self.useClientSpec:
@@ -4004,7 +3930,7 @@ class P4Sync(Command, P4UserMap):
# Cleanup temporary branches created during import
if self.tempBranches != []:
for branch in self.tempBranches:
- read_pipe("git update-ref -d %s" % branch)
+ read_pipe(["git", "update-ref", "-d", branch])
os.rmdir(os.path.join(os.environ.get("GIT_DIR", ".git"), self.tempBranchLocation))
# Create a symbolic ref p4/HEAD pointing to p4/<branch> to allow
@@ -4036,7 +3962,7 @@ class P4Rebase(Command):
def rebase(self):
if os.system("git update-index --refresh") != 0:
die("Some files in your working directory are modified and different than what is in your index. You can use git update-index <filename> to bring the index up to date or stash away all your changes with git stash.");
- if len(read_pipe("git diff-index HEAD --")) > 0:
+ if len(read_pipe(["git", "diff-index", "HEAD", "--"])) > 0:
die("You have uncommitted changes. Please commit them before rebasing or stash them away with git stash.");
[upstream, settings] = findUpstreamBranchPoint()
@@ -4047,9 +3973,10 @@ class P4Rebase(Command):
upstream = re.sub("~[0-9]+$", "", upstream)
print("Rebasing the current branch onto %s" % upstream)
- oldHead = read_pipe("git rev-parse HEAD").strip()
- system("git rebase %s" % upstream)
- system("git diff-tree --stat --summary -M %s HEAD --" % oldHead)
+ oldHead = read_pipe(["git", "rev-parse", "HEAD"]).strip()
+ system(["git", "rebase", upstream])
+ system(["git", "diff-tree", "--stat", "--summary", "-M", oldHead,
+ "HEAD", "--"])
return True
class P4Clone(P4Sync):
@@ -4110,7 +4037,7 @@ class P4Clone(P4Sync):
init_cmd.append("--bare")
retcode = subprocess.call(init_cmd)
if retcode:
- raise CalledProcessError(retcode, init_cmd)
+ raise subprocess.CalledProcessError(retcode, init_cmd)
if not P4Sync.run(self, depotPaths):
return False
@@ -4126,7 +4053,7 @@ class P4Clone(P4Sync):
# auto-set this variable if invoked with --use-client-spec
if self.useClientSpec_from_options:
- system("git config --bool git-p4.useclientspec true")
+ system(["git", "config", "--bool", "git-p4.useclientspec", "true"])
return True
@@ -4257,10 +4184,7 @@ class P4Branches(Command):
if originP4BranchesExist():
createOrUpdateBranchesFromOrigin()
- cmdline = "git rev-parse --symbolic "
- cmdline += " --remotes"
-
- for line in read_pipe_lines(cmdline):
+ for line in read_pipe_lines(["git", "rev-parse", "--symbolic", "--remotes"]):
line = line.strip()
if not line.startswith('p4/') or line == "p4/HEAD":
@@ -4343,9 +4267,9 @@ def main():
cmd.gitdir = os.path.abspath(".git")
if not isValidGitDir(cmd.gitdir):
# "rev-parse --git-dir" without arguments will try $PWD/.git
- cmd.gitdir = read_pipe("git rev-parse --git-dir").strip()
+ cmd.gitdir = read_pipe(["git", "rev-parse", "--git-dir"]).strip()
if os.path.exists(cmd.gitdir):
- cdup = read_pipe("git rev-parse --show-cdup").strip()
+ cdup = read_pipe(["git", "rev-parse", "--show-cdup"]).strip()
if len(cdup) > 0:
chdir(cdup);
diff --git a/git-send-email.perl b/git-send-email.perl
index 0408722..a98460b 100755
--- a/git-send-email.perl
+++ b/git-send-email.perl
@@ -225,13 +225,13 @@ my $multiedit;
my $editor;
sub system_or_msg {
- my ($args, $msg) = @_;
+ my ($args, $msg, $cmd_name) = @_;
system(@$args);
my $signalled = $? & 127;
my $exit_code = $? >> 8;
return unless $signalled or $exit_code;
- my @sprintf_args = ($args->[0], $exit_code);
+ my @sprintf_args = ($cmd_name ? $cmd_name : $args->[0], $exit_code);
if (defined $msg) {
# Quiet the 'redundant' warning category, except we
# need to support down to Perl 5.8, so we can't do a
@@ -2075,10 +2075,10 @@ sub validate_patch {
my ($fn, $xfer_encoding) = @_;
if ($repo) {
+ my $hook_name = 'sendemail-validate';
my $hooks_path = $repo->command_oneline('rev-parse', '--git-path', 'hooks');
require File::Spec;
- my $validate_hook = File::Spec->catfile($hooks_path,
- 'sendemail-validate');
+ my $validate_hook = File::Spec->catfile($hooks_path, $hook_name);
my $hook_error;
if (-x $validate_hook) {
require Cwd;
@@ -2088,13 +2088,19 @@ sub validate_patch {
chdir($repo->wc_path() or $repo->repo_path())
or die("chdir: $!");
local $ENV{"GIT_DIR"} = $repo->repo_path();
- $hook_error = system_or_msg([$validate_hook, $target]);
+ my @cmd = ("git", "hook", "run", "--ignore-missing",
+ $hook_name, "--");
+ my @cmd_msg = (@cmd, "<patch>");
+ my @cmd_run = (@cmd, $target);
+ $hook_error = system_or_msg(\@cmd_run, undef, "@cmd_msg");
chdir($cwd_save) or die("chdir: $!");
}
if ($hook_error) {
- die sprintf(__("fatal: %s: rejected by sendemail-validate hook\n" .
- "%s\n" .
- "warning: no patches were sent\n"), $fn, $hook_error);
+ $hook_error = sprintf(__("fatal: %s: rejected by %s hook\n" .
+ $hook_error . "\n" .
+ "warning: no patches were sent\n"),
+ $fn, $hook_name);
+ die $hook_error;
}
}
diff --git a/git-sh-setup.sh b/git-sh-setup.sh
index b93f392..d92df37 100644
--- a/git-sh-setup.sh
+++ b/git-sh-setup.sh
@@ -101,7 +101,6 @@ $LONG_USAGE")"
case "$1" in
-h)
echo "$LONG_USAGE"
- case "$0" in *git-legacy-stash) exit 129;; esac
exit
esac
fi
diff --git a/git-submodule.sh b/git-submodule.sh
index 652861a..fd0b4a2 100755
--- a/git-submodule.sh
+++ b/git-submodule.sh
@@ -10,7 +10,7 @@ USAGE="[--quiet] [--cached]
or: $dashless [--quiet] status [--cached] [--recursive] [--] [<path>...]
or: $dashless [--quiet] init [--] [<path>...]
or: $dashless [--quiet] deinit [-f|--force] (--all| [--] <path>...)
- or: $dashless [--quiet] update [--init] [--remote] [-N|--no-fetch] [-f|--force] [--checkout|--merge|--rebase] [--[no-]recommend-shallow] [--reference <repository>] [--recursive] [--[no-]single-branch] [--] [<path>...]
+ or: $dashless [--quiet] update [--init [--filter=<filter-spec>]] [--remote] [-N|--no-fetch] [-f|--force] [--checkout|--merge|--rebase] [--[no-]recommend-shallow] [--reference <repository>] [--recursive] [--[no-]single-branch] [--] [<path>...]
or: $dashless [--quiet] set-branch (--default|--branch <branch>) [--] <path>
or: $dashless [--quiet] set-url [--] <path> <newurl>
or: $dashless [--quiet] summary [--cached|--files] [--summary-limit <n>] [commit] [--] [<path>...]
@@ -49,14 +49,7 @@ dissociate=
single_branch=
jobs=
recommend_shallow=
-
-die_if_unmatched ()
-{
- if test "$1" = "#unmatched"
- then
- exit ${2:-1}
- fi
-}
+filter=
isnumber()
{
@@ -246,20 +239,6 @@ cmd_deinit()
git ${wt_prefix:+-C "$wt_prefix"} submodule--helper deinit ${GIT_QUIET:+--quiet} ${force:+--force} ${deinit_all:+--all} -- "$@"
}
-# usage: fetch_in_submodule <module_path> [<depth>] [<sha1>]
-# Because arguments are positional, use an empty string to omit <depth>
-# but include <sha1>.
-fetch_in_submodule () (
- sanitize_submodule_env &&
- cd "$1" &&
- if test $# -eq 3
- then
- echo "$3" | git fetch ${GIT_QUIET:+--quiet} --stdin ${2:+"$2"}
- else
- git fetch ${GIT_QUIET:+--quiet} ${2:+"$2"}
- fi
-)
-
#
# Update each submodule path to correct revision, using clone and checkout as needed
#
@@ -347,6 +326,14 @@ cmd_update()
--no-single-branch)
single_branch="--no-single-branch"
;;
+ --filter)
+ case "$2" in '') usage ;; esac
+ filter="--filter=$2"
+ shift
+ ;;
+ --filter=*)
+ filter="$1"
+ ;;
--)
shift
break
@@ -361,133 +348,28 @@ cmd_update()
shift
done
- if test -n "$init"
- then
- cmd_init "--" "$@" || return
- fi
-
- {
- git submodule--helper update-clone ${GIT_QUIET:+--quiet} \
+ git ${wt_prefix:+-C "$wt_prefix"} submodule--helper update \
+ ${GIT_QUIET:+--quiet} \
+ ${force:+--force} \
${progress:+"--progress"} \
+ ${remote:+--remote} \
+ ${recursive:+--recursive} \
+ ${init:+--init} \
+ ${nofetch:+--no-fetch} \
${wt_prefix:+--prefix "$wt_prefix"} \
${prefix:+--recursive-prefix "$prefix"} \
${update:+--update "$update"} \
${reference:+"$reference"} \
${dissociate:+"--dissociate"} \
- ${depth:+--depth "$depth"} \
+ ${depth:+"$depth"} \
${require_init:+--require-init} \
+ ${dissociate:+"--dissociate"} \
$single_branch \
$recommend_shallow \
$jobs \
+ $filter \
-- \
- "$@" || echo "#unmatched" $?
- } | {
- err=
- while read -r quickabort sha1 just_cloned sm_path
- do
- die_if_unmatched "$quickabort" "$sha1"
-
- git submodule--helper ensure-core-worktree "$sm_path" || exit 1
-
- displaypath=$(git submodule--helper relative-path "$prefix$sm_path" "$wt_prefix")
-
- if test $just_cloned -eq 1
- then
- subsha1=
- else
- just_cloned=
- subsha1=$(sanitize_submodule_env; cd "$sm_path" &&
- git rev-parse --verify HEAD) ||
- die "fatal: $(eval_gettext "Unable to find current revision in submodule path '\$displaypath'")"
- fi
-
- if test -n "$remote"
- then
- branch=$(git submodule--helper remote-branch "$sm_path")
- if test -z "$nofetch"
- then
- # Fetch remote before determining tracking $sha1
- fetch_in_submodule "$sm_path" $depth ||
- die "fatal: $(eval_gettext "Unable to fetch in submodule path '\$sm_path'")"
- fi
- remote_name=$(sanitize_submodule_env; cd "$sm_path" && git submodule--helper print-default-remote)
- sha1=$(sanitize_submodule_env; cd "$sm_path" &&
- git rev-parse --verify "${remote_name}/${branch}") ||
- die "fatal: $(eval_gettext "Unable to find current \${remote_name}/\${branch} revision in submodule path '\$sm_path'")"
- fi
-
- out=$(git submodule--helper run-update-procedure \
- ${wt_prefix:+--prefix "$wt_prefix"} \
- ${GIT_QUIET:+--quiet} \
- ${force:+--force} \
- ${just_cloned:+--just-cloned} \
- ${nofetch:+--no-fetch} \
- ${depth:+"$depth"} \
- ${update:+--update "$update"} \
- ${prefix:+--recursive-prefix "$prefix"} \
- ${sha1:+--oid "$sha1"} \
- ${subsha1:+--suboid "$subsha1"} \
- "--" \
- "$sm_path")
-
- # exit codes for run-update-procedure:
- # 0: update was successful, say command output
- # 1: update procedure failed, but should not die
- # 2 or 128: subcommand died during execution
- # 3: no update procedure was run
- res="$?"
- case $res in
- 0)
- say "$out"
- ;;
- 1)
- err="${err};fatal: $out"
- continue
- ;;
- 2|128)
- die_with_status $res "fatal: $out"
- ;;
- esac
-
- if test -n "$recursive"
- then
- (
- prefix=$(git submodule--helper relative-path "$prefix$sm_path/" "$wt_prefix")
- wt_prefix=
- sanitize_submodule_env
- cd "$sm_path" &&
- eval cmd_update
- )
- res=$?
- if test $res -gt 0
- then
- die_msg="fatal: $(eval_gettext "Failed to recurse into submodule path '\$displaypath'")"
- if test $res -ne 2
- then
- err="${err};$die_msg"
- continue
- else
- die_with_status $res "$die_msg"
- fi
- fi
- fi
- done
-
- if test -n "$err"
- then
- OIFS=$IFS
- IFS=';'
- for e in $err
- do
- if test -n "$e"
- then
- echo >&2 "$e"
- fi
- done
- IFS=$OIFS
- exit 1
- fi
- }
+ "$@"
}
#
diff --git a/git.c b/git.c
index edda922..3d8e48c 100644
--- a/git.c
+++ b/git.c
@@ -436,6 +436,7 @@ static int run_builtin(struct cmd_struct *p, int argc, const char **argv)
} else {
prefix = NULL;
}
+ assert(!prefix || *prefix);
precompose_argv_prefix(argc, argv, NULL);
if (use_pager == -1 && run_setup &&
!(p->option & DELAY_PAGER_CONFIG))
@@ -536,11 +537,13 @@ static struct cmd_struct commands[] = {
{ "format-patch", cmd_format_patch, RUN_SETUP },
{ "fsck", cmd_fsck, RUN_SETUP },
{ "fsck-objects", cmd_fsck, RUN_SETUP },
+ { "fsmonitor--daemon", cmd_fsmonitor__daemon, RUN_SETUP },
{ "gc", cmd_gc, RUN_SETUP },
{ "get-tar-commit-id", cmd_get_tar_commit_id, NO_PARSEOPT },
{ "grep", cmd_grep, RUN_SETUP_GENTLY },
{ "hash-object", cmd_hash_object },
{ "help", cmd_help },
+ { "hook", cmd_hook, RUN_SETUP },
{ "index-pack", cmd_index_pack, RUN_SETUP_GENTLY | NO_PARSEOPT },
{ "init", cmd_init_db },
{ "init-db", cmd_init_db },
diff --git a/gitweb/gitweb.perl b/gitweb/gitweb.perl
index fbd1c20..606b501 100755
--- a/gitweb/gitweb.perl
+++ b/gitweb/gitweb.perl
@@ -4213,8 +4213,7 @@ sub git_header_html {
my %opts = @_;
my $title = get_page_title();
- my $content_type = get_content_type_html();
- print $cgi->header(-type=>$content_type, -charset => 'utf-8',
+ print $cgi->header(-type=>get_content_type_html(), -charset => 'utf-8',
-status=> $status, -expires => $expires)
unless ($opts{'-no_http_header'});
my $mod_perl_version = $ENV{'MOD_PERL'} ? " $ENV{'MOD_PERL'}" : '';
@@ -4225,7 +4224,6 @@ sub git_header_html {
<!-- git web interface version $version, (C) 2005-2006, Kay Sievers <kay.sievers\@vrfy.org>, Christian Gierke -->
<!-- git core binaries version $git_version -->
<head>
-<meta http-equiv="content-type" content="$content_type; charset=utf-8"/>
<meta name="generator" content="gitweb/$version git/$git_version$mod_perl_version"/>
<meta name="robots" content="index, nofollow"/>
<title>$title</title>
diff --git a/gpg-interface.c b/gpg-interface.c
index b52eb0e..280f1fa 100644
--- a/gpg-interface.c
+++ b/gpg-interface.c
@@ -433,7 +433,6 @@ static int verify_ssh_signed_buffer(struct signature_check *sigc,
struct tempfile *buffer_file;
int ret = -1;
const char *line;
- size_t trust_size;
char *principal;
struct strbuf ssh_principals_out = STRBUF_INIT;
struct strbuf ssh_principals_err = STRBUF_INIT;
@@ -502,15 +501,30 @@ static int verify_ssh_signed_buffer(struct signature_check *sigc,
ret = -1;
} else {
/* Check every principal we found (one per line) */
- for (line = ssh_principals_out.buf; *line;
- line = strchrnul(line + 1, '\n')) {
- while (*line == '\n')
- line++;
- if (!*line)
- break;
+ const char *next;
+ for (line = ssh_principals_out.buf;
+ *line;
+ line = next) {
+ const char *end_of_text;
+
+ next = end_of_text = strchrnul(line, '\n');
+
+ /* Did we find a LF, and did we have CR before it? */
+ if (*end_of_text &&
+ line < end_of_text &&
+ end_of_text[-1] == '\r')
+ end_of_text--;
+
+ /* Unless we hit NUL, skip over the LF we found */
+ if (*next)
+ next++;
- trust_size = strcspn(line, "\n");
- principal = xmemdupz(line, trust_size);
+ /* Not all lines are data. Skip empty ones */
+ if (line == end_of_text)
+ continue;
+
+ /* We now know we have a non-empty line. Process it */
+ principal = xmemdupz(line, end_of_text - line);
child_process_init(&ssh_keygen);
strbuf_release(&ssh_keygen_out);
@@ -702,7 +716,7 @@ int git_gpg_config(const char *var, const char *value, void *cb)
return config_error_nonbool(var);
fmt = get_format_by_name(value);
if (!fmt)
- return error("unsupported value for %s: %s",
+ return error(_("invalid value for '%s': '%s'"),
var, value);
use_format = fmt;
return 0;
@@ -717,8 +731,8 @@ int git_gpg_config(const char *var, const char *value, void *cb)
free(trust);
if (ret)
- return error("unsupported value for %s: %s", var,
- value);
+ return error(_("invalid value for '%s': '%s'"),
+ var, value);
return 0;
}
@@ -920,6 +934,7 @@ static int sign_buffer_gpg(struct strbuf *buffer, struct strbuf *signature,
struct child_process gpg = CHILD_PROCESS_INIT;
int ret;
size_t bottom;
+ const char *cp;
struct strbuf gpg_status = STRBUF_INIT;
strvec_pushl(&gpg.args,
@@ -939,7 +954,13 @@ static int sign_buffer_gpg(struct strbuf *buffer, struct strbuf *signature,
signature, 1024, &gpg_status, 0);
sigchain_pop(SIGPIPE);
- ret |= !strstr(gpg_status.buf, "\n[GNUPG:] SIG_CREATED ");
+ for (cp = gpg_status.buf;
+ cp && (cp = strstr(cp, "[GNUPG:] SIG_CREATED "));
+ cp++) {
+ if (cp == gpg_status.buf || cp[-1] == '\n')
+ break; /* found */
+ }
+ ret |= !cp;
strbuf_release(&gpg_status);
if (ret)
return error(_("gpg failed to sign the data"));
diff --git a/graph.c b/graph.c
index e3828eb..568b6e7 100644
--- a/graph.c
+++ b/graph.c
@@ -401,6 +401,18 @@ struct git_graph *graph_init(struct rev_info *opt)
return graph;
}
+void graph_clear(struct git_graph *graph)
+{
+ if (!graph)
+ return;
+
+ free(graph->columns);
+ free(graph->new_columns);
+ free(graph->mapping);
+ free(graph->old_mapping);
+ free(graph);
+}
+
static void graph_update_state(struct git_graph *graph, enum graph_state s)
{
graph->prev_state = graph->state;
diff --git a/graph.h b/graph.h
index 8313e29..e88632a 100644
--- a/graph.h
+++ b/graph.h
@@ -140,6 +140,11 @@ void graph_set_column_colors(const char **colors, unsigned short colors_max);
struct git_graph *graph_init(struct rev_info *opt);
/*
+ * Free a struct git_graph.
+ */
+void graph_clear(struct git_graph *graph);
+
+/*
* Update a git_graph with a new commit.
* This will cause the graph to begin outputting lines for the new commit
* the next time graph_next_line() is called.
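[Editor's note: a minimal lifecycle sketch, not part of this patch, for the new cleanup entry point; `revs` is assumed to be an already-initialized rev_info, and the traversal body is elided.]

#include "revision.h"
#include "graph.h"

/* Hypothetical: allocate a graph for one traversal, then release it. */
static void traverse_with_graph(struct rev_info *revs)
{
	struct git_graph *graph = graph_init(revs);

	/* ... call graph_update()/graph_next_line() while walking ... */

	graph_clear(graph);	/* also safe to call with NULL */
}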
diff --git a/grep.c b/grep.c
index 7bb0360..82eb7da 100644
--- a/grep.c
+++ b/grep.c
@@ -19,27 +19,6 @@ static void std_output(struct grep_opt *opt, const void *buf, size_t size)
fwrite(buf, size, 1, stdout);
}
-static struct grep_opt grep_defaults = {
- .relative = 1,
- .pathname = 1,
- .max_depth = -1,
- .pattern_type_option = GREP_PATTERN_TYPE_UNSPECIFIED,
- .colors = {
- [GREP_COLOR_CONTEXT] = "",
- [GREP_COLOR_FILENAME] = GIT_COLOR_MAGENTA,
- [GREP_COLOR_FUNCTION] = "",
- [GREP_COLOR_LINENO] = GIT_COLOR_GREEN,
- [GREP_COLOR_COLUMNNO] = GIT_COLOR_GREEN,
- [GREP_COLOR_MATCH_CONTEXT] = GIT_COLOR_BOLD_RED,
- [GREP_COLOR_MATCH_SELECTED] = GIT_COLOR_BOLD_RED,
- [GREP_COLOR_SELECTED] = "",
- [GREP_COLOR_SEP] = GIT_COLOR_CYAN,
- },
- .only_matching = 0,
- .color = -1,
- .output = std_output,
-};
-
static const char *color_grep_slots[] = {
[GREP_COLOR_CONTEXT] = "context",
[GREP_COLOR_FILENAME] = "filename",
@@ -75,20 +54,12 @@ define_list_config_array_extra(color_grep_slots, {"match"});
*/
int grep_config(const char *var, const char *value, void *cb)
{
- struct grep_opt *opt = &grep_defaults;
+ struct grep_opt *opt = cb;
const char *slot;
if (userdiff_config(var, value) < 0)
return -1;
- /*
- * The instance of grep_opt that we set up here is copied by
- * grep_init() to be used by each individual invocation.
- * When populating a new field of this structure here, be
- * sure to think about ownership -- e.g., you might need to
- * override the shallow copy in grep_init() with a deep copy.
- */
-
if (!strcmp(var, "grep.extendedregexp")) {
opt->extended_regexp_option = git_config_bool(var, value);
return 0;
@@ -134,78 +105,16 @@ int grep_config(const char *var, const char *value, void *cb)
return 0;
}
-/*
- * Initialize one instance of grep_opt and copy the
- * default values from the template we read the configuration
- * information in an earlier call to git_config(grep_config).
- */
-void grep_init(struct grep_opt *opt, struct repository *repo, const char *prefix)
+void grep_init(struct grep_opt *opt, struct repository *repo)
{
- *opt = grep_defaults;
+ struct grep_opt blank = GREP_OPT_INIT;
+ memcpy(opt, &blank, sizeof(*opt));
opt->repo = repo;
- opt->prefix = prefix;
- opt->prefix_length = (prefix && *prefix) ? strlen(prefix) : 0;
opt->pattern_tail = &opt->pattern_list;
opt->header_tail = &opt->header_list;
}
-static void grep_set_pattern_type_option(enum grep_pattern_type pattern_type, struct grep_opt *opt)
-{
- /*
- * When committing to the pattern type by setting the relevant
- * fields in grep_opt it's generally not necessary to zero out
- * the fields we're not choosing, since they won't have been
- * set by anything. The extended_regexp_option field is the
- * only exception to this.
- *
- * This is because in the process of parsing grep.patternType
- * & grep.extendedRegexp we set opt->pattern_type_option and
- * opt->extended_regexp_option, respectively. We then
- * internally use opt->extended_regexp_option to see if we're
- * compiling an ERE. It must be unset if that's not actually
- * the case.
- */
- if (pattern_type != GREP_PATTERN_TYPE_ERE &&
- opt->extended_regexp_option)
- opt->extended_regexp_option = 0;
-
- switch (pattern_type) {
- case GREP_PATTERN_TYPE_UNSPECIFIED:
- /* fall through */
-
- case GREP_PATTERN_TYPE_BRE:
- break;
-
- case GREP_PATTERN_TYPE_ERE:
- opt->extended_regexp_option = 1;
- break;
-
- case GREP_PATTERN_TYPE_FIXED:
- opt->fixed = 1;
- break;
-
- case GREP_PATTERN_TYPE_PCRE:
- opt->pcre2 = 1;
- break;
- }
-}
-
-void grep_commit_pattern_type(enum grep_pattern_type pattern_type, struct grep_opt *opt)
-{
- if (pattern_type != GREP_PATTERN_TYPE_UNSPECIFIED)
- grep_set_pattern_type_option(pattern_type, opt);
- else if (opt->pattern_type_option != GREP_PATTERN_TYPE_UNSPECIFIED)
- grep_set_pattern_type_option(opt->pattern_type_option, opt);
- else if (opt->extended_regexp_option)
- /*
- * This branch *must* happen after setting from the
- * opt->pattern_type_option above, we don't want
- * grep.extendedRegexp to override grep.patternType!
- */
- grep_set_pattern_type_option(GREP_PATTERN_TYPE_ERE, opt);
-}
-
static struct grep_pat *create_grep_pat(const char *pat, size_t patlen,
const char *origin, int no,
enum grep_pat_token t,
@@ -386,7 +295,7 @@ static void compile_pcre2_pattern(struct grep_pat *p, const struct grep_opt *opt
if (!opt->ignore_locale && is_utf8_locale() && !literal)
options |= (PCRE2_UTF | PCRE2_MATCH_INVALID_UTF);
-#ifdef GIT_PCRE2_VERSION_10_36_OR_HIGHER
+#ifndef GIT_PCRE2_VERSION_10_36_OR_HIGHER
/* Work around https://bugs.exim.org/show_bug.cgi?id=2642 fixed in 10.36 */
if (PCRE2_MATCH_INVALID_UTF && options & (PCRE2_UTF | PCRE2_CASELESS))
options |= PCRE2_NO_START_OPTIMIZE;
@@ -523,11 +432,17 @@ static void compile_regexp(struct grep_pat *p, struct grep_opt *opt)
int err;
int regflags = REG_NEWLINE;
+ if (opt->pattern_type_option == GREP_PATTERN_TYPE_UNSPECIFIED)
+ opt->pattern_type_option = (opt->extended_regexp_option
+ ? GREP_PATTERN_TYPE_ERE
+ : GREP_PATTERN_TYPE_BRE);
+
p->word_regexp = opt->word_regexp;
p->ignore_case = opt->ignore_case;
- p->fixed = opt->fixed;
+ p->fixed = opt->pattern_type_option == GREP_PATTERN_TYPE_FIXED;
- if (memchr(p->pattern, 0, p->patternlen) && !opt->pcre2)
+ if (opt->pattern_type_option != GREP_PATTERN_TYPE_PCRE &&
+ memchr(p->pattern, 0, p->patternlen))
die(_("given pattern contains NULL byte (via -f <file>). This is only supported with -P under PCRE v2"));
p->is_fixed = is_fixed(p->pattern, p->patternlen);
@@ -578,14 +493,14 @@ static void compile_regexp(struct grep_pat *p, struct grep_opt *opt)
return;
}
- if (opt->pcre2) {
+ if (opt->pattern_type_option == GREP_PATTERN_TYPE_PCRE) {
compile_pcre2_pattern(p, opt);
return;
}
if (p->ignore_case)
regflags |= REG_ICASE;
- if (opt->extended_regexp_option)
+ if (opt->pattern_type_option == GREP_PATTERN_TYPE_ERE)
regflags |= REG_EXTENDED;
err = regcomp(&p->regexp, p->pattern, regflags);
if (err) {
@@ -595,6 +510,35 @@ static void compile_regexp(struct grep_pat *p, struct grep_opt *opt)
}
}
+static struct grep_expr *grep_not_expr(struct grep_expr *expr)
+{
+ struct grep_expr *z = xcalloc(1, sizeof(*z));
+ z->node = GREP_NODE_NOT;
+ z->u.unary = expr;
+ return z;
+}
+
+static struct grep_expr *grep_binexp(enum grep_expr_node kind,
+ struct grep_expr *left,
+ struct grep_expr *right)
+{
+ struct grep_expr *z = xcalloc(1, sizeof(*z));
+ z->node = kind;
+ z->u.binary.left = left;
+ z->u.binary.right = right;
+ return z;
+}
+
+static struct grep_expr *grep_or_expr(struct grep_expr *left, struct grep_expr *right)
+{
+ return grep_binexp(GREP_NODE_OR, left, right);
+}
+
+static struct grep_expr *grep_and_expr(struct grep_expr *left, struct grep_expr *right)
+{
+ return grep_binexp(GREP_NODE_AND, left, right);
+}
+
static struct grep_expr *compile_pattern_or(struct grep_pat **);
static struct grep_expr *compile_pattern_atom(struct grep_pat **list)
{
@@ -638,12 +582,10 @@ static struct grep_expr *compile_pattern_not(struct grep_pat **list)
if (!p->next)
die("--not not followed by pattern expression");
*list = p->next;
- CALLOC_ARRAY(x, 1);
- x->node = GREP_NODE_NOT;
- x->u.unary = compile_pattern_not(list);
- if (!x->u.unary)
+ x = compile_pattern_not(list);
+ if (!x)
die("--not followed by non pattern expression");
- return x;
+ return grep_not_expr(x);
default:
return compile_pattern_atom(list);
}
@@ -652,7 +594,7 @@ static struct grep_expr *compile_pattern_not(struct grep_pat **list)
static struct grep_expr *compile_pattern_and(struct grep_pat **list)
{
struct grep_pat *p;
- struct grep_expr *x, *y, *z;
+ struct grep_expr *x, *y;
x = compile_pattern_not(list);
p = *list;
@@ -665,11 +607,7 @@ static struct grep_expr *compile_pattern_and(struct grep_pat **list)
y = compile_pattern_and(list);
if (!y)
die("--and not followed by pattern expression");
- CALLOC_ARRAY(z, 1);
- z->node = GREP_NODE_AND;
- z->u.binary.left = x;
- z->u.binary.right = y;
- return z;
+ return grep_and_expr(x, y);
}
return x;
}
@@ -677,7 +615,7 @@ static struct grep_expr *compile_pattern_and(struct grep_pat **list)
static struct grep_expr *compile_pattern_or(struct grep_pat **list)
{
struct grep_pat *p;
- struct grep_expr *x, *y, *z;
+ struct grep_expr *x, *y;
x = compile_pattern_and(list);
p = *list;
@@ -685,11 +623,7 @@ static struct grep_expr *compile_pattern_or(struct grep_pat **list)
y = compile_pattern_or(list);
if (!y)
die("not a pattern expression %s", p->pattern);
- CALLOC_ARRAY(z, 1);
- z->node = GREP_NODE_OR;
- z->u.binary.left = x;
- z->u.binary.right = y;
- return z;
+ return grep_or_expr(x, y);
}
return x;
}
@@ -699,14 +633,6 @@ static struct grep_expr *compile_pattern_expr(struct grep_pat **list)
return compile_pattern_or(list);
}
-static struct grep_expr *grep_not_expr(struct grep_expr *expr)
-{
- struct grep_expr *z = xcalloc(1, sizeof(*z));
- z->node = GREP_NODE_NOT;
- z->u.unary = expr;
- return z;
-}
-
static struct grep_expr *grep_true_expr(void)
{
struct grep_expr *z = xcalloc(1, sizeof(*z));
@@ -714,15 +640,6 @@ static struct grep_expr *grep_true_expr(void)
return z;
}
-static struct grep_expr *grep_or_expr(struct grep_expr *left, struct grep_expr *right)
-{
- struct grep_expr *z = xcalloc(1, sizeof(*z));
- z->node = GREP_NODE_OR;
- z->u.binary.left = left;
- z->u.binary.right = right;
- return z;
-}
-
static struct grep_expr *prep_header_patterns(struct grep_opt *opt)
{
struct grep_pat *p;
diff --git a/grep.h b/grep.h
index 6a1f0ab..c722d25 100644
--- a/grep.h
+++ b/grep.h
@@ -134,9 +134,6 @@ struct grep_opt {
*/
struct repository *repo;
- const char *prefix;
- int prefix_length;
- regex_t regexp;
int linenum;
int columnnum;
int invert;
@@ -146,7 +143,6 @@ struct grep_opt {
int unmatch_name_only;
int count;
int word_regexp;
- int fixed;
int all_match;
int no_body_match;
int body_hit;
@@ -157,7 +153,6 @@ struct grep_opt {
int allow_textconv;
int extended;
int use_reflog_filter;
- int pcre2;
int relative;
int pathname;
int null_following_name;
@@ -167,7 +162,7 @@ struct grep_opt {
int funcname;
int funcbody;
int extended_regexp_option;
- int pattern_type_option;
+ enum grep_pattern_type pattern_type_option;
int ignore_locale;
char colors[NR_GREP_COLORS][COLOR_MAXLEN];
unsigned pre_context;
@@ -182,9 +177,29 @@ struct grep_opt {
void *output_priv;
};
+#define GREP_OPT_INIT { \
+ .relative = 1, \
+ .pathname = 1, \
+ .max_depth = -1, \
+ .pattern_type_option = GREP_PATTERN_TYPE_UNSPECIFIED, \
+ .colors = { \
+ [GREP_COLOR_CONTEXT] = "", \
+ [GREP_COLOR_FILENAME] = GIT_COLOR_MAGENTA, \
+ [GREP_COLOR_FUNCTION] = "", \
+ [GREP_COLOR_LINENO] = GIT_COLOR_GREEN, \
+ [GREP_COLOR_COLUMNNO] = GIT_COLOR_GREEN, \
+ [GREP_COLOR_MATCH_CONTEXT] = GIT_COLOR_BOLD_RED, \
+ [GREP_COLOR_MATCH_SELECTED] = GIT_COLOR_BOLD_RED, \
+ [GREP_COLOR_SELECTED] = "", \
+ [GREP_COLOR_SEP] = GIT_COLOR_CYAN, \
+ }, \
+ .only_matching = 0, \
+ .color = -1, \
+ .output = std_output, \
+}
+
int grep_config(const char *var, const char *value, void *);
-void grep_init(struct grep_opt *, struct repository *repo, const char *prefix);
-void grep_commit_pattern_type(enum grep_pattern_type, struct grep_opt *opt);
+void grep_init(struct grep_opt *, struct repository *repo);
void append_grep_pat(struct grep_opt *opt, const char *pat, size_t patlen, const char *origin, int no, enum grep_pat_token t);
void append_grep_pattern(struct grep_opt *opt, const char *pat, const char *origin, int no, enum grep_pat_token t);
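A minimal usage sketch for the new initializer and the two-argument grep_init(); the enclosing function, repository handle and pattern below are hypothetical, only GREP_OPT_INIT and the changed prototypes come from this patch:

#include "grep.h"

/* Compile-time defaults; the macro replaces runtime default-setting */
static struct grep_opt opt = GREP_OPT_INIT;

static void setup_grep(struct repository *repo)
{
	grep_init(&opt, repo);	/* the prefix argument is gone */
	append_grep_pattern(&opt, "TODO", "command line", 0, GREP_PATTERN);
}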
diff --git a/help.c b/help.c
index 7144490..41c41c2 100644
--- a/help.c
+++ b/help.c
@@ -12,6 +12,7 @@
#include "refs.h"
#include "parse-options.h"
#include "prompt.h"
+#include "fsmonitor-ipc.h"
struct category_description {
uint32_t category;
@@ -124,7 +125,9 @@ static void print_cmd_by_category(const struct category_description *catdesc,
uint32_t mask = catdesc[i].category;
const char *desc = catdesc[i].desc;
- printf("\n%s\n", _(desc));
+ if (i)
+ putchar('\n');
+ puts(_(desc));
print_command_list(cmds, mask, longest);
}
free(cmds);
@@ -317,7 +320,7 @@ void list_commands(struct cmdnames *main_cmds, struct cmdnames *other_cmds)
}
if (other_cmds->cnt) {
- printf_ln(_("git commands available from elsewhere on your $PATH"));
+ puts(_("git commands available from elsewhere on your $PATH"));
putchar('\n');
pretty_print_cmdnames(other_cmds, colopts);
putchar('\n');
@@ -327,6 +330,7 @@ void list_commands(struct cmdnames *main_cmds, struct cmdnames *other_cmds)
void list_common_cmds_help(void)
{
puts(_("These are common Git commands used in various situations:"));
+ putchar('\n');
print_cmd_by_category(common_categories, NULL);
}
@@ -432,15 +436,10 @@ static int get_alias(const char *var, const char *value, void *data)
return 0;
}
-void list_all_cmds_help(void)
+static void list_all_cmds_help_external_commands(void)
{
struct string_list others = STRING_LIST_INIT_DUP;
- struct string_list alias_list = STRING_LIST_INIT_DUP;
- struct cmdname_help *aliases;
- int i, longest;
-
- printf_ln(_("See 'git help <command>' to read about a specific subcommand"));
- print_cmd_by_category(main_categories, &longest);
+ int i;
list_all_other_cmds(&others);
if (others.nr)
@@ -448,6 +447,13 @@ void list_all_cmds_help(void)
for (i = 0; i < others.nr; i++)
printf(" %s\n", others.items[i].string);
string_list_clear(&others, 0);
+}
+
+static void list_all_cmds_help_aliases(int longest)
+{
+ struct string_list alias_list = STRING_LIST_INIT_DUP;
+ struct cmdname_help *aliases;
+ int i;
git_config(get_alias, &alias_list);
string_list_sort(&alias_list);
@@ -473,6 +479,20 @@ void list_all_cmds_help(void)
string_list_clear(&alias_list, 1);
}
+void list_all_cmds_help(int show_external_commands, int show_aliases)
+{
+ int longest;
+
+ puts(_("See 'git help <command>' to read about a specific subcommand"));
+ putchar('\n');
+ print_cmd_by_category(main_categories, &longest);
+
+ if (show_external_commands)
+ list_all_cmds_help_external_commands();
+ if (show_aliases)
+ list_all_cmds_help_aliases(longest);
+}
+
int is_in_cmdlist(struct cmdnames *c, const char *s)
{
int i;
@@ -695,6 +715,9 @@ void get_version_info(struct strbuf *buf, int show_build_options)
strbuf_addf(buf, "sizeof-size_t: %d\n", (int)sizeof(size_t));
strbuf_addf(buf, "shell-path: %s\n", SHELL_PATH);
/* NEEDSWORK: also save and output GIT-BUILD_OPTIONS? */
+
+ if (fsmonitor_ipc__is_supported())
+ strbuf_addstr(buf, "feature: fsmonitor--daemon\n");
}
}
diff --git a/help.h b/help.h
index 9d383f1..971a3ad 100644
--- a/help.h
+++ b/help.h
@@ -20,7 +20,7 @@ static inline void mput_char(char c, unsigned int num)
}
void list_common_cmds_help(void);
-void list_all_cmds_help(void);
+void list_all_cmds_help(int show_external_commands, int show_aliases);
void list_guides_help(void);
void list_all_main_cmds(struct string_list *list);
diff --git a/hook.c b/hook.c
index 55e1145..1d51be3 100644
--- a/hook.c
+++ b/hook.c
@@ -1,6 +1,7 @@
#include "cache.h"
#include "hook.h"
#include "run-command.h"
+#include "config.h"
const char *find_hook(const char *name)
{
@@ -40,3 +41,140 @@ int hook_exists(const char *name)
{
return !!find_hook(name);
}
+
+static int pick_next_hook(struct child_process *cp,
+ struct strbuf *out,
+ void *pp_cb,
+ void **pp_task_cb)
+{
+ struct hook_cb_data *hook_cb = pp_cb;
+ const char *hook_path = hook_cb->hook_path;
+
+ if (!hook_path)
+ return 0;
+
+ cp->no_stdin = 1;
+ strvec_pushv(&cp->env_array, hook_cb->options->env.v);
+ cp->stdout_to_stderr = 1;
+ cp->trace2_hook_name = hook_cb->hook_name;
+ cp->dir = hook_cb->options->dir;
+
+ strvec_push(&cp->args, hook_path);
+ strvec_pushv(&cp->args, hook_cb->options->args.v);
+
+ /* Provide context for errors if necessary */
+ *pp_task_cb = (char *)hook_path;
+
+ /*
+ * pick_next_hook() will be called again by the parallel
+ * machinery; since we are only running one hook, indicate
+ * that no more work will be done.
+ */
+ hook_cb->hook_path = NULL;
+
+ return 1;
+}
+
+static int notify_start_failure(struct strbuf *out,
+ void *pp_cb,
+ void *pp_task_cp)
+{
+ struct hook_cb_data *hook_cb = pp_cb;
+ const char *hook_path = pp_task_cp;
+
+ hook_cb->rc |= 1;
+
+ strbuf_addf(out, _("Couldn't start hook '%s'\n"),
+ hook_path);
+
+ return 1;
+}
+
+static int notify_hook_finished(int result,
+ struct strbuf *out,
+ void *pp_cb,
+ void *pp_task_cb)
+{
+ struct hook_cb_data *hook_cb = pp_cb;
+ struct run_hooks_opt *opt = hook_cb->options;
+
+ hook_cb->rc |= result;
+
+ if (opt->invoked_hook)
+ *opt->invoked_hook = 1;
+
+ return 0;
+}
+
+static void run_hooks_opt_clear(struct run_hooks_opt *options)
+{
+ strvec_clear(&options->env);
+ strvec_clear(&options->args);
+}
+
+int run_hooks_opt(const char *hook_name, struct run_hooks_opt *options)
+{
+ struct strbuf abs_path = STRBUF_INIT;
+ struct hook_cb_data cb_data = {
+ .rc = 0,
+ .hook_name = hook_name,
+ .options = options,
+ };
+ const char *const hook_path = find_hook(hook_name);
+ int jobs = 1;
+ int ret = 0;
+
+ if (!options)
+ BUG("a struct run_hooks_opt must be provided to run_hooks");
+
+ if (options->invoked_hook)
+ *options->invoked_hook = 0;
+
+ if (!hook_path && !options->error_if_missing)
+ goto cleanup;
+
+ if (!hook_path) {
+ ret = error("cannot find a hook named %s", hook_name);
+ goto cleanup;
+ }
+
+ cb_data.hook_path = hook_path;
+ if (options->dir) {
+ strbuf_add_absolute_path(&abs_path, hook_path);
+ cb_data.hook_path = abs_path.buf;
+ }
+
+ run_processes_parallel_tr2(jobs,
+ pick_next_hook,
+ notify_start_failure,
+ notify_hook_finished,
+ &cb_data,
+ "hook",
+ hook_name);
+ ret = cb_data.rc;
+cleanup:
+ strbuf_release(&abs_path);
+ run_hooks_opt_clear(options);
+ return ret;
+}
+
+int run_hooks(const char *hook_name)
+{
+ struct run_hooks_opt opt = RUN_HOOKS_OPT_INIT;
+
+ return run_hooks_opt(hook_name, &opt);
+}
+
+int run_hooks_l(const char *hook_name, ...)
+{
+ struct run_hooks_opt opt = RUN_HOOKS_OPT_INIT;
+ va_list ap;
+ const char *arg;
+
+ va_start(ap, hook_name);
+ while ((arg = va_arg(ap, const char *)))
+ strvec_push(&opt.args, arg);
+ va_end(ap);
+
+ return run_hooks_opt(hook_name, &opt);
+}
diff --git a/hook.h b/hook.h
index 6aa36fc..4258b13 100644
--- a/hook.h
+++ b/hook.h
@@ -1,5 +1,49 @@
#ifndef HOOK_H
#define HOOK_H
+#include "strvec.h"
+
+struct run_hooks_opt
+{
+ /* Environment vars to be set for each hook */
+ struct strvec env;
+
+ /* Args to be passed to each hook */
+ struct strvec args;
+
+ /* Emit an error if the hook is missing */
+ unsigned int error_if_missing:1;
+
+ /**
+ * An optional initial working directory for the hook; it
+ * translates to "struct child_process"'s "dir" member.
+ */
+ const char *dir;
+
+ /**
+ * A pointer which, if provided, will be set to 1 or 0 depending
+ * on whether a hook was started; i.e. it is set to 1 if the
+ * underlying start_command() succeeded, regardless of how the
+ * hook itself exited.
+ *
+ * Used to avoid TOCTOU races in code that would otherwise call
+ * hook_exists() after a "maybe hook run" to see if a hook was
+ * invoked.
+ */
+ int *invoked_hook;
+};
+
+#define RUN_HOOKS_OPT_INIT { \
+ .env = STRVEC_INIT, \
+ .args = STRVEC_INIT, \
+}
+
+struct hook_cb_data {
+ /* rc reflects the cumulative failure state */
+ int rc;
+ const char *hook_name;
+ const char *hook_path;
+ struct run_hooks_opt *options;
+};
/*
* Returns the path to the hook file, or NULL if the hook is missing
@@ -13,4 +57,29 @@ const char *find_hook(const char *name);
*/
int hook_exists(const char *hookname);
+/**
+ * Takes a `hook_name`, resolves it to a path with find_hook(), and
+ * runs the hook for you with the options specified in the "struct
+ * run_hooks_opt". Will free memory associated with the "struct run_hooks_opt".
+ *
+ * Returns the status code of the run hook, or a negative value on
+ * error().
+ */
+int run_hooks_opt(const char *hook_name, struct run_hooks_opt *options);
+
+/**
+ * A wrapper for run_hooks_opt() which provides a dummy "struct
+ * run_hooks_opt" initialized with "RUN_HOOKS_OPT_INIT".
+ */
+int run_hooks(const char *hook_name);
+
+/**
+ * Like run_hooks(), a wrapper for run_hooks_opt().
+ *
+ * In addition to the wrapping behavior provided by run_hooks(), this
+ * wrapper takes a list of strings terminated by a NULL
+ * argument. These strings will be used as positional arguments to the
+ * hook. This function behaves like the old run_hook_le() API.
+ */
+int run_hooks_l(const char *hook_name, ...);
#endif
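As a rough sketch of the two calling styles introduced above (the hook names and surrounding functions here are illustrative, not part of this patch):

#include "hook.h"

/* Variadic form: positional arguments terminated by NULL */
static int notify_post_checkout(const char *old_head, const char *new_head)
{
	return run_hooks_l("post-checkout", old_head, new_head, "1", NULL);
}

/* Struct form: build up args and detect whether a hook actually ran */
static int run_prepare_commit_msg(const char *msg_file)
{
	struct run_hooks_opt opt = RUN_HOOKS_OPT_INIT;
	int invoked = 0;

	opt.invoked_hook = &invoked;
	strvec_push(&opt.args, msg_file);

	/* run_hooks_opt() releases opt's strvecs before returning */
	return run_hooks_opt("prepare-commit-msg", &opt);
}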
diff --git a/http-backend.c b/http-backend.c
index 807fb88..81a7229 100644
--- a/http-backend.c
+++ b/http-backend.c
@@ -13,6 +13,7 @@
#include "packfile.h"
#include "object-store.h"
#include "protocol.h"
+#include "date.h"
static const char content_type[] = "Content-Type";
static const char content_length[] = "Content-Length";
diff --git a/http-push.c b/http-push.c
index 3309aaf..f0c044d 100644
--- a/http-push.c
+++ b/http-push.c
@@ -363,7 +363,7 @@ static void start_put(struct transfer_request *request)
git_zstream stream;
unpacked = read_object_file(&request->obj->oid, &type, &len);
- hdrlen = xsnprintf(hdr, sizeof(hdr), "%s %"PRIuMAX , type_name(type), (uintmax_t)len) + 1;
+ hdrlen = format_object_header(hdr, sizeof(hdr), type, len);
/* Set it up */
git_deflate_init(&stream, zlib_compression_level);
diff --git a/ident.c b/ident.c
index 6aba4b5..89ca5b4 100644
--- a/ident.c
+++ b/ident.c
@@ -7,6 +7,7 @@
*/
#include "cache.h"
#include "config.h"
+#include "date.h"
static struct strbuf git_default_name = STRBUF_INIT;
static struct strbuf git_default_email = STRBUF_INIT;
diff --git a/imap-send.c b/imap-send.c
index e6090a0..a50af56 100644
--- a/imap-send.c
+++ b/imap-send.c
@@ -27,7 +27,7 @@
#include "exec-cmd.h"
#include "run-command.h"
#include "parse-options.h"
-#ifdef NO_OPENSSL
+#if defined(NO_OPENSSL) && !defined(HAVE_OPENSSL_CSPRNG)
typedef void *SSL;
#endif
#ifdef USE_CURL_FOR_IMAP_SEND
@@ -98,17 +98,7 @@ struct imap_server_conf {
};
static struct imap_server_conf server = {
- NULL, /* name */
- NULL, /* tunnel */
- NULL, /* host */
- 0, /* port */
- NULL, /* folder */
- NULL, /* user */
- NULL, /* pass */
- 0, /* use_ssl */
- 1, /* ssl_verify */
- 0, /* use_html */
- NULL, /* auth_method */
+ .ssl_verify = 1,
};
struct imap_socket {
diff --git a/list-objects-filter-options.c b/list-objects-filter-options.c
index fd8d59f..4b25287 100644
--- a/list-objects-filter-options.c
+++ b/list-objects-filter-options.c
@@ -40,22 +40,7 @@ const char *list_object_filter_config_name(enum list_objects_filter_choice c)
BUG("list_object_filter_config_name: invalid argument '%d'", c);
}
-/*
- * Parse value of the argument to the "filter" keyword.
- * On the command line this looks like:
- * --filter=<arg>
- * and in the pack protocol as:
- * "filter" SP <arg>
- *
- * The filter keyword will be used by many commands.
- * See Documentation/rev-list-options.txt for allowed values for <arg>.
- *
- * Capture the given arg as the "filter_spec". This can be forwarded to
- * subordinate commands when necessary (although it's better to pass it through
- * expand_list_objects_filter_spec() first). We also "intern" the arg for the
- * convenience of the current command.
- */
-static int gently_parse_list_objects_filter(
+int gently_parse_list_objects_filter(
struct list_objects_filter_options *filter_options,
const char *arg,
struct strbuf *errbuf)
@@ -300,6 +285,10 @@ int opt_parse_list_objects_filter(const struct option *opt,
const char *arg, int unset)
{
struct list_objects_filter_options *filter_options = opt->value;
+ opt_lof_init init = (opt_lof_init)opt->defval;
+
+ if (init)
+ filter_options = init(opt->value);
if (unset || !arg)
list_objects_filter_set_no_filter(filter_options);
@@ -415,3 +404,22 @@ void partial_clone_get_default_filter_spec(
&errbuf);
strbuf_release(&errbuf);
}
+
+void list_objects_filter_copy(
+ struct list_objects_filter_options *dest,
+ const struct list_objects_filter_options *src)
+{
+ int i;
+ struct string_list_item *item;
+
+ /* Copy everything. We will overwrite the pointers shortly. */
+ memcpy(dest, src, sizeof(struct list_objects_filter_options));
+
+ string_list_init_dup(&dest->filter_spec);
+ for_each_string_list_item(item, &src->filter_spec)
+ string_list_append(&dest->filter_spec, item->string);
+
+ ALLOC_ARRAY(dest->sub, dest->sub_alloc);
+ for (i = 0; i < src->sub_nr; i++)
+ list_objects_filter_copy(&dest->sub[i], &src->sub[i]);
+}
diff --git a/list-objects-filter-options.h b/list-objects-filter-options.h
index da5b673..ffc02d7 100644
--- a/list-objects-filter-options.h
+++ b/list-objects-filter-options.h
@@ -69,8 +69,25 @@ struct list_objects_filter_options {
*/
};
-/* Normalized command line arguments */
-#define CL_ARG__FILTER "filter"
+/*
+ * Parse value of the argument to the "filter" keyword.
+ * On the command line this looks like:
+ * --filter=<arg>
+ * and in the pack protocol as:
+ * "filter" SP <arg>
+ *
+ * The filter keyword will be used by many commands.
+ * See Documentation/rev-list-options.txt for allowed values for <arg>.
+ *
+ * Capture the given arg as the "filter_spec". This can be forwarded to
+ * subordinate commands when necessary (although it's better to pass it through
+ * expand_list_objects_filter_spec() first). We also "intern" the arg for the
+ * convenience of the current command.
+ */
+int gently_parse_list_objects_filter(
+ struct list_objects_filter_options *filter_options,
+ const char *arg,
+ struct strbuf *errbuf);
void list_objects_filter_die_if_populated(
struct list_objects_filter_options *filter_options);
@@ -87,13 +104,31 @@ void parse_list_objects_filter(
struct list_objects_filter_options *filter_options,
const char *arg);
+/**
+ * The opt->value to opt_parse_list_objects_filter() is a
+ * "struct list_objects_filter_options *" when using
+ * OPT_PARSE_LIST_OBJECTS_FILTER().
+ *
+ * When using OPT_PARSE_LIST_OBJECTS_FILTER_INIT() no "struct option"
+ * field is used by the callback except "defval", which is expected to
+ * be an "opt_lof_init" function; it is called with "opt->value" and
+ * must return a pointer to the "struct list_objects_filter_options"
+ * to be used.
+ *
+ * That is useful when e.g. the "struct list_objects_filter_options"
+ * is embedded in a "struct rev_info", which the "defval" could be
+ * tasked with lazily initializing. See cmd_pack_objects() for an example.
+ */
int opt_parse_list_objects_filter(const struct option *opt,
const char *arg, int unset);
+typedef struct list_objects_filter_options *(*opt_lof_init)(void *);
+#define OPT_PARSE_LIST_OBJECTS_FILTER_INIT(fo, init) \
+ { OPTION_CALLBACK, 0, "filter", (fo), N_("args"), \
+ N_("object filtering"), 0, opt_parse_list_objects_filter, \
+ (intptr_t)(init) }
#define OPT_PARSE_LIST_OBJECTS_FILTER(fo) \
- OPT_CALLBACK(0, CL_ARG__FILTER, fo, N_("args"), \
- N_("object filtering"), \
- opt_parse_list_objects_filter)
+ OPT_PARSE_LIST_OBJECTS_FILTER_INIT((fo), NULL)
/*
* Translates abbreviated numbers in the filter's filter_spec into their
@@ -132,4 +167,8 @@ void partial_clone_get_default_filter_spec(
struct list_objects_filter_options *filter_options,
const char *remote);
+void list_objects_filter_copy(
+ struct list_objects_filter_options *dest,
+ const struct list_objects_filter_options *src);
+
#endif /* LIST_OBJECTS_FILTER_OPTIONS_H */
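A hedged sketch of the lazy-initialization hook described above; the static rev_info and the callback name are invented for illustration, while the macro, the opt_lof_init typedef and rev_info's embedded filter come from this series:

#include "list-objects-filter-options.h"
#include "parse-options.h"
#include "revision.h"

static struct rev_info revs;

/* Called when --filter is parsed; hands back the filter embedded in revs */
static struct list_objects_filter_options *my_opt_lof_init(void *value)
{
	struct rev_info *r = value;
	return &r->filter;
}

static struct option my_options[] = {
	OPT_PARSE_LIST_OBJECTS_FILTER_INIT(&revs, my_opt_lof_init),
	OPT_END()
};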
diff --git a/list-objects.c b/list-objects.c
index 2f623f8..250d9de 100644
--- a/list-objects.c
+++ b/list-objects.c
@@ -21,6 +21,23 @@ struct traversal_context {
struct filter *filter;
};
+static void show_commit(struct traversal_context *ctx,
+ struct commit *commit)
+{
+ if (!ctx->show_commit)
+ return;
+ ctx->show_commit(commit, ctx->show_data);
+}
+
+static void show_object(struct traversal_context *ctx,
+ struct object *object,
+ const char *name)
+{
+ if (!ctx->show_object)
+ return;
+ ctx->show_object(object, name, ctx->show_data);
+}
+
static void process_blob(struct traversal_context *ctx,
struct blob *blob,
struct strbuf *path,
@@ -60,7 +77,7 @@ static void process_blob(struct traversal_context *ctx,
if (r & LOFR_MARK_SEEN)
obj->flags |= SEEN;
if (r & LOFR_DO_SHOW)
- ctx->show_object(obj, path->buf, ctx->show_data);
+ show_object(ctx, obj, path->buf);
strbuf_setlen(path, pathlen);
}
@@ -194,7 +211,7 @@ static void process_tree(struct traversal_context *ctx,
if (r & LOFR_MARK_SEEN)
obj->flags |= SEEN;
if (r & LOFR_DO_SHOW)
- ctx->show_object(obj, base->buf, ctx->show_data);
+ show_object(ctx, obj, base->buf);
if (base->len)
strbuf_addch(base, '/');
@@ -210,7 +227,7 @@ static void process_tree(struct traversal_context *ctx,
if (r & LOFR_MARK_SEEN)
obj->flags |= SEEN;
if (r & LOFR_DO_SHOW)
- ctx->show_object(obj, base->buf, ctx->show_data);
+ show_object(ctx, obj, base->buf);
strbuf_setlen(base, baselen);
free_tree_buffer(tree);
@@ -228,7 +245,7 @@ static void process_tag(struct traversal_context *ctx,
if (r & LOFR_MARK_SEEN)
tag->object.flags |= SEEN;
if (r & LOFR_DO_SHOW)
- ctx->show_object(&tag->object, name, ctx->show_data);
+ show_object(ctx, &tag->object, name);
}
static void mark_edge_parents_uninteresting(struct commit *commit,
@@ -402,7 +419,7 @@ static void do_traverse(struct traversal_context *ctx)
if (r & LOFR_MARK_SEEN)
commit->object.flags |= SEEN;
if (r & LOFR_DO_SHOW)
- ctx->show_commit(commit, ctx->show_data);
+ show_commit(ctx, commit);
if (ctx->revs->tree_blobs_in_commit_order)
/*
@@ -416,35 +433,25 @@ static void do_traverse(struct traversal_context *ctx)
strbuf_release(&csp);
}
-void traverse_commit_list(struct rev_info *revs,
- show_commit_fn show_commit,
- show_object_fn show_object,
- void *show_data)
-{
- struct traversal_context ctx;
- ctx.revs = revs;
- ctx.show_commit = show_commit;
- ctx.show_object = show_object;
- ctx.show_data = show_data;
- ctx.filter = NULL;
- do_traverse(&ctx);
-}
-
void traverse_commit_list_filtered(
- struct list_objects_filter_options *filter_options,
struct rev_info *revs,
show_commit_fn show_commit,
show_object_fn show_object,
void *show_data,
struct oidset *omitted)
{
- struct traversal_context ctx;
+ struct traversal_context ctx = {
+ .revs = revs,
+ .show_object = show_object,
+ .show_commit = show_commit,
+ .show_data = show_data,
+ };
+
+ if (revs->filter.choice)
+ ctx.filter = list_objects_filter__init(omitted, &revs->filter);
- ctx.revs = revs;
- ctx.show_object = show_object;
- ctx.show_commit = show_commit;
- ctx.show_data = show_data;
- ctx.filter = list_objects_filter__init(omitted, filter_options);
do_traverse(&ctx);
- list_objects_filter__free(ctx.filter);
+
+ if (ctx.filter)
+ list_objects_filter__free(ctx.filter);
}
diff --git a/list-objects.h b/list-objects.h
index a952680..9eaf4de 100644
--- a/list-objects.h
+++ b/list-objects.h
@@ -7,7 +7,6 @@ struct rev_info;
typedef void (*show_commit_fn)(struct commit *, void *);
typedef void (*show_object_fn)(struct object *, const char *, void *);
-void traverse_commit_list(struct rev_info *, show_commit_fn, show_object_fn, void *);
typedef void (*show_edge_fn)(struct commit *);
void mark_edges_uninteresting(struct rev_info *revs,
@@ -18,11 +17,20 @@ struct oidset;
struct list_objects_filter_options;
void traverse_commit_list_filtered(
- struct list_objects_filter_options *filter_options,
struct rev_info *revs,
show_commit_fn show_commit,
show_object_fn show_object,
void *show_data,
struct oidset *omitted);
+static inline void traverse_commit_list(
+ struct rev_info *revs,
+ show_commit_fn show_commit,
+ show_object_fn show_object,
+ void *show_data)
+{
+ traverse_commit_list_filtered(revs, show_commit,
+ show_object, show_data, NULL);
+}
+
#endif /* LIST_OBJECTS_H */
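For illustration only (the callbacks are placeholders): with the filter now carried inside rev_info, a plain traversal and a filtered one take the same arguments, and the wrapper above simply passes NULL for the set of omitted objects:

static void my_show_commit(struct commit *commit, void *data)
{
	/* per-commit handling would go here */
}

static void my_show_object(struct object *obj, const char *name, void *data)
{
	/* per-object handling would go here */
}

static void walk(struct rev_info *revs)
{
	/* Filtering, if any, is picked up from revs->filter */
	traverse_commit_list(revs, my_show_commit, my_show_object, NULL);
}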
diff --git a/ll-merge.c b/ll-merge.c
index 2616575..a937cec 100644
--- a/ll-merge.c
+++ b/ll-merge.c
@@ -14,7 +14,7 @@
struct ll_merge_driver;
-typedef int (*ll_merge_fn)(const struct ll_merge_driver *,
+typedef enum ll_merge_result (*ll_merge_fn)(const struct ll_merge_driver *,
mmbuffer_t *result,
const char *path,
mmfile_t *orig, const char *orig_name,
@@ -49,7 +49,7 @@ void reset_merge_attributes(void)
/*
* Built-in low-levels
*/
-static int ll_binary_merge(const struct ll_merge_driver *drv_unused,
+static enum ll_merge_result ll_binary_merge(const struct ll_merge_driver *drv_unused,
mmbuffer_t *result,
const char *path,
mmfile_t *orig, const char *orig_name,
@@ -58,6 +58,7 @@ static int ll_binary_merge(const struct ll_merge_driver *drv_unused,
const struct ll_merge_options *opts,
int marker_size)
{
+ enum ll_merge_result ret;
mmfile_t *stolen;
assert(opts);
@@ -68,16 +69,19 @@ static int ll_binary_merge(const struct ll_merge_driver *drv_unused,
*/
if (opts->virtual_ancestor) {
stolen = orig;
+ ret = LL_MERGE_OK;
} else {
switch (opts->variant) {
default:
- warning("Cannot merge binary files: %s (%s vs. %s)",
- path, name1, name2);
- /* fallthru */
+ ret = LL_MERGE_BINARY_CONFLICT;
+ stolen = src1;
+ break;
case XDL_MERGE_FAVOR_OURS:
+ ret = LL_MERGE_OK;
stolen = src1;
break;
case XDL_MERGE_FAVOR_THEIRS:
+ ret = LL_MERGE_OK;
stolen = src2;
break;
}
@@ -87,16 +91,10 @@ static int ll_binary_merge(const struct ll_merge_driver *drv_unused,
result->size = stolen->size;
stolen->ptr = NULL;
- /*
- * With -Xtheirs or -Xours, we have cleanly merged;
- * otherwise we got a conflict.
- */
- return opts->variant == XDL_MERGE_FAVOR_OURS ||
- opts->variant == XDL_MERGE_FAVOR_THEIRS ?
- 0 : 1;
+ return ret;
}
-static int ll_xdl_merge(const struct ll_merge_driver *drv_unused,
+static enum ll_merge_result ll_xdl_merge(const struct ll_merge_driver *drv_unused,
mmbuffer_t *result,
const char *path,
mmfile_t *orig, const char *orig_name,
@@ -105,7 +103,9 @@ static int ll_xdl_merge(const struct ll_merge_driver *drv_unused,
const struct ll_merge_options *opts,
int marker_size)
{
+ enum ll_merge_result ret;
xmparam_t xmp;
+ int status;
assert(opts);
if (orig->size > MAX_XDIFF_SIZE ||
@@ -133,10 +133,12 @@ static int ll_xdl_merge(const struct ll_merge_driver *drv_unused,
xmp.ancestor = orig_name;
xmp.file1 = name1;
xmp.file2 = name2;
- return xdl_merge(orig, src1, src2, &xmp, result);
+ status = xdl_merge(orig, src1, src2, &xmp, result);
+ ret = (status > 0) ? LL_MERGE_CONFLICT : status;
+ return ret;
}
-static int ll_union_merge(const struct ll_merge_driver *drv_unused,
+static enum ll_merge_result ll_union_merge(const struct ll_merge_driver *drv_unused,
mmbuffer_t *result,
const char *path,
mmfile_t *orig, const char *orig_name,
@@ -178,7 +180,7 @@ static void create_temp(mmfile_t *src, char *path, size_t len)
/*
* User defined low-level merge driver support.
*/
-static int ll_ext_merge(const struct ll_merge_driver *fn,
+static enum ll_merge_result ll_ext_merge(const struct ll_merge_driver *fn,
mmbuffer_t *result,
const char *path,
mmfile_t *orig, const char *orig_name,
@@ -194,6 +196,7 @@ static int ll_ext_merge(const struct ll_merge_driver *fn,
const char *args[] = { NULL, NULL };
int status, fd, i;
struct stat st;
+ enum ll_merge_result ret;
assert(opts);
sq_quote_buf(&path_sq, path);
@@ -236,7 +239,8 @@ static int ll_ext_merge(const struct ll_merge_driver *fn,
unlink_or_warn(temp[i]);
strbuf_release(&cmd);
strbuf_release(&path_sq);
- return status;
+ ret = (status > 0) ? LL_MERGE_CONFLICT : status;
+ return ret;
}
/*
@@ -362,7 +366,7 @@ static void normalize_file(mmfile_t *mm, const char *path, struct index_state *i
}
}
-int ll_merge(mmbuffer_t *result_buf,
+enum ll_merge_result ll_merge(mmbuffer_t *result_buf,
const char *path,
mmfile_t *ancestor, const char *ancestor_label,
mmfile_t *ours, const char *our_label,
diff --git a/ll-merge.h b/ll-merge.h
index aceb1b2..e4a20e8 100644
--- a/ll-merge.h
+++ b/ll-merge.h
@@ -82,13 +82,20 @@ struct ll_merge_options {
long xdl_opts;
};
+enum ll_merge_result {
+ LL_MERGE_ERROR = -1,
+ LL_MERGE_OK = 0,
+ LL_MERGE_CONFLICT,
+ LL_MERGE_BINARY_CONFLICT,
+};
+
/**
* Perform a three-way single-file merge in core. This is a thin wrapper
* around `xdl_merge` that takes the path and any merge backend specified in
* `.gitattributes` or `.git/info/attributes` into account.
* Returns 0 for a clean merge.
*/
-int ll_merge(mmbuffer_t *result_buf,
+enum ll_merge_result ll_merge(mmbuffer_t *result_buf,
const char *path,
mmfile_t *ancestor, const char *ancestor_label,
mmfile_t *ours, const char *our_label,
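A hypothetical caller of the retyped ll_merge() might distinguish the outcomes like this (all names below are invented for the sketch):

static int merge_one_path(const char *path, mmfile_t *ancestor,
			  mmfile_t *ours, mmfile_t *theirs,
			  struct index_state *istate)
{
	struct ll_merge_options opts = {0};
	mmbuffer_t result;
	enum ll_merge_result status;

	status = ll_merge(&result, path, ancestor, "base",
			  ours, "ours", theirs, "theirs", istate, &opts);
	if (status == LL_MERGE_ERROR)
		return error("merge of %s failed", path);
	if (status == LL_MERGE_BINARY_CONFLICT)
		warning("Cannot merge binary files: %s", path);
	/* LL_MERGE_OK is clean; LL_MERGE_CONFLICT left markers in result */
	free(result.ptr);
	return status;
}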
diff --git a/log-tree.c b/log-tree.c
index d3e7a40..38e5ccc 100644
--- a/log-tree.c
+++ b/log-tree.c
@@ -1,12 +1,15 @@
#include "cache.h"
+#include "commit-reach.h"
#include "config.h"
#include "diff.h"
#include "object-store.h"
#include "repository.h"
+#include "tmp-objdir.h"
#include "commit.h"
#include "tag.h"
#include "graph.h"
#include "log-tree.h"
+#include "merge-ort.h"
#include "reflog-walk.h"
#include "refs.h"
#include "string-list.h"
@@ -16,6 +19,7 @@
#include "line-log.h"
#include "help.h"
#include "range-diff.h"
+#include "strmap.h"
static struct decoration name_decoration = { "object names" };
static int decoration_loaded;
@@ -561,7 +565,7 @@ static int show_one_mergetag(struct commit *commit,
struct strbuf signature = STRBUF_INIT;
hash_object_file(the_hash_algo, extra->value, extra->len,
- type_name(OBJ_TAG), &oid);
+ OBJ_TAG, &oid);
tag = lookup_tag(the_repository, &oid);
if (!tag)
return -1; /* error message already given */
@@ -849,7 +853,7 @@ int log_tree_diff_flush(struct rev_info *opt)
opt->shown_dashes = 0;
diffcore_std(&opt->diffopt);
- if (diff_queue_is_empty()) {
+ if (diff_queue_is_empty(&opt->diffopt)) {
int saved_fmt = opt->diffopt.output_format;
opt->diffopt.output_format = DIFF_FORMAT_NO_OUTPUT;
diff_flush(&opt->diffopt);
@@ -904,6 +908,106 @@ static int do_diff_combined(struct rev_info *opt, struct commit *commit)
return !opt->loginfo;
}
+static void setup_additional_headers(struct diff_options *o,
+ struct strmap *all_headers)
+{
+ struct hashmap_iter iter;
+ struct strmap_entry *entry;
+
+ /*
+ * Make o->additional_path_headers contain the subset of all_headers
+ * that match o->pathspec. If there aren't any that match o->pathspec,
+ * then make o->additional_path_headers be NULL.
+ */
+
+ if (!o->pathspec.nr) {
+ o->additional_path_headers = all_headers;
+ return;
+ }
+
+ o->additional_path_headers = xmalloc(sizeof(struct strmap));
+ strmap_init_with_options(o->additional_path_headers, NULL, 0);
+ strmap_for_each_entry(all_headers, &iter, entry) {
+ if (match_pathspec(the_repository->index, &o->pathspec,
+ entry->key, strlen(entry->key),
+ 0 /* prefix */, NULL /* seen */,
+ 0 /* is_dir */))
+ strmap_put(o->additional_path_headers,
+ entry->key, entry->value);
+ }
+ if (!strmap_get_size(o->additional_path_headers)) {
+ strmap_clear(o->additional_path_headers, 0);
+ FREE_AND_NULL(o->additional_path_headers);
+ }
+}
+
+static void cleanup_additional_headers(struct diff_options *o)
+{
+ if (!o->pathspec.nr) {
+ o->additional_path_headers = NULL;
+ return;
+ }
+ if (!o->additional_path_headers)
+ return;
+
+ strmap_clear(o->additional_path_headers, 0);
+ FREE_AND_NULL(o->additional_path_headers);
+}
+
+static int do_remerge_diff(struct rev_info *opt,
+ struct commit_list *parents,
+ struct object_id *oid,
+ struct commit *commit)
+{
+ struct merge_options o;
+ struct commit_list *bases;
+ struct merge_result res = {0};
+ struct pretty_print_context ctx = {0};
+ struct commit *parent1 = parents->item;
+ struct commit *parent2 = parents->next->item;
+ struct strbuf parent1_desc = STRBUF_INIT;
+ struct strbuf parent2_desc = STRBUF_INIT;
+
+ /* Setup merge options */
+ init_merge_options(&o, the_repository);
+ o.show_rename_progress = 0;
+ o.record_conflict_msgs_as_headers = 1;
+ o.msg_header_prefix = "remerge";
+
+ ctx.abbrev = DEFAULT_ABBREV;
+ format_commit_message(parent1, "%h (%s)", &parent1_desc, &ctx);
+ format_commit_message(parent2, "%h (%s)", &parent2_desc, &ctx);
+ o.branch1 = parent1_desc.buf;
+ o.branch2 = parent2_desc.buf;
+
+ /* Parse the relevant commits and get the merge bases */
+ parse_commit_or_die(parent1);
+ parse_commit_or_die(parent2);
+ bases = get_merge_bases(parent1, parent2);
+
+ /* Re-merge the parents */
+ merge_incore_recursive(&o, bases, parent1, parent2, &res);
+
+ /* Show the diff */
+ setup_additional_headers(&opt->diffopt, res.path_messages);
+ diff_tree_oid(&res.tree->object.oid, oid, "", &opt->diffopt);
+ log_tree_diff_flush(opt);
+
+ /* Cleanup */
+ cleanup_additional_headers(&opt->diffopt);
+ strbuf_release(&parent1_desc);
+ strbuf_release(&parent2_desc);
+ merge_finalize(&o, &res);
+
+ /* Clean up the contents of the temporary object directory */
+ if (opt->remerge_objdir)
+ tmp_objdir_discard_objects(opt->remerge_objdir);
+ else
+ BUG("did a remerge diff without remerge_objdir?!?");
+
+ return !opt->loginfo;
+}
+
/*
* Show the diff of a commit.
*
@@ -938,6 +1042,18 @@ static int log_tree_diff(struct rev_info *opt, struct commit *commit, struct log
}
if (is_merge) {
+ int octopus = (parents->next->next != NULL);
+
+ if (opt->remerge_diff) {
+ if (octopus) {
+ show_log(opt);
+ fprintf(opt->diffopt.file,
+ "diff: warning: Skipping remerge-diff "
+ "for octopus merges.\n");
+ return 1;
+ }
+ return do_remerge_diff(opt, parents, oid, commit);
+ }
if (opt->combine_merges)
return do_diff_combined(opt, commit);
if (opt->separate_merges) {
diff --git a/ls-refs.c b/ls-refs.c
index 5407832..98e6937 100644
--- a/ls-refs.c
+++ b/ls-refs.c
@@ -34,7 +34,8 @@ static void ensure_config_read(void)
} else if (!strcmp(str, "ignore")) {
/* do nothing */
} else {
- die(_("invalid value '%s' for lsrefs.unborn"), str);
+ die(_("invalid value for '%s': '%s'"),
+ "lsrefs.unborn", str);
}
}
config_read = 1;
diff --git a/mailmap.c b/mailmap.c
index 40ce152..7befdc5 100644
--- a/mailmap.c
+++ b/mailmap.c
@@ -43,8 +43,8 @@ static void free_mailmap_info(void *p, const char *s)
static void free_mailmap_entry(void *p, const char *s)
{
struct mailmap_entry *me = (struct mailmap_entry *)p;
- debug_mm("mailmap: removing entries for <%s>, with %d sub-entries\n",
- s, me->namemap.nr);
+ debug_mm("mailmap: removing entries for <%s>, with %"PRIuMAX" sub-entries\n",
+ s, (uintmax_t)me->namemap.nr);
debug_mm("mailmap: - simple: '%s' <%s>\n",
debug_str(me->name), debug_str(me->email));
@@ -250,7 +250,8 @@ int read_mailmap(struct string_list *map)
void clear_mailmap(struct string_list *map)
{
- debug_mm("mailmap: clearing %d entries...\n", map->nr);
+ debug_mm("mailmap: clearing %"PRIuMAX" entries...\n",
+ (uintmax_t)map->nr);
map->strdup_strings = 1;
string_list_clear_func(map, free_mailmap_entry);
debug_mm("mailmap: cleared\n");
diff --git a/match-trees.c b/match-trees.c
index df41398..49398e5 100644
--- a/match-trees.c
+++ b/match-trees.c
@@ -235,7 +235,7 @@ static int splice_tree(const struct object_id *oid1, const char *prefix,
rewrite_with = oid2;
}
hashcpy(rewrite_here, rewrite_with->hash);
- status = write_object_file(buf, sz, tree_type, result);
+ status = write_object_file(buf, sz, OBJ_TREE, result);
free(buf);
return status;
}
diff --git a/mem-pool.c b/mem-pool.c
index ccdcad2..599d8e8 100644
--- a/mem-pool.c
+++ b/mem-pool.c
@@ -8,6 +8,26 @@
#define BLOCK_GROWTH_SIZE (1024 * 1024 - sizeof(struct mp_block))
/*
+ * The inner union is an approximation for C11's max_align_t, and the
+ * struct + offsetof computes _Alignof. This can all just be replaced
+ * with _Alignof(max_align_t) if/when C11 is part of the baseline.
+ * Note that _Alignof(X) need not be the same as sizeof(X); it's only
+ * required to be a (possibly trivial) factor. They are the same for
+ * most architectures, but m68k for example has only 2-byte alignment
+ * for its 4-byte and 8-byte types, so using sizeof would waste space.
+ *
+ * Add more types to the union if the current set is insufficient.
+ */
+struct git_max_alignment {
+ char unalign;
+ union {
+ uintmax_t max_align_uintmax;
+ void *max_align_pointer;
+ } aligned;
+};
+#define GIT_MAX_ALIGNMENT offsetof(struct git_max_alignment, aligned)
+
+/*
* Allocate a new mp_block and insert it after the block specified in
* `insert_after`. If `insert_after` is NULL, then insert block at the
* head of the linked list.
@@ -69,9 +89,9 @@ void *mem_pool_alloc(struct mem_pool *pool, size_t len)
struct mp_block *p = NULL;
void *r;
- /* round up to a 'uintmax_t' alignment */
- if (len & (sizeof(uintmax_t) - 1))
- len += sizeof(uintmax_t) - (len & (sizeof(uintmax_t) - 1));
+ /* round up to a 'GIT_MAX_ALIGNMENT' alignment */
+ if (len & (GIT_MAX_ALIGNMENT - 1))
+ len += GIT_MAX_ALIGNMENT - (len & (GIT_MAX_ALIGNMENT - 1));
if (pool->mp_block &&
pool->mp_block->end - pool->mp_block->next_free >= len)
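A quick worked example of the rounding above, assuming GIT_MAX_ALIGNMENT evaluates to 8 on the platform at hand:

/*
 * len = 13:  13 & 7 == 5, so len += 8 - 5, giving 16 (the next multiple of 8)
 * len = 24:  24 & 7 == 0, so len is left unchanged
 */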
diff --git a/merge-blobs.c b/merge-blobs.c
index ee0a0e9..8138090 100644
--- a/merge-blobs.c
+++ b/merge-blobs.c
@@ -36,7 +36,7 @@ static void *three_way_filemerge(struct index_state *istate,
mmfile_t *their,
unsigned long *size)
{
- int merge_status;
+ enum ll_merge_result merge_status;
mmbuffer_t res;
/*
@@ -50,6 +50,9 @@ static void *three_way_filemerge(struct index_state *istate,
istate, NULL);
if (merge_status < 0)
return NULL;
+ if (merge_status == LL_MERGE_BINARY_CONFLICT)
+ warning("Cannot merge binary files: %s (%s vs. %s)",
+ path, ".our", ".their");
*size = res.size;
return res.ptr;
diff --git a/merge-ort.c b/merge-ort.c
index c319797..8545354 100644
--- a/merge-ort.c
+++ b/merge-ort.c
@@ -634,17 +634,57 @@ static void path_msg(struct merge_options *opt,
const char *fmt, ...)
{
va_list ap;
- struct strbuf *sb = strmap_get(&opt->priv->output, path);
+ struct strbuf *sb, *dest;
+ struct strbuf tmp = STRBUF_INIT;
+
+ if (opt->record_conflict_msgs_as_headers && omittable_hint)
+ return; /* Do not record mere hints in headers */
+ if (opt->priv->call_depth && opt->verbosity < 5)
+ return; /* Ignore messages from inner merges */
+
+ sb = strmap_get(&opt->priv->output, path);
if (!sb) {
sb = xmalloc(sizeof(*sb));
strbuf_init(sb, 0);
strmap_put(&opt->priv->output, path, sb);
}
+ dest = (opt->record_conflict_msgs_as_headers ? &tmp : sb);
+
va_start(ap, fmt);
- strbuf_vaddf(sb, fmt, ap);
+ if (opt->priv->call_depth) {
+ strbuf_addchars(dest, ' ', 2);
+ strbuf_addstr(dest, "From inner merge:");
+ strbuf_addchars(dest, ' ', opt->priv->call_depth * 2);
+ }
+ strbuf_vaddf(dest, fmt, ap);
va_end(ap);
+ if (opt->record_conflict_msgs_as_headers) {
+ int i_sb = 0, i_tmp = 0;
+
+ /* Start with the specified prefix */
+ if (opt->msg_header_prefix)
+ strbuf_addf(sb, "%s ", opt->msg_header_prefix);
+
+ /* Copy tmp to sb, adding spaces after newlines */
+ strbuf_grow(sb, sb->len + 2*tmp.len); /* more than sufficient */
+ for (; i_tmp < tmp.len; i_tmp++, i_sb++) {
+ /* Copy next character from tmp to sb */
+ sb->buf[sb->len + i_sb] = tmp.buf[i_tmp];
+
+ /* If we copied a newline, add a space */
+ if (tmp.buf[i_tmp] == '\n')
+ sb->buf[sb->len + ++i_sb] = ' ';
+ }
+ /* Update length and ensure it's NUL-terminated */
+ sb->len += i_sb;
+ sb->buf[sb->len] = '\0';
+
+ strbuf_release(&tmp);
+ }
+
+ /* Add final newline character to sb */
strbuf_addch(sb, '\n');
}
@@ -688,13 +728,15 @@ static void add_flattened_path(struct strbuf *out, const char *s)
out->buf[i] = '_';
}
-static char *unique_path(struct strmap *existing_paths,
+static char *unique_path(struct merge_options *opt,
const char *path,
const char *branch)
{
+ char *ret = NULL;
struct strbuf newpath = STRBUF_INIT;
int suffix = 0;
size_t base_len;
+ struct strmap *existing_paths = &opt->priv->paths;
strbuf_addf(&newpath, "%s~", path);
add_flattened_path(&newpath, branch);
@@ -705,7 +747,11 @@ static char *unique_path(struct strmap *existing_paths,
strbuf_addf(&newpath, "_%d", suffix++);
}
- return strbuf_detach(&newpath, NULL);
+ /* Track the new path in our memory pool */
+ ret = mem_pool_alloc(&opt->priv->pool, newpath.len + 1);
+ memcpy(ret, newpath.buf, newpath.len + 1);
+ strbuf_release(&newpath);
+ return ret;
}
/*** Function Grouping: functions related to collect_merge_info() ***/
@@ -1743,7 +1789,7 @@ static int merge_3way(struct merge_options *opt,
mmfile_t orig, src1, src2;
struct ll_merge_options ll_opts = {0};
char *base, *name1, *name2;
- int merge_status;
+ enum ll_merge_result merge_status;
if (!opt->priv->attr_index.initialized)
initialize_attr_index(opt);
@@ -1787,6 +1833,10 @@ static int merge_3way(struct merge_options *opt,
merge_status = ll_merge(result_buf, path, &orig, base,
&src1, name1, &src2, name2,
&opt->priv->attr_index, &ll_opts);
+ if (merge_status == LL_MERGE_BINARY_CONFLICT)
+ path_msg(opt, path, 0,
+ "warning: Cannot merge binary files: %s (%s vs. %s)",
+ path, name1, name2);
free(base);
free(name1);
@@ -1888,7 +1938,7 @@ static int handle_content_merge(struct merge_options *opt,
if (!ret &&
write_object_file(result_buf.ptr, result_buf.size,
- blob_type, &result->oid))
+ OBJ_BLOB, &result->oid))
ret = err(opt, _("Unable to add %s to database"),
path);
@@ -2416,7 +2466,7 @@ static void apply_directory_rename_modifications(struct merge_options *opt,
*/
ci->path_conflict = 1;
if (pair->status == 'A')
- path_msg(opt, new_path, 0,
+ path_msg(opt, new_path, 1,
_("CONFLICT (file location): %s added in %s "
"inside a directory that was renamed in %s, "
"suggesting it should perhaps be moved to "
@@ -2424,7 +2474,7 @@ static void apply_directory_rename_modifications(struct merge_options *opt,
old_path, branch_with_new_path,
branch_with_dir_rename, new_path);
else
- path_msg(opt, new_path, 0,
+ path_msg(opt, new_path, 1,
_("CONFLICT (file location): %s renamed to %s "
"in %s, inside a directory that was renamed "
"in %s, suggesting it should perhaps be "
@@ -3048,18 +3098,21 @@ static int detect_and_process_renames(struct merge_options *opt,
struct tree *side1,
struct tree *side2)
{
- struct diff_queue_struct combined;
+ struct diff_queue_struct combined = { 0 };
struct rename_info *renames = &opt->priv->renames;
- int need_dir_renames, s, clean = 1;
+ int need_dir_renames, s, i, clean = 1;
unsigned detection_run = 0;
- memset(&combined, 0, sizeof(combined));
if (!possible_renames(renames))
goto cleanup;
trace2_region_enter("merge", "regular renames", opt->repo);
detection_run |= detect_regular_renames(opt, MERGE_SIDE1);
detection_run |= detect_regular_renames(opt, MERGE_SIDE2);
+ if (renames->needed_limit) {
+ renames->cached_pairs_valid_side = 0;
+ renames->redo_after_renames = 0;
+ }
if (renames->redo_after_renames && detection_run) {
int i, side;
struct diff_filepair *p;
@@ -3133,13 +3186,9 @@ simple_cleanup:
free(renames->pairs[s].queue);
DIFF_QUEUE_CLEAR(&renames->pairs[s]);
}
- if (combined.nr) {
- int i;
- for (i = 0; i < combined.nr; i++)
- pool_diff_free_filepair(&opt->priv->pool,
- combined.queue[i]);
- free(combined.queue);
- }
+ for (i = 0; i < combined.nr; i++)
+ pool_diff_free_filepair(&opt->priv->pool, combined.queue[i]);
+ free(combined.queue);
return clean;
}
@@ -3343,7 +3392,7 @@ static void write_tree(struct object_id *result_oid,
}
/* Write this object file out, and record in result_oid */
- write_object_file(buf.buf, buf.len, tree_type, result_oid);
+ write_object_file(buf.buf, buf.len, OBJ_TREE, result_oid);
strbuf_release(&buf);
}
@@ -3637,7 +3686,7 @@ static void process_entry(struct merge_options *opt,
*/
df_file_index = (ci->dirmask & (1 << 1)) ? 2 : 1;
branch = (df_file_index == 1) ? opt->branch1 : opt->branch2;
- path = unique_path(&opt->priv->paths, path, branch);
+ path = unique_path(opt, path, branch);
strmap_put(&opt->priv->paths, path, new_ci);
path_msg(opt, path, 0,
@@ -3762,14 +3811,12 @@ static void process_entry(struct merge_options *opt,
/* Insert entries into opt->priv_paths */
assert(rename_a || rename_b);
if (rename_a) {
- a_path = unique_path(&opt->priv->paths,
- path, opt->branch1);
+ a_path = unique_path(opt, path, opt->branch1);
strmap_put(&opt->priv->paths, a_path, ci);
}
if (rename_b)
- b_path = unique_path(&opt->priv->paths,
- path, opt->branch2);
+ b_path = unique_path(opt, path, opt->branch2);
else
b_path = path;
strmap_put(&opt->priv->paths, b_path, new_ci);
@@ -4017,8 +4064,8 @@ static void process_entries(struct merge_options *opt,
trace2_region_enter("merge", "process_entries cleanup", opt->repo);
if (dir_metadata.offsets.nr != 1 ||
(uintptr_t)dir_metadata.offsets.items[0].util != 0) {
- printf("dir_metadata.offsets.nr = %d (should be 1)\n",
- dir_metadata.offsets.nr);
+ printf("dir_metadata.offsets.nr = %"PRIuMAX" (should be 1)\n",
+ (uintmax_t)dir_metadata.offsets.nr);
printf("dir_metadata.offsets.items[0].util = %u (should be 0)\n",
(unsigned)(uintptr_t)dir_metadata.offsets.items[0].util);
fflush(stdout);
@@ -4157,7 +4204,7 @@ static int record_conflicted_index_entries(struct merge_options *opt)
struct stat st;
if (!lstat(path, &st)) {
- char *new_name = unique_path(&opt->priv->paths,
+ char *new_name = unique_path(opt,
path,
"cruft");
@@ -4165,7 +4212,6 @@ static int record_conflicted_index_entries(struct merge_options *opt)
_("Note: %s not up to date and in way of checking out conflicted version; old copy renamed to %s"),
path, new_name);
errs |= rename(path, new_name);
- free(new_name);
}
errs |= checkout_entry(ce, &state, NULL, NULL);
}
@@ -4255,6 +4301,9 @@ void merge_switch_to_result(struct merge_options *opt,
struct string_list olist = STRING_LIST_INIT_NODUP;
int i;
+ if (opt->record_conflict_msgs_as_headers)
+ BUG("Either display conflict messages or record them as headers, not both");
+
trace2_region_enter("merge", "display messages", opt->repo);
/* Hack to pre-allocate olist to the desired size */
@@ -4356,6 +4405,9 @@ static void merge_start(struct merge_options *opt, struct merge_result *result)
assert(opt->recursive_variant >= MERGE_VARIANT_NORMAL &&
opt->recursive_variant <= MERGE_VARIANT_THEIRS);
+ if (opt->msg_header_prefix)
+ assert(opt->record_conflict_msgs_as_headers);
+
/*
* detect_renames, verbosity, buffer_output, and obuf are ignored
* fields that were used by "recursive" rather than "ort" -- but
@@ -4556,6 +4608,7 @@ redo:
trace2_region_leave("merge", "process_entries", opt->repo);
/* Set return values */
+ result->path_messages = &opt->priv->output;
result->tree = parse_tree_indirect(&working_tree_oid);
/* existence of conflicted entries implies unclean */
result->clean &= strmap_empty(&opt->priv->conflicted);
@@ -4575,7 +4628,7 @@ static void merge_ort_internal(struct merge_options *opt,
struct commit *h2,
struct merge_result *result)
{
- struct commit_list *iter;
+ struct commit *next;
struct commit *merged_merge_bases;
const char *ancestor_name;
struct strbuf merge_base_abbrev = STRBUF_INIT;
@@ -4604,7 +4657,8 @@ static void merge_ort_internal(struct merge_options *opt,
ancestor_name = merge_base_abbrev.buf;
}
- for (iter = merge_bases; iter; iter = iter->next) {
+ for (next = pop_commit(&merge_bases); next;
+ next = pop_commit(&merge_bases)) {
const char *saved_b1, *saved_b2;
struct commit *prev = merged_merge_bases;
@@ -4621,7 +4675,7 @@ static void merge_ort_internal(struct merge_options *opt,
saved_b2 = opt->branch2;
opt->branch1 = "Temporary merge branch 1";
opt->branch2 = "Temporary merge branch 2";
- merge_ort_internal(opt, NULL, prev, iter->item, result);
+ merge_ort_internal(opt, NULL, prev, next, result);
if (result->clean < 0)
return;
opt->branch1 = saved_b1;
@@ -4632,8 +4686,7 @@ static void merge_ort_internal(struct merge_options *opt,
result->tree,
"merged tree");
commit_list_insert(prev, &merged_merge_bases->parents);
- commit_list_insert(iter->item,
- &merged_merge_bases->parents->next);
+ commit_list_insert(next, &merged_merge_bases->parents->next);
clear_or_reinit_internal_opts(opt->priv, 1);
}
diff --git a/merge-ort.h b/merge-ort.h
index c011864..fe599b8 100644
--- a/merge-ort.h
+++ b/merge-ort.h
@@ -5,6 +5,7 @@
struct commit;
struct tree;
+struct strmap;
struct merge_result {
/*
@@ -24,6 +25,15 @@ struct merge_result {
struct tree *tree;
/*
+ * Special messages and conflict notices for various paths
+ *
+ * This is a map of pathnames to strbufs. It contains various
+ * warning/conflict/notice messages (possibly multiple per path)
+ * that callers may want to use.
+ */
+ struct strmap *path_messages;
+
+ /*
* Additional metadata used by merge_switch_to_result() or future calls
* to merge_incore_*(). Includes data needed to update the index (if
* !clean) and to print "CONFLICT" messages. Not for external use.
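A sketch of how a caller could consume the new path_messages map (the function and variable names are illustrative; merge_incore_recursive(), merge_finalize() and the strmap iteration are used the same way elsewhere in this patch):

#include "cache.h"
#include "merge-ort.h"
#include "strmap.h"

static void report_merge_messages(struct merge_options *opt,
				  struct commit_list *merge_bases,
				  struct commit *side1, struct commit *side2)
{
	struct merge_result result = {0};
	struct hashmap_iter iter;
	struct strmap_entry *e;

	merge_incore_recursive(opt, merge_bases, side1, side2, &result);

	/* One strbuf per path; it may hold several newline-separated messages */
	strmap_for_each_entry(result.path_messages, &iter, e)
		fprintf(stderr, "%s:\n%s", e->key,
			((struct strbuf *)e->value)->buf);

	merge_finalize(opt, &result);
}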
diff --git a/merge-recursive.c b/merge-recursive.c
index d945779..1ee6364 100644
--- a/merge-recursive.c
+++ b/merge-recursive.c
@@ -1044,7 +1044,7 @@ static int merge_3way(struct merge_options *opt,
mmfile_t orig, src1, src2;
struct ll_merge_options ll_opts = {0};
char *base, *name1, *name2;
- int merge_status;
+ enum ll_merge_result merge_status;
ll_opts.renormalize = opt->renormalize;
ll_opts.extra_marker_size = extra_marker_size;
@@ -1090,6 +1090,9 @@ static int merge_3way(struct merge_options *opt,
merge_status = ll_merge(result_buf, a->path, &orig, base,
&src1, name1, &src2, name2,
opt->repo->index, &ll_opts);
+ if (merge_status == LL_MERGE_BINARY_CONFLICT)
+ warning("Cannot merge binary files: %s (%s vs. %s)",
+ a->path, name1, name2);
free(base);
free(name1);
@@ -1373,7 +1376,7 @@ static int merge_mode_and_contents(struct merge_options *opt,
if (!ret &&
write_object_file(result_buf.ptr, result_buf.size,
- blob_type, &result->blob.oid))
+ OBJ_BLOB, &result->blob.oid))
ret = err(opt, _("Unable to add %s to database"),
a->path);
@@ -3711,6 +3714,10 @@ static int merge_start(struct merge_options *opt, struct tree *head)
assert(opt->priv == NULL);
+ /* Not supported; option specific to merge-ort */
+ assert(!opt->record_conflict_msgs_as_headers);
+ assert(!opt->msg_header_prefix);
+
/* Sanity check on repo state; index must match head */
if (repo_index_has_changes(opt->repo, head, &sb)) {
err(opt, _("Your local changes to the following files would be overwritten by merge:\n %s"),
diff --git a/merge-recursive.h b/merge-recursive.h
index 0795a1d..b88000e 100644
--- a/merge-recursive.h
+++ b/merge-recursive.h
@@ -46,6 +46,8 @@ struct merge_options {
/* miscellaneous control options */
const char *subtree_shift;
unsigned renormalize : 1;
+ unsigned record_conflict_msgs_as_headers : 1;
+ const char *msg_header_prefix;
/* internal fields used by the implementation */
struct merge_options_internal *priv;
diff --git a/midx.c b/midx.c
index 837b46b..107365d 100644
--- a/midx.c
+++ b/midx.c
@@ -33,6 +33,7 @@
#define MIDX_CHUNKID_OIDLOOKUP 0x4f49444c /* "OIDL" */
#define MIDX_CHUNKID_OBJECTOFFSETS 0x4f4f4646 /* "OOFF" */
#define MIDX_CHUNKID_LARGEOFFSETS 0x4c4f4646 /* "LOFF" */
+#define MIDX_CHUNKID_REVINDEX 0x52494458 /* "RIDX" */
#define MIDX_CHUNK_FANOUT_SIZE (sizeof(uint32_t) * 256)
#define MIDX_CHUNK_OFFSET_WIDTH (2 * sizeof(uint32_t))
#define MIDX_CHUNK_LARGE_OFFSET_WIDTH (sizeof(uint64_t))
@@ -161,6 +162,9 @@ struct multi_pack_index *load_multi_pack_index(const char *object_dir, int local
pair_chunk(cf, MIDX_CHUNKID_LARGEOFFSETS, &m->chunk_large_offsets);
+ if (git_env_bool("GIT_TEST_MIDX_READ_RIDX", 1))
+ pair_chunk(cf, MIDX_CHUNKID_REVINDEX, &m->chunk_revindex);
+
m->num_objects = ntohl(m->chunk_oid_fanout[255]);
CALLOC_ARRAY(m->pack_names, m->num_packs);
@@ -833,6 +837,18 @@ static int write_midx_large_offsets(struct hashfile *f,
return 0;
}
+static int write_midx_revindex(struct hashfile *f,
+ void *data)
+{
+ struct write_midx_context *ctx = data;
+ uint32_t i;
+
+ for (i = 0; i < ctx->entries_nr; i++)
+ hashwrite_be32(f, ctx->pack_order[i]);
+
+ return 0;
+}
+
struct midx_pack_order_data {
uint32_t nr;
uint32_t pack;
@@ -1061,6 +1077,9 @@ static int write_midx_bitmap(char *midx_name, unsigned char *midx_hash,
char *bitmap_name = xstrfmt("%s-%s.bitmap", midx_name, hash_to_hex(midx_hash));
int ret;
+ if (!ctx->entries_nr)
+ BUG("cannot write a bitmap without any objects");
+
if (flags & MIDX_WRITE_BITMAP_HASH_CACHE)
options |= BITMAP_OPT_HASH_CACHE;
@@ -1385,6 +1404,12 @@ static int write_midx_internal(const char *object_dir,
goto cleanup;
}
+ if (!ctx.entries_nr) {
+ if (flags & MIDX_WRITE_BITMAP)
+ warning(_("refusing to write multi-pack .bitmap without any objects"));
+ flags &= ~(MIDX_WRITE_REV_INDEX | MIDX_WRITE_BITMAP);
+ }
+
cf = init_chunkfile(f);
add_chunk(cf, MIDX_CHUNKID_PACKNAMES, pack_name_concat_len,
@@ -1403,16 +1428,22 @@ static int write_midx_internal(const char *object_dir,
(size_t)ctx.num_large_offsets * MIDX_CHUNK_LARGE_OFFSET_WIDTH,
write_midx_large_offsets);
+ if (flags & (MIDX_WRITE_REV_INDEX | MIDX_WRITE_BITMAP)) {
+ ctx.pack_order = midx_pack_order(&ctx);
+ add_chunk(cf, MIDX_CHUNKID_REVINDEX,
+ ctx.entries_nr * sizeof(uint32_t),
+ write_midx_revindex);
+ }
+
write_midx_header(f, get_num_chunks(cf), ctx.nr - dropped_packs);
write_chunkfile(cf, &ctx);
- finalize_hashfile(f, midx_hash, CSUM_FSYNC | CSUM_HASH_IN_STREAM);
+ finalize_hashfile(f, midx_hash, FSYNC_COMPONENT_PACK_METADATA,
+ CSUM_FSYNC | CSUM_HASH_IN_STREAM);
free_chunkfile(cf);
- if (flags & (MIDX_WRITE_REV_INDEX | MIDX_WRITE_BITMAP))
- ctx.pack_order = midx_pack_order(&ctx);
-
- if (flags & MIDX_WRITE_REV_INDEX)
+ if (flags & MIDX_WRITE_REV_INDEX &&
+ git_env_bool("GIT_TEST_MIDX_WRITE_REV", 0))
write_midx_reverse_index(midx_name.buf, midx_hash, &ctx);
if (flags & MIDX_WRITE_BITMAP) {
if (write_midx_bitmap(midx_name.buf, midx_hash, &ctx,
diff --git a/midx.h b/midx.h
index b7d79a5..22e8e53 100644
--- a/midx.h
+++ b/midx.h
@@ -36,6 +36,7 @@ struct multi_pack_index {
const unsigned char *chunk_oid_lookup;
const unsigned char *chunk_object_offsets;
const unsigned char *chunk_large_offsets;
+ const unsigned char *chunk_revindex;
const char **pack_names;
struct packed_git **packs;
diff --git a/notes-cache.c b/notes-cache.c
index 2473314..9dfd251 100644
--- a/notes-cache.c
+++ b/notes-cache.c
@@ -92,7 +92,7 @@ int notes_cache_put(struct notes_cache *c, struct object_id *key_oid,
{
struct object_id value_oid;
- if (write_object_file(data, size, "blob", &value_oid) < 0)
+ if (write_object_file(data, size, OBJ_BLOB, &value_oid) < 0)
return -1;
return add_note(&c->tree, key_oid, &value_oid, NULL);
}
diff --git a/notes-merge.c b/notes-merge.c
index b4a3a90..b4cc594 100644
--- a/notes-merge.c
+++ b/notes-merge.c
@@ -113,6 +113,7 @@ static struct notes_merge_pair *find_notes_merge_pair_pos(
}
static struct object_id uninitialized = {
+ .hash =
"\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff" \
"\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff"
};
@@ -175,7 +176,6 @@ static struct notes_merge_pair *diff_tree_remote(struct notes_merge_options *o,
oid_to_hex(&mp->remote));
}
diff_flush(&opt);
- clear_pathspec(&opt.pathspec);
*num_changes = len;
return changes;
@@ -261,7 +261,6 @@ static void diff_tree_local(struct notes_merge_options *o,
oid_to_hex(&mp->local));
}
diff_flush(&opt);
- clear_pathspec(&opt.pathspec);
}
static void check_notes_merge_worktree(struct notes_merge_options *o)
@@ -344,7 +343,7 @@ static int ll_merge_in_worktree(struct notes_merge_options *o,
{
mmbuffer_t result_buf;
mmfile_t base, local, remote;
- int status;
+ enum ll_merge_result status;
read_mmblob(&base, &p->base);
read_mmblob(&local, &p->local);
@@ -358,6 +357,9 @@ static int ll_merge_in_worktree(struct notes_merge_options *o,
free(local.ptr);
free(remote.ptr);
+ if (status == LL_MERGE_BINARY_CONFLICT)
+ warning("Cannot merge binary files: %s (%s vs. %s)",
+ oid_to_hex(&p->obj), o->local_ref, o->remote_ref);
if ((status < 0) || !result_buf.ptr)
die("Failed to execute internal merge");
diff --git a/notes.c b/notes.c
index f87dac4..7452e71 100644
--- a/notes.c
+++ b/notes.c
@@ -675,7 +675,7 @@ static int tree_write_stack_finish_subtree(struct tree_write_stack *tws)
ret = tree_write_stack_finish_subtree(n);
if (ret)
return ret;
- ret = write_object_file(n->buf.buf, n->buf.len, tree_type, &s);
+ ret = write_object_file(n->buf.buf, n->buf.len, OBJ_TREE, &s);
if (ret)
return ret;
strbuf_release(&n->buf);
@@ -836,7 +836,7 @@ int combine_notes_concatenate(struct object_id *cur_oid,
free(new_msg);
/* create a new blob object from buf */
- ret = write_object_file(buf, buf_len, blob_type, cur_oid);
+ ret = write_object_file(buf, buf_len, OBJ_BLOB, cur_oid);
free(buf);
return ret;
}
@@ -916,7 +916,7 @@ int combine_notes_cat_sort_uniq(struct object_id *cur_oid,
string_list_join_lines_helper, &buf))
goto out;
- ret = write_object_file(buf.buf, buf.len, blob_type, cur_oid);
+ ret = write_object_file(buf.buf, buf.len, OBJ_BLOB, cur_oid);
out:
strbuf_release(&buf);
@@ -1192,7 +1192,7 @@ int write_notes_tree(struct notes_tree *t, struct object_id *result)
ret = for_each_note(t, flags, write_each_note, &cb_data) ||
write_each_non_note_until(NULL, &cb_data) ||
tree_write_stack_finish_subtree(&root) ||
- write_object_file(root.buf.buf, root.buf.len, tree_type, result);
+ write_object_file(root.buf.buf, root.buf.len, OBJ_TREE, result);
strbuf_release(&root.buf);
return ret;
}
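
The notes changes above are part of a tree-wide conversion from passing object types as strings ("blob", "tree") to passing enum object_type. A hedged caller-side sketch of the converted API, assuming Git's internal headers as declared in the object-store.h hunk later in this patch; the helper name is made up for illustration:

        #include "cache.h"
        #include "object-store.h"

        /* hash (and optionally write) a buffer as a blob via the enum-based API */
        static int store_or_hash_blob(const void *buf, unsigned long len,
                                      int do_write, struct object_id *oid)
        {
                if (do_write)
                        /* writes a loose object; the type is OBJ_BLOB, not "blob" */
                        return write_object_file(buf, len, OBJ_BLOB, oid);
                /* compute the object name only; hash_object_file() now returns void */
                hash_object_file(the_hash_algo, buf, len, OBJ_BLOB, oid);
                return 0;
        }
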
diff --git a/object-file.c b/object-file.c
index 8be57f4..5ffbf3d 100644
--- a/object-file.c
+++ b/object-file.c
@@ -167,49 +167,49 @@ static void git_hash_unknown_final_oid(struct object_id *oid, git_hash_ctx *ctx)
const struct git_hash_algo hash_algos[GIT_HASH_NALGOS] = {
{
- NULL,
- 0x00000000,
- 0,
- 0,
- 0,
- git_hash_unknown_init,
- git_hash_unknown_clone,
- git_hash_unknown_update,
- git_hash_unknown_final,
- git_hash_unknown_final_oid,
- NULL,
- NULL,
- NULL,
+ .name = NULL,
+ .format_id = 0x00000000,
+ .rawsz = 0,
+ .hexsz = 0,
+ .blksz = 0,
+ .init_fn = git_hash_unknown_init,
+ .clone_fn = git_hash_unknown_clone,
+ .update_fn = git_hash_unknown_update,
+ .final_fn = git_hash_unknown_final,
+ .final_oid_fn = git_hash_unknown_final_oid,
+ .empty_tree = NULL,
+ .empty_blob = NULL,
+ .null_oid = NULL,
},
{
- "sha1",
- GIT_SHA1_FORMAT_ID,
- GIT_SHA1_RAWSZ,
- GIT_SHA1_HEXSZ,
- GIT_SHA1_BLKSZ,
- git_hash_sha1_init,
- git_hash_sha1_clone,
- git_hash_sha1_update,
- git_hash_sha1_final,
- git_hash_sha1_final_oid,
- &empty_tree_oid,
- &empty_blob_oid,
- &null_oid_sha1,
+ .name = "sha1",
+ .format_id = GIT_SHA1_FORMAT_ID,
+ .rawsz = GIT_SHA1_RAWSZ,
+ .hexsz = GIT_SHA1_HEXSZ,
+ .blksz = GIT_SHA1_BLKSZ,
+ .init_fn = git_hash_sha1_init,
+ .clone_fn = git_hash_sha1_clone,
+ .update_fn = git_hash_sha1_update,
+ .final_fn = git_hash_sha1_final,
+ .final_oid_fn = git_hash_sha1_final_oid,
+ .empty_tree = &empty_tree_oid,
+ .empty_blob = &empty_blob_oid,
+ .null_oid = &null_oid_sha1,
},
{
- "sha256",
- GIT_SHA256_FORMAT_ID,
- GIT_SHA256_RAWSZ,
- GIT_SHA256_HEXSZ,
- GIT_SHA256_BLKSZ,
- git_hash_sha256_init,
- git_hash_sha256_clone,
- git_hash_sha256_update,
- git_hash_sha256_final,
- git_hash_sha256_final_oid,
- &empty_tree_oid_sha256,
- &empty_blob_oid_sha256,
- &null_oid_sha256,
+ .name = "sha256",
+ .format_id = GIT_SHA256_FORMAT_ID,
+ .rawsz = GIT_SHA256_RAWSZ,
+ .hexsz = GIT_SHA256_HEXSZ,
+ .blksz = GIT_SHA256_BLKSZ,
+ .init_fn = git_hash_sha256_init,
+ .clone_fn = git_hash_sha256_clone,
+ .update_fn = git_hash_sha256_update,
+ .final_fn = git_hash_sha256_final,
+ .final_oid_fn = git_hash_sha256_final_oid,
+ .empty_tree = &empty_tree_oid_sha256,
+ .empty_blob = &empty_blob_oid_sha256,
+ .null_oid = &null_oid_sha256,
}
};
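
The hash_algos table is rewritten with C99 designated initializers, so each value is tied to a named field instead of its position in the struct. A generic, Git-independent illustration of the style:

        struct algo_desc {
                const char *name;
                unsigned int rawsz;
                void (*init_fn)(void);
        };

        /* fields can be named in any order; anything omitted is zero-initialized */
        static const struct algo_desc example = {
                .rawsz = 20,
                .name = "sha1",
                /* .init_fn is implicitly NULL */
        };
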
@@ -274,10 +274,11 @@ static struct cached_object {
static int cached_object_nr, cached_object_alloc;
static struct cached_object empty_tree = {
- { EMPTY_TREE_SHA1_BIN_LITERAL },
- OBJ_TREE,
- "",
- 0
+ .oid = {
+ .hash = EMPTY_TREE_SHA1_BIN_LITERAL,
+ },
+ .type = OBJ_TREE,
+ .buf = "",
};
static struct cached_object *find_cached_object(const struct object_id *oid)
@@ -1049,35 +1050,50 @@ void *xmmap(void *start, size_t length,
return ret;
}
-/*
- * With an in-core object data in "map", rehash it to make sure the
- * object name actually matches "oid" to detect object corruption.
- * With "map" == NULL, try reading the object named with "oid" using
- * the streaming interface and rehash it to do the same.
- */
+static int format_object_header_literally(char *str, size_t size,
+ const char *type, size_t objsize)
+{
+ return xsnprintf(str, size, "%s %"PRIuMAX, type, (uintmax_t)objsize) + 1;
+}
+
+int format_object_header(char *str, size_t size, enum object_type type,
+ size_t objsize)
+{
+ const char *name = type_name(type);
+
+ if (!name)
+ BUG("could not get a type name for 'enum object_type' value %d", type);
+
+ return format_object_header_literally(str, size, name, objsize);
+}
+
int check_object_signature(struct repository *r, const struct object_id *oid,
- void *map, unsigned long size, const char *type,
- struct object_id *real_oidp)
+ void *buf, unsigned long size,
+ enum object_type type)
{
- struct object_id tmp;
- struct object_id *real_oid = real_oidp ? real_oidp : &tmp;
+ struct object_id real_oid;
+
+ hash_object_file(r->hash_algo, buf, size, type, &real_oid);
+
+ return !oideq(oid, &real_oid) ? -1 : 0;
+}
+
+int stream_object_signature(struct repository *r, const struct object_id *oid)
+{
+ struct object_id real_oid;
+ unsigned long size;
enum object_type obj_type;
struct git_istream *st;
git_hash_ctx c;
char hdr[MAX_HEADER_LEN];
int hdrlen;
- if (map) {
- hash_object_file(r->hash_algo, map, size, type, real_oid);
- return !oideq(oid, real_oid) ? -1 : 0;
- }
-
st = open_istream(r, oid, &obj_type, &size, NULL);
if (!st)
return -1;
/* Generate the header */
- hdrlen = xsnprintf(hdr, sizeof(hdr), "%s %"PRIuMAX , type_name(obj_type), (uintmax_t)size) + 1;
+ hdrlen = format_object_header(hdr, sizeof(hdr), obj_type, size);
/* Sha1.. */
r->hash_algo->init_fn(&c);
@@ -1094,9 +1110,9 @@ int check_object_signature(struct repository *r, const struct object_id *oid,
break;
r->hash_algo->update_fn(&c, buf, readlen);
}
- r->hash_algo->final_oid_fn(real_oid, &c);
+ r->hash_algo->final_oid_fn(&real_oid, &c);
close_istream(st);
- return !oideq(oid, real_oid) ? -1 : 0;
+ return !oideq(oid, &real_oid) ? -1 : 0;
}
int git_open_cloexec(const char *name, int flags)
@@ -1662,7 +1678,7 @@ int pretend_object_file(void *buf, unsigned long len, enum object_type type,
{
struct cached_object *co;
- hash_object_file(the_hash_algo, buf, len, type_name(type), oid);
+ hash_object_file(the_hash_algo, buf, len, type, oid);
if (has_object_file_with_flags(oid, OBJECT_INFO_QUICK | OBJECT_INFO_SKIP_FETCH_OBJECT) ||
find_cached_object(oid))
return 0;
@@ -1722,16 +1738,15 @@ void *read_object_file_extended(struct repository *r,
void *read_object_with_reference(struct repository *r,
const struct object_id *oid,
- const char *required_type_name,
+ enum object_type required_type,
unsigned long *size,
struct object_id *actual_oid_return)
{
- enum object_type type, required_type;
+ enum object_type type;
void *buffer;
unsigned long isize;
struct object_id actual_oid;
- required_type = type_from_string(required_type_name);
oidcpy(&actual_oid, oid);
while (1) {
int ref_length = -1;
@@ -1769,21 +1784,40 @@ void *read_object_with_reference(struct repository *r,
}
}
+static void hash_object_body(const struct git_hash_algo *algo, git_hash_ctx *c,
+ const void *buf, unsigned long len,
+ struct object_id *oid,
+ char *hdr, int *hdrlen)
+{
+ algo->init_fn(c);
+ algo->update_fn(c, hdr, *hdrlen);
+ algo->update_fn(c, buf, len);
+ algo->final_oid_fn(oid, c);
+}
+
static void write_object_file_prepare(const struct git_hash_algo *algo,
const void *buf, unsigned long len,
- const char *type, struct object_id *oid,
+ enum object_type type, struct object_id *oid,
char *hdr, int *hdrlen)
{
git_hash_ctx c;
/* Generate the header */
- *hdrlen = xsnprintf(hdr, *hdrlen, "%s %"PRIuMAX , type, (uintmax_t)len)+1;
+ *hdrlen = format_object_header(hdr, *hdrlen, type, len);
/* Sha1.. */
- algo->init_fn(&c);
- algo->update_fn(&c, hdr, *hdrlen);
- algo->update_fn(&c, buf, len);
- algo->final_oid_fn(oid, &c);
+ hash_object_body(algo, &c, buf, len, oid, hdr, hdrlen);
+}
+
+static void write_object_file_prepare_literally(const struct git_hash_algo *algo,
+ const void *buf, unsigned long len,
+ const char *type, struct object_id *oid,
+ char *hdr, int *hdrlen)
+{
+ git_hash_ctx c;
+
+ *hdrlen = format_object_header_literally(hdr, *hdrlen, type, len);
+ hash_object_body(algo, &c, buf, len, oid, hdr, hdrlen);
}
/*
@@ -1836,24 +1870,36 @@ static int write_buffer(int fd, const void *buf, size_t len)
return 0;
}
-int hash_object_file(const struct git_hash_algo *algo, const void *buf,
- unsigned long len, const char *type,
- struct object_id *oid)
+static void hash_object_file_literally(const struct git_hash_algo *algo,
+ const void *buf, unsigned long len,
+ const char *type, struct object_id *oid)
{
char hdr[MAX_HEADER_LEN];
int hdrlen = sizeof(hdr);
- write_object_file_prepare(algo, buf, len, type, oid, hdr, &hdrlen);
- return 0;
+
+ write_object_file_prepare_literally(algo, buf, len, type, oid, hdr, &hdrlen);
+}
+
+void hash_object_file(const struct git_hash_algo *algo, const void *buf,
+ unsigned long len, enum object_type type,
+ struct object_id *oid)
+{
+ hash_object_file_literally(algo, buf, len, type_name(type), oid);
}
/* Finalize a file on disk, and close it. */
-static void close_loose_object(int fd)
+static void close_loose_object(int fd, const char *filename)
{
- if (!the_repository->objects->odb->will_destroy) {
- if (fsync_object_files)
- fsync_or_die(fd, "loose object file");
- }
+ if (the_repository->objects->odb->will_destroy)
+ goto out;
+
+ if (fsync_object_files > 0)
+ fsync_or_die(fd, filename);
+ else
+ fsync_component_or_die(FSYNC_COMPONENT_LOOSE_OBJECT, fd,
+ filename);
+out:
if (close(fd) != 0)
die_errno(_("error when closing loose object file"));
}
@@ -1965,7 +2011,7 @@ static int write_loose_object(const struct object_id *oid, char *hdr,
die(_("confused by unstable object source data for %s"),
oid_to_hex(oid));
- close_loose_object(fd);
+ close_loose_object(fd, tmp_file.buf);
if (mtime) {
struct utimbuf utb;
@@ -1998,7 +2044,7 @@ static int freshen_packed_object(const struct object_id *oid)
}
int write_object_file_flags(const void *buf, unsigned long len,
- const char *type, struct object_id *oid,
+ enum object_type type, struct object_id *oid,
unsigned flags)
{
char hdr[MAX_HEADER_LEN];
@@ -2014,9 +2060,9 @@ int write_object_file_flags(const void *buf, unsigned long len,
return write_loose_object(oid, hdr, hdrlen, buf, len, 0, flags);
}
-int hash_object_file_literally(const void *buf, unsigned long len,
- const char *type, struct object_id *oid,
- unsigned flags)
+int write_object_file_literally(const void *buf, unsigned long len,
+ const char *type, struct object_id *oid,
+ unsigned flags)
{
char *header;
int hdrlen, status = 0;
@@ -2024,8 +2070,8 @@ int hash_object_file_literally(const void *buf, unsigned long len,
/* type string, SP, %lu of the length plus NUL must fit this */
hdrlen = strlen(type) + MAX_HEADER_LEN;
header = xmalloc(hdrlen);
- write_object_file_prepare(the_hash_algo, buf, len, type, oid, header,
- &hdrlen);
+ write_object_file_prepare_literally(the_hash_algo, buf, len, type,
+ oid, header, &hdrlen);
if (!(flags & HASH_WRITE_OBJECT))
goto cleanup;
@@ -2052,7 +2098,7 @@ int force_object_loose(const struct object_id *oid, time_t mtime)
buf = read_object(the_repository, oid, &type, &len);
if (!buf)
return error(_("cannot read object for %s"), oid_to_hex(oid));
- hdrlen = xsnprintf(hdr, sizeof(hdr), "%s %"PRIuMAX , type_name(type), (uintmax_t)len) + 1;
+ hdrlen = format_object_header(hdr, sizeof(hdr), type, len);
ret = write_loose_object(oid, hdr, hdrlen, buf, len, mtime, 0);
free(buf);
@@ -2118,7 +2164,8 @@ static int index_mem(struct index_state *istate,
enum object_type type,
const char *path, unsigned flags)
{
- int ret, re_allocated = 0;
+ int ret = 0;
+ int re_allocated = 0;
int write_object = flags & HASH_WRITE_OBJECT;
if (!type)
@@ -2145,10 +2192,9 @@ static int index_mem(struct index_state *istate,
}
if (write_object)
- ret = write_object_file(buf, size, type_name(type), oid);
+ ret = write_object_file(buf, size, type, oid);
else
- ret = hash_object_file(the_hash_algo, buf, size,
- type_name(type), oid);
+ hash_object_file(the_hash_algo, buf, size, type, oid);
if (re_allocated)
free(buf);
return ret;
@@ -2160,7 +2206,7 @@ static int index_stream_convert_blob(struct index_state *istate,
const char *path,
unsigned flags)
{
- int ret;
+ int ret = 0;
const int write_object = flags & HASH_WRITE_OBJECT;
struct strbuf sbuf = STRBUF_INIT;
@@ -2171,11 +2217,11 @@ static int index_stream_convert_blob(struct index_state *istate,
get_conv_flags(flags));
if (write_object)
- ret = write_object_file(sbuf.buf, sbuf.len, type_name(OBJ_BLOB),
+ ret = write_object_file(sbuf.buf, sbuf.len, OBJ_BLOB,
oid);
else
- ret = hash_object_file(the_hash_algo, sbuf.buf, sbuf.len,
- type_name(OBJ_BLOB), oid);
+ hash_object_file(the_hash_algo, sbuf.buf, sbuf.len, OBJ_BLOB,
+ oid);
strbuf_release(&sbuf);
return ret;
}
@@ -2294,8 +2340,8 @@ int index_path(struct index_state *istate, struct object_id *oid,
return error_errno("readlink(\"%s\")", path);
if (!(flags & HASH_WRITE_OBJECT))
hash_object_file(the_hash_algo, sb.buf, sb.len,
- blob_type, oid);
- else if (write_object_file(sb.buf, sb.len, blob_type, oid))
+ OBJ_BLOB, oid);
+ else if (write_object_file(sb.buf, sb.len, OBJ_BLOB, oid))
rc = error(_("%s: failed to insert into database"), path);
strbuf_release(&sb);
break;
@@ -2599,9 +2645,10 @@ int read_loose_object(const char *path,
git_inflate_end(&stream);
goto out;
}
- if (check_object_signature(the_repository, expected_oid,
+ hash_object_file_literally(the_repository->hash_algo,
*contents, *size,
- oi->type_name->buf, real_oid))
+ oi->type_name->buf, real_oid);
+ if (!oideq(expected_oid, real_oid))
goto out;
}
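
format_object_header() factors out the "<type> <size>" loose-object header that several call sites above used to build with xsnprintf() directly. A minimal sketch of a call, assuming MAX_HEADER_LEN from the surrounding code; the returned length includes the trailing NUL:

        char hdr[MAX_HEADER_LEN];
        int hdrlen;

        /* for a 12-byte blob this writes "blob 12" plus NUL and returns 8 */
        hdrlen = format_object_header(hdr, sizeof(hdr), OBJ_BLOB, 12);
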
diff --git a/object-name.c b/object-name.c
index fdff460..f0e327f 100644
--- a/object-name.c
+++ b/object-name.c
@@ -15,6 +15,7 @@
#include "submodule.h"
#include "midx.h"
#include "commit-reach.h"
+#include "date.h"
static int get_oid_oneline(struct repository *r, const char *, struct object_id *, struct commit_list *);
@@ -351,35 +352,118 @@ static int init_object_disambiguation(struct repository *r,
return 0;
}
+struct ambiguous_output {
+ const struct disambiguate_state *ds;
+ struct strbuf advice;
+ struct strbuf sb;
+};
+
static int show_ambiguous_object(const struct object_id *oid, void *data)
{
- const struct disambiguate_state *ds = data;
- struct strbuf desc = STRBUF_INIT;
+ struct ambiguous_output *state = data;
+ const struct disambiguate_state *ds = state->ds;
+ struct strbuf *advice = &state->advice;
+ struct strbuf *sb = &state->sb;
int type;
+ const char *hash;
if (ds->fn && !ds->fn(ds->repo, oid, ds->cb_data))
return 0;
+ hash = repo_find_unique_abbrev(ds->repo, oid, DEFAULT_ABBREV);
type = oid_object_info(ds->repo, oid, NULL);
+
+ if (type < 0) {
+ /*
+ * TRANSLATORS: This is a line of ambiguous object
+ * output shown when we cannot look up or parse the
+ * object in question. E.g. "deadbeef [bad object]".
+ */
+ strbuf_addf(sb, _("%s [bad object]"), hash);
+ goto out;
+ }
+
+ assert(type == OBJ_TREE || type == OBJ_COMMIT ||
+ type == OBJ_BLOB || type == OBJ_TAG);
+
if (type == OBJ_COMMIT) {
+ struct strbuf date = STRBUF_INIT;
+ struct strbuf msg = STRBUF_INIT;
struct commit *commit = lookup_commit(ds->repo, oid);
+
if (commit) {
struct pretty_print_context pp = {0};
pp.date_mode.type = DATE_SHORT;
- format_commit_message(commit, " %ad - %s", &desc, &pp);
+ format_commit_message(commit, "%ad", &date, &pp);
+ format_commit_message(commit, "%s", &msg, &pp);
}
+
+ /*
+ * TRANSLATORS: This is a line of ambiguous commit
+ * object output. E.g.:
+ *
+ * "deadbeef commit 2021-01-01 - Some Commit Message"
+ */
+ strbuf_addf(sb, _("%s commit %s - %s"), hash, date.buf,
+ msg.buf);
+
+ strbuf_release(&date);
+ strbuf_release(&msg);
} else if (type == OBJ_TAG) {
struct tag *tag = lookup_tag(ds->repo, oid);
- if (!parse_tag(tag) && tag->tag)
- strbuf_addf(&desc, " %s", tag->tag);
+
+ if (!parse_tag(tag) && tag->tag) {
+ /*
+ * TRANSLATORS: This is a line of ambiguous
+ * tag object output. E.g.:
+ *
+ * "deadbeef tag 2022-01-01 - Some Tag Message"
+ *
+ * The second argument is the YYYY-MM-DD found
+ * in the tag.
+ *
+ * The third argument is the "tag" string
+ * from object.c.
+ */
+ strbuf_addf(sb, _("%s tag %s - %s"), hash,
+ show_date(tag->date, 0, DATE_MODE(SHORT)),
+ tag->tag);
+ } else {
+ /*
+ * TRANSLATORS: This is a line of ambiguous
+ * tag object output where we couldn't parse
+ * the tag itself. E.g.:
+ *
+ * "deadbeef [bad tag, could not parse it]"
+ */
+ strbuf_addf(sb, _("%s [bad tag, could not parse it]"),
+ hash);
+ }
+ } else if (type == OBJ_TREE) {
+ /*
+ * TRANSLATORS: This is a line of ambiguous <type>
+ * object output. E.g. "deadbeef tree".
+ */
+ strbuf_addf(sb, _("%s tree"), hash);
+ } else if (type == OBJ_BLOB) {
+ /*
+ * TRANSLATORS: This is a line of ambiguous <type>
+ * object output. E.g. "deadbeef blob".
+ */
+ strbuf_addf(sb, _("%s blob"), hash);
}
- advise(" %s %s%s",
- repo_find_unique_abbrev(ds->repo, oid, DEFAULT_ABBREV),
- type_name(type) ? type_name(type) : "unknown type",
- desc.buf);
- strbuf_release(&desc);
+out:
+ /*
+ * TRANSLATORS: This is a line item of ambiguous object output
+ * from show_ambiguous_object() above. For RTL languages
+ * you'll probably want to swap the "%s" and leading " " space
+ * around.
+ */
+ strbuf_addf(advice, _(" %s\n"), sb->buf);
+
+ strbuf_reset(sb);
return 0;
}
@@ -476,6 +560,11 @@ static enum get_oid_result get_short_oid(struct repository *r,
if (!quietly && (status == SHORT_NAME_AMBIGUOUS)) {
struct oid_array collect = OID_ARRAY_INIT;
+ struct ambiguous_output out = {
+ .ds = &ds,
+ .sb = STRBUF_INIT,
+ .advice = STRBUF_INIT,
+ };
error(_("short object ID %s is ambiguous"), ds.hex_pfx);
@@ -488,13 +577,22 @@ static enum get_oid_result get_short_oid(struct repository *r,
if (!ds.ambiguous)
ds.fn = NULL;
- advise(_("The candidates are:"));
repo_for_each_abbrev(r, ds.hex_pfx, collect_ambiguous, &collect);
sort_ambiguous_oid_array(r, &collect);
- if (oid_array_for_each(&collect, show_ambiguous_object, &ds))
+ if (oid_array_for_each(&collect, show_ambiguous_object, &out))
BUG("show_ambiguous_object shouldn't return non-zero");
+
+ /*
+ * TRANSLATORS: The argument is the list of ambiguous
+ * objects composed in show_ambiguous_object(). See
+ * its "TRANSLATORS" comments for details.
+ */
+ advise(_("The candidates are:\n%s"), out.advice.buf);
+
oid_array_clear(&collect);
+ strbuf_release(&out.advice);
+ strbuf_release(&out.sb);
}
return status;
@@ -1795,13 +1893,13 @@ static enum get_oid_result get_oid_with_context_1(struct repository *repo,
const char *cp;
int only_to_die = flags & GET_OID_ONLY_TO_DIE;
- if (only_to_die)
- flags |= GET_OID_QUIETLY;
-
memset(oc, 0, sizeof(*oc));
oc->mode = S_IFINVALID;
strbuf_init(&oc->symlink_path, 0);
ret = get_oid_1(repo, name, namelen, oid, flags);
+ if (!ret && flags & GET_OID_REQUIRE_PATH)
+ die(_("<object>:<path> required, only <object> '%s' given"),
+ name);
if (!ret)
return ret;
/*
@@ -1932,7 +2030,7 @@ void maybe_die_on_misspelt_object_name(struct repository *r,
{
struct object_context oc;
struct object_id oid;
- get_oid_with_context_1(r, name, GET_OID_ONLY_TO_DIE,
+ get_oid_with_context_1(r, name, GET_OID_ONLY_TO_DIE | GET_OID_QUIETLY,
prefix, &oid, &oc);
}
diff --git a/object-store.h b/object-store.h
index 6f89482..bd2322e 100644
--- a/object-store.h
+++ b/object-store.h
@@ -245,22 +245,22 @@ static inline void *repo_read_object_file(struct repository *r,
/* Read and unpack an object file into memory, write memory to an object file */
int oid_object_info(struct repository *r, const struct object_id *, unsigned long *);
-int hash_object_file(const struct git_hash_algo *algo, const void *buf,
- unsigned long len, const char *type,
- struct object_id *oid);
+void hash_object_file(const struct git_hash_algo *algo, const void *buf,
+ unsigned long len, enum object_type type,
+ struct object_id *oid);
int write_object_file_flags(const void *buf, unsigned long len,
- const char *type, struct object_id *oid,
+ enum object_type type, struct object_id *oid,
unsigned flags);
static inline int write_object_file(const void *buf, unsigned long len,
- const char *type, struct object_id *oid)
+ enum object_type type, struct object_id *oid)
{
return write_object_file_flags(buf, len, type, oid, 0);
}
-int hash_object_file_literally(const void *buf, unsigned long len,
- const char *type, struct object_id *oid,
- unsigned flags);
+int write_object_file_literally(const void *buf, unsigned long len,
+ const char *type, struct object_id *oid,
+ unsigned flags);
/*
* Add an object file to the in-memory object store, without writing it
@@ -331,6 +331,14 @@ int repo_has_object_file_with_flags(struct repository *r,
*/
int has_loose_object_nonlocal(const struct object_id *);
+/**
+ * format_object_header() is a thin wrapper around xsnprintf() that
+ * writes the initial "<type> <obj-len>" part of the loose object
+ * header. It returns the size that xsnprintf() returns + 1.
+ */
+int format_object_header(char *str, size_t size, enum object_type type,
+ size_t objsize);
+
void assert_oid_type(const struct object_id *oid, enum object_type expect);
/*
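
With this split, check_object_signature() only handles buffers already in core, and the streaming re-hash of large objects moves to the new stream_object_signature(). A hedged sketch of the caller pattern, mirroring the object.c hunk that follows and the pack-check.c change later in this patch:

        static int verify_object(struct repository *r, const struct object_id *oid,
                                 void *data, unsigned long size, enum object_type type)
        {
                if (data)
                        /* in-core buffer: rehash it and compare against oid */
                        return check_object_signature(r, oid, data, size, type);
                /* no buffer (e.g. a huge blob): stream and rehash instead */
                return stream_object_signature(r, oid);
        }
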
diff --git a/object.c b/object.c
index c37501f..588b815 100644
--- a/object.c
+++ b/object.c
@@ -279,7 +279,7 @@ struct object *parse_object(struct repository *r, const struct object_id *oid)
if ((obj && obj->type == OBJ_BLOB && repo_has_object_file(r, oid)) ||
(!obj && repo_has_object_file(r, oid) &&
oid_object_info(r, oid, NULL) == OBJ_BLOB)) {
- if (check_object_signature(r, repl, NULL, 0, NULL, NULL) < 0) {
+ if (stream_object_signature(r, repl) < 0) {
error(_("hash mismatch %s"), oid_to_hex(oid));
return NULL;
}
@@ -289,8 +289,7 @@ struct object *parse_object(struct repository *r, const struct object_id *oid)
buffer = repo_read_object_file(r, oid, &type, &size);
if (buffer) {
- if (check_object_signature(r, repl, buffer, size,
- type_name(type), NULL) < 0) {
+ if (check_object_signature(r, repl, buffer, size, type) < 0) {
free(buffer);
error(_("hash mismatch %s"), oid_to_hex(repl));
return NULL;
diff --git a/object.h b/object.h
index cb556ab..a221946 100644
--- a/object.h
+++ b/object.h
@@ -75,7 +75,7 @@ struct object_array {
* builtin/fsck.c: 0--3
* builtin/gc.c: 0
* builtin/index-pack.c: 2021
- * builtin/reflog.c: 10--12
+ * reflog.c: 10--12
* builtin/show-branch.c: 0-------------------------------------------26
* builtin/unpack-objects.c: 2021
*/
diff --git a/pack-bitmap-write.c b/pack-bitmap-write.c
index 9c55c15..cf68154 100644
--- a/pack-bitmap-write.c
+++ b/pack-bitmap-write.c
@@ -575,15 +575,15 @@ void bitmap_writer_select_commits(struct commit **indexed_commits,
QSORT(indexed_commits, indexed_commits_nr, date_compare);
- if (writer.show_progress)
- writer.progress = start_progress("Selecting bitmap commits", 0);
-
if (indexed_commits_nr < 100) {
for (i = 0; i < indexed_commits_nr; ++i)
push_bitmapped_commit(indexed_commits[i]);
return;
}
+ if (writer.show_progress)
+ writer.progress = start_progress("Selecting bitmap commits", 0);
+
for (;;) {
struct commit *chosen = NULL;
@@ -719,7 +719,8 @@ void bitmap_writer_finish(struct pack_idx_entry **index,
if (options & BITMAP_OPT_HASH_CACHE)
write_hash_cache(f, index, index_nr);
- finalize_hashfile(f, NULL, CSUM_HASH_IN_STREAM | CSUM_FSYNC | CSUM_CLOSE);
+ finalize_hashfile(f, NULL, FSYNC_COMPONENT_PACK_METADATA,
+ CSUM_HASH_IN_STREAM | CSUM_FSYNC | CSUM_CLOSE);
if (adjust_shared_perm(tmp_file.buf))
die_errno("unable to make temporary bitmap file readable");
diff --git a/pack-bitmap.c b/pack-bitmap.c
index f772d3c..97909d4 100644
--- a/pack-bitmap.c
+++ b/pack-bitmap.c
@@ -358,7 +358,9 @@ static int open_midx_bitmap_1(struct bitmap_index *bitmap_git,
cleanup:
munmap(bitmap_git->map, bitmap_git->map_size);
bitmap_git->map_size = 0;
+ bitmap_git->map_pos = 0;
bitmap_git->map = NULL;
+ bitmap_git->midx = NULL;
return -1;
}
@@ -405,6 +407,8 @@ static int open_pack_bitmap_1(struct bitmap_index *bitmap_git, struct packed_git
munmap(bitmap_git->map, bitmap_git->map_size);
bitmap_git->map = NULL;
bitmap_git->map_size = 0;
+ bitmap_git->map_pos = 0;
+ bitmap_git->pack = NULL;
return -1;
}
@@ -735,8 +739,7 @@ static int add_commit_to_bitmap(struct bitmap_index *bitmap_git,
static struct bitmap *find_objects(struct bitmap_index *bitmap_git,
struct rev_info *revs,
struct object_list *roots,
- struct bitmap *seen,
- struct list_objects_filter_options *filter)
+ struct bitmap *seen)
{
struct bitmap *base = NULL;
int needs_walk = 0;
@@ -819,9 +822,9 @@ static struct bitmap *find_objects(struct bitmap_index *bitmap_git,
show_data.bitmap_git = bitmap_git;
show_data.base = base;
- traverse_commit_list_filtered(filter, revs,
- show_commit, show_object,
- &show_data, NULL);
+ traverse_commit_list(revs,
+ show_commit, show_object,
+ &show_data);
revs->include_check = NULL;
revs->include_check_obj = NULL;
@@ -1215,7 +1218,6 @@ static int can_filter_bitmap(struct list_objects_filter_options *filter)
}
struct bitmap_index *prepare_bitmap_walk(struct rev_info *revs,
- struct list_objects_filter_options *filter,
int filter_provided_objects)
{
unsigned int i;
@@ -1236,7 +1238,7 @@ struct bitmap_index *prepare_bitmap_walk(struct rev_info *revs,
if (revs->prune)
return NULL;
- if (!can_filter_bitmap(filter))
+ if (!can_filter_bitmap(&revs->filter))
return NULL;
/* try to open a bitmapped pack, but don't parse it yet
@@ -1293,8 +1295,7 @@ struct bitmap_index *prepare_bitmap_walk(struct rev_info *revs,
if (haves) {
revs->ignore_missing_links = 1;
- haves_bitmap = find_objects(bitmap_git, revs, haves, NULL,
- filter);
+ haves_bitmap = find_objects(bitmap_git, revs, haves, NULL);
reset_revision_walk();
revs->ignore_missing_links = 0;
@@ -1302,8 +1303,7 @@ struct bitmap_index *prepare_bitmap_walk(struct rev_info *revs,
BUG("failed to perform bitmap walk");
}
- wants_bitmap = find_objects(bitmap_git, revs, wants, haves_bitmap,
- filter);
+ wants_bitmap = find_objects(bitmap_git, revs, wants, haves_bitmap);
if (!wants_bitmap)
BUG("failed to perform bitmap walk");
@@ -1311,8 +1311,10 @@ struct bitmap_index *prepare_bitmap_walk(struct rev_info *revs,
if (haves_bitmap)
bitmap_and_not(wants_bitmap, haves_bitmap);
- filter_bitmap(bitmap_git, (filter && filter_provided_objects) ? NULL : wants,
- wants_bitmap, filter);
+ filter_bitmap(bitmap_git,
+ (revs->filter.choice && filter_provided_objects) ? NULL : wants,
+ wants_bitmap,
+ &revs->filter);
bitmap_git->result = wants_bitmap;
bitmap_git->haves = haves_bitmap;
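
prepare_bitmap_walk() drops its separate filter argument and instead reads the object filter embedded in revs->filter. A hedged caller sketch, modeled on the reachable.c hunk later in this patch (revision setup and the show-object callback are assumed):

        struct bitmap_index *bitmap_git;

        /* any --filter options now travel inside the rev_info itself */
        bitmap_git = prepare_bitmap_walk(revs, 0 /* filter_provided_objects */);
        if (bitmap_git) {
                traverse_bitmap_commit_list(bitmap_git, revs, mark_object_seen);
                free_bitmap_index(bitmap_git);
        }
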
diff --git a/pack-bitmap.h b/pack-bitmap.h
index 19a63fa..3d3ddd7 100644
--- a/pack-bitmap.h
+++ b/pack-bitmap.h
@@ -10,7 +10,6 @@
struct commit;
struct repository;
struct rev_info;
-struct list_objects_filter_options;
static const char BITMAP_IDX_SIGNATURE[] = {'B', 'I', 'T', 'M'};
@@ -54,7 +53,6 @@ void test_bitmap_walk(struct rev_info *revs);
int test_bitmap_commits(struct repository *r);
int test_bitmap_hashes(struct repository *r);
struct bitmap_index *prepare_bitmap_walk(struct rev_info *revs,
- struct list_objects_filter_options *filter,
int filter_provided_objects);
uint32_t midx_preferred_pack(struct bitmap_index *bitmap_git);
int reuse_partial_packfile_from_bitmap(struct bitmap_index *,
diff --git a/pack-check.c b/pack-check.c
index 3f418e3..bfb593b 100644
--- a/pack-check.c
+++ b/pack-check.c
@@ -127,7 +127,7 @@ static int verify_packfile(struct repository *r,
if (type == OBJ_BLOB && big_file_threshold <= size) {
/*
- * Let check_object_signature() check it with
+ * Let stream_object_signature() check it with
* the streaming interface; no point slurping
* the data in-core only to discard.
*/
@@ -142,8 +142,11 @@ static int verify_packfile(struct repository *r,
err = error("cannot unpack %s from %s at offset %"PRIuMAX"",
oid_to_hex(&oid), p->pack_name,
(uintmax_t)entries[i].offset);
- else if (check_object_signature(r, &oid, data, size,
- type_name(type), NULL))
+ else if (data && check_object_signature(r, &oid, data, size,
+ type) < 0)
+ err = error("packed %s from %s is corrupt",
+ oid_to_hex(&oid), p->pack_name);
+ else if (!data && stream_object_signature(r, &oid) < 0)
err = error("packed %s from %s is corrupt",
oid_to_hex(&oid), p->pack_name);
else if (fn) {
diff --git a/pack-revindex.c b/pack-revindex.c
index 70d0fba..08dc160 100644
--- a/pack-revindex.c
+++ b/pack-revindex.c
@@ -298,9 +298,29 @@ int load_midx_revindex(struct multi_pack_index *m)
{
struct strbuf revindex_name = STRBUF_INIT;
int ret;
+
if (m->revindex_data)
return 0;
+ if (m->chunk_revindex) {
+ /*
+ * If the MIDX `m` has a `RIDX` chunk, then use its contents for
+ * the reverse index instead of trying to load a separate `.rev`
+ * file.
+ *
+ * Note that we do *not* set `m->revindex_map` here, since we do
+ * not want to accidentally call munmap() in the middle of the
+ * MIDX.
+ */
+ trace2_data_string("load_midx_revindex", the_repository,
+ "source", "midx");
+ m->revindex_data = (const uint32_t *)m->chunk_revindex;
+ return 0;
+ }
+
+ trace2_data_string("load_midx_revindex", the_repository,
+ "source", "rev");
+
get_midx_rev_filename(&revindex_name, m);
ret = load_revindex_from_disk(revindex_name.buf,
diff --git a/pack-write.c b/pack-write.c
index a5846f3..51812cb 100644
--- a/pack-write.c
+++ b/pack-write.c
@@ -159,9 +159,9 @@ const char *write_idx_file(const char *index_name, struct pack_idx_entry **objec
}
hashwrite(f, sha1, the_hash_algo->rawsz);
- finalize_hashfile(f, NULL, CSUM_HASH_IN_STREAM | CSUM_CLOSE |
- ((opts->flags & WRITE_IDX_VERIFY)
- ? 0 : CSUM_FSYNC));
+ finalize_hashfile(f, NULL, FSYNC_COMPONENT_PACK_METADATA,
+ CSUM_HASH_IN_STREAM | CSUM_CLOSE |
+ ((opts->flags & WRITE_IDX_VERIFY) ? 0 : CSUM_FSYNC));
return index_name;
}
@@ -281,8 +281,9 @@ const char *write_rev_file_order(const char *rev_name,
if (rev_name && adjust_shared_perm(rev_name) < 0)
die(_("failed to make %s readable"), rev_name);
- finalize_hashfile(f, NULL, CSUM_HASH_IN_STREAM | CSUM_CLOSE |
- ((flags & WRITE_IDX_VERIFY) ? 0 : CSUM_FSYNC));
+ finalize_hashfile(f, NULL, FSYNC_COMPONENT_PACK_METADATA,
+ CSUM_HASH_IN_STREAM | CSUM_CLOSE |
+ ((flags & WRITE_IDX_VERIFY) ? 0 : CSUM_FSYNC));
return rev_name;
}
@@ -390,7 +391,7 @@ void fixup_pack_header_footer(int pack_fd,
the_hash_algo->final_fn(partial_pack_hash, &old_hash_ctx);
the_hash_algo->final_fn(new_pack_hash, &new_hash_ctx);
write_or_die(pack_fd, new_pack_hash, the_hash_algo->rawsz);
- fsync_or_die(pack_fd, pack_name);
+ fsync_component_or_die(FSYNC_COMPONENT_PACK, pack_fd, pack_name);
}
char *index_pack_lockfile(int ip_out, int *is_well_formed)
diff --git a/parallel-checkout.c b/parallel-checkout.c
index 8dd7e7b..31a3d0e 100644
--- a/parallel-checkout.c
+++ b/parallel-checkout.c
@@ -39,8 +39,8 @@ void get_parallel_checkout_configs(int *num_workers, int *threshold)
if (env_workers && *env_workers) {
if (strtol_i(env_workers, 10, num_workers)) {
- die("invalid value for GIT_TEST_CHECKOUT_WORKERS: '%s'",
- env_workers);
+ die(_("invalid value for '%s': '%s'"),
+ "GIT_TEST_CHECKOUT_WORKERS", env_workers);
}
if (*num_workers < 1)
*num_workers = online_cpus();
diff --git a/parse-options.c b/parse-options.c
index a828303..6e57744 100644
--- a/parse-options.c
+++ b/parse-options.c
@@ -1079,3 +1079,50 @@ void NORETURN usage_msg_opt(const char *msg,
die_message("%s\n", msg); /* The extra \n is intentional */
usage_with_options(usagestr, options);
}
+
+void NORETURN usage_msg_optf(const char * const fmt,
+ const char * const *usagestr,
+ const struct option *options, ...)
+{
+ struct strbuf msg = STRBUF_INIT;
+ va_list ap;
+ va_start(ap, options);
+ strbuf_vaddf(&msg, fmt, ap);
+ va_end(ap);
+
+ usage_msg_opt(msg.buf, usagestr, options);
+}
+
+void die_for_incompatible_opt4(int opt1, const char *opt1_name,
+ int opt2, const char *opt2_name,
+ int opt3, const char *opt3_name,
+ int opt4, const char *opt4_name)
+{
+ int count = 0;
+ const char *options[4];
+
+ if (opt1)
+ options[count++] = opt1_name;
+ if (opt2)
+ options[count++] = opt2_name;
+ if (opt3)
+ options[count++] = opt3_name;
+ if (opt4)
+ options[count++] = opt4_name;
+ switch (count) {
+ case 4:
+ die(_("options '%s', '%s', '%s', and '%s' cannot be used together"),
+ opt1_name, opt2_name, opt3_name, opt4_name);
+ break;
+ case 3:
+ die(_("options '%s', '%s', and '%s' cannot be used together"),
+ options[0], options[1], options[2]);
+ break;
+ case 2:
+ die(_("options '%s' and '%s' cannot be used together"),
+ options[0], options[1]);
+ break;
+ default:
+ break;
+ }
+}
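
die_for_incompatible_opt4() (and the die_for_incompatible_opt3() wrapper declared in parse-options.h below) centralizes the "options ... cannot be used together" message. A hedged sketch with hypothetical flag variables from a builtin's option parsing:

        /* each pair is a set/unset flag plus the option name to report */
        die_for_incompatible_opt3(show_only, "--dry-run",
                                  force, "--force",
                                  patch_mode, "--patch");
        /* if at most one flag is set, the helper simply returns */
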
diff --git a/parse-options.h b/parse-options.h
index e22846d..685fcca 100644
--- a/parse-options.h
+++ b/parse-options.h
@@ -85,6 +85,11 @@ typedef enum parse_opt_result parse_opt_ll_cb(struct parse_opt_ctx_t *ctx,
* token to explain the kind of argument this option wants. Does not
* begin in capital letter, and does not end with a full stop.
* Should be wrapped by N_() for translation.
+ * Is automatically enclosed in brackets when printed, unless it
+ * contains any of the following characters: ()<>[]|
+ * E.g. "name" is shown as "<name>" to indicate that a name value
+ * needs to be supplied, not the literal string "name", but
+ * "<start>,<end>" and "(this|that)" are printed verbatim.
*
* `help`::
* the short help associated to what the option does.
@@ -225,6 +230,32 @@ NORETURN void usage_msg_opt(const char *msg,
const char * const *usagestr,
const struct option *options);
+/**
+ * usage_msg_optf() is like usage_msg_opt() except that the first
+ * argument is a format string, and optional format arguments follow
+ * after the 3rd option.
+ */
+__attribute__((format (printf,1,4)))
+void NORETURN usage_msg_optf(const char *fmt,
+ const char * const *usagestr,
+ const struct option *options, ...);
+
+void die_for_incompatible_opt4(int opt1, const char *opt1_name,
+ int opt2, const char *opt2_name,
+ int opt3, const char *opt3_name,
+ int opt4, const char *opt4_name);
+
+
+static inline void die_for_incompatible_opt3(int opt1, const char *opt1_name,
+ int opt2, const char *opt2_name,
+ int opt3, const char *opt3_name)
+{
+ die_for_incompatible_opt4(opt1, opt1_name,
+ opt2, opt2_name,
+ opt3, opt3_name,
+ 0, "");
+}
+
/*
* Use these assertions for callbacks that expect to be called with NONEG and
* NOARG respectively, and do not otherwise handle the "unset" and "arg"
diff --git a/path.h b/path.h
index b68691a..0a59c85 100644
--- a/path.h
+++ b/path.h
@@ -169,20 +169,6 @@ void report_linked_checkout_garbage(void);
return r->cached_paths.var; \
}
-struct path_cache {
- const char *squash_msg;
- const char *merge_msg;
- const char *merge_rr;
- const char *merge_mode;
- const char *merge_head;
- const char *merge_autostash;
- const char *auto_merge;
- const char *fetch_head;
- const char *shallow;
-};
-
-#define PATH_CACHE_INIT { 0 }
-
const char *git_path_squash_msg(struct repository *r);
const char *git_path_merge_msg(struct repository *r);
const char *git_path_merge_rr(struct repository *r);
diff --git a/perl/Git.pm b/perl/Git.pm
index 090a7df..080cdc2 100644
--- a/perl/Git.pm
+++ b/perl/Git.pm
@@ -1686,6 +1686,16 @@ sub _setup_git_cmd_env {
# by searching for it at proper places.
sub _execv_git_cmd { exec('git', @_); }
+sub _is_sig {
+ my ($v, $n) = @_;
+
+ # We are avoiding a "use POSIX qw(SIGPIPE SIGABRT)" in the hot
+ # Git.pm codepath.
+ require POSIX;
+ no strict 'refs';
+ $v == *{"POSIX::$n"}->();
+}
+
# Close pipe to a subprocess.
sub _cmd_close {
my $ctx = shift @_;
@@ -1698,9 +1708,16 @@ sub _cmd_close {
} elsif ($? >> 8) {
# The caller should pepper this.
throw Git::Error::Command($ctx, $? >> 8);
+ } elsif ($? & 127 && _is_sig($? & 127, "SIGPIPE")) {
+ # we might e.g. have closed a live stream; the command
+ # dying of SIGPIPE would drive us here.
+ } elsif ($? & 127 && _is_sig($? & 127, "SIGABRT")) {
+ die sprintf('BUG?: got SIGABRT ($? = %d, $? & 127 = %d) when closing pipe',
+ $?, $? & 127);
+ } elsif ($? & 127) {
+ die sprintf('got signal ($? = %d, $? & 127 = %d) when closing pipe',
+ $?, $? & 127);
}
- # else we might e.g. closed a live stream; the command
- # dying of SIGPIPE would drive us here.
}
}
diff --git a/pretty.h b/pretty.h
index 2f16acd..f34e24c 100644
--- a/pretty.h
+++ b/pretty.h
@@ -2,6 +2,7 @@
#define PRETTY_H
#include "cache.h"
+#include "date.h"
#include "string-list.h"
struct commit;
@@ -163,4 +164,13 @@ int format_set_trailers_options(struct process_trailer_options *opts,
const char **arg,
char **invalid_arg);
+/*
+ * Like show_date, but pull the timestamp and tz parameters from
+ * the ident_split. It will also sanity-check the values and produce
+ * a well-known sentinel date if they appear bogus.
+ */
+const char *show_ident_date(const struct ident_split *id,
+ const struct date_mode *mode);
+
+
#endif /* PRETTY_H */
diff --git a/progress.c b/progress.c
index 680c6a8..0cdd875 100644
--- a/progress.c
+++ b/progress.c
@@ -311,32 +311,39 @@ struct progress *start_delayed_sparse_progress(const char *title,
static void finish_if_sparse(struct progress *progress)
{
- if (progress &&
- progress->sparse &&
+ if (progress->sparse &&
progress->last_value != progress->total)
display_progress(progress, progress->total);
}
-void stop_progress(struct progress **p_progress)
+static void force_last_update(struct progress *progress, const char *msg)
{
- if (!p_progress)
- BUG("don't provide NULL to stop_progress");
-
- finish_if_sparse(*p_progress);
-
- if (*p_progress) {
- trace2_data_intmax("progress", the_repository, "total_objects",
- (*p_progress)->total);
+ char *buf;
+ struct throughput *tp = progress->throughput;
+
+ if (tp) {
+ uint64_t now_ns = progress_getnanotime(progress);
+ unsigned int misecs, rate;
+ misecs = ((now_ns - progress->start_ns) * 4398) >> 32;
+ rate = tp->curr_total / (misecs ? misecs : 1);
+ throughput_string(&tp->display, tp->curr_total, rate);
+ }
+ progress_update = 1;
+ buf = xstrfmt(", %s.\n", msg);
+ display(progress, progress->last_value, buf);
+ free(buf);
+}
- if ((*p_progress)->throughput)
- trace2_data_intmax("progress", the_repository,
- "total_bytes",
- (*p_progress)->throughput->curr_total);
+static void log_trace2(struct progress *progress)
+{
+ trace2_data_intmax("progress", the_repository, "total_objects",
+ progress->total);
- trace2_region_leave("progress", (*p_progress)->title, the_repository);
- }
+ if (progress->throughput)
+ trace2_data_intmax("progress", the_repository, "total_bytes",
+ progress->throughput->curr_total);
- stop_progress_msg(p_progress, _("done"));
+ trace2_region_leave("progress", progress->title, the_repository);
}
void stop_progress_msg(struct progress **p_progress, const char *msg)
@@ -350,23 +357,12 @@ void stop_progress_msg(struct progress **p_progress, const char *msg)
if (!progress)
return;
*p_progress = NULL;
- if (progress->last_value != -1) {
- /* Force the last update */
- char *buf;
- struct throughput *tp = progress->throughput;
-
- if (tp) {
- uint64_t now_ns = progress_getnanotime(progress);
- unsigned int misecs, rate;
- misecs = ((now_ns - progress->start_ns) * 4398) >> 32;
- rate = tp->curr_total / (misecs ? misecs : 1);
- throughput_string(&tp->display, tp->curr_total, rate);
- }
- progress_update = 1;
- buf = xstrfmt(", %s.\n", msg);
- display(progress, progress->last_value, buf);
- free(buf);
- }
+
+ finish_if_sparse(progress);
+ if (progress->last_value != -1)
+ force_last_update(progress, msg);
+ log_trace2(progress);
+
clear_progress_signal();
strbuf_release(&progress->counters_sb);
if (progress->throughput)
diff --git a/progress.h b/progress.h
index f1913ac..3a94563 100644
--- a/progress.h
+++ b/progress.h
@@ -1,5 +1,6 @@
#ifndef PROGRESS_H
#define PROGRESS_H
+#include "gettext.h"
struct progress;
@@ -18,7 +19,9 @@ struct progress *start_sparse_progress(const char *title, uint64_t total);
struct progress *start_delayed_progress(const char *title, uint64_t total);
struct progress *start_delayed_sparse_progress(const char *title,
uint64_t total);
-void stop_progress(struct progress **progress);
-void stop_progress_msg(struct progress **progress, const char *msg);
-
+void stop_progress_msg(struct progress **p_progress, const char *msg);
+static inline void stop_progress(struct progress **p_progress)
+{
+ stop_progress_msg(p_progress, _("done"));
+}
#endif
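
stop_progress() becomes a static inline wrapper over stop_progress_msg() with the translated "done" message, which is why progress.h now needs gettext.h. Typical usage is unchanged; a minimal sketch assuming a counted loop and the progress API declared above:

        struct progress *progress = start_delayed_progress("Processing items", nr);

        for (i = 0; i < nr; i++) {
                /* ... one unit of work ... */
                display_progress(progress, i + 1);
        }
        stop_progress(&progress);  /* equivalent to stop_progress_msg(&progress, _("done")) */
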
diff --git a/range-diff.c b/range-diff.c
index 30a4de5..b72eb9f 100644
--- a/range-diff.c
+++ b/range-diff.c
@@ -40,6 +40,7 @@ static int read_patches(const char *range, struct string_list *list,
char *line, *current_filename = NULL;
ssize_t len;
size_t size;
+ int ret = -1;
strvec_pushl(&cp.args, "log", "--no-color", "-p", "--no-merges",
"--reverse", "--date-order", "--decorate=no",
@@ -68,10 +69,10 @@ static int read_patches(const char *range, struct string_list *list,
if (strbuf_read(&contents, cp.out, 0) < 0) {
error_errno(_("could not read `log` output"));
finish_command(&cp);
- return -1;
+ goto cleanup;
}
if (finish_command(&cp))
- return -1;
+ goto cleanup;
line = contents.buf;
size = contents.len;
@@ -95,12 +96,9 @@ static int read_patches(const char *range, struct string_list *list,
CALLOC_ARRAY(util, 1);
if (get_oid(p, &util->oid)) {
error(_("could not parse commit '%s'"), p);
- free(util);
- free(current_filename);
+ FREE_AND_NULL(util);
string_list_clear(list, 1);
- strbuf_release(&buf);
- strbuf_release(&contents);
- return -1;
+ goto cleanup;
}
util->matching = -1;
in_header = 1;
@@ -111,11 +109,8 @@ static int read_patches(const char *range, struct string_list *list,
error(_("could not parse first line of `log` output: "
"did not start with 'commit ': '%s'"),
line);
- free(current_filename);
string_list_clear(list, 1);
- strbuf_release(&buf);
- strbuf_release(&contents);
- return -1;
+ goto cleanup;
}
if (starts_with(line, "diff --git")) {
@@ -136,12 +131,9 @@ static int read_patches(const char *range, struct string_list *list,
if (len < 0) {
error(_("could not parse git header '%.*s'"),
orig_len, line);
- free(util);
- free(current_filename);
+ FREE_AND_NULL(util);
string_list_clear(list, 1);
- strbuf_release(&buf);
- strbuf_release(&contents);
- return -1;
+ goto cleanup;
}
strbuf_addstr(&buf, " ## ");
if (patch.is_new > 0)
@@ -165,6 +157,7 @@ static int read_patches(const char *range, struct string_list *list,
patch.old_mode, patch.new_mode);
strbuf_addstr(&buf, " ##");
+ release_patch(&patch);
} else if (in_header) {
if (starts_with(line, "Author: ")) {
strbuf_addstr(&buf, " ## Metadata ##\n");
@@ -218,6 +211,9 @@ static int read_patches(const char *range, struct string_list *list,
strbuf_addch(&buf, '\n');
util->diffsize++;
}
+
+ ret = 0;
+cleanup:
strbuf_release(&contents);
if (util)
@@ -225,7 +221,7 @@ static int read_patches(const char *range, struct string_list *list,
strbuf_release(&buf);
free(current_filename);
- return 0;
+ return ret;
}
static int patch_util_cmp(const void *dummy, const struct patch_util *a,
diff --git a/reachable.c b/reachable.c
index 84e3d0d..b9f4ad8 100644
--- a/reachable.c
+++ b/reachable.c
@@ -205,7 +205,7 @@ void mark_reachable_objects(struct rev_info *revs, int mark_reflog,
cp.progress = progress;
cp.count = 0;
- bitmap_git = prepare_bitmap_walk(revs, NULL, 0);
+ bitmap_git = prepare_bitmap_walk(revs, 0);
if (bitmap_git) {
traverse_bitmap_commit_list(bitmap_git, revs, mark_object_seen);
free_bitmap_index(bitmap_git);
diff --git a/read-cache.c b/read-cache.c
index cbe73f1..4df97e1 100644
--- a/read-cache.c
+++ b/read-cache.c
@@ -28,6 +28,7 @@
#include "sparse-index.h"
#include "csum-file.h"
#include "promisor-remote.h"
+#include "hook.h"
/* Mask for the name length in ce_flags in the on-disk index */
@@ -133,7 +134,7 @@ static void replace_index_entry(struct index_state *istate, int nr, struct cache
void rename_index_entry_at(struct index_state *istate, int nr, const char *new_name)
{
- struct cache_entry *old_entry = istate->cache[nr], *new_entry;
+ struct cache_entry *old_entry = istate->cache[nr], *new_entry, *refreshed;
int namelen = strlen(new_name);
new_entry = make_empty_cache_entry(istate, namelen);
@@ -146,7 +147,20 @@ void rename_index_entry_at(struct index_state *istate, int nr, const char *new_n
cache_tree_invalidate_path(istate, old_entry->name);
untracked_cache_remove_from_index(istate, old_entry->name);
remove_index_entry_at(istate, nr);
- add_index_entry(istate, new_entry, ADD_CACHE_OK_TO_ADD|ADD_CACHE_OK_TO_REPLACE);
+
+ /*
+ * Refresh the new index entry. Using 'refresh_cache_entry' ensures
+ * we only update stat info if the entry is otherwise up-to-date (i.e.,
+ * the contents/mode haven't changed). This ensures that we reflect the
+ * 'ctime' of the rename in the index without (incorrectly) updating
+ * the cached stat info to reflect unstaged changes on disk.
+ */
+ refreshed = refresh_cache_entry(istate, new_entry, CE_MATCH_REFRESH);
+ if (refreshed && refreshed != new_entry) {
+ add_index_entry(istate, refreshed, ADD_CACHE_OK_TO_ADD|ADD_CACHE_OK_TO_REPLACE);
+ discard_cache_entry(new_entry);
+ } else
+ add_index_entry(istate, new_entry, ADD_CACHE_OK_TO_ADD|ADD_CACHE_OK_TO_REPLACE);
}
void fill_stat_data(struct stat_data *sd, struct stat *st)
@@ -735,7 +749,7 @@ static struct cache_entry *create_alias_ce(struct index_state *istate,
void set_object_name_for_intent_to_add_entry(struct cache_entry *ce)
{
struct object_id oid;
- if (write_object_file("", 0, blob_type, &oid))
+ if (write_object_file("", 0, OBJ_BLOB, &oid))
die(_("cannot create an empty blob in the object database"));
oidcpy(&ce->oid, &oid);
}
@@ -1339,9 +1353,6 @@ static int add_index_entry_with_check(struct index_state *istate, struct cache_e
int skip_df_check = option & ADD_CACHE_SKIP_DFCHECK;
int new_only = option & ADD_CACHE_NEW_ONLY;
- if (!(option & ADD_CACHE_KEEP_CACHE_TREE))
- cache_tree_invalidate_path(istate, ce->name);
-
/*
* If this entry's path sorts after the last entry in the index,
* we can avoid searching for it.
@@ -1352,6 +1363,13 @@ static int add_index_entry_with_check(struct index_state *istate, struct cache_e
else
pos = index_name_stage_pos(istate, ce->name, ce_namelen(ce), ce_stage(ce), EXPAND_SPARSE);
+ /*
+ * Cache tree path should be invalidated only after index_name_stage_pos,
+ * in case it expands a sparse index.
+ */
+ if (!(option & ADD_CACHE_KEEP_CACHE_TREE))
+ cache_tree_invalidate_path(istate, ce->name);
+
/* existing match? Just replace it. */
if (pos >= 0) {
if (!new_only)
@@ -2775,7 +2793,7 @@ static int repo_verify_index(struct repository *repo)
return verify_index_from(repo->index, repo->index_file);
}
-static int has_racy_timestamp(struct index_state *istate)
+int has_racy_timestamp(struct index_state *istate)
{
int entries = istate->cache_nr;
int i;
@@ -2837,7 +2855,7 @@ static int record_ieot(void)
* rely on it.
*/
static int do_write_index(struct index_state *istate, struct tempfile *tempfile,
- int strip_extensions)
+ int strip_extensions, unsigned flags)
{
uint64_t start = getnanotime();
struct hashfile *f;
@@ -2851,6 +2869,7 @@ static int do_write_index(struct index_state *istate, struct tempfile *tempfile,
struct strbuf previous_name_buf = STRBUF_INIT, *previous_name;
int drop_cache_tree = istate->drop_cache_tree;
off_t offset;
+ int csum_fsync_flag;
int ieot_entries = 1;
struct index_entry_offset_table *ieot = NULL;
int nr, nr_threads;
@@ -3009,6 +3028,9 @@ static int do_write_index(struct index_state *istate, struct tempfile *tempfile,
!is_null_oid(&istate->split_index->base_oid)) {
struct strbuf sb = STRBUF_INIT;
+ if (istate->sparse_index)
+ die(_("cannot write split index for a sparse index"));
+
err = write_link_extension(&sb, istate) < 0 ||
write_index_ext_header(f, eoie_c, CACHE_EXT_LINK,
sb.len) < 0;
@@ -3081,7 +3103,13 @@ static int do_write_index(struct index_state *istate, struct tempfile *tempfile,
return -1;
}
- finalize_hashfile(f, istate->oid.hash, CSUM_HASH_IN_STREAM);
+ csum_fsync_flag = 0;
+ if (!alternate_index_output && (flags & COMMIT_LOCK))
+ csum_fsync_flag = CSUM_FSYNC;
+
+ finalize_hashfile(f, istate->oid.hash, FSYNC_COMPONENT_INDEX,
+ CSUM_HASH_IN_STREAM | csum_fsync_flag);
+
if (close_tempfile_gently(tempfile)) {
error(_("could not close '%s'"), get_tempfile_path(tempfile));
return -1;
@@ -3136,7 +3164,7 @@ static int do_write_locked_index(struct index_state *istate, struct lock_file *l
*/
trace2_region_enter_printf("index", "do_write_index", the_repository,
"%s", get_lock_file_path(lock));
- ret = do_write_index(istate, lock->tempfile, 0);
+ ret = do_write_index(istate, lock->tempfile, 0, flags);
trace2_region_leave_printf("index", "do_write_index", the_repository,
"%s", get_lock_file_path(lock));
@@ -3150,7 +3178,7 @@ static int do_write_locked_index(struct index_state *istate, struct lock_file *l
else
ret = close_lock_file_gently(lock);
- run_hook_le(NULL, "post-index-change",
+ run_hooks_l("post-index-change",
istate->updated_workdir ? "1" : "0",
istate->updated_skipworktree ? "1" : "0", NULL);
istate->updated_workdir = 0;
@@ -3230,7 +3258,7 @@ static int clean_shared_index_files(const char *current_hex)
}
static int write_shared_index(struct index_state *istate,
- struct tempfile **temp)
+ struct tempfile **temp, unsigned flags)
{
struct split_index *si = istate->split_index;
int ret, was_full = !istate->sparse_index;
@@ -3240,7 +3268,7 @@ static int write_shared_index(struct index_state *istate,
trace2_region_enter_printf("index", "shared/do_write_index",
the_repository, "%s", get_tempfile_path(*temp));
- ret = do_write_index(si->base, *temp, 1);
+ ret = do_write_index(si->base, *temp, 1, flags);
trace2_region_leave_printf("index", "shared/do_write_index",
the_repository, "%s", get_tempfile_path(*temp));
@@ -3349,7 +3377,7 @@ int write_locked_index(struct index_state *istate, struct lock_file *lock,
ret = do_write_locked_index(istate, lock, flags);
goto out;
}
- ret = write_shared_index(istate, &temp);
+ ret = write_shared_index(istate, &temp, flags);
saved_errno = errno;
if (is_tempfile_active(temp))
diff --git a/ref-filter.c b/ref-filter.c
index f7a2f17..7838bd2 100644
--- a/ref-filter.c
+++ b/ref-filter.c
@@ -1251,7 +1251,7 @@ static void grab_date(const char *buf, struct atom_value *v, const char *atomnam
char *zone;
timestamp_t timestamp;
long tz;
- struct date_mode date_mode = { DATE_NORMAL };
+ struct date_mode date_mode = DATE_MODE_INIT;
const char *formatp;
/*
@@ -1276,6 +1276,7 @@ static void grab_date(const char *buf, struct atom_value *v, const char *atomnam
goto bad;
v->s = xstrdup(show_date(timestamp, tz, &date_mode));
v->value = timestamp;
+ date_mode_release(&date_mode);
return;
bad:
v->s = xstrdup("");
diff --git a/reflog-walk.h b/reflog-walk.h
index f26408f..e9e00ff 100644
--- a/reflog-walk.h
+++ b/reflog-walk.h
@@ -5,6 +5,7 @@
struct commit;
struct reflog_walk_info;
+struct date_mode;
void init_reflog_walk(struct reflog_walk_info **info);
int add_reflog_for_walk(struct reflog_walk_info *info,
diff --git a/reflog.c b/reflog.c
new file mode 100644
index 0000000..135a1a6
--- /dev/null
+++ b/reflog.c
@@ -0,0 +1,434 @@
+#include "cache.h"
+#include "object-store.h"
+#include "reflog.h"
+#include "refs.h"
+#include "revision.h"
+#include "worktree.h"
+
+/* Remember to update object flag allocation in object.h */
+#define INCOMPLETE (1u<<10)
+#define STUDYING (1u<<11)
+#define REACHABLE (1u<<12)
+
+static int tree_is_complete(const struct object_id *oid)
+{
+ struct tree_desc desc;
+ struct name_entry entry;
+ int complete;
+ struct tree *tree;
+
+ tree = lookup_tree(the_repository, oid);
+ if (!tree)
+ return 0;
+ if (tree->object.flags & SEEN)
+ return 1;
+ if (tree->object.flags & INCOMPLETE)
+ return 0;
+
+ if (!tree->buffer) {
+ enum object_type type;
+ unsigned long size;
+ void *data = read_object_file(oid, &type, &size);
+ if (!data) {
+ tree->object.flags |= INCOMPLETE;
+ return 0;
+ }
+ tree->buffer = data;
+ tree->size = size;
+ }
+ init_tree_desc(&desc, tree->buffer, tree->size);
+ complete = 1;
+ while (tree_entry(&desc, &entry)) {
+ if (!has_object_file(&entry.oid) ||
+ (S_ISDIR(entry.mode) && !tree_is_complete(&entry.oid))) {
+ tree->object.flags |= INCOMPLETE;
+ complete = 0;
+ }
+ }
+ free_tree_buffer(tree);
+
+ if (complete)
+ tree->object.flags |= SEEN;
+ return complete;
+}
+
+static int commit_is_complete(struct commit *commit)
+{
+ struct object_array study;
+ struct object_array found;
+ int is_incomplete = 0;
+ int i;
+
+ /* early return */
+ if (commit->object.flags & SEEN)
+ return 1;
+ if (commit->object.flags & INCOMPLETE)
+ return 0;
+ /*
+ * Find all commits that are reachable and are not marked as
+ * SEEN. Then make sure the trees and blobs contained are
+ * complete. After that, mark these commits also as SEEN.
+ * If some of the objects that are needed to complete this
+ * commit are missing, mark this commit as INCOMPLETE.
+ */
+ memset(&study, 0, sizeof(study));
+ memset(&found, 0, sizeof(found));
+ add_object_array(&commit->object, NULL, &study);
+ add_object_array(&commit->object, NULL, &found);
+ commit->object.flags |= STUDYING;
+ while (study.nr) {
+ struct commit *c;
+ struct commit_list *parent;
+
+ c = (struct commit *)object_array_pop(&study);
+ if (!c->object.parsed && !parse_object(the_repository, &c->object.oid))
+ c->object.flags |= INCOMPLETE;
+
+ if (c->object.flags & INCOMPLETE) {
+ is_incomplete = 1;
+ break;
+ }
+ else if (c->object.flags & SEEN)
+ continue;
+ for (parent = c->parents; parent; parent = parent->next) {
+ struct commit *p = parent->item;
+ if (p->object.flags & STUDYING)
+ continue;
+ p->object.flags |= STUDYING;
+ add_object_array(&p->object, NULL, &study);
+ add_object_array(&p->object, NULL, &found);
+ }
+ }
+ if (!is_incomplete) {
+ /*
+ * make sure all commits in "found" array have all the
+ * necessary objects.
+ */
+ for (i = 0; i < found.nr; i++) {
+ struct commit *c =
+ (struct commit *)found.objects[i].item;
+ if (!tree_is_complete(get_commit_tree_oid(c))) {
+ is_incomplete = 1;
+ c->object.flags |= INCOMPLETE;
+ }
+ }
+ if (!is_incomplete) {
+ /* mark all found commits as complete, iow SEEN */
+ for (i = 0; i < found.nr; i++)
+ found.objects[i].item->flags |= SEEN;
+ }
+ }
+ /* clear flags from the objects we traversed */
+ for (i = 0; i < found.nr; i++)
+ found.objects[i].item->flags &= ~STUDYING;
+ if (is_incomplete)
+ commit->object.flags |= INCOMPLETE;
+ else {
+ /*
+ * If we come here, we have (1) traversed the ancestry chain
+ * from the "commit" until we reach SEEN commits (which are
+ * known to be complete), and (2) made sure that the commits
+ * encountered during the above traversal refer to trees that
+ * are complete. Which means that we know *all* the commits
+ * we have seen during this process are complete.
+ */
+ for (i = 0; i < found.nr; i++)
+ found.objects[i].item->flags |= SEEN;
+ }
+ /* free object arrays */
+ object_array_clear(&study);
+ object_array_clear(&found);
+ return !is_incomplete;
+}
+
+static int keep_entry(struct commit **it, struct object_id *oid)
+{
+ struct commit *commit;
+
+ if (is_null_oid(oid))
+ return 1;
+ commit = lookup_commit_reference_gently(the_repository, oid, 1);
+ if (!commit)
+ return 0;
+
+ /*
+ * Make sure everything in this commit exists.
+ *
+ * We have walked all the objects reachable from the refs
+ * and cache earlier. The commits reachable by this commit
+ * must meet SEEN commits -- and then we should mark them as
+ * SEEN as well.
+ */
+ if (!commit_is_complete(commit))
+ return 0;
+ *it = commit;
+ return 1;
+}
+
+/*
+ * Starting from commits in the cb->mark_list, mark commits that are
+ * reachable from them. Stop the traversal at commits older than
+ * the expire_limit and queue them back, so that the caller can call
+ * us again to restart the traversal with a longer expire_limit.
+ */
+static void mark_reachable(struct expire_reflog_policy_cb *cb)
+{
+ struct commit_list *pending;
+ timestamp_t expire_limit = cb->mark_limit;
+ struct commit_list *leftover = NULL;
+
+ for (pending = cb->mark_list; pending; pending = pending->next)
+ pending->item->object.flags &= ~REACHABLE;
+
+ pending = cb->mark_list;
+ while (pending) {
+ struct commit_list *parent;
+ struct commit *commit = pop_commit(&pending);
+ if (commit->object.flags & REACHABLE)
+ continue;
+ if (parse_commit(commit))
+ continue;
+ commit->object.flags |= REACHABLE;
+ if (commit->date < expire_limit) {
+ commit_list_insert(commit, &leftover);
+ continue;
+ }
+ commit->object.flags |= REACHABLE;
+ parent = commit->parents;
+ while (parent) {
+ commit = parent->item;
+ parent = parent->next;
+ if (commit->object.flags & REACHABLE)
+ continue;
+ commit_list_insert(commit, &pending);
+ }
+ }
+ cb->mark_list = leftover;
+}
+
+static int unreachable(struct expire_reflog_policy_cb *cb, struct commit *commit, struct object_id *oid)
+{
+ /*
+ * We may or may not have the commit yet - if not, look it
+ * up using the supplied sha1.
+ */
+ if (!commit) {
+ if (is_null_oid(oid))
+ return 0;
+
+ commit = lookup_commit_reference_gently(the_repository, oid,
+ 1);
+
+ /* Not a commit -- keep it */
+ if (!commit)
+ return 0;
+ }
+
+ /* Reachable from the current ref? Don't prune. */
+ if (commit->object.flags & REACHABLE)
+ return 0;
+
+ if (cb->mark_list && cb->mark_limit) {
+ cb->mark_limit = 0; /* dig down to the root */
+ mark_reachable(cb);
+ }
+
+ return !(commit->object.flags & REACHABLE);
+}
+
+/*
+ * Return true iff the specified reflog entry should be expired.
+ */
+int should_expire_reflog_ent(struct object_id *ooid, struct object_id *noid,
+ const char *email, timestamp_t timestamp, int tz,
+ const char *message, void *cb_data)
+{
+ struct expire_reflog_policy_cb *cb = cb_data;
+ struct commit *old_commit, *new_commit;
+
+ if (timestamp < cb->cmd.expire_total)
+ return 1;
+
+ old_commit = new_commit = NULL;
+ if (cb->cmd.stalefix &&
+ (!keep_entry(&old_commit, ooid) || !keep_entry(&new_commit, noid)))
+ return 1;
+
+ if (timestamp < cb->cmd.expire_unreachable) {
+ switch (cb->unreachable_expire_kind) {
+ case UE_ALWAYS:
+ return 1;
+ case UE_NORMAL:
+ case UE_HEAD:
+ if (unreachable(cb, old_commit, ooid) || unreachable(cb, new_commit, noid))
+ return 1;
+ break;
+ }
+ }
+
+ if (cb->cmd.recno && --(cb->cmd.recno) == 0)
+ return 1;
+
+ return 0;
+}
+
+int should_expire_reflog_ent_verbose(struct object_id *ooid,
+ struct object_id *noid,
+ const char *email,
+ timestamp_t timestamp, int tz,
+ const char *message, void *cb_data)
+{
+ struct expire_reflog_policy_cb *cb = cb_data;
+ int expire;
+
+ expire = should_expire_reflog_ent(ooid, noid, email, timestamp, tz,
+ message, cb);
+
+ if (!expire)
+ printf("keep %s", message);
+ else if (cb->dry_run)
+ printf("would prune %s", message);
+ else
+ printf("prune %s", message);
+
+ return expire;
+}
+
+static int push_tip_to_list(const char *refname, const struct object_id *oid,
+ int flags, void *cb_data)
+{
+ struct commit_list **list = cb_data;
+ struct commit *tip_commit;
+ if (flags & REF_ISSYMREF)
+ return 0;
+ tip_commit = lookup_commit_reference_gently(the_repository, oid, 1);
+ if (!tip_commit)
+ return 0;
+ commit_list_insert(tip_commit, list);
+ return 0;
+}
+
+static int is_head(const char *refname)
+{
+ switch (ref_type(refname)) {
+ case REF_TYPE_OTHER_PSEUDOREF:
+ case REF_TYPE_MAIN_PSEUDOREF:
+ if (parse_worktree_ref(refname, NULL, NULL, &refname))
+ BUG("not a worktree ref: %s", refname);
+ break;
+ default:
+ break;
+ }
+ return !strcmp(refname, "HEAD");
+}
+
+void reflog_expiry_prepare(const char *refname,
+ const struct object_id *oid,
+ void *cb_data)
+{
+ struct expire_reflog_policy_cb *cb = cb_data;
+ struct commit_list *elem;
+ struct commit *commit = NULL;
+
+ if (!cb->cmd.expire_unreachable || is_head(refname)) {
+ cb->unreachable_expire_kind = UE_HEAD;
+ } else {
+ commit = lookup_commit(the_repository, oid);
+ if (commit && is_null_oid(&commit->object.oid))
+ commit = NULL;
+ cb->unreachable_expire_kind = commit ? UE_NORMAL : UE_ALWAYS;
+ }
+
+ if (cb->cmd.expire_unreachable <= cb->cmd.expire_total)
+ cb->unreachable_expire_kind = UE_ALWAYS;
+
+ switch (cb->unreachable_expire_kind) {
+ case UE_ALWAYS:
+ return;
+ case UE_HEAD:
+ for_each_ref(push_tip_to_list, &cb->tips);
+ for (elem = cb->tips; elem; elem = elem->next)
+ commit_list_insert(elem->item, &cb->mark_list);
+ break;
+ case UE_NORMAL:
+ commit_list_insert(commit, &cb->mark_list);
+ /* For reflog_expiry_cleanup() below */
+ cb->tip_commit = commit;
+ }
+ cb->mark_limit = cb->cmd.expire_total;
+ mark_reachable(cb);
+}
+
+void reflog_expiry_cleanup(void *cb_data)
+{
+ struct expire_reflog_policy_cb *cb = cb_data;
+ struct commit_list *elem;
+
+ switch (cb->unreachable_expire_kind) {
+ case UE_ALWAYS:
+ return;
+ case UE_HEAD:
+ for (elem = cb->tips; elem; elem = elem->next)
+ clear_commit_marks(elem->item, REACHABLE);
+ free_commit_list(cb->tips);
+ break;
+ case UE_NORMAL:
+ clear_commit_marks(cb->tip_commit, REACHABLE);
+ break;
+ }
+}
+
+int count_reflog_ent(struct object_id *ooid, struct object_id *noid,
+ const char *email, timestamp_t timestamp, int tz,
+ const char *message, void *cb_data)
+{
+ struct cmd_reflog_expire_cb *cb = cb_data;
+ if (!cb->expire_total || timestamp < cb->expire_total)
+ cb->recno++;
+ return 0;
+}
+
+int reflog_delete(const char *rev, enum expire_reflog_flags flags, int verbose)
+{
+ struct cmd_reflog_expire_cb cmd = { 0 };
+ int status = 0;
+ reflog_expiry_should_prune_fn *should_prune_fn = should_expire_reflog_ent;
+ const char *spec = strstr(rev, "@{");
+ char *ep, *ref;
+ int recno;
+ struct expire_reflog_policy_cb cb = {
+ .dry_run = !!(flags & EXPIRE_REFLOGS_DRY_RUN),
+ };
+
+ if (verbose)
+ should_prune_fn = should_expire_reflog_ent_verbose;
+
+ if (!spec)
+ return error(_("not a reflog: %s"), rev);
+
+ if (!dwim_log(rev, spec - rev, NULL, &ref)) {
+ status |= error(_("no reflog for '%s'"), rev);
+ goto cleanup;
+ }
+
+ recno = strtoul(spec + 2, &ep, 10);
+ if (*ep == '}') {
+ cmd.recno = -recno;
+ for_each_reflog_ent(ref, count_reflog_ent, &cmd);
+ } else {
+ cmd.expire_total = approxidate(spec + 2);
+ for_each_reflog_ent(ref, count_reflog_ent, &cmd);
+ cmd.expire_total = 0;
+ }
+
+ cb.cmd = cmd;
+ status |= reflog_expire(ref, flags,
+ reflog_expiry_prepare,
+ should_prune_fn,
+ reflog_expiry_cleanup,
+ &cb);
+
+ cleanup:
+ free(ref);
+ return status;
+}
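
[Editorial illustration, not part of the patch: reflog_delete() above accepts two shapes of reflog spec, a numeric entry index and an approxidate expression. A minimal sketch of calling it; the revs and the flag value are chosen purely as examples.]

/*
 * Illustrative sketch only. Both calls target a single reflog entry:
 * the first addresses it by index, the second by date. With
 * EXPIRE_REFLOGS_DRY_RUN and verbose enabled, entries are only
 * reported as "would prune".
 */
static void reflog_delete_examples(void)
{
	/* "HEAD@{2}": the numeric branch above stores cmd.recno = -2. */
	reflog_delete("HEAD@{2}", EXPIRE_REFLOGS_DRY_RUN, 1);

	/* "HEAD@{1.week.ago}": the date branch feeds the text to approxidate(). */
	reflog_delete("HEAD@{1.week.ago}", EXPIRE_REFLOGS_DRY_RUN, 1);
}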
diff --git a/reflog.h b/reflog.h
new file mode 100644
index 0000000..d2906fb
--- /dev/null
+++ b/reflog.h
@@ -0,0 +1,43 @@
+#ifndef REFLOG_H
+#define REFLOG_H
+#include "refs.h"
+
+struct cmd_reflog_expire_cb {
+ int stalefix;
+ int explicit_expiry;
+ timestamp_t expire_total;
+ timestamp_t expire_unreachable;
+ int recno;
+};
+
+struct expire_reflog_policy_cb {
+ enum {
+ UE_NORMAL,
+ UE_ALWAYS,
+ UE_HEAD
+ } unreachable_expire_kind;
+ struct commit_list *mark_list;
+ unsigned long mark_limit;
+ struct cmd_reflog_expire_cb cmd;
+ struct commit *tip_commit;
+ struct commit_list *tips;
+ unsigned int dry_run:1;
+};
+
+int reflog_delete(const char *rev, enum expire_reflog_flags flags,
+ int verbose);
+void reflog_expiry_cleanup(void *cb_data);
+void reflog_expiry_prepare(const char *refname, const struct object_id *oid,
+ void *cb_data);
+int should_expire_reflog_ent(struct object_id *ooid, struct object_id *noid,
+ const char *email, timestamp_t timestamp, int tz,
+ const char *message, void *cb_data);
+int count_reflog_ent(struct object_id *ooid, struct object_id *noid,
+ const char *email, timestamp_t timestamp, int tz,
+ const char *message, void *cb_data);
+int should_expire_reflog_ent_verbose(struct object_id *ooid,
+ struct object_id *noid,
+ const char *email,
+ timestamp_t timestamp, int tz,
+ const char *message, void *cb_data);
+#endif /* REFLOG_H */
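
[Editorial illustration, not part of the patch: with the policy helpers now exported from reflog.h, a caller could drive reflog_expire() directly along these lines; the expiry limit and settings below are arbitrary examples.]

/*
 * Illustrative sketch only: expire reflog entries of `refname` older
 * than `limit`, reusing the public expiry-policy callbacks.
 */
static int expire_older_than(const char *refname, timestamp_t limit,
			     enum expire_reflog_flags flags)
{
	struct expire_reflog_policy_cb cb = {
		.dry_run = !!(flags & EXPIRE_REFLOGS_DRY_RUN),
		.cmd = {
			.expire_total = limit,
			.expire_unreachable = limit,
		},
	};

	return reflog_expire(refname, flags,
			     reflog_expiry_prepare,
			     should_expire_reflog_ent,
			     reflog_expiry_cleanup,
			     &cb);
}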
diff --git a/refs.c b/refs.c
index addb262..1a96450 100644
--- a/refs.c
+++ b/refs.c
@@ -19,6 +19,7 @@
#include "strvec.h"
#include "repository.h"
#include "sigchain.h"
+#include "date.h"
/*
* List of all available backends
@@ -269,10 +270,9 @@ char *refs_resolve_refdup(struct ref_store *refs,
struct object_id *oid, int *flags)
{
const char *result;
- int ignore_errno;
result = refs_resolve_ref_unsafe(refs, refname, resolve_flags,
- oid, flags, &ignore_errno);
+ oid, flags);
return xstrdup_or_null(result);
}
@@ -294,11 +294,10 @@ struct ref_filter {
int read_ref_full(const char *refname, int resolve_flags, struct object_id *oid, int *flags)
{
- int ignore_errno;
struct ref_store *refs = get_main_ref_store(the_repository);
if (refs_resolve_ref_unsafe(refs, refname, resolve_flags,
- oid, flags, &ignore_errno))
+ oid, flags))
return 0;
return -1;
}
@@ -310,9 +309,8 @@ int read_ref(const char *refname, struct object_id *oid)
int refs_ref_exists(struct ref_store *refs, const char *refname)
{
- int ignore_errno;
return !!refs_resolve_ref_unsafe(refs, refname, RESOLVE_REF_READING,
- NULL, NULL, &ignore_errno);
+ NULL, NULL);
}
int ref_exists(const char *refname)
@@ -656,15 +654,13 @@ int expand_ref(struct repository *repo, const char *str, int len,
struct object_id *this_result;
int flag;
struct ref_store *refs = get_main_ref_store(repo);
- int ignore_errno;
this_result = refs_found ? &oid_from_ref : oid;
strbuf_reset(&fullref);
strbuf_addf(&fullref, *p, len, str);
r = refs_resolve_ref_unsafe(refs, fullref.buf,
RESOLVE_REF_READING,
- this_result, &flag,
- &ignore_errno);
+ this_result, &flag);
if (r) {
if (!refs_found++)
*ref = xstrdup(r);
@@ -693,14 +689,12 @@ int repo_dwim_log(struct repository *r, const char *str, int len,
for (p = ref_rev_parse_rules; *p; p++) {
struct object_id hash;
const char *ref, *it;
- int ignore_errno;
strbuf_reset(&path);
strbuf_addf(&path, *p, len, str);
ref = refs_resolve_ref_unsafe(refs, path.buf,
RESOLVE_REF_READING,
- oid ? &hash : NULL, NULL,
- &ignore_errno);
+ oid ? &hash : NULL, NULL);
if (!ref)
continue;
if (refs_reflog_exists(refs, path.buf))
@@ -800,7 +794,7 @@ int refs_delete_ref(struct ref_store *refs, const char *msg,
struct ref_transaction *transaction;
struct strbuf err = STRBUF_INIT;
- transaction = ref_store_transaction_begin(refs, &err);
+ transaction = ref_store_transaction_begin(refs, 0, &err);
if (!transaction ||
ref_transaction_delete(transaction, refname, old_oid,
flags, msg, &err) ||
@@ -1005,6 +999,7 @@ int read_ref_at(struct ref_store *refs, const char *refname,
}
struct ref_transaction *ref_store_transaction_begin(struct ref_store *refs,
+ unsigned int flags,
struct strbuf *err)
{
struct ref_transaction *tr;
@@ -1012,12 +1007,13 @@ struct ref_transaction *ref_store_transaction_begin(struct ref_store *refs,
CALLOC_ARRAY(tr, 1);
tr->ref_store = refs;
+ tr->flags = flags;
return tr;
}
struct ref_transaction *ref_transaction_begin(struct strbuf *err)
{
- return ref_store_transaction_begin(get_main_ref_store(the_repository), err);
+ return ref_store_transaction_begin(get_main_ref_store(the_repository), 0, err);
}
void ref_transaction_free(struct ref_transaction *transaction)
@@ -1156,7 +1152,7 @@ int refs_update_ref(struct ref_store *refs, const char *msg,
struct strbuf err = STRBUF_INIT;
int ret = 0;
- t = ref_store_transaction_begin(refs, &err);
+ t = ref_store_transaction_begin(refs, 0, &err);
if (!t ||
ref_transaction_update(t, refname, new_oid, old_oid, flags, msg,
&err) ||
@@ -1390,10 +1386,9 @@ int refs_head_ref(struct ref_store *refs, each_ref_fn fn, void *cb_data)
{
struct object_id oid;
int flag;
- int ignore_errno;
if (refs_resolve_ref_unsafe(refs, "HEAD", RESOLVE_REF_READING,
- &oid, &flag, &ignore_errno))
+ &oid, &flag))
return fn("HEAD", &oid, flag, cb_data);
return 0;
@@ -1678,19 +1673,23 @@ int refs_read_raw_ref(struct ref_store *ref_store, const char *refname,
type, failure_errno);
}
+int refs_read_symbolic_ref(struct ref_store *ref_store, const char *refname,
+ struct strbuf *referent)
+{
+ return ref_store->be->read_symbolic_ref(ref_store, refname, referent);
+}
+
const char *refs_resolve_ref_unsafe(struct ref_store *refs,
const char *refname,
int resolve_flags,
struct object_id *oid,
- int *flags, int *failure_errno)
+ int *flags)
{
static struct strbuf sb_refname = STRBUF_INIT;
struct object_id unused_oid;
int unused_flags;
int symref_count;
- assert(failure_errno);
-
if (!oid)
oid = &unused_oid;
if (!flags)
@@ -1700,10 +1699,8 @@ const char *refs_resolve_ref_unsafe(struct ref_store *refs,
if (check_refname_format(refname, REFNAME_ALLOW_ONELEVEL)) {
if (!(resolve_flags & RESOLVE_REF_ALLOW_BAD_NAME) ||
- !refname_is_safe(refname)) {
- *failure_errno = EINVAL;
+ !refname_is_safe(refname))
return NULL;
- }
/*
* dwim_ref() uses REF_ISBROKEN to distinguish between
@@ -1718,9 +1715,10 @@ const char *refs_resolve_ref_unsafe(struct ref_store *refs,
for (symref_count = 0; symref_count < SYMREF_MAXDEPTH; symref_count++) {
unsigned int read_flags = 0;
+ int failure_errno;
if (refs_read_raw_ref(refs, refname, oid, &sb_refname,
- &read_flags, failure_errno)) {
+ &read_flags, &failure_errno)) {
*flags |= read_flags;
/* In reading mode, refs must eventually resolve */
@@ -1732,9 +1730,9 @@ const char *refs_resolve_ref_unsafe(struct ref_store *refs,
* may show errors besides ENOENT if there are
* similarly-named refs.
*/
- if (*failure_errno != ENOENT &&
- *failure_errno != EISDIR &&
- *failure_errno != ENOTDIR)
+ if (failure_errno != ENOENT &&
+ failure_errno != EISDIR &&
+ failure_errno != ENOTDIR)
return NULL;
oidclr(oid);
@@ -1760,16 +1758,13 @@ const char *refs_resolve_ref_unsafe(struct ref_store *refs,
}
if (check_refname_format(refname, REFNAME_ALLOW_ONELEVEL)) {
if (!(resolve_flags & RESOLVE_REF_ALLOW_BAD_NAME) ||
- !refname_is_safe(refname)) {
- *failure_errno = EINVAL;
+ !refname_is_safe(refname))
return NULL;
- }
*flags |= REF_ISBROKEN | REF_BAD_NAME;
}
}
- *failure_errno = ELOOP;
return NULL;
}
@@ -1784,10 +1779,8 @@ int refs_init_db(struct strbuf *err)
const char *resolve_ref_unsafe(const char *refname, int resolve_flags,
struct object_id *oid, int *flags)
{
- int ignore_errno;
-
return refs_resolve_ref_unsafe(get_main_ref_store(the_repository), refname,
- resolve_flags, oid, flags, &ignore_errno);
+ resolve_flags, oid, flags);
}
int resolve_gitlink_ref(const char *submodule, const char *refname,
@@ -1795,15 +1788,14 @@ int resolve_gitlink_ref(const char *submodule, const char *refname,
{
struct ref_store *refs;
int flags;
- int ignore_errno;
refs = get_submodule_ref_store(submodule);
if (!refs)
return -1;
- if (!refs_resolve_ref_unsafe(refs, refname, 0, oid, &flags,
- &ignore_errno) || is_null_oid(oid))
+ if (!refs_resolve_ref_unsafe(refs, refname, 0, oid, &flags) ||
+ is_null_oid(oid))
return -1;
return 0;
}
@@ -2082,6 +2074,9 @@ static int run_transaction_hook(struct ref_transaction *transaction,
const char *hook;
int ret = 0, i;
+ if (transaction->flags & REF_TRANSACTION_SKIP_HOOK)
+ return 0;
+
hook = find_hook("reference-transaction");
if (!hook)
return ret;
@@ -2429,6 +2424,22 @@ int initial_ref_transaction_commit(struct ref_transaction *transaction,
return refs->be->initial_transaction_commit(refs, transaction, err);
}
+void ref_transaction_for_each_queued_update(struct ref_transaction *transaction,
+ ref_transaction_for_each_queued_update_fn cb,
+ void *cb_data)
+{
+ int i;
+
+ for (i = 0; i < transaction->nr; i++) {
+ struct ref_update *update = transaction->updates[i];
+
+ cb(update->refname,
+ (update->flags & REF_HAVE_OLD) ? &update->old_oid : NULL,
+ (update->flags & REF_HAVE_NEW) ? &update->new_oid : NULL,
+ cb_data);
+ }
+}
+
int refs_delete_refs(struct ref_store *refs, const char *logmsg,
struct string_list *refnames, unsigned int flags)
{
diff --git a/refs.h b/refs.h
index 8f91a7f..23479c7 100644
--- a/refs.h
+++ b/refs.h
@@ -58,11 +58,6 @@ struct worktree;
* resolved. The function returns NULL for such ref names.
* Caps and underscores refers to the special refs, such as HEAD,
* FETCH_HEAD and friends, that all live outside of the refs/ directory.
- *
- * Callers should not inspect "errno" on failure, but rather pass in a
- * "failure_errno" parameter, on failure the "errno" will indicate the
- * type of failure encountered, but not necessarily one that came from
- * a syscall. We might have faked it up.
*/
#define RESOLVE_REF_READING 0x01
#define RESOLVE_REF_NO_RECURSE 0x02
@@ -72,7 +67,7 @@ const char *refs_resolve_ref_unsafe(struct ref_store *refs,
const char *refname,
int resolve_flags,
struct object_id *oid,
- int *flags, int *failure_errno);
+ int *flags);
const char *resolve_ref_unsafe(const char *refname, int resolve_flags,
struct object_id *oid, int *flags);
@@ -87,6 +82,9 @@ int read_ref_full(const char *refname, int resolve_flags,
struct object_id *oid, int *flags);
int read_ref(const char *refname, struct object_id *oid);
+int refs_read_symbolic_ref(struct ref_store *ref_store, const char *refname,
+ struct strbuf *referent);
+
/*
* Return 0 if a reference named refname could be created without
* conflicting with the name of an existing reference. Otherwise,
@@ -231,7 +229,7 @@ char *repo_default_branch_name(struct repository *r, int quiet);
* struct strbuf err = STRBUF_INIT;
* int ret = 0;
*
- * transaction = ref_store_transaction_begin(refs, &err);
+ * transaction = ref_store_transaction_begin(refs, 0, &err);
* if (!transaction ||
* ref_transaction_update(...) ||
* ref_transaction_create(...) ||
@@ -569,10 +567,16 @@ enum action_on_err {
};
/*
+ * Skip executing the reference-transaction hook.
+ */
+#define REF_TRANSACTION_SKIP_HOOK (1 << 0)
+
+/*
* Begin a reference transaction. The reference transaction must
* be freed by calling ref_transaction_free().
*/
struct ref_transaction *ref_store_transaction_begin(struct ref_store *refs,
+ unsigned int flags,
struct strbuf *err);
struct ref_transaction *ref_transaction_begin(struct strbuf *err);
@@ -776,6 +780,20 @@ int initial_ref_transaction_commit(struct ref_transaction *transaction,
struct strbuf *err);
/*
+ * Execute the given callback function for each of the reference updates which
+ * have been queued in the given transaction. `old_oid` and `new_oid` may be
+ * `NULL` pointers depending on whether the update has these object IDs set or
+ * not.
+ */
+typedef void ref_transaction_for_each_queued_update_fn(const char *refname,
+ const struct object_id *old_oid,
+ const struct object_id *new_oid,
+ void *cb_data);
+void ref_transaction_for_each_queued_update(struct ref_transaction *transaction,
+ ref_transaction_for_each_queued_update_fn cb,
+ void *cb_data);
+
+/*
* Free `*transaction` and all associated data.
*/
void ref_transaction_free(struct ref_transaction *transaction);
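
[Editorial illustration, not part of the patch: the two refs.h additions above in one sketch. A transaction opened with REF_TRANSACTION_SKIP_HOOK bypasses the reference-transaction hook, and ref_transaction_for_each_queued_update() walks whatever has been queued so far. The ref name and message are placeholders.]

/*
 * Illustrative sketch only. Note that old_oid/new_oid may be NULL,
 * as documented for the callback above.
 */
static void report_queued(const char *refname,
			  const struct object_id *old_oid,
			  const struct object_id *new_oid,
			  void *cb_data)
{
	printf("queued update for %s\n", refname);
}

static int queue_and_report(struct ref_store *refs,
			    const struct object_id *new_oid)
{
	struct strbuf err = STRBUF_INIT;
	struct ref_transaction *t;
	int ret = -1;

	t = ref_store_transaction_begin(refs, REF_TRANSACTION_SKIP_HOOK, &err);
	if (!t)
		goto out;
	if (ref_transaction_update(t, "refs/heads/example", new_oid, NULL,
				   0, "example update", &err))
		goto out;

	ref_transaction_for_each_queued_update(t, report_queued, NULL);
	ret = ref_transaction_commit(t, &err);
out:
	ref_transaction_free(t);
	strbuf_release(&err);
	return ret;
}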
diff --git a/refs/debug.c b/refs/debug.c
index 2b0771c..eed8bc9 100644
--- a/refs/debug.c
+++ b/refs/debug.c
@@ -220,8 +220,9 @@ static int debug_ref_iterator_abort(struct ref_iterator *ref_iterator)
}
static struct ref_iterator_vtable debug_ref_iterator_vtable = {
- debug_ref_iterator_advance, debug_ref_iterator_peel,
- debug_ref_iterator_abort
+ .advance = debug_ref_iterator_advance,
+ .peel = debug_ref_iterator_peel,
+ .abort = debug_ref_iterator_abort,
};
static struct ref_iterator *
@@ -261,6 +262,24 @@ static int debug_read_raw_ref(struct ref_store *ref_store, const char *refname,
return res;
}
+static int debug_read_symbolic_ref(struct ref_store *ref_store, const char *refname,
+ struct strbuf *referent)
+{
+ struct debug_ref_store *drefs = (struct debug_ref_store *)ref_store;
+ struct ref_store *refs = drefs->refs;
+ int res;
+
+ res = refs->be->read_symbolic_ref(refs, refname, referent);
+ if (!res)
+ trace_printf_key(&trace_refs, "read_symbolic_ref: %s: (%s)\n",
+ refname, referent->buf);
+ else
+ trace_printf_key(&trace_refs,
+ "read_symbolic_ref: %s: %d\n", refname, res);
+ return res;
+
+}
+
static struct ref_iterator *
debug_reflog_iterator_begin(struct ref_store *ref_store)
{
@@ -418,29 +437,37 @@ static int debug_reflog_expire(struct ref_store *ref_store, const char *refname,
}
struct ref_storage_be refs_be_debug = {
- NULL,
- "debug",
- NULL,
- debug_init_db,
- debug_transaction_prepare,
- debug_transaction_finish,
- debug_transaction_abort,
- debug_initial_transaction_commit,
-
- debug_pack_refs,
- debug_create_symref,
- debug_delete_refs,
- debug_rename_ref,
- debug_copy_ref,
-
- debug_ref_iterator_begin,
- debug_read_raw_ref,
-
- debug_reflog_iterator_begin,
- debug_for_each_reflog_ent,
- debug_for_each_reflog_ent_reverse,
- debug_reflog_exists,
- debug_create_reflog,
- debug_delete_reflog,
- debug_reflog_expire,
+ .next = NULL,
+ .name = "debug",
+ .init = NULL,
+ .init_db = debug_init_db,
+
+ /*
+ * None of these should be NULL. If the "files" backend (in
+ * "struct ref_storage_be refs_be_files" in files-backend.c)
+ * has a function we should also have a wrapper for it here.
+ * Test the output with "GIT_TRACE_REFS=1".
+ */
+ .transaction_prepare = debug_transaction_prepare,
+ .transaction_finish = debug_transaction_finish,
+ .transaction_abort = debug_transaction_abort,
+ .initial_transaction_commit = debug_initial_transaction_commit,
+
+ .pack_refs = debug_pack_refs,
+ .create_symref = debug_create_symref,
+ .delete_refs = debug_delete_refs,
+ .rename_ref = debug_rename_ref,
+ .copy_ref = debug_copy_ref,
+
+ .iterator_begin = debug_ref_iterator_begin,
+ .read_raw_ref = debug_read_raw_ref,
+ .read_symbolic_ref = debug_read_symbolic_ref,
+
+ .reflog_iterator_begin = debug_reflog_iterator_begin,
+ .for_each_reflog_ent = debug_for_each_reflog_ent,
+ .for_each_reflog_ent_reverse = debug_for_each_reflog_ent_reverse,
+ .reflog_exists = debug_reflog_exists,
+ .create_reflog = debug_create_reflog,
+ .delete_reflog = debug_delete_reflog,
+ .reflog_expire = debug_reflog_expire,
};
diff --git a/refs/files-backend.c b/refs/files-backend.c
index 43a3b88..95acab7 100644
--- a/refs/files-backend.c
+++ b/refs/files-backend.c
@@ -277,11 +277,10 @@ static void loose_fill_ref_dir(struct ref_store *ref_store,
create_dir_entry(dir->cache, refname.buf,
refname.len));
} else {
- int ignore_errno;
if (!refs_resolve_ref_unsafe(&refs->base,
refname.buf,
RESOLVE_REF_READING,
- &oid, &flag, &ignore_errno)) {
+ &oid, &flag)) {
oidclr(&oid);
flag |= REF_ISBROKEN;
} else if (is_null_oid(&oid)) {
@@ -339,9 +338,9 @@ static struct ref_cache *get_loose_ref_cache(struct files_ref_store *refs)
return refs->loose;
}
-static int files_read_raw_ref(struct ref_store *ref_store, const char *refname,
- struct object_id *oid, struct strbuf *referent,
- unsigned int *type, int *failure_errno)
+static int read_ref_internal(struct ref_store *ref_store, const char *refname,
+ struct object_id *oid, struct strbuf *referent,
+ unsigned int *type, int *failure_errno, int skip_packed_refs)
{
struct files_ref_store *refs =
files_downcast(ref_store, REF_STORE_READ, "read_raw_ref");
@@ -382,7 +381,7 @@ stat_ref:
if (lstat(path, &st) < 0) {
int ignore_errno;
myerr = errno;
- if (myerr != ENOENT)
+ if (myerr != ENOENT || skip_packed_refs)
goto out;
if (refs_read_raw_ref(refs->packed_ref_store, refname, oid,
referent, type, &ignore_errno)) {
@@ -426,7 +425,8 @@ stat_ref:
* ref is supposed to be, there could still be a
* packed ref:
*/
- if (refs_read_raw_ref(refs->packed_ref_store, refname, oid,
+ if (skip_packed_refs ||
+ refs_read_raw_ref(refs->packed_ref_store, refname, oid,
referent, type, &ignore_errno)) {
myerr = EISDIR;
goto out;
@@ -471,6 +471,27 @@ out:
return ret;
}
+static int files_read_raw_ref(struct ref_store *ref_store, const char *refname,
+ struct object_id *oid, struct strbuf *referent,
+ unsigned int *type, int *failure_errno)
+{
+ return read_ref_internal(ref_store, refname, oid, referent, type, failure_errno, 0);
+}
+
+static int files_read_symbolic_ref(struct ref_store *ref_store, const char *refname,
+ struct strbuf *referent)
+{
+ struct object_id oid;
+ int failure_errno, ret;
+ unsigned int type;
+
+ ret = read_ref_internal(ref_store, refname, &oid, referent, &type, &failure_errno, 1);
+ if (ret)
+ return ret;
+
+ return !(type & REF_ISSYMREF);
+}
+
int parse_loose_ref_contents(const char *buf, struct object_id *oid,
struct strbuf *referent, unsigned int *type,
int *failure_errno)
@@ -801,9 +822,9 @@ static int files_ref_iterator_abort(struct ref_iterator *ref_iterator)
}
static struct ref_iterator_vtable files_ref_iterator_vtable = {
- files_ref_iterator_advance,
- files_ref_iterator_peel,
- files_ref_iterator_abort
+ .advance = files_ref_iterator_advance,
+ .peel = files_ref_iterator_peel,
+ .abort = files_ref_iterator_abort,
};
static struct ref_iterator *files_ref_iterator_begin(
@@ -1006,7 +1027,6 @@ static struct ref_lock *lock_ref_oid_basic(struct files_ref_store *refs,
{
struct strbuf ref_file = STRBUF_INIT;
struct ref_lock *lock;
- int ignore_errno;
files_assert_main_repository(refs, "lock_ref_oid_basic");
assert(err);
@@ -1034,7 +1054,7 @@ static struct ref_lock *lock_ref_oid_basic(struct files_ref_store *refs,
}
if (!refs_resolve_ref_unsafe(&refs->base, lock->ref_name, 0,
- &lock->old_oid, NULL, &ignore_errno))
+ &lock->old_oid, NULL))
oidclr(&lock->old_oid);
goto out;
@@ -1116,7 +1136,8 @@ static void prune_ref(struct files_ref_store *refs, struct ref_to_prune *r)
if (check_refname_format(r->name, 0))
return;
- transaction = ref_store_transaction_begin(&refs->base, &err);
+ transaction = ref_store_transaction_begin(&refs->base,
+ REF_TRANSACTION_SKIP_HOOK, &err);
if (!transaction)
goto cleanup;
ref_transaction_add_update(
@@ -1187,7 +1208,8 @@ static int files_pack_refs(struct ref_store *ref_store, unsigned int flags)
struct strbuf err = STRBUF_INIT;
struct ref_transaction *transaction;
- transaction = ref_store_transaction_begin(refs->packed_ref_store, &err);
+ transaction = ref_store_transaction_begin(refs->packed_ref_store,
+ REF_TRANSACTION_SKIP_HOOK, &err);
if (!transaction)
return -1;
@@ -1244,6 +1266,7 @@ static int files_delete_refs(struct ref_store *ref_store, const char *msg,
{
struct files_ref_store *refs =
files_downcast(ref_store, REF_STORE_WRITE, "delete_refs");
+ struct ref_transaction *transaction = NULL;
struct strbuf err = STRBUF_INIT;
int i, result = 0;
@@ -1253,10 +1276,15 @@ static int files_delete_refs(struct ref_store *ref_store, const char *msg,
if (packed_refs_lock(refs->packed_ref_store, 0, &err))
goto error;
- if (refs_delete_refs(refs->packed_ref_store, msg, refnames, flags)) {
- packed_refs_unlock(refs->packed_ref_store);
+ transaction = ref_store_transaction_begin(refs->packed_ref_store,
+ REF_TRANSACTION_SKIP_HOOK, &err);
+ if (!transaction)
+ goto error;
+
+ result = packed_refs_delete_refs(refs->packed_ref_store,
+ transaction, msg, refnames, flags);
+ if (result)
goto error;
- }
packed_refs_unlock(refs->packed_ref_store);
@@ -1267,6 +1295,7 @@ static int files_delete_refs(struct ref_store *ref_store, const char *msg,
result |= error(_("could not remove reference %s"), refname);
}
+ ref_transaction_free(transaction);
strbuf_release(&err);
return result;
@@ -1283,6 +1312,7 @@ error:
else
error(_("could not delete references: %s"), err.buf);
+ ref_transaction_free(transaction);
strbuf_release(&err);
return -1;
}
@@ -1399,7 +1429,6 @@ static int files_copy_or_rename_ref(struct ref_store *ref_store,
struct strbuf tmp_renamed_log = STRBUF_INIT;
int log, ret;
struct strbuf err = STRBUF_INIT;
- int ignore_errno;
files_reflog_path(refs, &sb_oldref, oldrefname);
files_reflog_path(refs, &sb_newref, newrefname);
@@ -1413,7 +1442,7 @@ static int files_copy_or_rename_ref(struct ref_store *ref_store,
if (!refs_resolve_ref_unsafe(&refs->base, oldrefname,
RESOLVE_REF_READING | RESOLVE_REF_NO_RECURSE,
- &orig_oid, &flag, &ignore_errno)) {
+ &orig_oid, &flag)) {
ret = error("refname %s not found", oldrefname);
goto out;
}
@@ -1459,7 +1488,7 @@ static int files_copy_or_rename_ref(struct ref_store *ref_store,
*/
if (!copy && refs_resolve_ref_unsafe(&refs->base, newrefname,
RESOLVE_REF_READING | RESOLVE_REF_NO_RECURSE,
- NULL, NULL, &ignore_errno) &&
+ NULL, NULL) &&
refs_delete_ref(&refs->base, NULL, newrefname,
NULL, REF_NO_DEREF)) {
if (errno == EISDIR) {
@@ -1780,6 +1809,7 @@ static int write_ref_to_lockfile(struct ref_lock *lock,
fd = get_lock_file_fd(&lock->lk);
if (write_in_full(fd, oid_to_hex(oid), the_hash_algo->hexsz) < 0 ||
write_in_full(fd, &term, 1) < 0 ||
+ fsync_component(FSYNC_COMPONENT_REFERENCE, get_lock_file_fd(&lock->lk)) < 0 ||
close_ref_gently(lock) < 0) {
strbuf_addf(err,
"couldn't write '%s'", get_lock_file_path(&lock->lk));
@@ -1828,12 +1858,10 @@ static int commit_ref_update(struct files_ref_store *refs,
*/
int head_flag;
const char *head_ref;
- int ignore_errno;
head_ref = refs_resolve_ref_unsafe(&refs->base, "HEAD",
RESOLVE_REF_READING,
- NULL, &head_flag,
- &ignore_errno);
+ NULL, &head_flag);
if (head_ref && (head_flag & REF_ISSYMREF) &&
!strcmp(head_ref, lock->ref_name)) {
struct strbuf log_err = STRBUF_INIT;
@@ -1877,12 +1905,10 @@ static void update_symref_reflog(struct files_ref_store *refs,
{
struct strbuf err = STRBUF_INIT;
struct object_id new_oid;
- int ignore_errno;
if (logmsg &&
refs_resolve_ref_unsafe(&refs->base, target,
- RESOLVE_REF_READING, &new_oid, NULL,
- &ignore_errno) &&
+ RESOLVE_REF_READING, &new_oid, NULL) &&
files_log_ref_write(refs, refname, &lock->old_oid,
&new_oid, logmsg, 0, &err)) {
error("%s", err.buf);
@@ -2156,7 +2182,6 @@ static int files_reflog_iterator_advance(struct ref_iterator *ref_iterator)
(struct files_reflog_iterator *)ref_iterator;
struct dir_iterator *diter = iter->dir_iterator;
int ok;
- int ignore_errno;
while ((ok = dir_iterator_advance(diter)) == ITER_OK) {
int flags;
@@ -2170,8 +2195,7 @@ static int files_reflog_iterator_advance(struct ref_iterator *ref_iterator)
if (!refs_resolve_ref_unsafe(iter->ref_store,
diter->relative_path, 0,
- &iter->oid, &flags,
- &ignore_errno)) {
+ &iter->oid, &flags)) {
error("bad ref for %s", diter->path.buf);
continue;
}
@@ -2208,9 +2232,9 @@ static int files_reflog_iterator_abort(struct ref_iterator *ref_iterator)
}
static struct ref_iterator_vtable files_reflog_iterator_vtable = {
- files_reflog_iterator_advance,
- files_reflog_iterator_peel,
- files_reflog_iterator_abort
+ .advance = files_reflog_iterator_advance,
+ .peel = files_reflog_iterator_peel,
+ .abort = files_reflog_iterator_abort,
};
static struct ref_iterator *reflog_iterator_begin(struct ref_store *ref_store,
@@ -2515,11 +2539,9 @@ static int lock_ref_for_update(struct files_ref_store *refs,
* the transaction, so we have to read it here
* to record and possibly check old_oid:
*/
- int ignore_errno;
if (!refs_resolve_ref_unsafe(&refs->base,
referent.buf, 0,
- &lock->old_oid, NULL,
- &ignore_errno)) {
+ &lock->old_oid, NULL)) {
if (update->flags & REF_HAVE_OLD) {
strbuf_addf(err, "cannot lock ref '%s': "
"error reading reference",
@@ -2762,7 +2784,8 @@ static int files_transaction_prepare(struct ref_store *ref_store,
*/
if (!packed_transaction) {
packed_transaction = ref_store_transaction_begin(
- refs->packed_ref_store, err);
+ refs->packed_ref_store,
+ REF_TRANSACTION_SKIP_HOOK, err);
if (!packed_transaction) {
ret = TRANSACTION_GENERIC_ERROR;
goto cleanup;
@@ -3033,7 +3056,8 @@ static int files_initial_transaction_commit(struct ref_store *ref_store,
&affected_refnames))
BUG("initial ref transaction called with existing refs");
- packed_transaction = ref_store_transaction_begin(refs->packed_ref_store, err);
+ packed_transaction = ref_store_transaction_begin(refs->packed_ref_store,
+ REF_TRANSACTION_SKIP_HOOK, err);
if (!packed_transaction) {
ret = TRANSACTION_GENERIC_ERROR;
goto cleanup;
@@ -3208,14 +3232,12 @@ static int files_reflog_expire(struct ref_store *ref_store,
if ((expire_flags & EXPIRE_REFLOGS_UPDATE_REF) &&
!is_null_oid(&cb.last_kept_oid)) {
- int ignore_errno;
int type;
const char *ref;
ref = refs_resolve_ref_unsafe(&refs->base, refname,
RESOLVE_REF_NO_RECURSE,
- NULL, &type,
- &ignore_errno);
+ NULL, &type);
update = !!(ref && !(type & REF_ISSYMREF));
}
@@ -3270,29 +3292,30 @@ static int files_init_db(struct ref_store *ref_store, struct strbuf *err)
}
struct ref_storage_be refs_be_files = {
- NULL,
- "files",
- files_ref_store_create,
- files_init_db,
- files_transaction_prepare,
- files_transaction_finish,
- files_transaction_abort,
- files_initial_transaction_commit,
-
- files_pack_refs,
- files_create_symref,
- files_delete_refs,
- files_rename_ref,
- files_copy_ref,
-
- files_ref_iterator_begin,
- files_read_raw_ref,
-
- files_reflog_iterator_begin,
- files_for_each_reflog_ent,
- files_for_each_reflog_ent_reverse,
- files_reflog_exists,
- files_create_reflog,
- files_delete_reflog,
- files_reflog_expire
+ .next = NULL,
+ .name = "files",
+ .init = files_ref_store_create,
+ .init_db = files_init_db,
+ .transaction_prepare = files_transaction_prepare,
+ .transaction_finish = files_transaction_finish,
+ .transaction_abort = files_transaction_abort,
+ .initial_transaction_commit = files_initial_transaction_commit,
+
+ .pack_refs = files_pack_refs,
+ .create_symref = files_create_symref,
+ .delete_refs = files_delete_refs,
+ .rename_ref = files_rename_ref,
+ .copy_ref = files_copy_ref,
+
+ .iterator_begin = files_ref_iterator_begin,
+ .read_raw_ref = files_read_raw_ref,
+ .read_symbolic_ref = files_read_symbolic_ref,
+
+ .reflog_iterator_begin = files_reflog_iterator_begin,
+ .for_each_reflog_ent = files_for_each_reflog_ent,
+ .for_each_reflog_ent_reverse = files_for_each_reflog_ent_reverse,
+ .reflog_exists = files_reflog_exists,
+ .create_reflog = files_create_reflog,
+ .delete_reflog = files_delete_reflog,
+ .reflog_expire = files_reflog_expire
};
diff --git a/refs/iterator.c b/refs/iterator.c
index a89d132..b2e56ba 100644
--- a/refs/iterator.c
+++ b/refs/iterator.c
@@ -64,9 +64,9 @@ static int empty_ref_iterator_abort(struct ref_iterator *ref_iterator)
}
static struct ref_iterator_vtable empty_ref_iterator_vtable = {
- empty_ref_iterator_advance,
- empty_ref_iterator_peel,
- empty_ref_iterator_abort
+ .advance = empty_ref_iterator_advance,
+ .peel = empty_ref_iterator_peel,
+ .abort = empty_ref_iterator_abort,
};
struct ref_iterator *empty_ref_iterator_begin(void)
@@ -201,9 +201,9 @@ static int merge_ref_iterator_abort(struct ref_iterator *ref_iterator)
}
static struct ref_iterator_vtable merge_ref_iterator_vtable = {
- merge_ref_iterator_advance,
- merge_ref_iterator_peel,
- merge_ref_iterator_abort
+ .advance = merge_ref_iterator_advance,
+ .peel = merge_ref_iterator_peel,
+ .abort = merge_ref_iterator_abort,
};
struct ref_iterator *merge_ref_iterator_begin(
@@ -378,9 +378,9 @@ static int prefix_ref_iterator_abort(struct ref_iterator *ref_iterator)
}
static struct ref_iterator_vtable prefix_ref_iterator_vtable = {
- prefix_ref_iterator_advance,
- prefix_ref_iterator_peel,
- prefix_ref_iterator_abort
+ .advance = prefix_ref_iterator_advance,
+ .peel = prefix_ref_iterator_peel,
+ .abort = prefix_ref_iterator_abort,
};
struct ref_iterator *prefix_ref_iterator_begin(struct ref_iterator *iter0,
diff --git a/refs/packed-backend.c b/refs/packed-backend.c
index d91a201..66c4574 100644
--- a/refs/packed-backend.c
+++ b/refs/packed-backend.c
@@ -911,9 +911,9 @@ static int packed_ref_iterator_abort(struct ref_iterator *ref_iterator)
}
static struct ref_iterator_vtable packed_ref_iterator_vtable = {
- packed_ref_iterator_advance,
- packed_ref_iterator_peel,
- packed_ref_iterator_abort
+ .advance = packed_ref_iterator_advance,
+ .peel = packed_ref_iterator_peel,
+ .abort = packed_ref_iterator_abort
};
static struct ref_iterator *packed_ref_iterator_begin(
@@ -1262,7 +1262,8 @@ static int write_with_updates(struct packed_ref_store *refs,
goto error;
}
- if (close_tempfile_gently(refs->tempfile)) {
+ if (fsync_component(FSYNC_COMPONENT_REFERENCE, get_tempfile_fd(refs->tempfile)) ||
+ close_tempfile_gently(refs->tempfile)) {
strbuf_addf(err, "error closing file %s: %s",
get_tempfile_path(refs->tempfile),
strerror(errno));
@@ -1521,15 +1522,10 @@ static int packed_initial_transaction_commit(struct ref_store *ref_store,
static int packed_delete_refs(struct ref_store *ref_store, const char *msg,
struct string_list *refnames, unsigned int flags)
{
- struct packed_ref_store *refs =
- packed_downcast(ref_store, REF_STORE_WRITE, "delete_refs");
struct strbuf err = STRBUF_INIT;
struct ref_transaction *transaction;
- struct string_list_item *item;
int ret;
- (void)refs; /* We need the check above, but don't use the variable */
-
if (!refnames->nr)
return 0;
@@ -1539,10 +1535,30 @@ static int packed_delete_refs(struct ref_store *ref_store, const char *msg,
* updates into a single transaction.
*/
- transaction = ref_store_transaction_begin(ref_store, &err);
+ transaction = ref_store_transaction_begin(ref_store, 0, &err);
if (!transaction)
return -1;
+ ret = packed_refs_delete_refs(ref_store, transaction,
+ msg, refnames, flags);
+
+ ref_transaction_free(transaction);
+ return ret;
+}
+
+int packed_refs_delete_refs(struct ref_store *ref_store,
+ struct ref_transaction *transaction,
+ const char *msg,
+ struct string_list *refnames,
+ unsigned int flags)
+{
+ struct strbuf err = STRBUF_INIT;
+ struct string_list_item *item;
+ int ret;
+
+ /* Assert that the ref store refers to a packed backend. */
+ packed_downcast(ref_store, REF_STORE_WRITE, "delete_refs");
+
for_each_string_list_item(item, refnames) {
if (ref_transaction_delete(transaction, item->string, NULL,
flags, msg, &err)) {
@@ -1562,7 +1578,6 @@ static int packed_delete_refs(struct ref_store *ref_store, const char *msg,
error(_("could not delete references: %s"), err.buf);
}
- ref_transaction_free(transaction);
strbuf_release(&err);
return ret;
}
@@ -1577,105 +1592,36 @@ static int packed_pack_refs(struct ref_store *ref_store, unsigned int flags)
return 0;
}
-static int packed_create_symref(struct ref_store *ref_store,
- const char *refname, const char *target,
- const char *logmsg)
-{
- BUG("packed reference store does not support symrefs");
-}
-
-static int packed_rename_ref(struct ref_store *ref_store,
- const char *oldrefname, const char *newrefname,
- const char *logmsg)
-{
- BUG("packed reference store does not support renaming references");
-}
-
-static int packed_copy_ref(struct ref_store *ref_store,
- const char *oldrefname, const char *newrefname,
- const char *logmsg)
-{
- BUG("packed reference store does not support copying references");
-}
-
static struct ref_iterator *packed_reflog_iterator_begin(struct ref_store *ref_store)
{
return empty_ref_iterator_begin();
}
-static int packed_for_each_reflog_ent(struct ref_store *ref_store,
- const char *refname,
- each_reflog_ent_fn fn, void *cb_data)
-{
- BUG("packed reference store does not support reflogs");
- return 0;
-}
-
-static int packed_for_each_reflog_ent_reverse(struct ref_store *ref_store,
- const char *refname,
- each_reflog_ent_fn fn,
- void *cb_data)
-{
- BUG("packed reference store does not support reflogs");
- return 0;
-}
-
-static int packed_reflog_exists(struct ref_store *ref_store,
- const char *refname)
-{
- BUG("packed reference store does not support reflogs");
- return 0;
-}
-
-static int packed_create_reflog(struct ref_store *ref_store,
- const char *refname, struct strbuf *err)
-{
- BUG("packed reference store does not support reflogs");
-}
-
-static int packed_delete_reflog(struct ref_store *ref_store,
- const char *refname)
-{
- BUG("packed reference store does not support reflogs");
- return 0;
-}
-
-static int packed_reflog_expire(struct ref_store *ref_store,
- const char *refname,
- unsigned int flags,
- reflog_expiry_prepare_fn prepare_fn,
- reflog_expiry_should_prune_fn should_prune_fn,
- reflog_expiry_cleanup_fn cleanup_fn,
- void *policy_cb_data)
-{
- BUG("packed reference store does not support reflogs");
- return 0;
-}
-
struct ref_storage_be refs_be_packed = {
- NULL,
- "packed",
- packed_ref_store_create,
- packed_init_db,
- packed_transaction_prepare,
- packed_transaction_finish,
- packed_transaction_abort,
- packed_initial_transaction_commit,
-
- packed_pack_refs,
- packed_create_symref,
- packed_delete_refs,
- packed_rename_ref,
- packed_copy_ref,
-
- packed_ref_iterator_begin,
- packed_read_raw_ref,
-
- packed_reflog_iterator_begin,
- packed_for_each_reflog_ent,
- packed_for_each_reflog_ent_reverse,
- packed_reflog_exists,
- packed_create_reflog,
- packed_delete_reflog,
- packed_reflog_expire
+ .next = NULL,
+ .name = "packed",
+ .init = packed_ref_store_create,
+ .init_db = packed_init_db,
+ .transaction_prepare = packed_transaction_prepare,
+ .transaction_finish = packed_transaction_finish,
+ .transaction_abort = packed_transaction_abort,
+ .initial_transaction_commit = packed_initial_transaction_commit,
+
+ .pack_refs = packed_pack_refs,
+ .create_symref = NULL,
+ .delete_refs = packed_delete_refs,
+ .rename_ref = NULL,
+ .copy_ref = NULL,
+
+ .iterator_begin = packed_ref_iterator_begin,
+ .read_raw_ref = packed_read_raw_ref,
+ .read_symbolic_ref = NULL,
+
+ .reflog_iterator_begin = packed_reflog_iterator_begin,
+ .for_each_reflog_ent = NULL,
+ .for_each_reflog_ent_reverse = NULL,
+ .reflog_exists = NULL,
+ .create_reflog = NULL,
+ .delete_reflog = NULL,
+ .reflog_expire = NULL,
};
diff --git a/refs/packed-backend.h b/refs/packed-backend.h
index 9dd8a34..52e0490 100644
--- a/refs/packed-backend.h
+++ b/refs/packed-backend.h
@@ -3,6 +3,7 @@
struct repository;
struct ref_transaction;
+struct string_list;
/*
* Support for storing references in a `packed-refs` file.
@@ -27,6 +28,12 @@ int packed_refs_lock(struct ref_store *ref_store, int flags, struct strbuf *err)
void packed_refs_unlock(struct ref_store *ref_store);
int packed_refs_is_locked(struct ref_store *ref_store);
+int packed_refs_delete_refs(struct ref_store *ref_store,
+ struct ref_transaction *transaction,
+ const char *msg,
+ struct string_list *refnames,
+ unsigned int flags);
+
/*
* Return true if `transaction` really needs to be carried out against
* the specified packed_ref_store, or false if it can be skipped
diff --git a/refs/ref-cache.c b/refs/ref-cache.c
index be4aa5e..3080ef9 100644
--- a/refs/ref-cache.c
+++ b/refs/ref-cache.c
@@ -456,9 +456,9 @@ static int cache_ref_iterator_abort(struct ref_iterator *ref_iterator)
}
static struct ref_iterator_vtable cache_ref_iterator_vtable = {
- cache_ref_iterator_advance,
- cache_ref_iterator_peel,
- cache_ref_iterator_abort
+ .advance = cache_ref_iterator_advance,
+ .peel = cache_ref_iterator_peel,
+ .abort = cache_ref_iterator_abort
};
struct ref_iterator *cache_ref_iterator_begin(struct ref_cache *cache,
diff --git a/refs/refs-internal.h b/refs/refs-internal.h
index 7ff6fba..001ef15 100644
--- a/refs/refs-internal.h
+++ b/refs/refs-internal.h
@@ -213,6 +213,7 @@ struct ref_transaction {
size_t nr;
enum ref_transaction_state state;
void *backend_data;
+ unsigned int flags;
};
/*
@@ -648,6 +649,21 @@ typedef int read_raw_ref_fn(struct ref_store *ref_store, const char *refname,
struct object_id *oid, struct strbuf *referent,
unsigned int *type, int *failure_errno);
+/*
+ * Read a symbolic reference from the specified reference store. This function
+ * is optional: if not implemented by a backend, then `read_raw_ref_fn` is used
+ * to read the symbolic reference instead. It is intended to be implemented
+ * only in case the backend can optimize the reading of symbolic references.
+ *
+ * Return 0 on success, or -1 on failure. `referent` will be set to the target
+ * of the symbolic reference on success. This function explicitly does not
+ * distinguish between error cases and the reference not being a symbolic
+ * reference to allow backends to optimize this operation in case symbolic and
+ * non-symbolic references are treated differently.
+ */
+typedef int read_symbolic_ref_fn(struct ref_store *ref_store, const char *refname,
+ struct strbuf *referent);
+
struct ref_storage_be {
struct ref_storage_be *next;
const char *name;
@@ -667,6 +683,7 @@ struct ref_storage_be {
ref_iterator_begin_fn *iterator_begin;
read_raw_ref_fn *read_raw_ref;
+ read_symbolic_ref_fn *read_symbolic_ref;
reflog_iterator_begin_fn *reflog_iterator_begin;
for_each_reflog_ent_fn *for_each_reflog_ent;
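
[Editorial illustration, not part of the patch: a backend opting in to the optional callback would provide a function of this shape and point .read_symbolic_ref at it. The example_* name and the hard-coded target are made up.]

/*
 * Illustrative sketch only: a hypothetical backend implementation of
 * read_symbolic_ref_fn. Returning non-zero means "failed or not a
 * symbolic ref", which the contract above explicitly allows.
 */
static int example_read_symbolic_ref(struct ref_store *ref_store,
				     const char *refname,
				     struct strbuf *referent)
{
	if (!strcmp(refname, "HEAD")) {
		strbuf_addstr(referent, "refs/heads/main"); /* made-up target */
		return 0;
	}
	return -1;
}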
diff --git a/refspec.c b/refspec.c
index e3d852c..63e3112 100644
--- a/refspec.c
+++ b/refspec.c
@@ -4,13 +4,13 @@
#include "refspec.h"
static struct refspec_item s_tag_refspec = {
- 0,
- 1,
- 0,
- 0,
- 0,
- "refs/tags/*",
- "refs/tags/*"
+ .force = 0,
+ .pattern = 1,
+ .matching = 0,
+ .exact_sha1 = 0,
+ .negative = 0,
+ .src = "refs/tags/*",
+ .dst = "refs/tags/*",
};
/* See TAG_REFSPEC for the string version */
diff --git a/reftable/block.c b/reftable/block.c
index 855e3f5..34d4d07 100644
--- a/reftable/block.c
+++ b/reftable/block.c
@@ -88,8 +88,9 @@ uint8_t block_writer_type(struct block_writer *bw)
return bw->buf[bw->header_off];
}
-/* adds the reftable_record to the block. Returns -1 if it does not fit, 0 on
- success */
+/* Adds the reftable_record to the block. Returns -1 if it does not fit, 0 on
+ success. Returns REFTABLE_API_ERROR if attempting to write a record with
+ an empty key. */
int block_writer_add(struct block_writer *w, struct reftable_record *rec)
{
struct strbuf empty = STRBUF_INIT;
@@ -105,8 +106,14 @@ int block_writer_add(struct block_writer *w, struct reftable_record *rec)
int is_restart = 0;
struct strbuf key = STRBUF_INIT;
int n = 0;
+ int err = -1;
reftable_record_key(rec, &key);
+ if (!key.len) {
+ err = REFTABLE_API_ERROR;
+ goto done;
+ }
+
n = reftable_encode_key(&is_restart, out, last, key,
reftable_record_val_type(rec));
if (n < 0)
@@ -118,16 +125,11 @@ int block_writer_add(struct block_writer *w, struct reftable_record *rec)
goto done;
string_view_consume(&out, n);
- if (block_writer_register_restart(w, start.len - out.len, is_restart,
- &key) < 0)
- goto done;
-
- strbuf_release(&key);
- return 0;
-
+ err = block_writer_register_restart(w, start.len - out.len, is_restart,
+ &key);
done:
strbuf_release(&key);
- return -1;
+ return err;
}
int block_writer_finish(struct block_writer *w)
@@ -188,13 +190,16 @@ int block_reader_init(struct block_reader *br, struct reftable_block *block,
uint32_t full_block_size = table_block_size;
uint8_t typ = block->data[header_off];
uint32_t sz = get_be24(block->data + header_off + 1);
-
+ int err = 0;
uint16_t restart_count = 0;
uint32_t restart_start = 0;
uint8_t *restart_bytes = NULL;
+ uint8_t *uncompressed = NULL;
- if (!reftable_is_block_type(typ))
- return REFTABLE_FORMAT_ERROR;
+ if (!reftable_is_block_type(typ)) {
+ err = REFTABLE_FORMAT_ERROR;
+ goto done;
+ }
if (typ == BLOCK_TYPE_LOG) {
int block_header_skip = 4 + header_off;
@@ -203,7 +208,7 @@ int block_reader_init(struct block_reader *br, struct reftable_block *block,
uLongf src_len = block->len - block_header_skip;
/* Log blocks specify the *uncompressed* size in their header.
*/
- uint8_t *uncompressed = reftable_malloc(sz);
+ uncompressed = reftable_malloc(sz);
/* Copy over the block header verbatim. It's not compressed. */
memcpy(uncompressed, block->data, block_header_skip);
@@ -212,16 +217,19 @@ int block_reader_init(struct block_reader *br, struct reftable_block *block,
if (Z_OK !=
uncompress2(uncompressed + block_header_skip, &dst_len,
block->data + block_header_skip, &src_len)) {
- reftable_free(uncompressed);
- return REFTABLE_ZLIB_ERROR;
+ err = REFTABLE_ZLIB_ERROR;
+ goto done;
}
- if (dst_len + block_header_skip != sz)
- return REFTABLE_FORMAT_ERROR;
+ if (dst_len + block_header_skip != sz) {
+ err = REFTABLE_FORMAT_ERROR;
+ goto done;
+ }
/* We're done with the input data. */
reftable_block_done(block);
block->data = uncompressed;
+ uncompressed = NULL;
block->len = sz;
block->source = malloc_block_source();
full_block_size = src_len + block_header_skip;
@@ -251,7 +259,9 @@ int block_reader_init(struct block_reader *br, struct reftable_block *block,
br->restart_count = restart_count;
br->restart_bytes = restart_bytes;
- return 0;
+done:
+ reftable_free(uncompressed);
+ return err;
}
static uint32_t block_reader_restart_offset(struct block_reader *br, int i)
@@ -324,6 +334,9 @@ int block_iter_next(struct block_iter *it, struct reftable_record *rec)
if (n < 0)
return -1;
+ if (!key.len)
+ return REFTABLE_FORMAT_ERROR;
+
string_view_consume(&in, n);
n = reftable_record_decode(rec, key, extra, in, it->br->hash_size);
if (n < 0)
@@ -350,6 +363,8 @@ int block_reader_first_key(struct block_reader *br, struct strbuf *key)
int n = reftable_decode_key(key, &extra, empty, in);
if (n < 0)
return n;
+ if (!key->len)
+ return REFTABLE_FORMAT_ERROR;
return 0;
}
@@ -413,7 +428,7 @@ int block_reader_seek(struct block_reader *br, struct block_iter *it,
done:
strbuf_release(&key);
strbuf_release(&next.last_key);
- reftable_record_destroy(&rec);
+ reftable_record_release(&rec);
return err;
}
diff --git a/reftable/block_test.c b/reftable/block_test.c
index 4b3ea26..cb88af4 100644
--- a/reftable/block_test.c
+++ b/reftable/block_test.c
@@ -26,8 +26,9 @@ static void test_block_read_write(void)
struct block_writer bw = {
.last_key = STRBUF_INIT,
};
- struct reftable_ref_record ref = { NULL };
- struct reftable_record rec = { NULL };
+ struct reftable_record rec = {
+ .type = BLOCK_TYPE_REF,
+ };
int i = 0;
int n;
struct block_reader br = { 0 };
@@ -40,7 +41,11 @@ static void test_block_read_write(void)
block.source = malloc_block_source();
block_writer_init(&bw, BLOCK_TYPE_REF, block.data, block_size,
header_off, hash_size(GIT_SHA1_FORMAT_ID));
- reftable_record_from_ref(&rec, &ref);
+
+ rec.u.ref.refname = "";
+ rec.u.ref.value_type = REFTABLE_REF_DELETION;
+ n = block_writer_add(&bw, &rec);
+ EXPECT(n == REFTABLE_API_ERROR);
for (i = 0; i < N; i++) {
char name[100];
@@ -48,14 +53,14 @@ static void test_block_read_write(void)
snprintf(name, sizeof(name), "branch%02d", i);
memset(hash, i, sizeof(hash));
- ref.refname = name;
- ref.value_type = REFTABLE_REF_VAL1;
- ref.value.val1 = hash;
+ rec.u.ref.refname = name;
+ rec.u.ref.value_type = REFTABLE_REF_VAL1;
+ rec.u.ref.value.val1 = hash;
names[i] = xstrdup(name);
n = block_writer_add(&bw, &rec);
- ref.refname = NULL;
- ref.value_type = REFTABLE_REF_DELETION;
+ rec.u.ref.refname = NULL;
+ rec.u.ref.value_type = REFTABLE_REF_DELETION;
EXPECT(n == 0);
}
@@ -74,7 +79,7 @@ static void test_block_read_write(void)
if (r > 0) {
break;
}
- EXPECT_STREQ(names[j], ref.refname);
+ EXPECT_STREQ(names[j], rec.u.ref.refname);
j++;
}
@@ -92,7 +97,7 @@ static void test_block_read_write(void)
n = block_iter_next(&it, &rec);
EXPECT(n == 0);
- EXPECT_STREQ(names[i], ref.refname);
+ EXPECT_STREQ(names[i], rec.u.ref.refname);
want.len--;
n = block_reader_seek(&br, &it, &want);
@@ -100,7 +105,7 @@ static void test_block_read_write(void)
n = block_iter_next(&it, &rec);
EXPECT(n == 0);
- EXPECT_STREQ(names[10 * (i / 10)], ref.refname);
+ EXPECT_STREQ(names[10 * (i / 10)], rec.u.ref.refname);
block_iter_close(&it);
}
diff --git a/reftable/blocksource.c b/reftable/blocksource.c
index 0044eec..2605371 100644
--- a/reftable/blocksource.c
+++ b/reftable/blocksource.c
@@ -134,8 +134,10 @@ int reftable_block_source_from_file(struct reftable_block_source *bs,
}
err = fstat(fd, &st);
- if (err < 0)
- return -1;
+ if (err < 0) {
+ close(fd);
+ return REFTABLE_IO_ERROR;
+ }
p = reftable_calloc(sizeof(struct file_block_source));
p->size = st.st_size;
diff --git a/reftable/generic.c b/reftable/generic.c
index 7a8a738..57f8032 100644
--- a/reftable/generic.c
+++ b/reftable/generic.c
@@ -7,6 +7,7 @@ https://developers.google.com/open-source/licenses/bsd
*/
#include "basics.h"
+#include "constants.h"
#include "record.h"
#include "generic.h"
#include "reftable-iterator.h"
@@ -15,23 +16,21 @@ https://developers.google.com/open-source/licenses/bsd
int reftable_table_seek_ref(struct reftable_table *tab,
struct reftable_iterator *it, const char *name)
{
- struct reftable_ref_record ref = {
- .refname = (char *)name,
- };
- struct reftable_record rec = { NULL };
- reftable_record_from_ref(&rec, &ref);
+ struct reftable_record rec = { .type = BLOCK_TYPE_REF,
+ .u.ref = {
+ .refname = (char *)name,
+ } };
return tab->ops->seek_record(tab->table_arg, it, &rec);
}
int reftable_table_seek_log(struct reftable_table *tab,
struct reftable_iterator *it, const char *name)
{
- struct reftable_log_record log = {
- .refname = (char *)name,
- .update_index = ~((uint64_t)0),
- };
- struct reftable_record rec = { NULL };
- reftable_record_from_log(&rec, &log);
+ struct reftable_record rec = { .type = BLOCK_TYPE_LOG,
+ .u.log = {
+ .refname = (char *)name,
+ .update_index = ~((uint64_t)0),
+ } };
return tab->ops->seek_record(tab->table_arg, it, &rec);
}
@@ -129,17 +128,29 @@ void reftable_iterator_destroy(struct reftable_iterator *it)
int reftable_iterator_next_ref(struct reftable_iterator *it,
struct reftable_ref_record *ref)
{
- struct reftable_record rec = { NULL };
- reftable_record_from_ref(&rec, ref);
- return iterator_next(it, &rec);
+ struct reftable_record rec = {
+ .type = BLOCK_TYPE_REF,
+ .u = {
+ .ref = *ref
+ },
+ };
+ int err = iterator_next(it, &rec);
+ *ref = rec.u.ref;
+ return err;
}
int reftable_iterator_next_log(struct reftable_iterator *it,
struct reftable_log_record *log)
{
- struct reftable_record rec = { NULL };
- reftable_record_from_log(&rec, log);
- return iterator_next(it, &rec);
+ struct reftable_record rec = {
+ .type = BLOCK_TYPE_LOG,
+ .u = {
+ .log = *log,
+ },
+ };
+ int err = iterator_next(it, &rec);
+ *log = rec.u.log;
+ return err;
}
int iterator_next(struct reftable_iterator *it, struct reftable_record *rec)
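
[Editorial illustration, not part of the patch: the reftable_record change running through these files means records now carry their payload in a union selected by .type, so callers initialize the record directly instead of going through the removed reftable_record_from_ref()/reftable_record_from_log() wrappers. The ref name below is an example.]

/*
 * Illustrative sketch only: constructing records with the embedded
 * union, as the converted call sites above now do.
 */
static void build_example_records(void)
{
	struct reftable_record ref_rec = {
		.type = BLOCK_TYPE_REF,
		.u.ref = {
			.refname = (char *)"refs/heads/example",
			.value_type = REFTABLE_REF_DELETION,
		},
	};
	struct reftable_record log_rec = {
		.type = BLOCK_TYPE_LOG,
		.u.log = {
			.refname = (char *)"refs/heads/example",
			.update_index = ~((uint64_t)0),
		},
	};

	/* Hand &ref_rec / &log_rec to seek_record(), block_writer_add(), ... */
	(void)ref_rec;
	(void)log_rec;
}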
diff --git a/reftable/iter.c b/reftable/iter.c
index 93d04f7..a8d174c 100644
--- a/reftable/iter.c
+++ b/reftable/iter.c
@@ -32,7 +32,7 @@ static int filtering_ref_iterator_next(void *iter_arg,
struct reftable_record *rec)
{
struct filtering_ref_iterator *fri = iter_arg;
- struct reftable_ref_record *ref = rec->data;
+ struct reftable_ref_record *ref = &rec->u.ref;
int err = 0;
while (1) {
err = reftable_iterator_next_ref(&fri->it, ref);
@@ -127,7 +127,7 @@ static int indexed_table_ref_iter_next_block(struct indexed_table_ref_iter *it)
static int indexed_table_ref_iter_next(void *p, struct reftable_record *rec)
{
struct indexed_table_ref_iter *it = p;
- struct reftable_ref_record *ref = rec->data;
+ struct reftable_ref_record *ref = &rec->u.ref;
while (1) {
int err = block_iter_next(&it->cur, rec);
diff --git a/reftable/merged.c b/reftable/merged.c
index e5b53da..2a6efa1 100644
--- a/reftable/merged.c
+++ b/reftable/merged.c
@@ -30,7 +30,7 @@ static int merged_iter_init(struct merged_iter *mi)
if (err > 0) {
reftable_iterator_destroy(&mi->stack[i]);
- reftable_record_destroy(&rec);
+ reftable_record_release(&rec);
} else {
struct pq_entry e = {
.rec = rec,
@@ -57,18 +57,17 @@ static void merged_iter_close(void *p)
static int merged_iter_advance_nonnull_subiter(struct merged_iter *mi,
size_t idx)
{
- struct reftable_record rec = reftable_new_record(mi->typ);
struct pq_entry e = {
- .rec = rec,
+ .rec = reftable_new_record(mi->typ),
.index = idx,
};
- int err = iterator_next(&mi->stack[idx], &rec);
+ int err = iterator_next(&mi->stack[idx], &e.rec);
if (err < 0)
return err;
if (err > 0) {
reftable_iterator_destroy(&mi->stack[idx]);
- reftable_record_destroy(&rec);
+ reftable_record_release(&e.rec);
return 0;
}
@@ -126,11 +125,11 @@ static int merged_iter_next_entry(struct merged_iter *mi,
if (err < 0) {
return err;
}
- reftable_record_destroy(&top.rec);
+ reftable_record_release(&top.rec);
}
reftable_record_copy_from(rec, &entry.rec, hash_size(mi->hash_id));
- reftable_record_destroy(&entry.rec);
+ reftable_record_release(&entry.rec);
strbuf_release(&entry_key);
return 0;
}
@@ -290,11 +289,12 @@ int reftable_merged_table_seek_ref(struct reftable_merged_table *mt,
struct reftable_iterator *it,
const char *name)
{
- struct reftable_ref_record ref = {
- .refname = (char *)name,
+ struct reftable_record rec = {
+ .type = BLOCK_TYPE_REF,
+ .u.ref = {
+ .refname = (char *)name,
+ },
};
- struct reftable_record rec = { NULL };
- reftable_record_from_ref(&rec, &ref);
return merged_table_seek_record(mt, it, &rec);
}
@@ -302,12 +302,11 @@ int reftable_merged_table_seek_log_at(struct reftable_merged_table *mt,
struct reftable_iterator *it,
const char *name, uint64_t update_index)
{
- struct reftable_log_record log = {
- .refname = (char *)name,
- .update_index = update_index,
- };
- struct reftable_record rec = { NULL };
- reftable_record_from_log(&rec, &log);
+ struct reftable_record rec = { .type = BLOCK_TYPE_LOG,
+ .u.log = {
+ .refname = (char *)name,
+ .update_index = update_index,
+ } };
return merged_table_seek_record(mt, it, &rec);
}
diff --git a/reftable/pq.c b/reftable/pq.c
index efc4740..96ca6dd 100644
--- a/reftable/pq.c
+++ b/reftable/pq.c
@@ -74,6 +74,7 @@ struct pq_entry merged_iter_pqueue_remove(struct merged_iter_pqueue *pq)
void merged_iter_pqueue_add(struct merged_iter_pqueue *pq, struct pq_entry e)
{
int i = 0;
+
if (pq->len == pq->cap) {
pq->cap = 2 * pq->cap + 1;
pq->heap = reftable_realloc(pq->heap,
@@ -98,7 +99,7 @@ void merged_iter_pqueue_release(struct merged_iter_pqueue *pq)
{
int i = 0;
for (i = 0; i < pq->len; i++) {
- reftable_record_destroy(&pq->heap[i].rec);
+ reftable_record_release(&pq->heap[i].rec);
}
FREE_AND_NULL(pq->heap);
pq->len = pq->cap = 0;
diff --git a/reftable/pq_test.c b/reftable/pq_test.c
index c9bb05e..7de5e88 100644
--- a/reftable/pq_test.c
+++ b/reftable/pq_test.c
@@ -31,7 +31,7 @@ static void test_pq(void)
int N = ARRAY_SIZE(names) - 1;
struct merged_iter_pqueue pq = { NULL };
- const char *last = NULL;
+ char *last = NULL;
int i = 0;
for (i = 0; i < N; i++) {
@@ -42,12 +42,10 @@ static void test_pq(void)
i = 1;
do {
- struct reftable_record rec =
- reftable_new_record(BLOCK_TYPE_REF);
- struct pq_entry e = { 0 };
-
- reftable_record_as_ref(&rec)->refname = names[i];
- e.rec = rec;
+ struct pq_entry e = { .rec = { .type = BLOCK_TYPE_REF,
+ .u.ref = {
+ .refname = names[i],
+ } } };
merged_iter_pqueue_add(&pq, e);
merged_iter_pqueue_check(pq);
i = (i * 7) % N;
@@ -55,19 +53,18 @@ static void test_pq(void)
while (!merged_iter_pqueue_is_empty(pq)) {
struct pq_entry e = merged_iter_pqueue_remove(&pq);
- struct reftable_ref_record *ref =
- reftable_record_as_ref(&e.rec);
-
+ struct reftable_record *rec = &e.rec;
merged_iter_pqueue_check(pq);
+ EXPECT(reftable_record_type(rec) == BLOCK_TYPE_REF);
if (last) {
- EXPECT(strcmp(last, ref->refname) < 0);
+ EXPECT(strcmp(last, rec->u.ref.refname) < 0);
}
- last = ref->refname;
- ref->refname = NULL;
- reftable_free(ref);
+ // this is names[i], so don't dealloc.
+ last = rec->u.ref.refname;
+ rec->u.ref.refname = NULL;
+ reftable_record_release(rec);
}
-
for (i = 0; i < N; i++) {
reftable_free(names[i]);
}
diff --git a/reftable/reader.c b/reftable/reader.c
index 006709a..54b4025 100644
--- a/reftable/reader.c
+++ b/reftable/reader.c
@@ -155,6 +155,11 @@ static int parse_footer(struct reftable_reader *r, uint8_t *footer,
r->log_offsets.is_present = (first_block_typ == BLOCK_TYPE_LOG ||
r->log_offsets.offset > 0);
r->obj_offsets.is_present = r->obj_offsets.offset > 0;
+ if (r->obj_offsets.is_present && !r->object_id_len) {
+ err = REFTABLE_FORMAT_ERROR;
+ goto done;
+ }
+
err = 0;
done:
return err;
@@ -239,8 +244,7 @@ static int table_iter_next_in_block(struct table_iter *ti,
{
int res = block_iter_next(&ti->bi, rec);
if (res == 0 && reftable_record_type(rec) == BLOCK_TYPE_REF) {
- ((struct reftable_ref_record *)rec->data)->update_index +=
- ti->r->min_update_index;
+ rec->u.ref.update_index += ti->r->min_update_index;
}
return res;
@@ -290,28 +294,33 @@ int reader_init_block_reader(struct reftable_reader *r, struct block_reader *br,
err = reader_get_block(r, &block, next_off, guess_block_size);
if (err < 0)
- return err;
+ goto done;
block_size = extract_block_size(block.data, &block_typ, next_off,
r->version);
- if (block_size < 0)
- return block_size;
-
+ if (block_size < 0) {
+ err = block_size;
+ goto done;
+ }
if (want_typ != BLOCK_TYPE_ANY && block_typ != want_typ) {
- reftable_block_done(&block);
- return 1;
+ err = 1;
+ goto done;
}
if (block_size > guess_block_size) {
reftable_block_done(&block);
err = reader_get_block(r, &block, next_off, block_size);
if (err < 0) {
- return err;
+ goto done;
}
}
- return block_reader_init(br, &block, header_off, r->block_size,
- hash_size(r->hash_id));
+ err = block_reader_init(br, &block, header_off, r->block_size,
+ hash_size(r->hash_id));
+done:
+ reftable_block_done(&block);
+
+ return err;
}
static int table_iter_next_block(struct table_iter *dest,
@@ -475,7 +484,7 @@ static int reader_seek_linear(struct reftable_reader *r, struct table_iter *ti,
done:
block_iter_close(&next.bi);
- reftable_record_destroy(&rec);
+ reftable_record_release(&rec);
strbuf_release(&want_key);
strbuf_release(&got_key);
return err;
@@ -485,34 +494,35 @@ static int reader_seek_indexed(struct reftable_reader *r,
struct reftable_iterator *it,
struct reftable_record *rec)
{
- struct reftable_index_record want_index = { .last_key = STRBUF_INIT };
- struct reftable_record want_index_rec = { NULL };
- struct reftable_index_record index_result = { .last_key = STRBUF_INIT };
- struct reftable_record index_result_rec = { NULL };
+ struct reftable_record want_index = {
+ .type = BLOCK_TYPE_INDEX, .u.idx = { .last_key = STRBUF_INIT }
+ };
+ struct reftable_record index_result = {
+ .type = BLOCK_TYPE_INDEX,
+ .u.idx = { .last_key = STRBUF_INIT },
+ };
struct table_iter index_iter = TABLE_ITER_INIT;
struct table_iter next = TABLE_ITER_INIT;
int err = 0;
- reftable_record_key(rec, &want_index.last_key);
- reftable_record_from_index(&want_index_rec, &want_index);
- reftable_record_from_index(&index_result_rec, &index_result);
-
+ reftable_record_key(rec, &want_index.u.idx.last_key);
err = reader_start(r, &index_iter, reftable_record_type(rec), 1);
if (err < 0)
goto done;
- err = reader_seek_linear(r, &index_iter, &want_index_rec);
+ err = reader_seek_linear(r, &index_iter, &want_index);
while (1) {
- err = table_iter_next(&index_iter, &index_result_rec);
+ err = table_iter_next(&index_iter, &index_result);
table_iter_block_done(&index_iter);
if (err != 0)
goto done;
- err = reader_table_iter_at(r, &next, index_result.offset, 0);
+ err = reader_table_iter_at(r, &next, index_result.u.idx.offset,
+ 0);
if (err != 0)
goto done;
- err = block_iter_seek(&next.bi, &want_index.last_key);
+ err = block_iter_seek(&next.bi, &want_index.u.idx.last_key);
if (err < 0)
goto done;
@@ -540,8 +550,8 @@ static int reader_seek_indexed(struct reftable_reader *r,
done:
block_iter_close(&next.bi);
table_iter_close(&index_iter);
- reftable_record_release(&want_index_rec);
- reftable_record_release(&index_result_rec);
+ reftable_record_release(&want_index);
+ reftable_record_release(&index_result);
return err;
}
@@ -590,11 +600,12 @@ static int reader_seek(struct reftable_reader *r, struct reftable_iterator *it,
int reftable_reader_seek_ref(struct reftable_reader *r,
struct reftable_iterator *it, const char *name)
{
- struct reftable_ref_record ref = {
- .refname = (char *)name,
+ struct reftable_record rec = {
+ .type = BLOCK_TYPE_REF,
+ .u.ref = {
+ .refname = (char *)name,
+ },
};
- struct reftable_record rec = { NULL };
- reftable_record_from_ref(&rec, &ref);
return reader_seek(r, it, &rec);
}
@@ -602,12 +613,11 @@ int reftable_reader_seek_log_at(struct reftable_reader *r,
struct reftable_iterator *it, const char *name,
uint64_t update_index)
{
- struct reftable_log_record log = {
- .refname = (char *)name,
- .update_index = update_index,
- };
- struct reftable_record rec = { NULL };
- reftable_record_from_log(&rec, &log);
+ struct reftable_record rec = { .type = BLOCK_TYPE_LOG,
+ .u.log = {
+ .refname = (char *)name,
+ .update_index = update_index,
+ } };
return reader_seek(r, it, &rec);
}
@@ -641,6 +651,8 @@ int reftable_new_reader(struct reftable_reader **p,
void reftable_reader_free(struct reftable_reader *r)
{
+ if (!r)
+ return;
reader_close(r);
reftable_free(r);
}
@@ -649,31 +661,33 @@ static int reftable_reader_refs_for_indexed(struct reftable_reader *r,
struct reftable_iterator *it,
uint8_t *oid)
{
- struct reftable_obj_record want = {
- .hash_prefix = oid,
- .hash_prefix_len = r->object_id_len,
+ struct reftable_record want = {
+ .type = BLOCK_TYPE_OBJ,
+ .u.obj = {
+ .hash_prefix = oid,
+ .hash_prefix_len = r->object_id_len,
+ },
};
- struct reftable_record want_rec = { NULL };
struct reftable_iterator oit = { NULL };
- struct reftable_obj_record got = { NULL };
- struct reftable_record got_rec = { NULL };
+ struct reftable_record got = {
+ .type = BLOCK_TYPE_OBJ,
+ .u.obj = { 0 },
+ };
int err = 0;
struct indexed_table_ref_iter *itr = NULL;
/* Look through the reverse index. */
- reftable_record_from_obj(&want_rec, &want);
- err = reader_seek(r, &oit, &want_rec);
+ err = reader_seek(r, &oit, &want);
if (err != 0)
goto done;
/* read out the reftable_obj_record */
- reftable_record_from_obj(&got_rec, &got);
- err = iterator_next(&oit, &got_rec);
+ err = iterator_next(&oit, &got);
if (err < 0)
goto done;
- if (err > 0 ||
- memcmp(want.hash_prefix, got.hash_prefix, r->object_id_len)) {
+ if (err > 0 || memcmp(want.u.obj.hash_prefix, got.u.obj.hash_prefix,
+ r->object_id_len)) {
/* didn't find it; return empty iterator */
iterator_set_empty(it);
err = 0;
@@ -681,15 +695,16 @@ static int reftable_reader_refs_for_indexed(struct reftable_reader *r,
}
err = new_indexed_table_ref_iter(&itr, r, oid, hash_size(r->hash_id),
- got.offsets, got.offset_len);
+ got.u.obj.offsets,
+ got.u.obj.offset_len);
if (err < 0)
goto done;
- got.offsets = NULL;
+ got.u.obj.offsets = NULL;
iterator_from_indexed_table_ref_iter(it, itr);
done:
reftable_iterator_destroy(&oit);
- reftable_record_release(&got_rec);
+ reftable_record_release(&got);
return err;
}
diff --git a/reftable/readwrite_test.c b/reftable/readwrite_test.c
index 70c7aed..469ab79 100644
--- a/reftable/readwrite_test.c
+++ b/reftable/readwrite_test.c
@@ -100,7 +100,7 @@ static void write_table(char ***names, struct strbuf *buf, int N,
n = reftable_writer_close(w);
EXPECT(n == 0);
- stats = writer_stats(w);
+ stats = reftable_writer_stats(w);
for (i = 0; i < stats->ref_stats.blocks; i++) {
int off = i * opts.block_size;
if (off == 0) {
@@ -239,7 +239,7 @@ static void test_log_write_read(void)
n = reftable_writer_close(w);
EXPECT(n == 0);
- stats = writer_stats(w);
+ stats = reftable_writer_stats(w);
EXPECT(stats->log_stats.blocks > 0);
reftable_writer_free(w);
w = NULL;
@@ -288,6 +288,71 @@ static void test_log_write_read(void)
reader_close(&rd);
}
+static void test_log_zlib_corruption(void)
+{
+ struct reftable_write_options opts = {
+ .block_size = 256,
+ };
+ struct reftable_iterator it = { 0 };
+ struct reftable_reader rd = { 0 };
+ struct reftable_block_source source = { 0 };
+ struct strbuf buf = STRBUF_INIT;
+ struct reftable_writer *w =
+ reftable_new_writer(&strbuf_add_void, &buf, &opts);
+ const struct reftable_stats *stats = NULL;
+ uint8_t hash1[GIT_SHA1_RAWSZ] = { 1 };
+ uint8_t hash2[GIT_SHA1_RAWSZ] = { 2 };
+ char message[100] = { 0 };
+ int err, i, n;
+
+ struct reftable_log_record log = {
+ .refname = "refname",
+ .value_type = REFTABLE_LOG_UPDATE,
+ .value = {
+ .update = {
+ .new_hash = hash1,
+ .old_hash = hash2,
+ .name = "My Name",
+ .email = "myname@invalid",
+ .message = message,
+ },
+ },
+ };
+
+ for (i = 0; i < sizeof(message) - 1; i++)
+ message[i] = (uint8_t)(rand() % 64 + ' ');
+
+ reftable_writer_set_limits(w, 1, 1);
+
+ err = reftable_writer_add_log(w, &log);
+ EXPECT_ERR(err);
+
+ n = reftable_writer_close(w);
+ EXPECT(n == 0);
+
+ stats = reftable_writer_stats(w);
+ EXPECT(stats->log_stats.blocks > 0);
+ reftable_writer_free(w);
+ w = NULL;
+
+ /* corrupt the data. */
+ buf.buf[50] ^= 0x99;
+
+ block_source_from_strbuf(&source, &buf);
+
+ err = init_reader(&rd, &source, "file.log");
+ EXPECT_ERR(err);
+
+ err = reftable_reader_seek_log(&rd, &it, "refname");
+ EXPECT(err == REFTABLE_ZLIB_ERROR);
+
+ reftable_iterator_destroy(&it);
+
+ /* cleanup. */
+ strbuf_release(&buf);
+ reader_close(&rd);
+}
+
static void test_table_read_write_sequential(void)
{
char **names;
@@ -602,6 +667,102 @@ static void test_write_empty_table(void)
strbuf_release(&buf);
}
+static void test_write_object_id_min_length(void)
+{
+ struct reftable_write_options opts = {
+ .block_size = 75,
+ };
+ struct strbuf buf = STRBUF_INIT;
+ struct reftable_writer *w =
+ reftable_new_writer(&strbuf_add_void, &buf, &opts);
+ uint8_t hash[GIT_SHA1_RAWSZ] = {42};
+ struct reftable_ref_record ref = {
+ .update_index = 1,
+ .value_type = REFTABLE_REF_VAL1,
+ .value.val1 = hash,
+ };
+ int err;
+ int i;
+
+ reftable_writer_set_limits(w, 1, 1);
+
+ /* Write the same hash in many refs. With only one distinct hash, the
+ * disambiguating prefix could be length 0; the writer clamps it to 2. */
+ for (i = 0; i < 256; i++) {
+ char name[256];
+ snprintf(name, sizeof(name), "ref%05d", i);
+ ref.refname = name;
+ err = reftable_writer_add_ref(w, &ref);
+ EXPECT_ERR(err);
+ }
+
+ err = reftable_writer_close(w);
+ EXPECT_ERR(err);
+ EXPECT(reftable_writer_stats(w)->object_id_len == 2);
+ reftable_writer_free(w);
+ strbuf_release(&buf);
+}
+
+static void test_write_object_id_length(void)
+{
+ struct reftable_write_options opts = {
+ .block_size = 75,
+ };
+ struct strbuf buf = STRBUF_INIT;
+ struct reftable_writer *w =
+ reftable_new_writer(&strbuf_add_void, &buf, &opts);
+ uint8_t hash[GIT_SHA1_RAWSZ] = {42};
+ struct reftable_ref_record ref = {
+ .update_index = 1,
+ .value_type = REFTABLE_REF_VAL1,
+ .value.val1 = hash,
+ };
+ int err;
+ int i;
+
+ reftable_writer_set_limits(w, 1, 1);
+
+ /* Write a distinct hash in each ref; they differ only in byte 15, so a
+ * 16-byte prefix is needed to disambiguate the object IDs. */
+ for (i = 0; i < 256; i++) {
+ char name[256];
+ snprintf(name, sizeof(name), "ref%05d", i);
+ ref.refname = name;
+ ref.value.val1[15] = i;
+ err = reftable_writer_add_ref(w, &ref);
+ EXPECT_ERR(err);
+ }
+
+ err = reftable_writer_close(w);
+ EXPECT_ERR(err);
+ EXPECT(reftable_writer_stats(w)->object_id_len == 16);
+ reftable_writer_free(w);
+ strbuf_release(&buf);
+}
+
+static void test_write_empty_key(void)
+{
+ struct reftable_write_options opts = { 0 };
+ struct strbuf buf = STRBUF_INIT;
+ struct reftable_writer *w =
+ reftable_new_writer(&strbuf_add_void, &buf, &opts);
+ struct reftable_ref_record ref = {
+ .refname = "",
+ .update_index = 1,
+ .value_type = REFTABLE_REF_DELETION,
+ };
+ int err;
+
+ reftable_writer_set_limits(w, 1, 1);
+ err = reftable_writer_add_ref(w, &ref);
+ EXPECT(err == REFTABLE_API_ERROR);
+
+ err = reftable_writer_close(w);
+ EXPECT(err == REFTABLE_EMPTY_TABLE_ERROR);
+ reftable_writer_free(w);
+ strbuf_release(&buf);
+}
+
static void test_write_key_order(void)
{
struct reftable_write_options opts = { 0 };
@@ -631,7 +792,6 @@ static void test_write_key_order(void)
err = reftable_writer_add_ref(w, &refs[0]);
EXPECT_ERR(err);
err = reftable_writer_add_ref(w, &refs[1]);
- printf("%d\n", err);
EXPECT(err == REFTABLE_API_ERROR);
reftable_writer_close(w);
reftable_writer_free(w);
@@ -667,6 +827,7 @@ static void test_corrupt_table(void)
int readwrite_test_main(int argc, const char *argv[])
{
+ RUN_TEST(test_log_zlib_corruption);
RUN_TEST(test_corrupt_table);
RUN_TEST(test_corrupt_table_empty);
RUN_TEST(test_log_write_read);
@@ -681,7 +842,10 @@ int readwrite_test_main(int argc, const char *argv[])
RUN_TEST(test_table_read_write_seek_index);
RUN_TEST(test_table_refs_for_no_index);
RUN_TEST(test_table_refs_for_obj_index);
+ RUN_TEST(test_write_empty_key);
RUN_TEST(test_write_empty_table);
RUN_TEST(test_log_overflow);
+ RUN_TEST(test_write_object_id_length);
+ RUN_TEST(test_write_object_id_min_length);
return 0;
}
diff --git a/reftable/record.c b/reftable/record.c
index 6a5dac3..fbaa1fb 100644
--- a/reftable/record.c
+++ b/reftable/record.c
@@ -15,6 +15,10 @@ https://developers.google.com/open-source/licenses/bsd
#include "reftable-error.h"
#include "basics.h"
+static struct reftable_record_vtable *
+reftable_record_vtable(struct reftable_record *rec);
+static void *reftable_record_data(struct reftable_record *rec);
+
int get_var_int(uint64_t *dest, struct string_view *in)
{
int ptr = 0;
@@ -72,7 +76,7 @@ int reftable_is_block_type(uint8_t typ)
return 0;
}
-uint8_t *reftable_ref_record_val1(struct reftable_ref_record *rec)
+uint8_t *reftable_ref_record_val1(const struct reftable_ref_record *rec)
{
switch (rec->value_type) {
case REFTABLE_REF_VAL1:
@@ -84,7 +88,7 @@ uint8_t *reftable_ref_record_val1(struct reftable_ref_record *rec)
}
}
-uint8_t *reftable_ref_record_val2(struct reftable_ref_record *rec)
+uint8_t *reftable_ref_record_val2(const struct reftable_ref_record *rec)
{
switch (rec->value_type) {
case REFTABLE_REF_VAL2:
@@ -251,24 +255,24 @@ static void hex_format(char *dest, uint8_t *src, int hash_size)
}
}
-void reftable_ref_record_print(struct reftable_ref_record *ref,
- uint32_t hash_id)
+static void reftable_ref_record_print_sz(const struct reftable_ref_record *ref,
+ int hash_size)
{
- char hex[2 * GIT_SHA256_RAWSZ + 1] = { 0 }; /* BUG */
+ char hex[GIT_MAX_HEXSZ + 1] = { 0 }; /* BUG */
printf("ref{%s(%" PRIu64 ") ", ref->refname, ref->update_index);
switch (ref->value_type) {
case REFTABLE_REF_SYMREF:
printf("=> %s", ref->value.symref);
break;
case REFTABLE_REF_VAL2:
- hex_format(hex, ref->value.val2.value, hash_size(hash_id));
+ hex_format(hex, ref->value.val2.value, hash_size);
printf("val 2 %s", hex);
hex_format(hex, ref->value.val2.target_value,
- hash_size(hash_id));
+ hash_size);
printf("(T %s)", hex);
break;
case REFTABLE_REF_VAL1:
- hex_format(hex, ref->value.val1, hash_size(hash_id));
+ hex_format(hex, ref->value.val1, hash_size);
printf("val 1 %s", hex);
break;
case REFTABLE_REF_DELETION:
@@ -278,6 +282,11 @@ void reftable_ref_record_print(struct reftable_ref_record *ref,
printf("}\n");
}
+void reftable_ref_record_print(const struct reftable_ref_record *ref,
+ uint32_t hash_id) {
+ reftable_ref_record_print_sz(ref, hash_size(hash_id));
+}
+
static void reftable_ref_record_release_void(void *rec)
{
reftable_ref_record_release(rec);
@@ -430,6 +439,21 @@ static int reftable_ref_record_is_deletion_void(const void *p)
(const struct reftable_ref_record *)p);
}
+
+static int reftable_ref_record_equal_void(const void *a,
+ const void *b, int hash_size)
+{
+ struct reftable_ref_record *ra = (struct reftable_ref_record *) a;
+ struct reftable_ref_record *rb = (struct reftable_ref_record *) b;
+ return reftable_ref_record_equal(ra, rb, hash_size);
+}
+
+static void reftable_ref_record_print_void(const void *rec,
+ int hash_size)
+{
+ reftable_ref_record_print_sz((struct reftable_ref_record *) rec, hash_size);
+}
+
static struct reftable_record_vtable reftable_ref_record_vtable = {
.key = &reftable_ref_record_key,
.type = BLOCK_TYPE_REF,
@@ -439,6 +463,8 @@ static struct reftable_record_vtable reftable_ref_record_vtable = {
.decode = &reftable_ref_record_decode,
.release = &reftable_ref_record_release_void,
.is_deletion = &reftable_ref_record_is_deletion_void,
+ .equal = &reftable_ref_record_equal_void,
+ .print = &reftable_ref_record_print_void,
};
static void reftable_obj_record_key(const void *r, struct strbuf *dest)
@@ -457,6 +483,21 @@ static void reftable_obj_record_release(void *rec)
memset(obj, 0, sizeof(struct reftable_obj_record));
}
+static void reftable_obj_record_print(const void *rec, int hash_size)
+{
+ const struct reftable_obj_record *obj = rec;
+ char hex[GIT_MAX_HEXSZ + 1] = { 0 };
+ struct strbuf offset_str = STRBUF_INIT;
+ int i;
+
+ for (i = 0; i < obj->offset_len; i++)
+ strbuf_addf(&offset_str, "%" PRIu64 " ", obj->offsets[i]);
+ hex_format(hex, obj->hash_prefix, obj->hash_prefix_len);
+ printf("prefix %s (len %d), offsets [%s]\n",
+ hex, obj->hash_prefix_len, offset_str.buf);
+ strbuf_release(&offset_str);
+}
+
static void reftable_obj_record_copy_from(void *rec, const void *src_rec,
int hash_size)
{
@@ -465,12 +506,14 @@ static void reftable_obj_record_copy_from(void *rec, const void *src_rec,
(const struct reftable_obj_record *)src_rec;
reftable_obj_record_release(obj);
- *obj = *src;
- obj->hash_prefix = reftable_malloc(obj->hash_prefix_len);
- memcpy(obj->hash_prefix, src->hash_prefix, obj->hash_prefix_len);
+ obj->hash_prefix = reftable_malloc(src->hash_prefix_len);
+ obj->hash_prefix_len = src->hash_prefix_len;
+ if (src->hash_prefix_len)
+ memcpy(obj->hash_prefix, src->hash_prefix, obj->hash_prefix_len);
- obj->offsets = reftable_malloc(obj->offset_len * sizeof(uint64_t));
- COPY_ARRAY(obj->offsets, src->offsets, obj->offset_len);
+ obj->offsets = reftable_malloc(src->offset_len * sizeof(uint64_t));
+ obj->offset_len = src->offset_len;
+ COPY_ARRAY(obj->offsets, src->offsets, src->offset_len);
}
static uint8_t reftable_obj_record_val_type(const void *rec)
@@ -572,6 +615,25 @@ static int not_a_deletion(const void *p)
return 0;
}
+static int reftable_obj_record_equal_void(const void *a, const void *b, int hash_size)
+{
+ struct reftable_obj_record *ra = (struct reftable_obj_record *) a;
+ struct reftable_obj_record *rb = (struct reftable_obj_record *) b;
+
+ if (ra->hash_prefix_len != rb->hash_prefix_len
+ || ra->offset_len != rb->offset_len)
+ return 0;
+
+ if (ra->hash_prefix_len &&
+ memcmp(ra->hash_prefix, rb->hash_prefix, ra->hash_prefix_len))
+ return 0;
+ if (ra->offset_len &&
+ memcmp(ra->offsets, rb->offsets, ra->offset_len * sizeof(uint64_t)))
+ return 0;
+
+ return 1;
+}
+
static struct reftable_record_vtable reftable_obj_record_vtable = {
.key = &reftable_obj_record_key,
.type = BLOCK_TYPE_OBJ,
@@ -580,32 +642,43 @@ static struct reftable_record_vtable reftable_obj_record_vtable = {
.encode = &reftable_obj_record_encode,
.decode = &reftable_obj_record_decode,
.release = &reftable_obj_record_release,
- .is_deletion = not_a_deletion,
+ .is_deletion = &not_a_deletion,
+ .equal = &reftable_obj_record_equal_void,
+ .print = &reftable_obj_record_print,
};
-void reftable_log_record_print(struct reftable_log_record *log,
- uint32_t hash_id)
+static void reftable_log_record_print_sz(struct reftable_log_record *log,
+ int hash_size)
{
- char hex[GIT_SHA256_RAWSZ + 1] = { 0 };
+ char hex[GIT_MAX_HEXSZ + 1] = { 0 };
switch (log->value_type) {
case REFTABLE_LOG_DELETION:
- printf("log{%s(%" PRIu64 ") delete", log->refname,
+ printf("log{%s(%" PRIu64 ") delete\n", log->refname,
log->update_index);
break;
case REFTABLE_LOG_UPDATE:
printf("log{%s(%" PRIu64 ") %s <%s> %" PRIu64 " %04d\n",
- log->refname, log->update_index, log->value.update.name,
- log->value.update.email, log->value.update.time,
+ log->refname, log->update_index,
+ log->value.update.name ? log->value.update.name : "",
+ log->value.update.email ? log->value.update.email : "",
+ log->value.update.time,
log->value.update.tz_offset);
- hex_format(hex, log->value.update.old_hash, hash_size(hash_id));
+ hex_format(hex, log->value.update.old_hash, hash_size);
printf("%s => ", hex);
- hex_format(hex, log->value.update.new_hash, hash_size(hash_id));
- printf("%s\n\n%s\n}\n", hex, log->value.update.message);
+ hex_format(hex, log->value.update.new_hash, hash_size);
+ printf("%s\n\n%s\n}\n", hex,
+ log->value.update.message ? log->value.update.message : "");
break;
}
}
+void reftable_log_record_print(struct reftable_log_record *log,
+ uint32_t hash_id)
+{
+ reftable_log_record_print_sz(log, hash_size(hash_id));
+}
+
static void reftable_log_record_key(const void *r, struct strbuf *dest)
{
const struct reftable_log_record *rec =
@@ -881,8 +954,16 @@ static int zero_hash_eq(uint8_t *a, uint8_t *b, int sz)
return !memcmp(a, b, sz);
}
-int reftable_log_record_equal(struct reftable_log_record *a,
- struct reftable_log_record *b, int hash_size)
+static int reftable_log_record_equal_void(const void *a,
+ const void *b, int hash_size)
+{
+ return reftable_log_record_equal((struct reftable_log_record *) a,
+ (struct reftable_log_record *) b,
+ hash_size);
+}
+
+int reftable_log_record_equal(const struct reftable_log_record *a,
+ const struct reftable_log_record *b, int hash_size)
{
if (!(null_streq(a->refname, b->refname) &&
a->update_index == b->update_index &&
@@ -915,6 +996,11 @@ static int reftable_log_record_is_deletion_void(const void *p)
(const struct reftable_log_record *)p);
}
+static void reftable_log_record_print_void(const void *rec, int hash_size)
+{
+ reftable_log_record_print_sz((struct reftable_log_record*)rec, hash_size);
+}
+
static struct reftable_record_vtable reftable_log_record_vtable = {
.key = &reftable_log_record_key,
.type = BLOCK_TYPE_LOG,
@@ -924,60 +1010,10 @@ static struct reftable_record_vtable reftable_log_record_vtable = {
.decode = &reftable_log_record_decode,
.release = &reftable_log_record_release_void,
.is_deletion = &reftable_log_record_is_deletion_void,
+ .equal = &reftable_log_record_equal_void,
+ .print = &reftable_log_record_print_void,
};
-struct reftable_record reftable_new_record(uint8_t typ)
-{
- struct reftable_record rec = { NULL };
- switch (typ) {
- case BLOCK_TYPE_REF: {
- struct reftable_ref_record *r =
- reftable_calloc(sizeof(struct reftable_ref_record));
- reftable_record_from_ref(&rec, r);
- return rec;
- }
-
- case BLOCK_TYPE_OBJ: {
- struct reftable_obj_record *r =
- reftable_calloc(sizeof(struct reftable_obj_record));
- reftable_record_from_obj(&rec, r);
- return rec;
- }
- case BLOCK_TYPE_LOG: {
- struct reftable_log_record *r =
- reftable_calloc(sizeof(struct reftable_log_record));
- reftable_record_from_log(&rec, r);
- return rec;
- }
- case BLOCK_TYPE_INDEX: {
- struct reftable_index_record empty = { .last_key =
- STRBUF_INIT };
- struct reftable_index_record *r =
- reftable_calloc(sizeof(struct reftable_index_record));
- *r = empty;
- reftable_record_from_index(&rec, r);
- return rec;
- }
- }
- abort();
- return rec;
-}
-
-/* clear out the record, yielding the reftable_record data that was
- * encapsulated. */
-static void *reftable_record_yield(struct reftable_record *rec)
-{
- void *p = rec->data;
- rec->data = NULL;
- return p;
-}
-
-void reftable_record_destroy(struct reftable_record *rec)
-{
- reftable_record_release(rec);
- reftable_free(reftable_record_yield(rec));
-}
-
static void reftable_index_record_key(const void *r, struct strbuf *dest)
{
const struct reftable_index_record *rec = r;
@@ -1042,6 +1078,21 @@ static int reftable_index_record_decode(void *rec, struct strbuf key,
return start.len - in.len;
}
+static int reftable_index_record_equal(const void *a, const void *b, int hash_size)
+{
+ struct reftable_index_record *ia = (struct reftable_index_record *) a;
+ struct reftable_index_record *ib = (struct reftable_index_record *) b;
+
+ return ia->offset == ib->offset && !strbuf_cmp(&ia->last_key, &ib->last_key);
+}
+
+static void reftable_index_record_print(const void *rec, int hash_size)
+{
+ const struct reftable_index_record *idx = rec;
+ /* TODO: escape null chars? */
+ printf("\"%s\" %" PRIu64 "\n", idx->last_key.buf, idx->offset);
+}
+
static struct reftable_record_vtable reftable_index_record_vtable = {
.key = &reftable_index_record_key,
.type = BLOCK_TYPE_INDEX,
@@ -1051,95 +1102,66 @@ static struct reftable_record_vtable reftable_index_record_vtable = {
.decode = &reftable_index_record_decode,
.release = &reftable_index_record_release,
.is_deletion = &not_a_deletion,
+ .equal = &reftable_index_record_equal,
+ .print = &reftable_index_record_print,
};
void reftable_record_key(struct reftable_record *rec, struct strbuf *dest)
{
- rec->ops->key(rec->data, dest);
+ reftable_record_vtable(rec)->key(reftable_record_data(rec), dest);
}
uint8_t reftable_record_type(struct reftable_record *rec)
{
- return rec->ops->type;
+ return rec->type;
}
int reftable_record_encode(struct reftable_record *rec, struct string_view dest,
int hash_size)
{
- return rec->ops->encode(rec->data, dest, hash_size);
+ return reftable_record_vtable(rec)->encode(reftable_record_data(rec),
+ dest, hash_size);
}
void reftable_record_copy_from(struct reftable_record *rec,
struct reftable_record *src, int hash_size)
{
- assert(src->ops->type == rec->ops->type);
+ assert(src->type == rec->type);
- rec->ops->copy_from(rec->data, src->data, hash_size);
+ reftable_record_vtable(rec)->copy_from(reftable_record_data(rec),
+ reftable_record_data(src),
+ hash_size);
}
uint8_t reftable_record_val_type(struct reftable_record *rec)
{
- return rec->ops->val_type(rec->data);
+ return reftable_record_vtable(rec)->val_type(reftable_record_data(rec));
}
int reftable_record_decode(struct reftable_record *rec, struct strbuf key,
uint8_t extra, struct string_view src, int hash_size)
{
- return rec->ops->decode(rec->data, key, extra, src, hash_size);
+ return reftable_record_vtable(rec)->decode(reftable_record_data(rec),
+ key, extra, src, hash_size);
}
void reftable_record_release(struct reftable_record *rec)
{
- rec->ops->release(rec->data);
+ reftable_record_vtable(rec)->release(reftable_record_data(rec));
}
int reftable_record_is_deletion(struct reftable_record *rec)
{
- return rec->ops->is_deletion(rec->data);
+ return reftable_record_vtable(rec)->is_deletion(
+ reftable_record_data(rec));
}
-void reftable_record_from_ref(struct reftable_record *rec,
- struct reftable_ref_record *ref_rec)
+int reftable_record_equal(struct reftable_record *a, struct reftable_record *b, int hash_size)
{
- assert(!rec->ops);
- rec->data = ref_rec;
- rec->ops = &reftable_ref_record_vtable;
-}
-
-void reftable_record_from_obj(struct reftable_record *rec,
- struct reftable_obj_record *obj_rec)
-{
- assert(!rec->ops);
- rec->data = obj_rec;
- rec->ops = &reftable_obj_record_vtable;
-}
-
-void reftable_record_from_index(struct reftable_record *rec,
- struct reftable_index_record *index_rec)
-{
- assert(!rec->ops);
- rec->data = index_rec;
- rec->ops = &reftable_index_record_vtable;
-}
-
-void reftable_record_from_log(struct reftable_record *rec,
- struct reftable_log_record *log_rec)
-{
- assert(!rec->ops);
- rec->data = log_rec;
- rec->ops = &reftable_log_record_vtable;
-}
-
-struct reftable_ref_record *reftable_record_as_ref(struct reftable_record *rec)
-{
- assert(reftable_record_type(rec) == BLOCK_TYPE_REF);
- return rec->data;
-}
-
-struct reftable_log_record *reftable_record_as_log(struct reftable_record *rec)
-{
- assert(reftable_record_type(rec) == BLOCK_TYPE_LOG);
- return rec->data;
+ if (a->type != b->type)
+ return 0;
+ return reftable_record_vtable(a)->equal(
+ reftable_record_data(a), reftable_record_data(b), hash_size);
}
static int hash_equal(uint8_t *a, uint8_t *b, int hash_size)
@@ -1150,13 +1172,15 @@ static int hash_equal(uint8_t *a, uint8_t *b, int hash_size)
return a == b;
}
-int reftable_ref_record_equal(struct reftable_ref_record *a,
- struct reftable_ref_record *b, int hash_size)
+int reftable_ref_record_equal(const struct reftable_ref_record *a,
+ const struct reftable_ref_record *b, int hash_size)
{
assert(hash_size > 0);
- if (!(0 == strcmp(a->refname, b->refname) &&
- a->update_index == b->update_index &&
- a->value_type == b->value_type))
+ if (!null_streq(a->refname, b->refname))
+ return 0;
+
+ if (a->update_index != b->update_index ||
+ a->value_type != b->value_type)
return 0;
switch (a->value_type) {
@@ -1210,3 +1234,81 @@ void string_view_consume(struct string_view *s, int n)
s->buf += n;
s->len -= n;
}
+
+static void *reftable_record_data(struct reftable_record *rec)
+{
+ switch (rec->type) {
+ case BLOCK_TYPE_REF:
+ return &rec->u.ref;
+ case BLOCK_TYPE_LOG:
+ return &rec->u.log;
+ case BLOCK_TYPE_INDEX:
+ return &rec->u.idx;
+ case BLOCK_TYPE_OBJ:
+ return &rec->u.obj;
+ }
+ abort();
+}
+
+static struct reftable_record_vtable *
+reftable_record_vtable(struct reftable_record *rec)
+{
+ switch (rec->type) {
+ case BLOCK_TYPE_REF:
+ return &reftable_ref_record_vtable;
+ case BLOCK_TYPE_LOG:
+ return &reftable_log_record_vtable;
+ case BLOCK_TYPE_INDEX:
+ return &reftable_index_record_vtable;
+ case BLOCK_TYPE_OBJ:
+ return &reftable_obj_record_vtable;
+ }
+ abort();
+}
+
+struct reftable_record reftable_new_record(uint8_t typ)
+{
+ struct reftable_record clean = {
+ .type = typ,
+ };
+
+ /* the following is involved, but the naive solution (just return
+ * `clean` as is, except for BLOCK_TYPE_INDEX), returns a garbage
+ * clean.u.obj.offsets pointer on Windows VS CI. Go figure.
+ */
+ switch (typ) {
+ case BLOCK_TYPE_OBJ:
+ {
+ struct reftable_obj_record obj = { 0 };
+ clean.u.obj = obj;
+ break;
+ }
+ case BLOCK_TYPE_INDEX:
+ {
+ struct reftable_index_record idx = {
+ .last_key = STRBUF_INIT,
+ };
+ clean.u.idx = idx;
+ break;
+ }
+ case BLOCK_TYPE_REF:
+ {
+ struct reftable_ref_record ref = { 0 };
+ clean.u.ref = ref;
+ break;
+ }
+ case BLOCK_TYPE_LOG:
+ {
+ struct reftable_log_record log = { 0 };
+ clean.u.log = log;
+ break;
+ }
+ }
+ return clean;
+}
+
+void reftable_record_print(struct reftable_record *rec, int hash_size)
+{
+ printf("'%c': ", rec->type);
+ reftable_record_vtable(rec)->print(reftable_record_data(rec), hash_size);
+}
diff --git a/reftable/record.h b/reftable/record.h
index 498e8c5..fd80cd4 100644
--- a/reftable/record.h
+++ b/reftable/record.h
@@ -58,18 +58,18 @@ struct reftable_record_vtable {
/* is this a tombstone? */
int (*is_deletion)(const void *rec);
-};
-/* record is a generic wrapper for different types of records. */
-struct reftable_record {
- void *data;
- struct reftable_record_vtable *ops;
+ /* Are two records equal? This assumes they have the same type. Returns 0 for non-equal. */
+ int (*equal)(const void *a, const void *b, int hash_size);
+
+ /* Print on stdout, for debugging. */
+ void (*print)(const void *rec, int hash_size);
};
/* returns true for recognized block types. Block start with the block type. */
int reftable_is_block_type(uint8_t typ);
-/* creates a malloced record of the given type. Dispose with record_destroy */
+/* return an initialized record for the given type */
struct reftable_record reftable_new_record(uint8_t typ);
/* Encode `key` into `dest`. Sets `is_restart` to indicate a restart. Returns
@@ -97,8 +97,25 @@ struct reftable_obj_record {
int offset_len;
};
-/* see struct record_vtable */
+/* record is a generic wrapper for different types of records. It is normally
+ * created on the stack, or embedded within another struct. If the type is
+ * known, a fresh instance can be initialized explicitly. Otherwise, use
+ * reftable_new_record() to initialize generically (as the index_record is not
+ * valid as a 0-initialized structure)
+ */
+struct reftable_record {
+ uint8_t type;
+ union {
+ struct reftable_ref_record ref;
+ struct reftable_log_record log;
+ struct reftable_obj_record obj;
+ struct reftable_index_record idx;
+ } u;
+};
+/* see struct record_vtable */
+int reftable_record_equal(struct reftable_record *a, struct reftable_record *b, int hash_size);
+void reftable_record_print(struct reftable_record *rec, int hash_size);
void reftable_record_key(struct reftable_record *rec, struct strbuf *dest);
uint8_t reftable_record_type(struct reftable_record *rec);
void reftable_record_copy_from(struct reftable_record *rec,
@@ -111,25 +128,9 @@ int reftable_record_decode(struct reftable_record *rec, struct strbuf key,
int hash_size);
int reftable_record_is_deletion(struct reftable_record *rec);
-/* zeroes out the embedded record */
+/* frees and zeroes out the embedded record */
void reftable_record_release(struct reftable_record *rec);
-/* clear and deallocate embedded record, and zero `rec`. */
-void reftable_record_destroy(struct reftable_record *rec);
-
-/* initialize generic records from concrete records. The generic record should
- * be zeroed out. */
-void reftable_record_from_obj(struct reftable_record *rec,
- struct reftable_obj_record *objrec);
-void reftable_record_from_index(struct reftable_record *rec,
- struct reftable_index_record *idxrec);
-void reftable_record_from_ref(struct reftable_record *rec,
- struct reftable_ref_record *refrec);
-void reftable_record_from_log(struct reftable_record *rec,
- struct reftable_log_record *logrec);
-struct reftable_ref_record *reftable_record_as_ref(struct reftable_record *ref);
-struct reftable_log_record *reftable_record_as_log(struct reftable_record *ref);
-
/* for qsort. */
int reftable_ref_record_compare_name(const void *a, const void *b);
diff --git a/reftable/record_test.c b/reftable/record_test.c
index f4ad7ca..70ae78f 100644
--- a/reftable/record_test.c
+++ b/reftable/record_test.c
@@ -16,24 +16,20 @@
static void test_copy(struct reftable_record *rec)
{
- struct reftable_record copy =
- reftable_new_record(reftable_record_type(rec));
+ struct reftable_record copy = { 0 };
+ uint8_t typ;
+
+ typ = reftable_record_type(rec);
+ copy = reftable_new_record(typ);
reftable_record_copy_from(&copy, rec, GIT_SHA1_RAWSZ);
/* do it twice to catch memory leaks */
reftable_record_copy_from(&copy, rec, GIT_SHA1_RAWSZ);
- switch (reftable_record_type(&copy)) {
- case BLOCK_TYPE_REF:
- EXPECT(reftable_ref_record_equal(reftable_record_as_ref(&copy),
- reftable_record_as_ref(rec),
- GIT_SHA1_RAWSZ));
- break;
- case BLOCK_TYPE_LOG:
- EXPECT(reftable_log_record_equal(reftable_record_as_log(&copy),
- reftable_record_as_log(rec),
- GIT_SHA1_RAWSZ));
- break;
- }
- reftable_record_destroy(&copy);
+ EXPECT(reftable_record_equal(rec, &copy, GIT_SHA1_RAWSZ));
+
+ puts("testing print coverage:\n");
+ reftable_record_print(&copy, GIT_SHA1_RAWSZ);
+
+ reftable_record_release(&copy);
}
static void test_varint_roundtrip(void)
@@ -106,61 +102,58 @@ static void test_reftable_ref_record_roundtrip(void)
int i = 0;
for (i = REFTABLE_REF_DELETION; i < REFTABLE_NR_REF_VALUETYPES; i++) {
- struct reftable_ref_record in = { NULL };
- struct reftable_ref_record out = { NULL };
- struct reftable_record rec_out = { NULL };
+ struct reftable_record in = {
+ .type = BLOCK_TYPE_REF,
+ };
+ struct reftable_record out = { .type = BLOCK_TYPE_REF };
struct strbuf key = STRBUF_INIT;
- struct reftable_record rec = { NULL };
uint8_t buffer[1024] = { 0 };
struct string_view dest = {
.buf = buffer,
.len = sizeof(buffer),
};
-
int n, m;
- in.value_type = i;
+ in.u.ref.value_type = i;
switch (i) {
case REFTABLE_REF_DELETION:
break;
case REFTABLE_REF_VAL1:
- in.value.val1 = reftable_malloc(GIT_SHA1_RAWSZ);
- set_hash(in.value.val1, 1);
+ in.u.ref.value.val1 = reftable_malloc(GIT_SHA1_RAWSZ);
+ set_hash(in.u.ref.value.val1, 1);
break;
case REFTABLE_REF_VAL2:
- in.value.val2.value = reftable_malloc(GIT_SHA1_RAWSZ);
- set_hash(in.value.val2.value, 1);
- in.value.val2.target_value =
+ in.u.ref.value.val2.value =
reftable_malloc(GIT_SHA1_RAWSZ);
- set_hash(in.value.val2.target_value, 2);
+ set_hash(in.u.ref.value.val2.value, 1);
+ in.u.ref.value.val2.target_value =
+ reftable_malloc(GIT_SHA1_RAWSZ);
+ set_hash(in.u.ref.value.val2.target_value, 2);
break;
case REFTABLE_REF_SYMREF:
- in.value.symref = xstrdup("target");
+ in.u.ref.value.symref = xstrdup("target");
break;
}
- in.refname = xstrdup("refs/heads/master");
+ in.u.ref.refname = xstrdup("refs/heads/master");
- reftable_record_from_ref(&rec, &in);
- test_copy(&rec);
+ test_copy(&in);
- EXPECT(reftable_record_val_type(&rec) == i);
+ EXPECT(reftable_record_val_type(&in) == i);
- reftable_record_key(&rec, &key);
- n = reftable_record_encode(&rec, dest, GIT_SHA1_RAWSZ);
+ reftable_record_key(&in, &key);
+ n = reftable_record_encode(&in, dest, GIT_SHA1_RAWSZ);
EXPECT(n > 0);
/* decode into a non-zero reftable_record to test for leaks. */
-
- reftable_record_from_ref(&rec_out, &out);
- m = reftable_record_decode(&rec_out, key, i, dest,
- GIT_SHA1_RAWSZ);
+ m = reftable_record_decode(&out, key, i, dest, GIT_SHA1_RAWSZ);
EXPECT(n == m);
- EXPECT(reftable_ref_record_equal(&in, &out, GIT_SHA1_RAWSZ));
- reftable_record_release(&rec_out);
+ EXPECT(reftable_ref_record_equal(&in.u.ref, &out.u.ref,
+ GIT_SHA1_RAWSZ));
+ reftable_record_release(&in);
strbuf_release(&key);
- reftable_ref_record_release(&in);
+ reftable_record_release(&out);
}
}
@@ -187,7 +180,8 @@ static void test_reftable_log_record_equal(void)
static void test_reftable_log_record_roundtrip(void)
{
int i;
- struct reftable_log_record in[2] = {
+
+ struct reftable_log_record in[] = {
{
.refname = xstrdup("refs/heads/master"),
.update_index = 42,
@@ -208,12 +202,26 @@ static void test_reftable_log_record_roundtrip(void)
.refname = xstrdup("refs/heads/master"),
.update_index = 22,
.value_type = REFTABLE_LOG_DELETION,
+ },
+ {
+ .refname = xstrdup("branch"),
+ .update_index = 33,
+ .value_type = REFTABLE_LOG_UPDATE,
+ .value = {
+ .update = {
+ .old_hash = reftable_malloc(GIT_SHA1_RAWSZ),
+ .new_hash = reftable_malloc(GIT_SHA1_RAWSZ),
+ /* rest of fields left empty. */
+ },
+ },
}
};
set_test_hash(in[0].value.update.new_hash, 1);
set_test_hash(in[0].value.update.old_hash, 2);
+ set_test_hash(in[2].value.update.new_hash, 3);
+ set_test_hash(in[2].value.update.old_hash, 4);
for (i = 0; i < ARRAY_SIZE(in); i++) {
- struct reftable_record rec = { NULL };
+ struct reftable_record rec = { .type = BLOCK_TYPE_LOG };
struct strbuf key = STRBUF_INIT;
uint8_t buffer[1024] = { 0 };
struct string_view dest = {
@@ -221,23 +229,25 @@ static void test_reftable_log_record_roundtrip(void)
.len = sizeof(buffer),
};
/* populate out, to check for leaks. */
- struct reftable_log_record out = {
- .refname = xstrdup("old name"),
- .value_type = REFTABLE_LOG_UPDATE,
- .value = {
- .update = {
- .new_hash = reftable_calloc(GIT_SHA1_RAWSZ),
- .old_hash = reftable_calloc(GIT_SHA1_RAWSZ),
- .name = xstrdup("old name"),
- .email = xstrdup("old@email"),
- .message = xstrdup("old message"),
+ struct reftable_record out = {
+ .type = BLOCK_TYPE_LOG,
+ .u.log = {
+ .refname = xstrdup("old name"),
+ .value_type = REFTABLE_LOG_UPDATE,
+ .value = {
+ .update = {
+ .new_hash = reftable_calloc(GIT_SHA1_RAWSZ),
+ .old_hash = reftable_calloc(GIT_SHA1_RAWSZ),
+ .name = xstrdup("old name"),
+ .email = xstrdup("old@email"),
+ .message = xstrdup("old message"),
+ },
},
},
};
- struct reftable_record rec_out = { NULL };
int n, m, valtype;
- reftable_record_from_log(&rec, &in[i]);
+ rec.u.log = in[i];
test_copy(&rec);
@@ -245,16 +255,16 @@ static void test_reftable_log_record_roundtrip(void)
n = reftable_record_encode(&rec, dest, GIT_SHA1_RAWSZ);
EXPECT(n >= 0);
- reftable_record_from_log(&rec_out, &out);
valtype = reftable_record_val_type(&rec);
- m = reftable_record_decode(&rec_out, key, valtype, dest,
+ m = reftable_record_decode(&out, key, valtype, dest,
GIT_SHA1_RAWSZ);
EXPECT(n == m);
- EXPECT(reftable_log_record_equal(&in[i], &out, GIT_SHA1_RAWSZ));
+ EXPECT(reftable_log_record_equal(&in[i], &out.u.log,
+ GIT_SHA1_RAWSZ));
reftable_log_record_release(&in[i]);
strbuf_release(&key);
- reftable_record_release(&rec_out);
+ reftable_record_release(&out);
}
}
@@ -322,47 +332,45 @@ static void test_reftable_obj_record_roundtrip(void)
} };
int i = 0;
for (i = 0; i < ARRAY_SIZE(recs); i++) {
- struct reftable_obj_record in = recs[i];
uint8_t buffer[1024] = { 0 };
struct string_view dest = {
.buf = buffer,
.len = sizeof(buffer),
};
- struct reftable_record rec = { NULL };
+ struct reftable_record in = {
+ .type = BLOCK_TYPE_OBJ,
+ .u = {
+ .obj = recs[i],
+ },
+ };
struct strbuf key = STRBUF_INIT;
- struct reftable_obj_record out = { NULL };
- struct reftable_record rec_out = { NULL };
+ struct reftable_record out = { .type = BLOCK_TYPE_OBJ };
int n, m;
uint8_t extra;
- reftable_record_from_obj(&rec, &in);
- test_copy(&rec);
- reftable_record_key(&rec, &key);
- n = reftable_record_encode(&rec, dest, GIT_SHA1_RAWSZ);
+ test_copy(&in);
+ reftable_record_key(&in, &key);
+ n = reftable_record_encode(&in, dest, GIT_SHA1_RAWSZ);
EXPECT(n > 0);
- extra = reftable_record_val_type(&rec);
- reftable_record_from_obj(&rec_out, &out);
- m = reftable_record_decode(&rec_out, key, extra, dest,
+ extra = reftable_record_val_type(&in);
+ m = reftable_record_decode(&out, key, extra, dest,
GIT_SHA1_RAWSZ);
EXPECT(n == m);
- EXPECT(in.hash_prefix_len == out.hash_prefix_len);
- EXPECT(in.offset_len == out.offset_len);
-
- EXPECT(!memcmp(in.hash_prefix, out.hash_prefix,
- in.hash_prefix_len));
- EXPECT(0 == memcmp(in.offsets, out.offsets,
- sizeof(uint64_t) * in.offset_len));
+ EXPECT(reftable_record_equal(&in, &out, GIT_SHA1_RAWSZ));
strbuf_release(&key);
- reftable_record_release(&rec_out);
+ reftable_record_release(&out);
}
}
static void test_reftable_index_record_roundtrip(void)
{
- struct reftable_index_record in = {
- .offset = 42,
- .last_key = STRBUF_INIT,
+ struct reftable_record in = {
+ .type = BLOCK_TYPE_INDEX,
+ .u.idx = {
+ .offset = 42,
+ .last_key = STRBUF_INIT,
+ },
};
uint8_t buffer[1024] = { 0 };
struct string_view dest = {
@@ -370,31 +378,30 @@ static void test_reftable_index_record_roundtrip(void)
.len = sizeof(buffer),
};
struct strbuf key = STRBUF_INIT;
- struct reftable_record rec = { NULL };
- struct reftable_index_record out = { .last_key = STRBUF_INIT };
- struct reftable_record out_rec = { NULL };
+ struct reftable_record out = {
+ .type = BLOCK_TYPE_INDEX,
+ .u.idx = { .last_key = STRBUF_INIT },
+ };
int n, m;
uint8_t extra;
- strbuf_addstr(&in.last_key, "refs/heads/master");
- reftable_record_from_index(&rec, &in);
- reftable_record_key(&rec, &key);
- test_copy(&rec);
+ strbuf_addstr(&in.u.idx.last_key, "refs/heads/master");
+ reftable_record_key(&in, &key);
+ test_copy(&in);
- EXPECT(0 == strbuf_cmp(&key, &in.last_key));
- n = reftable_record_encode(&rec, dest, GIT_SHA1_RAWSZ);
+ EXPECT(0 == strbuf_cmp(&key, &in.u.idx.last_key));
+ n = reftable_record_encode(&in, dest, GIT_SHA1_RAWSZ);
EXPECT(n > 0);
- extra = reftable_record_val_type(&rec);
- reftable_record_from_index(&out_rec, &out);
- m = reftable_record_decode(&out_rec, key, extra, dest, GIT_SHA1_RAWSZ);
+ extra = reftable_record_val_type(&in);
+ m = reftable_record_decode(&out, key, extra, dest, GIT_SHA1_RAWSZ);
EXPECT(m == n);
- EXPECT(in.offset == out.offset);
+ EXPECT(reftable_record_equal(&in, &out, GIT_SHA1_RAWSZ));
- reftable_record_release(&out_rec);
+ reftable_record_release(&out);
strbuf_release(&key);
- strbuf_release(&in.last_key);
+ strbuf_release(&in.u.idx.last_key);
}
int record_test_main(int argc, const char *argv[])
diff --git a/reftable/reftable-record.h b/reftable/reftable-record.h
index 5370d22..67104f8 100644
--- a/reftable/reftable-record.h
+++ b/reftable/reftable-record.h
@@ -49,25 +49,25 @@ struct reftable_ref_record {
/* Returns the first hash, or NULL if `rec` is not of type
* REFTABLE_REF_VAL1 or REFTABLE_REF_VAL2. */
-uint8_t *reftable_ref_record_val1(struct reftable_ref_record *rec);
+uint8_t *reftable_ref_record_val1(const struct reftable_ref_record *rec);
/* Returns the second hash, or NULL if `rec` is not of type
* REFTABLE_REF_VAL2. */
-uint8_t *reftable_ref_record_val2(struct reftable_ref_record *rec);
+uint8_t *reftable_ref_record_val2(const struct reftable_ref_record *rec);
/* returns whether 'ref' represents a deletion */
int reftable_ref_record_is_deletion(const struct reftable_ref_record *ref);
/* prints a reftable_ref_record onto stdout. Useful for debugging. */
-void reftable_ref_record_print(struct reftable_ref_record *ref,
+void reftable_ref_record_print(const struct reftable_ref_record *ref,
uint32_t hash_id);
/* frees and nulls all pointer values inside `ref`. */
void reftable_ref_record_release(struct reftable_ref_record *ref);
/* returns whether two reftable_ref_records are the same. Useful for testing. */
-int reftable_ref_record_equal(struct reftable_ref_record *a,
- struct reftable_ref_record *b, int hash_size);
+int reftable_ref_record_equal(const struct reftable_ref_record *a,
+ const struct reftable_ref_record *b, int hash_size);
/* reftable_log_record holds a reflog entry */
struct reftable_log_record {
@@ -104,8 +104,8 @@ int reftable_log_record_is_deletion(const struct reftable_log_record *log);
void reftable_log_record_release(struct reftable_log_record *log);
/* returns whether two records are equal. Useful for testing. */
-int reftable_log_record_equal(struct reftable_log_record *a,
- struct reftable_log_record *b, int hash_size);
+int reftable_log_record_equal(const struct reftable_log_record *a,
+ const struct reftable_log_record *b, int hash_size);
/* dumps a reftable_log_record on stdout, for debugging/testing. */
void reftable_log_record_print(struct reftable_log_record *log,
diff --git a/reftable/reftable-writer.h b/reftable/reftable-writer.h
index a560dc1..db8de19 100644
--- a/reftable/reftable-writer.h
+++ b/reftable/reftable-writer.h
@@ -143,7 +143,7 @@ int reftable_writer_close(struct reftable_writer *w);
This struct becomes invalid when the writer is freed.
*/
-const struct reftable_stats *writer_stats(struct reftable_writer *w);
+const struct reftable_stats *reftable_writer_stats(struct reftable_writer *w);
/* reftable_writer_free deallocates memory for the writer */
void reftable_writer_free(struct reftable_writer *w);
diff --git a/reftable/reftable.c b/reftable/reftable.c
deleted file mode 100644
index 0e4607a..0000000
--- a/reftable/reftable.c
+++ /dev/null
@@ -1,115 +0,0 @@
-/*
-Copyright 2020 Google LLC
-
-Use of this source code is governed by a BSD-style
-license that can be found in the LICENSE file or at
-https://developers.google.com/open-source/licenses/bsd
-*/
-
-#include "basics.h"
-#include "record.h"
-#include "generic.h"
-#include "reftable-iterator.h"
-#include "reftable-generic.h"
-
-int reftable_table_seek_ref(struct reftable_table *tab,
- struct reftable_iterator *it, const char *name)
-{
- struct reftable_ref_record ref = {
- .refname = (char *)name,
- };
- struct reftable_record rec = { NULL };
- reftable_record_from_ref(&rec, &ref);
- return tab->ops->seek_record(tab->table_arg, it, &rec);
-}
-
-int reftable_table_read_ref(struct reftable_table *tab, const char *name,
- struct reftable_ref_record *ref)
-{
- struct reftable_iterator it = { NULL };
- int err = reftable_table_seek_ref(tab, &it, name);
- if (err)
- goto done;
-
- err = reftable_iterator_next_ref(&it, ref);
- if (err)
- goto done;
-
- if (strcmp(ref->refname, name) ||
- reftable_ref_record_is_deletion(ref)) {
- reftable_ref_record_release(ref);
- err = 1;
- goto done;
- }
-
-done:
- reftable_iterator_destroy(&it);
- return err;
-}
-
-uint64_t reftable_table_max_update_index(struct reftable_table *tab)
-{
- return tab->ops->max_update_index(tab->table_arg);
-}
-
-uint64_t reftable_table_min_update_index(struct reftable_table *tab)
-{
- return tab->ops->min_update_index(tab->table_arg);
-}
-
-uint32_t reftable_table_hash_id(struct reftable_table *tab)
-{
- return tab->ops->hash_id(tab->table_arg);
-}
-
-void reftable_iterator_destroy(struct reftable_iterator *it)
-{
- if (!it->ops) {
- return;
- }
- it->ops->close(it->iter_arg);
- it->ops = NULL;
- FREE_AND_NULL(it->iter_arg);
-}
-
-int reftable_iterator_next_ref(struct reftable_iterator *it,
- struct reftable_ref_record *ref)
-{
- struct reftable_record rec = { NULL };
- reftable_record_from_ref(&rec, ref);
- return iterator_next(it, &rec);
-}
-
-int reftable_iterator_next_log(struct reftable_iterator *it,
- struct reftable_log_record *log)
-{
- struct reftable_record rec = { NULL };
- reftable_record_from_log(&rec, log);
- return iterator_next(it, &rec);
-}
-
-int iterator_next(struct reftable_iterator *it, struct reftable_record *rec)
-{
- return it->ops->next(it->iter_arg, rec);
-}
-
-static int empty_iterator_next(void *arg, struct reftable_record *rec)
-{
- return 1;
-}
-
-static void empty_iterator_close(void *arg)
-{
-}
-
-static struct reftable_iterator_vtable empty_vtable = {
- .next = &empty_iterator_next,
- .close = &empty_iterator_close,
-};
-
-void iterator_set_empty(struct reftable_iterator *it)
-{
- assert(!it->ops);
- it->iter_arg = NULL;
- it->ops = &empty_vtable;
-}
diff --git a/reftable/stack.c b/reftable/stack.c
index 56bf5f2..ddbdf1b 100644
--- a/reftable/stack.c
+++ b/reftable/stack.c
@@ -889,7 +889,7 @@ static int stack_compact_range(struct reftable_stack *st, int first, int last,
struct strbuf new_table_path = STRBUF_INIT;
int err = 0;
int have_lock = 0;
- int lock_file_fd = 0;
+ int lock_file_fd = -1;
int compact_count = last - first + 1;
char **listp = NULL;
char **delete_on_success =
@@ -923,7 +923,7 @@ static int stack_compact_range(struct reftable_stack *st, int first, int last,
}
/* Don't want to write to the lock for now. */
close(lock_file_fd);
- lock_file_fd = 0;
+ lock_file_fd = -1;
have_lock = 1;
err = stack_uptodate(st);
@@ -1031,7 +1031,7 @@ static int stack_compact_range(struct reftable_stack *st, int first, int last,
goto done;
}
err = close(lock_file_fd);
- lock_file_fd = 0;
+ lock_file_fd = -1;
if (err < 0) {
err = REFTABLE_IO_ERROR;
unlink(new_table_path.buf);
@@ -1068,9 +1068,9 @@ done:
listp++;
}
free_names(subtable_locks);
- if (lock_file_fd > 0) {
+ if (lock_file_fd >= 0) {
close(lock_file_fd);
- lock_file_fd = 0;
+ lock_file_fd = -1;
}
if (have_lock) {
unlink(lock_file_name.buf);
diff --git a/reftable/stack_test.c b/reftable/stack_test.c
index f4c743d..19fe4e2 100644
--- a/reftable/stack_test.c
+++ b/reftable/stack_test.c
@@ -90,7 +90,7 @@ static void test_read_file(void)
EXPECT(0 == strcmp(want[i], names[i]));
}
free_names(names);
- remove(fn);
+ (void) remove(fn);
}
static void test_parse_names(void)
@@ -839,6 +839,7 @@ static void test_reftable_stack_auto_compaction(void)
EXPECT_ERR(err);
err = reftable_stack_auto_compact(st);
+ EXPECT_ERR(err);
EXPECT(i < 3 || st->merged->stack_len < 2 * fastlog2(i));
}
diff --git a/reftable/system.h b/reftable/system.h
index 4907306..18f9207 100644
--- a/reftable/system.h
+++ b/reftable/system.h
@@ -16,17 +16,6 @@ https://developers.google.com/open-source/licenses/bsd
#include "hash.h" /* hash ID, sizes.*/
#include "dir.h" /* remove_dir_recursively, for tests.*/
-#include <zlib.h>
-
-#ifdef NO_UNCOMPRESS2
-/*
- * This is uncompress2, which is only available in zlib >= 1.2.9
- * (released as of early 2017)
- */
-int uncompress2(Bytef *dest, uLongf *destLen, const Bytef *source,
- uLong *sourceLen);
-#endif
-
int hash_size(uint32_t id);
#endif
diff --git a/reftable/writer.c b/reftable/writer.c
index 35c8649..427f131 100644
--- a/reftable/writer.c
+++ b/reftable/writer.c
@@ -150,6 +150,8 @@ void reftable_writer_set_limits(struct reftable_writer *w, uint64_t min,
void reftable_writer_free(struct reftable_writer *w)
{
+ if (!w)
+ return;
reftable_free(w->block);
reftable_free(w);
}
@@ -238,14 +240,13 @@ static int writer_add_record(struct reftable_writer *w,
writer_reinit_block_writer(w, reftable_record_type(rec));
err = block_writer_add(w->block_writer, rec);
- if (err < 0) {
+ if (err == -1) {
/* we are writing into memory, so an error can only mean it
* doesn't fit. */
err = REFTABLE_ENTRY_TOO_BIG_ERROR;
goto done;
}
- err = 0;
done:
strbuf_release(&key);
return err;
@@ -254,8 +255,12 @@ done:
int reftable_writer_add_ref(struct reftable_writer *w,
struct reftable_ref_record *ref)
{
- struct reftable_record rec = { NULL };
- struct reftable_ref_record copy = *ref;
+ struct reftable_record rec = {
+ .type = BLOCK_TYPE_REF,
+ .u = {
+ .ref = *ref
+ },
+ };
int err = 0;
if (ref->refname == NULL)
@@ -264,8 +269,7 @@ int reftable_writer_add_ref(struct reftable_writer *w,
ref->update_index > w->max_update_index)
return REFTABLE_API_ERROR;
- reftable_record_from_ref(&rec, &copy);
- copy.update_index -= w->min_update_index;
+ rec.u.ref.update_index -= w->min_update_index;
err = writer_add_record(w, &rec);
if (err < 0)
@@ -304,7 +308,12 @@ int reftable_writer_add_refs(struct reftable_writer *w,
static int reftable_writer_add_log_verbatim(struct reftable_writer *w,
struct reftable_log_record *log)
{
- struct reftable_record rec = { NULL };
+ struct reftable_record rec = {
+ .type = BLOCK_TYPE_LOG,
+ .u = {
+ .log = *log,
+ },
+ };
if (w->block_writer &&
block_writer_type(w->block_writer) == BLOCK_TYPE_REF) {
int err = writer_finish_public_section(w);
@@ -314,8 +323,6 @@ static int reftable_writer_add_log_verbatim(struct reftable_writer *w,
w->next -= w->pending_padding;
w->pending_padding = 0;
-
- reftable_record_from_log(&rec, log);
return writer_add_record(w, &rec);
}
@@ -396,8 +403,12 @@ static int writer_finish_section(struct reftable_writer *w)
w->index_len = 0;
w->index_cap = 0;
for (i = 0; i < idx_len; i++) {
- struct reftable_record rec = { NULL };
- reftable_record_from_index(&rec, idx + i);
+ struct reftable_record rec = {
+ .type = BLOCK_TYPE_INDEX,
+ .u = {
+ .idx = idx[i],
+ },
+ };
if (block_writer_add(w->block_writer, &rec) == 0) {
continue;
}
@@ -465,17 +476,17 @@ static void write_object_record(void *void_arg, void *key)
{
struct write_record_arg *arg = void_arg;
struct obj_index_tree_node *entry = key;
- struct reftable_obj_record obj_rec = {
- .hash_prefix = (uint8_t *)entry->hash.buf,
- .hash_prefix_len = arg->w->stats.object_id_len,
- .offsets = entry->offsets,
- .offset_len = entry->offset_len,
- };
- struct reftable_record rec = { NULL };
+ struct reftable_record
+ rec = { .type = BLOCK_TYPE_OBJ,
+ .u.obj = {
+ .hash_prefix = (uint8_t *)entry->hash.buf,
+ .hash_prefix_len = arg->w->stats.object_id_len,
+ .offsets = entry->offsets,
+ .offset_len = entry->offset_len,
+ } };
if (arg->err < 0)
goto done;
- reftable_record_from_obj(&rec, &obj_rec);
arg->err = block_writer_add(arg->w->block_writer, &rec);
if (arg->err == 0)
goto done;
@@ -488,7 +499,8 @@ static void write_object_record(void *void_arg, void *key)
arg->err = block_writer_add(arg->w->block_writer, &rec);
if (arg->err == 0)
goto done;
- obj_rec.offset_len = 0;
+
+ rec.u.obj.offset_len = 0;
arg->err = block_writer_add(arg->w->block_writer, &rec);
/* Should be able to write into a fresh block. */
@@ -509,7 +521,9 @@ static void object_record_free(void *void_arg, void *key)
static int writer_dump_object_index(struct reftable_writer *w)
{
struct write_record_arg closure = { .w = w };
- struct common_prefix_arg common = { NULL };
+ struct common_prefix_arg common = {
+ .max = 1, /* obj_id_len should be >= 2. */
+ };
if (w->obj_index_tree) {
infix_walk(w->obj_index_tree, &update_common, &common);
}
@@ -687,7 +701,7 @@ static int writer_flush_block(struct reftable_writer *w)
return writer_flush_nonempty_block(w);
}
-const struct reftable_stats *writer_stats(struct reftable_writer *w)
+const struct reftable_stats *reftable_writer_stats(struct reftable_writer *w)
{
return &w->stats;
}
diff --git a/remote-curl.c b/remote-curl.c
index 0dabef2..67f178b 100644
--- a/remote-curl.c
+++ b/remote-curl.c
@@ -43,6 +43,7 @@ struct options {
/* see documentation of corresponding flag in fetch-pack.h */
from_promisor : 1,
+ refetch : 1,
atomic : 1,
object_format : 1,
force_if_includes : 1;
@@ -198,6 +199,9 @@ static int set_option(const char *name, const char *value)
} else if (!strcmp(name, "from-promisor")) {
options.from_promisor = 1;
return 0;
+ } else if (!strcmp(name, "refetch")) {
+ options.refetch = 1;
+ return 0;
} else if (!strcmp(name, "filter")) {
options.filter = xstrdup(value);
return 0;
@@ -1182,6 +1186,8 @@ static int fetch_git(struct discovery *heads,
strvec_push(&args, "--deepen-relative");
if (options.from_promisor)
strvec_push(&args, "--from-promisor");
+ if (options.refetch)
+ strvec_push(&args, "--refetch");
if (options.filter)
strvec_pushf(&args, "--filter=%s", options.filter);
strvec_push(&args, url.buf);
@@ -1472,11 +1478,12 @@ int cmd_main(int argc, const char **argv)
{
struct strbuf buf = STRBUF_INIT;
int nongit;
+ int ret = 1;
setup_git_directory_gently(&nongit);
if (argc < 2) {
error(_("remote-curl: usage: git remote-curl <remote> [<url>]"));
- return 1;
+ goto cleanup;
}
options.verbosity = 1;
@@ -1508,7 +1515,7 @@ int cmd_main(int argc, const char **argv)
if (strbuf_getline_lf(&buf, stdin) == EOF) {
if (ferror(stdin))
error(_("remote-curl: error reading command stream from git"));
- return 1;
+ goto cleanup;
}
if (buf.len == 0)
break;
@@ -1556,12 +1563,15 @@ int cmd_main(int argc, const char **argv)
break;
} else {
error(_("remote-curl: unknown command '%s' from git"), buf.buf);
- return 1;
+ goto cleanup;
}
strbuf_reset(&buf);
} while (1);
http_cleanup();
+ ret = 0;
+cleanup:
+ strbuf_release(&buf);
- return 0;
+ return ret;
}
diff --git a/remote.c b/remote.c
index a6d8ec6..42a4e71 100644
--- a/remote.c
+++ b/remote.c
@@ -508,9 +508,8 @@ static void read_config(struct repository *repo)
repo->remote_state->current_branch = NULL;
if (startup_info->have_repository) {
- int ignore_errno;
const char *head_ref = refs_resolve_ref_unsafe(
- get_main_ref_store(repo), "HEAD", 0, NULL, &flag, &ignore_errno);
+ get_main_ref_store(repo), "HEAD", 0, NULL, &flag);
if (head_ref && (flag & REF_ISSYMREF) &&
skip_prefix(head_ref, "refs/heads/", &head_ref)) {
repo->remote_state->current_branch = make_branch(
@@ -1946,13 +1945,9 @@ const char *branch_get_push(struct branch *branch, struct strbuf *err)
return branch->push_tracking_ref;
}
-static int ignore_symref_update(const char *refname)
+static int ignore_symref_update(const char *refname, struct strbuf *scratch)
{
- int flag;
-
- if (!resolve_ref_unsafe(refname, 0, NULL, &flag))
- return 0; /* non-existing refs are OK */
- return (flag & REF_ISSYMREF);
+ return !refs_read_symbolic_ref(get_main_ref_store(the_repository), refname, scratch);
}
/*
@@ -1965,6 +1960,7 @@ static int ignore_symref_update(const char *refname)
static struct ref *get_expanded_map(const struct ref *remote_refs,
const struct refspec_item *refspec)
{
+ struct strbuf scratch = STRBUF_INIT;
const struct ref *ref;
struct ref *ret = NULL;
struct ref **tail = &ret;
@@ -1972,11 +1968,13 @@ static struct ref *get_expanded_map(const struct ref *remote_refs,
for (ref = remote_refs; ref; ref = ref->next) {
char *expn_name = NULL;
+ strbuf_reset(&scratch);
+
if (strchr(ref->name, '^'))
continue; /* a dereference item */
if (match_name_with_pattern(refspec->src, ref->name,
refspec->dst, &expn_name) &&
- !ignore_symref_update(expn_name)) {
+ !ignore_symref_update(expn_name, &scratch)) {
struct ref *cpy = copy_ref(ref);
cpy->peer_ref = alloc_ref(expn_name);
@@ -1988,6 +1986,7 @@ static struct ref *get_expanded_map(const struct ref *remote_refs,
free(expn_name);
}
+ strbuf_release(&scratch);
return ret;
}
diff --git a/repo-settings.c b/repo-settings.c
index 00ca557..2dfcb2b 100644
--- a/repo-settings.c
+++ b/repo-settings.c
@@ -2,6 +2,7 @@
#include "config.h"
#include "repository.h"
#include "midx.h"
+#include "compat/fsmonitor/fsm-listen.h"
static void repo_cfg_bool(struct repository *r, const char *key, int *dest,
int def)
@@ -26,7 +27,7 @@ void prepare_repo_settings(struct repository *r)
/* Defaults */
r->settings.index_version = -1;
r->settings.core_untracked_cache = UNTRACKED_CACHE_KEEP;
- r->settings.fetch_negotiation_algorithm = FETCH_NEGOTIATION_DEFAULT;
+ r->settings.fetch_negotiation_algorithm = FETCH_NEGOTIATION_CONSECUTIVE;
/* Booleans config or default, cascades to other settings */
repo_cfg_bool(r, "feature.manyfiles", &manyfiles, 0);
@@ -81,10 +82,17 @@ void prepare_repo_settings(struct repository *r)
}
if (!repo_config_get_string(r, "fetch.negotiationalgorithm", &strval)) {
+ int fetch_default = r->settings.fetch_negotiation_algorithm;
if (!strcasecmp(strval, "skipping"))
r->settings.fetch_negotiation_algorithm = FETCH_NEGOTIATION_SKIPPING;
else if (!strcasecmp(strval, "noop"))
r->settings.fetch_negotiation_algorithm = FETCH_NEGOTIATION_NOOP;
+ else if (!strcasecmp(strval, "consecutive"))
+ r->settings.fetch_negotiation_algorithm = FETCH_NEGOTIATION_CONSECUTIVE;
+ else if (!strcasecmp(strval, "default"))
+ r->settings.fetch_negotiation_algorithm = fetch_default;
+ else
+ die("unknown fetch negotiation algorithm '%s'", strval);
}
/*
diff --git a/repository.c b/repository.c
index 34610c5..5d166b6 100644
--- a/repository.c
+++ b/repository.c
@@ -240,6 +240,20 @@ out:
return ret;
}
+static void repo_clear_path_cache(struct repo_path_cache *cache)
+{
+ FREE_AND_NULL(cache->squash_msg);
+ FREE_AND_NULL(cache->squash_msg);
+ FREE_AND_NULL(cache->merge_msg);
+ FREE_AND_NULL(cache->merge_rr);
+ FREE_AND_NULL(cache->merge_mode);
+ FREE_AND_NULL(cache->merge_head);
+ FREE_AND_NULL(cache->merge_autostash);
+ FREE_AND_NULL(cache->auto_merge);
+ FREE_AND_NULL(cache->fetch_head);
+ FREE_AND_NULL(cache->shallow);
+}
+
void repo_clear(struct repository *repo)
{
FREE_AND_NULL(repo->gitdir);
@@ -280,6 +294,8 @@ void repo_clear(struct repository *repo)
remote_state_clear(repo->remote_state);
FREE_AND_NULL(repo->remote_state);
}
+
+ repo_clear_path_cache(&repo->cached_paths);
}
int repo_read_index(struct repository *repo)
@@ -301,6 +317,13 @@ int repo_read_index(struct repository *repo)
if (repo->settings.command_requires_full_index)
ensure_full_index(repo->index);
+ /*
+	 * If sparse checkouts are in use, check whether paths with the
+	 * SKIP_WORKTREE attribute are missing from the worktree; clear
+	 * that attribute for any path that is actually present.
+ */
+ clear_skip_worktree_from_present_files(repo->index);
+
return res;
}
diff --git a/repository.h b/repository.h
index 2b5cf97..6cc661e 100644
--- a/repository.h
+++ b/repository.h
@@ -4,6 +4,7 @@
#include "path.h"
struct config_set;
+struct fsmonitor_settings;
struct git_hash_algo;
struct index_state;
struct lock_file;
@@ -20,7 +21,7 @@ enum untracked_cache_setting {
};
enum fetch_negotiation_setting {
- FETCH_NEGOTIATION_DEFAULT,
+ FETCH_NEGOTIATION_CONSECUTIVE,
FETCH_NEGOTIATION_SKIPPING,
FETCH_NEGOTIATION_NOOP,
};
@@ -35,6 +36,8 @@ struct repo_settings {
int command_requires_full_index;
int sparse_index;
+ struct fsmonitor_settings *fsmonitor; /* lazily loaded */
+
int index_version;
enum untracked_cache_setting core_untracked_cache;
@@ -44,6 +47,18 @@ struct repo_settings {
int core_multi_pack_index;
};
+struct repo_path_cache {
+ char *squash_msg;
+ char *merge_msg;
+ char *merge_rr;
+ char *merge_mode;
+ char *merge_head;
+ char *merge_autostash;
+ char *auto_merge;
+ char *fetch_head;
+ char *shallow;
+};
+
struct repository {
/* Environment */
/*
@@ -82,7 +97,7 @@ struct repository {
/*
* Contains path to often used file names.
*/
- struct path_cache cached_paths;
+ struct repo_path_cache cached_paths;
/*
* Path to the repository's graft file.
diff --git a/rerere.c b/rerere.c
index d83d58d..d26627c 100644
--- a/rerere.c
+++ b/rerere.c
@@ -609,19 +609,20 @@ static int try_merge(struct index_state *istate,
const struct rerere_id *id, const char *path,
mmfile_t *cur, mmbuffer_t *result)
{
- int ret;
+ enum ll_merge_result ret;
mmfile_t base = {NULL, 0}, other = {NULL, 0};
if (read_mmfile(&base, rerere_path(id, "preimage")) ||
- read_mmfile(&other, rerere_path(id, "postimage")))
- ret = 1;
- else
+ read_mmfile(&other, rerere_path(id, "postimage"))) {
+ ret = LL_MERGE_CONFLICT;
+ } else {
/*
* A three-way merge. Note that this honors user-customizable
* low-level merge driver settings.
*/
ret = ll_merge(result, path, &base, NULL, cur, "", &other, "",
istate, NULL);
+ }
free(base.ptr);
free(other.ptr);
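The rerere change above follows the wider move to have ll_merge() report an
enum ll_merge_result instead of a bare int. A minimal caller sketch, assuming
the usual LL_MERGE_OK / LL_MERGE_CONFLICT enumerators from that series; path,
base, ours, theirs and istate are placeholders, not code from this commit:

	mmbuffer_t result = { NULL, 0 };
	enum ll_merge_result status;

	status = ll_merge(&result, path, &base, NULL, &ours, "", &theirs, "",
			  istate, NULL);
	if (status == LL_MERGE_CONFLICT)
		error("conflicts while merging %s", path); /* markers are in result.ptr */
	free(result.ptr);
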
diff --git a/reset.c b/reset.c
index f214df3..e3383a9 100644
--- a/reset.c
+++ b/reset.c
@@ -7,38 +7,108 @@
#include "tree-walk.h"
#include "tree.h"
#include "unpack-trees.h"
+#include "hook.h"
-int reset_head(struct repository *r, struct object_id *oid, const char *action,
- const char *switch_to_branch, unsigned flags,
- const char *reflog_orig_head, const char *reflog_head,
- const char *default_reflog_action)
+static int update_refs(const struct reset_head_opts *opts,
+ const struct object_id *oid,
+ const struct object_id *head)
{
- unsigned detach_head = flags & RESET_HEAD_DETACH;
- unsigned reset_hard = flags & RESET_HEAD_HARD;
- unsigned run_hook = flags & RESET_HEAD_RUN_POST_CHECKOUT_HOOK;
- unsigned refs_only = flags & RESET_HEAD_REFS_ONLY;
- unsigned update_orig_head = flags & RESET_ORIG_HEAD;
- struct object_id head_oid;
+ unsigned detach_head = opts->flags & RESET_HEAD_DETACH;
+ unsigned run_hook = opts->flags & RESET_HEAD_RUN_POST_CHECKOUT_HOOK;
+ unsigned update_orig_head = opts->flags & RESET_ORIG_HEAD;
+ const struct object_id *orig_head = opts->orig_head;
+ const char *switch_to_branch = opts->branch;
+ const char *reflog_branch = opts->branch_msg;
+ const char *reflog_head = opts->head_msg;
+ const char *reflog_orig_head = opts->orig_head_msg;
+ const char *default_reflog_action = opts->default_reflog_action;
+ struct object_id *old_orig = NULL, oid_old_orig;
+ struct strbuf msg = STRBUF_INIT;
+ const char *reflog_action;
+ size_t prefix_len;
+ int ret;
+
+ if ((update_orig_head && !reflog_orig_head) || !reflog_head) {
+ if (!default_reflog_action)
+ BUG("default_reflog_action must be given when reflog messages are omitted");
+ reflog_action = getenv(GIT_REFLOG_ACTION_ENVIRONMENT);
+ strbuf_addf(&msg, "%s: ", reflog_action ? reflog_action :
+ default_reflog_action);
+ }
+ prefix_len = msg.len;
+
+ if (update_orig_head) {
+ if (!get_oid("ORIG_HEAD", &oid_old_orig))
+ old_orig = &oid_old_orig;
+ if (head) {
+ if (!reflog_orig_head) {
+ strbuf_addstr(&msg, "updating ORIG_HEAD");
+ reflog_orig_head = msg.buf;
+ }
+ update_ref(reflog_orig_head, "ORIG_HEAD",
+ orig_head ? orig_head : head,
+ old_orig, 0, UPDATE_REFS_MSG_ON_ERR);
+ } else if (old_orig)
+ delete_ref(NULL, "ORIG_HEAD", old_orig, 0);
+ }
+
+ if (!reflog_head) {
+ strbuf_setlen(&msg, prefix_len);
+ strbuf_addstr(&msg, "updating HEAD");
+ reflog_head = msg.buf;
+ }
+ if (!switch_to_branch)
+ ret = update_ref(reflog_head, "HEAD", oid, head,
+ detach_head ? REF_NO_DEREF : 0,
+ UPDATE_REFS_MSG_ON_ERR);
+ else {
+ ret = update_ref(reflog_branch ? reflog_branch : reflog_head,
+ switch_to_branch, oid, NULL, 0,
+ UPDATE_REFS_MSG_ON_ERR);
+ if (!ret)
+ ret = create_symref("HEAD", switch_to_branch,
+ reflog_head);
+ }
+ if (!ret && run_hook)
+ run_hooks_l("post-checkout",
+ oid_to_hex(head ? head : null_oid()),
+ oid_to_hex(oid), "1", NULL);
+ strbuf_release(&msg);
+ return ret;
+}
+
+int reset_head(struct repository *r, const struct reset_head_opts *opts)
+{
+ const struct object_id *oid = opts->oid;
+ const char *switch_to_branch = opts->branch;
+ unsigned reset_hard = opts->flags & RESET_HEAD_HARD;
+ unsigned refs_only = opts->flags & RESET_HEAD_REFS_ONLY;
+ unsigned update_orig_head = opts->flags & RESET_ORIG_HEAD;
+ struct object_id *head = NULL, head_oid;
struct tree_desc desc[2] = { { NULL }, { NULL } };
struct lock_file lock = LOCK_INIT;
struct unpack_trees_options unpack_tree_opts = { 0 };
struct tree *tree;
- const char *reflog_action;
- struct strbuf msg = STRBUF_INIT;
- size_t prefix_len;
- struct object_id *orig = NULL, oid_orig,
- *old_orig = NULL, oid_old_orig;
+ const char *action;
int ret = 0, nr = 0;
if (switch_to_branch && !starts_with(switch_to_branch, "refs/"))
BUG("Not a fully qualified branch: '%s'", switch_to_branch);
+ if (opts->orig_head_msg && !update_orig_head)
+ BUG("ORIG_HEAD reflog message given without updating ORIG_HEAD");
+
+ if (opts->branch_msg && !opts->branch)
+ BUG("branch reflog message given without a branch");
+
if (!refs_only && repo_hold_locked_index(r, &lock, LOCK_REPORT_ON_ERROR) < 0) {
ret = -1;
goto leave_reset_head;
}
- if ((!oid || !reset_hard) && get_oid("HEAD", &head_oid)) {
+ if (!get_oid("HEAD", &head_oid)) {
+ head = &head_oid;
+ } else if (!oid || !reset_hard) {
ret = error(_("could not determine HEAD revision"));
goto leave_reset_head;
}
@@ -47,8 +117,9 @@ int reset_head(struct repository *r, struct object_id *oid, const char *action,
oid = &head_oid;
if (refs_only)
- goto reset_head_refs;
+ return update_refs(opts, oid, head);
+ action = reset_hard ? "reset" : "checkout";
setup_unpack_trees_porcelain(&unpack_tree_opts, action);
unpack_tree_opts.head_idx = 1;
unpack_tree_opts.src_index = r->index;
@@ -58,7 +129,7 @@ int reset_head(struct repository *r, struct object_id *oid, const char *action,
unpack_tree_opts.merge = 1;
unpack_tree_opts.preserve_ignored = 0; /* FIXME: !overwrite_ignore */
init_checkout_metadata(&unpack_tree_opts.meta, switch_to_branch, oid, NULL);
- if (!detach_head)
+ if (reset_hard)
unpack_tree_opts.reset = UNPACK_RESET_PROTECT_UNTRACKED;
if (repo_read_index_unmerged(r) < 0) {
@@ -90,49 +161,10 @@ int reset_head(struct repository *r, struct object_id *oid, const char *action,
goto leave_reset_head;
}
-reset_head_refs:
- reflog_action = getenv(GIT_REFLOG_ACTION_ENVIRONMENT);
- strbuf_addf(&msg, "%s: ", reflog_action ? reflog_action : default_reflog_action);
- prefix_len = msg.len;
-
- if (update_orig_head) {
- if (!get_oid("ORIG_HEAD", &oid_old_orig))
- old_orig = &oid_old_orig;
- if (!get_oid("HEAD", &oid_orig)) {
- orig = &oid_orig;
- if (!reflog_orig_head) {
- strbuf_addstr(&msg, "updating ORIG_HEAD");
- reflog_orig_head = msg.buf;
- }
- update_ref(reflog_orig_head, "ORIG_HEAD", orig,
- old_orig, 0, UPDATE_REFS_MSG_ON_ERR);
- } else if (old_orig)
- delete_ref(NULL, "ORIG_HEAD", old_orig, 0);
- }
-
- if (!reflog_head) {
- strbuf_setlen(&msg, prefix_len);
- strbuf_addstr(&msg, "updating HEAD");
- reflog_head = msg.buf;
- }
- if (!switch_to_branch)
- ret = update_ref(reflog_head, "HEAD", oid, orig,
- detach_head ? REF_NO_DEREF : 0,
- UPDATE_REFS_MSG_ON_ERR);
- else {
- ret = update_ref(reflog_head, switch_to_branch, oid,
- NULL, 0, UPDATE_REFS_MSG_ON_ERR);
- if (!ret)
- ret = create_symref("HEAD", switch_to_branch,
- reflog_head);
- }
- if (run_hook)
- run_hook_le(NULL, "post-checkout",
- oid_to_hex(orig ? orig : null_oid()),
- oid_to_hex(oid), "1", NULL);
+ if (oid != &head_oid || update_orig_head || switch_to_branch)
+ ret = update_refs(opts, oid, head);
leave_reset_head:
- strbuf_release(&msg);
rollback_lock_file(&lock);
clear_unpack_trees_porcelain(&unpack_tree_opts);
while (nr)
diff --git a/reset.h b/reset.h
index 12f83c7..a28f818 100644
--- a/reset.h
+++ b/reset.h
@@ -6,15 +6,55 @@
#define GIT_REFLOG_ACTION_ENVIRONMENT "GIT_REFLOG_ACTION"
+/* Request a detached checkout */
#define RESET_HEAD_DETACH (1<<0)
+/* Request a reset rather than a checkout */
#define RESET_HEAD_HARD (1<<1)
+/* Run the post-checkout hook */
#define RESET_HEAD_RUN_POST_CHECKOUT_HOOK (1<<2)
+/* Only update refs, do not touch the worktree */
#define RESET_HEAD_REFS_ONLY (1<<3)
+/* Update ORIG_HEAD as well as HEAD */
#define RESET_ORIG_HEAD (1<<4)
-int reset_head(struct repository *r, struct object_id *oid, const char *action,
- const char *switch_to_branch, unsigned flags,
- const char *reflog_orig_head, const char *reflog_head,
- const char *default_reflog_action);
+struct reset_head_opts {
+ /*
+ * The commit to checkout/reset to. Defaults to HEAD.
+ */
+ const struct object_id *oid;
+ /*
+ * Optional value to set ORIG_HEAD. Defaults to HEAD.
+ */
+ const struct object_id *orig_head;
+ /*
+ * Optional branch to switch to.
+ */
+ const char *branch;
+ /*
+ * Flags defined above.
+ */
+ unsigned flags;
+ /*
+	 * Optional reflog message for branch; defaults to head_msg.
+ */
+ const char *branch_msg;
+ /*
+	 * Optional reflog message for HEAD; if this is omitted but oid or
+	 * branch is given, then default_reflog_action must be given.
+ */
+ const char *head_msg;
+ /*
+	 * Optional reflog message for ORIG_HEAD; if this is omitted and flags
+	 * contains RESET_ORIG_HEAD, then default_reflog_action must be given.
+ */
+ const char *orig_head_msg;
+ /*
+	 * Action to use in default reflog messages; only required if a ref is
+	 * being updated and the reflog messages above are omitted.
+ */
+ const char *default_reflog_action;
+};
+
+int reset_head(struct repository *r, const struct reset_head_opts *opts);
#endif
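The positional reset_head() parameters are folded into the options struct
above. A minimal sketch of the new calling convention (onto_oid is a
hypothetical, already-resolved commit; this is not code from the series
itself):

	struct reset_head_opts ropts = {
		.oid = &onto_oid,
		.flags = RESET_HEAD_HARD | RESET_ORIG_HEAD,
		/* no head_msg/orig_head_msg, so a default action is required */
		.default_reflog_action = "reset",
	};

	if (reset_head(the_repository, &ropts) < 0)
		die(_("could not reset"));
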
diff --git a/revision.c b/revision.c
index ad4286f..7d435f8 100644
--- a/revision.c
+++ b/revision.c
@@ -32,6 +32,7 @@
#include "utf8.h"
#include "bloom.h"
#include "json-writer.h"
+#include "list-objects-filter-options.h"
volatile show_early_output_fn_t show_early_output;
@@ -273,7 +274,7 @@ static void commit_stack_clear(struct commit_stack *stack)
stack->nr = stack->alloc = 0;
}
-static void mark_one_parent_uninteresting(struct commit *commit,
+static void mark_one_parent_uninteresting(struct rev_info *revs, struct commit *commit,
struct commit_stack *pending)
{
struct commit_list *l;
@@ -290,20 +291,26 @@ static void mark_one_parent_uninteresting(struct commit *commit,
* wasn't uninteresting), in which case we need
* to mark its parents recursively too..
*/
- for (l = commit->parents; l; l = l->next)
+ for (l = commit->parents; l; l = l->next) {
commit_stack_push(pending, l->item);
+ if (revs && revs->exclude_first_parent_only)
+ break;
+ }
}
-void mark_parents_uninteresting(struct commit *commit)
+void mark_parents_uninteresting(struct rev_info *revs, struct commit *commit)
{
struct commit_stack pending = COMMIT_STACK_INIT;
struct commit_list *l;
- for (l = commit->parents; l; l = l->next)
- mark_one_parent_uninteresting(l->item, &pending);
+ for (l = commit->parents; l; l = l->next) {
+ mark_one_parent_uninteresting(revs, l->item, &pending);
+ if (revs && revs->exclude_first_parent_only)
+ break;
+ }
while (pending.nr > 0)
- mark_one_parent_uninteresting(commit_stack_pop(&pending),
+ mark_one_parent_uninteresting(revs, commit_stack_pop(&pending),
&pending);
commit_stack_clear(&pending);
@@ -441,7 +448,7 @@ static struct commit *handle_commit(struct rev_info *revs,
if (repo_parse_commit(revs->repo, commit) < 0)
die("unable to parse commit %s", name);
if (flags & UNINTERESTING) {
- mark_parents_uninteresting(commit);
+ mark_parents_uninteresting(revs, commit);
if (!revs->topo_order || !generation_numbers_enabled(the_repository))
revs->limited = 1;
@@ -1124,7 +1131,7 @@ static int process_parents(struct rev_info *revs, struct commit *commit,
if (repo_parse_commit_gently(revs->repo, p, 1) < 0)
continue;
if (p->parents)
- mark_parents_uninteresting(p);
+ mark_parents_uninteresting(revs, p);
if (p->object.flags & SEEN)
continue;
p->object.flags |= (SEEN | NOT_USER_GIVEN);
@@ -1132,6 +1139,8 @@ static int process_parents(struct rev_info *revs, struct commit *commit,
commit_list_insert_by_date(p, list);
if (queue)
prio_queue_put(queue, p);
+ if (revs->exclude_first_parent_only)
+ break;
}
return 0;
}
@@ -1422,7 +1431,7 @@ static int limit_list(struct rev_info *revs)
if (process_parents(revs, commit, &original_list, NULL) < 0)
return -1;
if (obj->flags & UNINTERESTING) {
- mark_parents_uninteresting(commit);
+ mark_parents_uninteresting(revs, commit);
slop = still_interesting(original_list, date, slop, &interesting_cache);
if (slop)
continue;
@@ -1838,7 +1847,7 @@ void repo_init_revisions(struct repository *r,
revs->commit_format = CMIT_FMT_DEFAULT;
revs->expand_tabs_in_log_default = 8;
- grep_init(&revs->grep_filter, revs->repo, prefix);
+ grep_init(&revs->grep_filter, revs->repo);
revs->grep_filter.status_only = 1;
repo_diff_setup(revs->repo, &revs->diffopt);
@@ -2223,6 +2232,8 @@ static int handle_revision_opt(struct rev_info *revs, int argc, const char **arg
return argcount;
} else if (!strcmp(arg, "--first-parent")) {
revs->first_parent_only = 1;
+ } else if (!strcmp(arg, "--exclude-first-parent-only")) {
+ revs->exclude_first_parent_only = 1;
} else if (!strcmp(arg, "--ancestry-path")) {
revs->ancestry_path = 1;
revs->simplify_history = 0;
@@ -2424,9 +2435,11 @@ static int handle_revision_opt(struct rev_info *revs, int argc, const char **arg
revs->pretty_given = 1;
revs->abbrev_commit = 1;
} else if (!strcmp(arg, "--graph")) {
- revs->topo_order = 1;
- revs->rewrite_parents = 1;
+ graph_clear(revs->graph);
revs->graph = graph_init(revs);
+ } else if (!strcmp(arg, "--no-graph")) {
+ graph_clear(revs->graph);
+ revs->graph = NULL;
} else if (!strcmp(arg, "--encode-email-headers")) {
revs->encode_email_headers = 1;
} else if (!strcmp(arg, "--no-encode-email-headers")) {
@@ -2523,8 +2536,6 @@ static int handle_revision_opt(struct rev_info *revs, int argc, const char **arg
unkv[(*unkc)++] = arg;
return opts;
}
- if (revs->graph && revs->track_linear)
- die(_("options '%s' and '%s' cannot be used together"), "--show-linear-break", "--graph");
return 1;
}
@@ -2543,6 +2554,17 @@ void parse_revision_opt(struct rev_info *revs, struct parse_opt_ctx_t *ctx,
ctx->argc -= n;
}
+void revision_opts_finish(struct rev_info *revs)
+{
+ if (revs->graph && revs->track_linear)
+ die(_("options '%s' and '%s' cannot be used together"), "--show-linear-break", "--graph");
+
+ if (revs->graph) {
+ revs->topo_order = 1;
+ revs->rewrite_parents = 1;
+ }
+}
+
static int for_each_bisect_ref(struct ref_store *refs, each_ref_fn fn,
void *cb_data, const char *term)
{
@@ -2669,6 +2691,10 @@ static int handle_revision_pseudo_opt(struct rev_info *revs,
revs->no_walk = 0;
} else if (!strcmp(arg, "--single-worktree")) {
revs->single_worktree = 1;
+ } else if (skip_prefix(arg, ("--filter="), &arg)) {
+ parse_list_objects_filter(&revs->filter, arg);
+ } else if (!strcmp(arg, ("--no-filter"))) {
+ list_objects_filter_set_no_filter(&revs->filter);
} else {
return 0;
}
@@ -2785,6 +2811,7 @@ int setup_revisions(int argc, const char **argv, struct rev_info *revs, struct s
break;
}
}
+ revision_opts_finish(revs);
if (prune_data.nr) {
/*
@@ -2860,8 +2887,6 @@ int setup_revisions(int argc, const char **argv, struct rev_info *revs, struct s
diff_setup_done(&revs->diffopt);
- grep_commit_pattern_type(GREP_PATTERN_TYPE_UNSPECIFIED,
- &revs->grep_filter);
if (!is_encoding_utf8(get_log_output_encoding()))
revs->grep_filter.ignore_locale = 1;
compile_grep_patterns(&revs->grep_filter);
@@ -2872,6 +2897,8 @@ int setup_revisions(int argc, const char **argv, struct rev_info *revs, struct s
die("cannot combine --walk-reflogs with history-limiting options");
if (revs->rewrite_parents && revs->children.name)
die(_("options '%s' and '%s' cannot be used together"), "--parents", "--children");
+ if (revs->filter.choice && !revs->blob_objects)
+ die(_("object filtering requires --objects"));
/*
* Limitations on the graph functionality
@@ -3345,7 +3372,7 @@ static void explore_walk_step(struct rev_info *revs)
return;
if (c->object.flags & UNINTERESTING)
- mark_parents_uninteresting(c);
+ mark_parents_uninteresting(revs, c);
for (p = c->parents; p; p = p->next)
test_flag_and_insert(&info->explore_queue, p->item, TOPO_WALK_EXPLORED);
diff --git a/revision.h b/revision.h
index 3f66147..5bc59c7 100644
--- a/revision.h
+++ b/revision.h
@@ -8,6 +8,7 @@
#include "pretty.h"
#include "diff.h"
#include "commit-slab-decl.h"
+#include "list-objects-filter-options.h"
/**
* The revision walking API offers functions to build a list of revisions
@@ -94,6 +95,12 @@ struct rev_info {
/* The end-points specified by the end user */
struct rev_cmdline_info cmdline;
+ /*
+ * Object filter options. No filtering is specified
+ * if and only if filter.choice is zero.
+ */
+ struct list_objects_filter_options filter;
+
/* excluding from --branches, --refs, etc. expansion */
struct string_list *ref_excludes;
@@ -158,6 +165,7 @@ struct rev_info {
bisect:1,
ancestry_path:1,
first_parent_only:1,
+ exclude_first_parent_only:1,
line_level_traverse:1,
tree_blobs_in_commit_order:1,
@@ -195,7 +203,8 @@ struct rev_info {
combine_merges:1,
combined_all_paths:1,
dense_combined_merges:1,
- first_parent_merges:1;
+ first_parent_merges:1,
+ remerge_diff:1;
/* Format info */
int show_notes;
@@ -315,6 +324,9 @@ struct rev_info {
/* misc. flags related to '--no-kept-objects' */
unsigned keep_pack_cache_flags;
+
+ /* Location where temporary objects for remerge-diff are written. */
+ struct tmp_objdir *remerge_objdir;
};
int ref_excluded(struct string_list *, const char *path);
@@ -372,6 +384,7 @@ void parse_revision_opt(struct rev_info *revs, struct parse_opt_ctx_t *ctx,
#define REVARG_COMMITTISH 02
int handle_revision_arg(const char *arg, struct rev_info *revs,
int flags, unsigned revarg_opt);
+void revision_opts_finish(struct rev_info *revs);
/**
* Reset the flags used by the revision walking api. You can use this to do
@@ -398,7 +411,7 @@ const char *get_revision_mark(const struct rev_info *revs,
void put_revision_mark(const struct rev_info *revs,
const struct commit *commit);
-void mark_parents_uninteresting(struct commit *commit);
+void mark_parents_uninteresting(struct rev_info *revs, struct commit *commit);
void mark_tree_uninteresting(struct repository *r, struct tree *tree);
void mark_trees_uninteresting_sparse(struct repository *r, struct oidset *trees);
diff --git a/run-command.c b/run-command.c
index 69dde42..a8501e3 100644
--- a/run-command.c
+++ b/run-command.c
@@ -1307,39 +1307,6 @@ int async_with_fork(void)
#endif
}
-int run_hook_ve(const char *const *env, const char *name, va_list args)
-{
- struct child_process hook = CHILD_PROCESS_INIT;
- const char *p;
-
- p = find_hook(name);
- if (!p)
- return 0;
-
- strvec_push(&hook.args, p);
- while ((p = va_arg(args, const char *)))
- strvec_push(&hook.args, p);
- if (env)
- strvec_pushv(&hook.env_array, (const char **)env);
- hook.no_stdin = 1;
- hook.stdout_to_stderr = 1;
- hook.trace2_hook_name = name;
-
- return run_command(&hook);
-}
-
-int run_hook_le(const char *const *env, const char *name, ...)
-{
- va_list args;
- int ret;
-
- va_start(args, name);
- ret = run_hook_ve(env, name, args);
- va_end(args);
-
- return ret;
-}
-
struct io_pump {
/* initialized by caller */
int fd;
diff --git a/run-command.h b/run-command.h
index 2be5f5d..07bed6c 100644
--- a/run-command.h
+++ b/run-command.h
@@ -220,23 +220,6 @@ int finish_command_in_signal(struct child_process *);
*/
int run_command(struct child_process *);
-/**
- * Run a hook.
- * The first argument is a pathname to an index file, or NULL
- * if the hook uses the default index file or no index is needed.
- * The second argument is the name of the hook.
- * The further arguments correspond to the hook arguments.
- * The last argument has to be NULL to terminate the arguments list.
- * If the hook does not exist or is not executable, the return
- * value will be zero.
- * If it is executable, the hook will be executed and the exit
- * status of the hook is returned.
- * On execution, .stdout_to_stderr and .no_stdin will be set.
- */
-LAST_ARG_MUST_BE_NULL
-int run_hook_le(const char *const *env, const char *name, ...);
-int run_hook_ve(const char *const *env, const char *name, va_list args);
-
/*
* Trigger an auto-gc
*/
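Callers of the removed run_hook_le() are expected to switch to the hook.h API;
the reset.c hunk above already uses run_hooks_l(). A rough before/after
sketch, where old_hex and new_hex stand in for the real arguments:

	/* before (removed here): */
	run_hook_le(NULL, "post-checkout", old_hex, new_hex, "1", NULL);

	/* after, via hook.h: */
	run_hooks_l("post-checkout", old_hex, new_hex, "1", NULL);
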
diff --git a/sequencer.c b/sequencer.c
index 5213d16..a1bb393 100644
--- a/sequencer.c
+++ b/sequencer.c
@@ -1220,7 +1220,7 @@ static int run_prepare_commit_msg_hook(struct repository *r,
} else {
arg1 = "message";
}
- if (run_commit_hook(0, r->index_file, "prepare-commit-msg", name,
+ if (run_commit_hook(0, r->index_file, NULL, "prepare-commit-msg", name,
arg1, arg2, NULL))
ret = error(_("'prepare-commit-msg' hook failed"));
@@ -1281,7 +1281,6 @@ void print_commit_summary(struct repository *r,
struct strbuf author_ident = STRBUF_INIT;
struct strbuf committer_ident = STRBUF_INIT;
struct ref_store *refs;
- int resolve_errno;
commit = lookup_commit(r, oid);
if (!commit)
@@ -1332,12 +1331,9 @@ void print_commit_summary(struct repository *r,
diff_setup_done(&rev.diffopt);
refs = get_main_ref_store(the_repository);
- head = refs_resolve_ref_unsafe(refs, "HEAD", 0, NULL, NULL,
- &resolve_errno);
- if (!head) {
- errno = resolve_errno;
- die_errno(_("unable to resolve HEAD after creating commit"));
- }
+ head = refs_resolve_ref_unsafe(refs, "HEAD", 0, NULL, NULL);
+ if (!head)
+ die(_("unable to resolve HEAD after creating commit"));
if (!strcmp(head, "HEAD"))
head = _("detached HEAD");
else
@@ -1556,7 +1552,7 @@ static int try_to_commit(struct repository *r,
goto out;
}
- run_commit_hook(0, r->index_file, "post-commit", NULL);
+ run_commit_hook(0, r->index_file, NULL, "post-commit", NULL);
if (flags & AMEND_MSG)
commit_post_rewrite(r, current_head, oid);
@@ -2806,7 +2802,7 @@ static int populate_opts_cb(const char *key, const char *value, void *data)
return error(_("invalid key: %s"), key);
if (!error_flag)
- return error(_("invalid value for %s: %s"), key, value);
+ return error(_("invalid value for '%s': '%s'"), key, value);
return 0;
}
@@ -3588,7 +3584,7 @@ static int do_label(struct repository *r, const char *name, int len)
strbuf_addf(&ref_name, "refs/rewritten/%.*s", len, name);
strbuf_addf(&msg, "rebase (label) '%.*s'", len, name);
- transaction = ref_store_transaction_begin(refs, &err);
+ transaction = ref_store_transaction_begin(refs, 0, &err);
if (!transaction) {
error("%s", err.buf);
ret = -1;
@@ -3753,7 +3749,7 @@ static int do_merge(struct repository *r,
int run_commit_flags = 0;
struct strbuf ref_name = STRBUF_INIT;
struct commit *head_commit, *merge_commit, *i;
- struct commit_list *bases, *j, *reversed = NULL;
+ struct commit_list *bases, *j;
struct commit_list *to_merge = NULL, **tail = &to_merge;
const char *strategy = !opts->xopts_nr &&
(!opts->strategy ||
@@ -3988,9 +3984,7 @@ static int do_merge(struct repository *r,
git_path_merge_head(r), 0);
write_message("no-ff", 5, git_path_merge_mode(r), 0);
- for (j = bases; j; j = j->next)
- commit_list_insert(j->item, &reversed);
- free_commit_list(bases);
+ bases = reverse_commit_list(bases);
repo_read_index(r);
init_merge_options(&o, r);
@@ -4006,10 +4000,10 @@ static int do_merge(struct repository *r,
* update the index and working copy immediately.
*/
ret = merge_ort_recursive(&o,
- head_commit, merge_commit, reversed,
+ head_commit, merge_commit, bases,
&i);
} else {
- ret = merge_recursive(&o, head_commit, merge_commit, reversed,
+ ret = merge_recursive(&o, head_commit, merge_commit, bases,
&i);
}
if (ret <= 0)
@@ -4089,8 +4083,7 @@ static enum todo_command peek_command(struct todo_list *todo_list, int offset)
return -1;
}
-void create_autostash(struct repository *r, const char *path,
- const char *default_reflog_action)
+void create_autostash(struct repository *r, const char *path)
{
struct strbuf buf = STRBUF_INIT;
struct lock_file lock_file = LOCK_INIT;
@@ -4105,6 +4098,7 @@ void create_autostash(struct repository *r, const char *path,
if (has_unstaged_changes(r, 1) ||
has_uncommitted_changes(r, 1)) {
struct child_process stash = CHILD_PROCESS_INIT;
+ struct reset_head_opts ropts = { .flags = RESET_HEAD_HARD };
struct object_id oid;
strvec_pushl(&stash.args,
@@ -4126,11 +4120,8 @@ void create_autostash(struct repository *r, const char *path,
path);
write_file(path, "%s", oid_to_hex(&oid));
printf(_("Created autostash: %s\n"), buf.buf);
- if (reset_head(r, NULL, "reset --hard",
- NULL, RESET_HEAD_HARD, NULL, NULL,
- default_reflog_action) < 0)
+ if (reset_head(r, &ropts) < 0)
die(_("could not reset --hard"));
-
if (discard_index(r->index) < 0 ||
repo_read_index(r) < 0)
die(_("could not read index"));
@@ -4215,47 +4206,26 @@ int apply_autostash_oid(const char *stash_oid)
return apply_save_autostash_oid(stash_oid, 1);
}
-static int run_git_checkout(struct repository *r, struct replay_opts *opts,
- const char *commit, const char *action)
-{
- struct child_process cmd = CHILD_PROCESS_INIT;
- int ret;
-
- cmd.git_cmd = 1;
-
- if (startup_info->original_cwd) {
- cmd.dir = startup_info->original_cwd;
- strvec_pushf(&cmd.env_array, "%s=%s",
- GIT_WORK_TREE_ENVIRONMENT, r->worktree);
- }
- strvec_push(&cmd.args, "checkout");
- strvec_push(&cmd.args, commit);
- strvec_pushf(&cmd.env_array, GIT_REFLOG_ACTION "=%s", action);
-
- if (opts->verbose)
- ret = run_command(&cmd);
- else
- ret = run_command_silent_on_success(&cmd);
-
- if (!ret)
- discard_index(r->index);
-
- return ret;
-}
-
static int checkout_onto(struct repository *r, struct replay_opts *opts,
const char *onto_name, const struct object_id *onto,
const struct object_id *orig_head)
{
- const char *action = reflog_message(opts, "start", "checkout %s", onto_name);
-
- if (run_git_checkout(r, opts, oid_to_hex(onto), action)) {
+ struct reset_head_opts ropts = {
+ .oid = onto,
+ .orig_head = orig_head,
+ .flags = RESET_HEAD_DETACH | RESET_ORIG_HEAD |
+ RESET_HEAD_RUN_POST_CHECKOUT_HOOK,
+ .head_msg = reflog_message(opts, "start", "checkout %s",
+ onto_name),
+ .default_reflog_action = "rebase"
+ };
+ if (reset_head(r, &ropts)) {
apply_autostash(rebase_path_autostash());
sequencer_remove_state(opts);
return error(_("could not detach HEAD"));
}
- return update_ref(NULL, "ORIG_HEAD", orig_head, NULL, 0, UPDATE_REFS_MSG_ON_ERR);
+ return 0;
}
static int stopped_at_head(struct repository *r)
diff --git a/sequencer.h b/sequencer.h
index 05a7d2b..da64473 100644
--- a/sequencer.h
+++ b/sequencer.h
@@ -197,8 +197,7 @@ void commit_post_rewrite(struct repository *r,
const struct commit *current_head,
const struct object_id *new_head);
-void create_autostash(struct repository *r, const char *path,
- const char *default_reflog_action);
+void create_autostash(struct repository *r, const char *path);
int save_autostash(const char *path);
int apply_autostash(const char *path);
int apply_autostash_oid(const char *stash_oid);
diff --git a/setup.c b/setup.c
index b03c382..c8f67bf 100644
--- a/setup.c
+++ b/setup.c
@@ -560,7 +560,8 @@ static enum extension_result handle_extension(const char *var,
return config_error_nonbool(var);
format = hash_algo_by_name(value);
if (format == GIT_HASH_UNKNOWN)
- return error("invalid value for 'extensions.objectformat'");
+ return error(_("invalid value for '%s': '%s'"),
+ "extensions.objectformat", value);
data->hash_algo = format;
return EXTENSION_OK;
}
diff --git a/shallow.c b/shallow.c
index 9ed18eb..e158be5 100644
--- a/shallow.c
+++ b/shallow.c
@@ -90,6 +90,7 @@ static void reset_repository_shallow(struct repository *r)
{
r->parsed_objects->is_shallow = -1;
stat_validity_clear(r->parsed_objects->shallow_stat);
+ reset_commit_grafts(r);
}
int commit_shallow_file(struct repository *r, struct shallow_lock *lk)
@@ -603,7 +604,7 @@ static int mark_uninteresting(const char *refname, const struct object_id *oid,
if (!commit)
return 0;
commit->object.flags |= UNINTERESTING;
- mark_parents_uninteresting(commit);
+ mark_parents_uninteresting(NULL, commit);
return 0;
}
diff --git a/shared.mak b/shared.mak
new file mode 100644
index 0000000..50d4596
--- /dev/null
+++ b/shared.mak
@@ -0,0 +1,103 @@
+### Remove GNU make implicit rules
+
+## This speeds things up since we don't need to look for and stat() a
+## "foo.c,v" every time a rule referring to "foo.c" is in play. See
+## "make -p -f/dev/null | grep ^%::'".
+%:: %,v
+%:: RCS/%,v
+%:: RCS/%
+%:: s.%
+%:: SCCS/s.%
+
+## Likewise delete default $(SUFFIXES). See:
+##
+## info make --index-search=.SUFFIXES
+.SUFFIXES:
+
+### Flags affecting all rules
+
+# A GNU make extension since gmake 3.72 (released in late 1994) to
+# remove the target of rules if commands in those rules fail. The
+# default is to only do that if make itself receives a signal. Affects
+# all targets, see:
+#
+# info make --index-search=.DELETE_ON_ERROR
+.DELETE_ON_ERROR:
+
+### Global variables
+
+## comma, empty, space: handy variables as these tokens are either
+## special or can be hard to spot among other Makefile syntax.
+comma := ,
+empty :=
+space := $(empty) $(empty)
+
+### Quieting
+## common
+QUIET_SUBDIR0 = +$(MAKE) -C # space to separate -C and subdir
+QUIET_SUBDIR1 =
+
+ifneq ($(findstring w,$(MAKEFLAGS)),w)
+PRINT_DIR = --no-print-directory
+else # "make -w"
+NO_SUBDIR = :
+endif
+
+ifneq ($(findstring s,$(MAKEFLAGS)),s)
+ifndef V
+## common
+ QUIET_SUBDIR0 = +@subdir=
+ QUIET_SUBDIR1 = ;$(NO_SUBDIR) echo ' ' SUBDIR $$subdir; \
+ $(MAKE) $(PRINT_DIR) -C $$subdir
+
+ QUIET = @
+ QUIET_GEN = @echo ' ' GEN $@;
+
+ QUIET_MKDIR_P_PARENT = @echo ' ' MKDIR -p $(@D);
+
+## Used in "Makefile"
+ QUIET_CC = @echo ' ' CC $@;
+ QUIET_AR = @echo ' ' AR $@;
+ QUIET_LINK = @echo ' ' LINK $@;
+ QUIET_BUILT_IN = @echo ' ' BUILTIN $@;
+ QUIET_LNCP = @echo ' ' LN/CP $@;
+ QUIET_XGETTEXT = @echo ' ' XGETTEXT $@;
+ QUIET_MSGFMT = @echo ' ' MSGFMT $@;
+ QUIET_GCOV = @echo ' ' GCOV $@;
+ QUIET_SP = @echo ' ' SP $<;
+ QUIET_HDR = @echo ' ' HDR $(<:hcc=h);
+ QUIET_RC = @echo ' ' RC $@;
+ QUIET_SPATCH = @echo ' ' SPATCH $<;
+
+## Used in "Documentation/Makefile"
+ QUIET_ASCIIDOC = @echo ' ' ASCIIDOC $@;
+ QUIET_XMLTO = @echo ' ' XMLTO $@;
+ QUIET_DB2TEXI = @echo ' ' DB2TEXI $@;
+ QUIET_MAKEINFO = @echo ' ' MAKEINFO $@;
+ QUIET_DBLATEX = @echo ' ' DBLATEX $@;
+ QUIET_XSLTPROC = @echo ' ' XSLTPROC $@;
+ QUIET_GEN = @echo ' ' GEN $@;
+ QUIET_STDERR = 2> /dev/null
+
+ QUIET_LINT_GITLINK = @echo ' ' LINT GITLINK $<;
+ QUIET_LINT_MANSEC = @echo ' ' LINT MAN SEC $<;
+ QUIET_LINT_MANEND = @echo ' ' LINT MAN END $<;
+
+ export V
+endif
+endif
+
+### Templates
+
+## mkdir_p_parent: lazily "mkdir -p" the path needed for a $@
+## file. Uses $(wildcard) to avoid the "mkdir -p" if it's not
+## needed.
+##
+## Is racy, but in a good way; we might redundantly (and safely)
+## "mkdir -p" when running in parallel, but won't need to exhaustively create
+## individual rules for "a" -> "prefix" -> "dir" -> "file" if given a
+## "a/prefix/dir/file". This can instead be inserted at the start of
+## the "a/prefix/dir/file" rule.
+define mkdir_p_parent_template
+$(if $(wildcard $(@D)),,$(QUIET_MKDIR_P_PARENT)$(shell mkdir -p $(@D)))
+endef
diff --git a/sparse-index.c b/sparse-index.c
index a1d505d..8636af7 100644
--- a/sparse-index.c
+++ b/sparse-index.c
@@ -99,13 +99,9 @@ static int convert_to_sparse_rec(struct index_state *istate,
int set_sparse_index_config(struct repository *repo, int enable)
{
- int res;
- char *config_path = repo_git_path(repo, "config.worktree");
- res = git_config_set_in_file_gently(config_path,
- "index.sparse",
- enable ? "true" : NULL);
- free(config_path);
-
+ int res = repo_config_set_worktree_gently(repo,
+ "index.sparse",
+ enable ? "true" : "false");
prepare_repo_settings(repo);
repo->settings.sparse_index = enable;
return res;
@@ -136,7 +132,7 @@ static int is_sparse_index_allowed(struct index_state *istate, int flags)
/*
* The sparse index is not (yet) integrated with a split index.
*/
- if (istate->split_index)
+ if (istate->split_index || git_env_bool("GIT_TEST_SPLIT_INDEX", 0))
return 0;
/*
* The GIT_TEST_SPARSE_INDEX environment variable triggers the
@@ -341,6 +337,80 @@ void ensure_correct_sparsity(struct index_state *istate)
ensure_full_index(istate);
}
+static int path_found(const char *path, const char **dirname, size_t *dir_len,
+ int *dir_found)
+{
+ struct stat st;
+ char *newdir;
+ char *tmp;
+
+ /*
+ * If dirname corresponds to a directory that doesn't exist, and this
+ * path starts with dirname, then path can't exist.
+ */
+ if (!*dir_found && !memcmp(path, *dirname, *dir_len))
+ return 0;
+
+ /*
+ * If path itself exists, return 1.
+ */
+ if (!lstat(path, &st))
+ return 1;
+
+ /*
+ * Otherwise, path does not exist so we'll return 0...but we'll first
+ * determine some info about its parent directory so we can avoid
+ * lstat calls for future cache entries.
+ */
+ newdir = strrchr(path, '/');
+ if (!newdir)
+ return 0; /* Didn't find a parent dir; just return 0 now. */
+
+ /*
+ * If path starts with directory (which we already lstat'ed and found),
+ * then no need to lstat parent directory again.
+ */
+ if (*dir_found && *dirname && memcmp(path, *dirname, *dir_len))
+ return 0;
+
+ /* Free previous dirname, and cache path's dirname */
+ *dirname = path;
+ *dir_len = newdir - path + 1;
+
+ tmp = xstrndup(path, *dir_len);
+ *dir_found = !lstat(tmp, &st);
+ free(tmp);
+
+ return 0;
+}
+
+void clear_skip_worktree_from_present_files(struct index_state *istate)
+{
+ const char *last_dirname = NULL;
+ size_t dir_len = 0;
+ int dir_found = 1;
+
+ int i;
+
+ if (!core_apply_sparse_checkout ||
+ sparse_expect_files_outside_of_patterns)
+ return;
+
+restart:
+ for (i = 0; i < istate->cache_nr; i++) {
+ struct cache_entry *ce = istate->cache[i];
+
+ if (ce_skip_worktree(ce) &&
+ path_found(ce->name, &last_dirname, &dir_len, &dir_found)) {
+ if (S_ISSPARSEDIR(ce->ce_mode)) {
+ ensure_full_index(istate);
+ goto restart;
+ }
+ ce->ce_flags &= ~CE_SKIP_WORKTREE;
+ }
+ }
+}
+
/*
* This static global helps avoid infinite recursion between
* expand_to_path() and index_file_exists().
diff --git a/sparse-index.h b/sparse-index.h
index 656bd83..633d4fb 100644
--- a/sparse-index.h
+++ b/sparse-index.h
@@ -5,6 +5,7 @@ struct index_state;
#define SPARSE_INDEX_MEMORY_ONLY (1 << 0)
int convert_to_sparse(struct index_state *istate, int flags);
void ensure_correct_sparsity(struct index_state *istate);
+void clear_skip_worktree_from_present_files(struct index_state *istate);
/*
* Some places in the codebase expect to search for a specific path.
diff --git a/split-index.c b/split-index.c
index 8e52e89..9d0ccc3 100644
--- a/split-index.c
+++ b/split-index.c
@@ -5,6 +5,9 @@
struct split_index *init_split_index(struct index_state *istate)
{
if (!istate->split_index) {
+ if (istate->sparse_index)
+ die(_("cannot use split index with a sparse index"));
+
CALLOC_ARRAY(istate->split_index, 1);
istate->split_index->refcount = 1;
}
diff --git a/stable-qsort.c b/stable-qsort.c
index 6cbaf39..7ff1246 100644
--- a/stable-qsort.c
+++ b/stable-qsort.c
@@ -48,15 +48,9 @@ void git_stable_qsort(void *b, size_t n, size_t s,
int (*cmp)(const void *, const void *))
{
const size_t size = st_mult(n, s);
- char buf[1024];
-
- if (size < sizeof(buf)) {
- /* The temporary array fits on the small on-stack buffer. */
- msort_with_tmp(b, n, s, cmp, buf);
- } else {
- /* It's somewhat large, so malloc it. */
- char *tmp = xmalloc(size);
- msort_with_tmp(b, n, s, cmp, tmp);
- free(tmp);
- }
+ char *tmp;
+
+ tmp = xmalloc(size);
+ msort_with_tmp(b, n, s, cmp, tmp);
+ free(tmp);
}
diff --git a/strbuf.c b/strbuf.c
index 613fee8..dd9eb85 100644
--- a/strbuf.c
+++ b/strbuf.c
@@ -2,6 +2,7 @@
#include "refs.h"
#include "string-list.h"
#include "utf8.h"
+#include "date.h"
int starts_with(const char *str, const char *prefix)
{
@@ -874,9 +875,9 @@ static void strbuf_humanise(struct strbuf *buf, off_t bytes,
strbuf_addf(buf,
humanise_rate == 0 ?
/* TRANSLATORS: IEC 80000-13:2008 byte */
- Q_("%u byte", "%u bytes", (unsigned)bytes) :
+ Q_("%u byte", "%u bytes", bytes) :
/* TRANSLATORS: IEC 80000-13:2008 byte/second */
- Q_("%u byte/s", "%u bytes/s", (unsigned)bytes),
+ Q_("%u byte/s", "%u bytes/s", bytes),
(unsigned)bytes);
}
}
diff --git a/string-list.h b/string-list.h
index 267d6e5..d5a744e 100644
--- a/string-list.h
+++ b/string-list.h
@@ -86,7 +86,8 @@ typedef int (*compare_strings_fn)(const char *, const char *);
*/
struct string_list {
struct string_list_item *items;
- unsigned int nr, alloc;
+ size_t nr;
+ size_t alloc;
unsigned int strdup_strings:1;
compare_strings_fn cmp; /* NULL uses strcmp() */
};
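With nr and alloc widened to size_t, loop counters over a string_list should
follow suit; a trivial sketch (handle() is a placeholder):

	size_t i;

	for (i = 0; i < list.nr; i++)
		handle(list.items[i].string);
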
diff --git a/submodule-config.c b/submodule-config.c
index f953440..29668b0 100644
--- a/submodule-config.c
+++ b/submodule-config.c
@@ -7,6 +7,7 @@
#include "strbuf.h"
#include "object-store.h"
#include "parse-options.h"
+#include "tree-walk.h"
/*
* submodule cache lookup structure
@@ -496,7 +497,7 @@ static int parse_config(const char *var, const char *value, void *data)
else if (parse_submodule_update_strategy(value,
&submodule->update_strategy) < 0 ||
submodule->update_strategy.type == SM_UPDATE_COMMAND)
- die(_("invalid value for %s"), var);
+ die(_("invalid value for '%s'"), var);
} else if (!strcmp(item.buf, "shallow")) {
if (!me->overwrite && submodule->recommend_shallow != -1)
warn_multiple_config(me->treeish_name, submodule->name,
@@ -726,6 +727,66 @@ const struct submodule *submodule_from_path(struct repository *r,
return config_from(r->submodule_cache, treeish_name, path, lookup_path);
}
+/**
+ * Used internally by submodules_of_tree(). Recurses into 'treeish_name'
+ * and appends submodule entries to 'out'. The submodule_cache expects
+ * a root-level treeish_name and paths, so keep track of these values
+ * with 'root_tree' and 'prefix'.
+ */
+static void traverse_tree_submodules(struct repository *r,
+ const struct object_id *root_tree,
+ char *prefix,
+ const struct object_id *treeish_name,
+ struct submodule_entry_list *out)
+{
+ struct tree_desc tree;
+ struct submodule_tree_entry *st_entry;
+ struct name_entry *name_entry;
+ char *tree_path = NULL;
+
+ name_entry = xmalloc(sizeof(*name_entry));
+
+ fill_tree_descriptor(r, &tree, treeish_name);
+ while (tree_entry(&tree, name_entry)) {
+ if (prefix)
+ tree_path =
+ mkpathdup("%s/%s", prefix, name_entry->path);
+ else
+ tree_path = xstrdup(name_entry->path);
+
+ if (S_ISGITLINK(name_entry->mode) &&
+ is_tree_submodule_active(r, root_tree, tree_path)) {
+ st_entry = xmalloc(sizeof(*st_entry));
+ st_entry->name_entry = xmalloc(sizeof(*st_entry->name_entry));
+ *st_entry->name_entry = *name_entry;
+ st_entry->submodule =
+ submodule_from_path(r, root_tree, tree_path);
+ st_entry->repo = xmalloc(sizeof(*st_entry->repo));
+ if (repo_submodule_init(st_entry->repo, r, tree_path,
+ root_tree))
+ FREE_AND_NULL(st_entry->repo);
+
+ ALLOC_GROW(out->entries, out->entry_nr + 1,
+ out->entry_alloc);
+ out->entries[out->entry_nr++] = *st_entry;
+ } else if (S_ISDIR(name_entry->mode))
+ traverse_tree_submodules(r, root_tree, tree_path,
+ &name_entry->oid, out);
+ free(tree_path);
+ }
+}
+
+void submodules_of_tree(struct repository *r,
+ const struct object_id *treeish_name,
+ struct submodule_entry_list *out)
+{
+ CALLOC_ARRAY(out->entries, 0);
+ out->entry_nr = 0;
+ out->entry_alloc = 0;
+
+ traverse_tree_submodules(r, treeish_name, NULL, treeish_name, out);
+}
+
void submodule_free(struct repository *r)
{
if (r->submodule_cache)
diff --git a/submodule-config.h b/submodule-config.h
index 65875b9..fa229a8 100644
--- a/submodule-config.h
+++ b/submodule-config.h
@@ -6,6 +6,7 @@
#include "hashmap.h"
#include "submodule.h"
#include "strbuf.h"
+#include "tree-walk.h"
/**
* The submodule config cache API allows to read submodule
@@ -101,4 +102,37 @@ int check_submodule_name(const char *name);
void fetch_config_from_gitmodules(int *max_children, int *recurse_submodules);
void update_clone_config_from_gitmodules(int *max_jobs);
+/*
+ * Submodule entry that contains relevant information about a
+ * submodule in a tree.
+ */
+struct submodule_tree_entry {
+ /* The submodule's tree entry. */
+ struct name_entry *name_entry;
+ /*
+ * A struct repository corresponding to the submodule. May be
+ * NULL if the submodule has not been updated.
+ */
+ struct repository *repo;
+ /*
+ * A struct submodule containing the submodule config in the
+ * tree's .gitmodules.
+ */
+ const struct submodule *submodule;
+};
+
+struct submodule_entry_list {
+ struct submodule_tree_entry *entries;
+ int entry_nr;
+ int entry_alloc;
+};
+
+/**
+ * Given a treeish, return all submodules in the tree and its subtrees,
+ * but excluding nested submodules. Callers that require nested
+ * submodules are expected to recurse into the submodules themselves.
+ */
+void submodules_of_tree(struct repository *r,
+ const struct object_id *treeish_name,
+ struct submodule_entry_list *ret);
#endif /* SUBMODULE_CONFIG_H */
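A usage sketch for the new submodules_of_tree() API, assuming treeish is an
already-resolved tree or commit oid (not code from this series):

	struct submodule_entry_list list = { 0 };
	int i;

	submodules_of_tree(the_repository, &treeish, &list);
	for (i = 0; i < list.entry_nr; i++) {
		struct submodule_tree_entry *ent = &list.entries[i];

		/* the entry name as recorded in its parent tree */
		printf("%s\n", ent->name_entry->path);

		if (ent->repo) {
			/* ...use the initialized submodule repository, then clean up... */
			repo_clear(ent->repo);
			free(ent->repo);
		}
	}
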
diff --git a/submodule.c b/submodule.c
index c689070..86c8f0f 100644
--- a/submodule.c
+++ b/submodule.c
@@ -22,6 +22,7 @@
#include "parse-options.h"
#include "object-store.h"
#include "commit-reach.h"
+#include "shallow.h"
static int config_update_recurse_submodules = RECURSE_SUBMODULES_OFF;
static int initialized_fetch_ref_tips;
@@ -167,26 +168,6 @@ void stage_updated_gitmodules(struct index_state *istate)
static struct string_list added_submodule_odb_paths = STRING_LIST_INIT_NODUP;
-/* TODO: remove this function, use repo_submodule_init instead. */
-int add_submodule_odb(const char *path)
-{
- struct strbuf objects_directory = STRBUF_INIT;
- int ret = 0;
-
- ret = strbuf_git_path_submodule(&objects_directory, path, "objects/");
- if (ret)
- goto done;
- if (!is_directory(objects_directory.buf)) {
- ret = -1;
- goto done;
- }
- string_list_insert(&added_submodule_odb_paths,
- strbuf_detach(&objects_directory, NULL));
-done:
- strbuf_release(&objects_directory);
- return ret;
-}
-
void add_submodule_odb_by_path(const char *path)
{
string_list_insert(&added_submodule_odb_paths, xstrdup(path));
@@ -267,7 +248,9 @@ int option_parse_recurse_submodules_worktree_updater(const struct option *opt,
* ie, the config looks like: "[submodule] active\n".
* Since that is an invalid pathspec, we should inform the user.
*/
-int is_submodule_active(struct repository *repo, const char *path)
+int is_tree_submodule_active(struct repository *repo,
+ const struct object_id *treeish_name,
+ const char *path)
{
int ret = 0;
char *key = NULL;
@@ -275,7 +258,7 @@ int is_submodule_active(struct repository *repo, const char *path)
const struct string_list *sl;
const struct submodule *module;
- module = submodule_from_path(repo, null_oid(), path);
+ module = submodule_from_path(repo, treeish_name, path);
/* early return if there isn't a path->module mapping */
if (!module)
@@ -317,6 +300,11 @@ int is_submodule_active(struct repository *repo, const char *path)
return ret;
}
+int is_submodule_active(struct repository *repo, const char *path)
+{
+ return is_tree_submodule_active(repo, null_oid(), path);
+}
+
int is_submodule_populated_gently(const char *path, int *return_error_code)
{
int ret = 0;
@@ -775,19 +763,6 @@ const struct submodule *submodule_from_ce(const struct cache_entry *ce)
return submodule_from_path(the_repository, null_oid(), ce->name);
}
-static struct oid_array *submodule_commits(struct string_list *submodules,
- const char *name)
-{
- struct string_list_item *item;
-
- item = string_list_insert(submodules, name);
- if (item->util)
- return (struct oid_array *) item->util;
-
- /* NEEDSWORK: should we have oid_array_init()? */
- item->util = xcalloc(1, sizeof(struct oid_array));
- return (struct oid_array *) item->util;
-}
struct collect_changed_submodules_cb_data {
struct repository *repo;
@@ -812,6 +787,52 @@ static const char *default_name_or_path(const char *path_or_name)
return path_or_name;
}
+/*
+ * Holds relevant information for a changed submodule. Used as the .util
+ * member of the changed submodule name string_list_item.
+ *
+ * (super_oid, path) allows the submodule config to be read from _some_
+ * .gitmodules file. We store this information the first time we find a
+ * superproject commit that points to the submodule, but this is
+ * arbitrary - we can choose any (super_oid, path) that matches the
+ * submodule's name.
+ *
+ * NEEDSWORK: Storing an arbitrary commit is undesirable because we can't
+ * guarantee that we're reading the commit that the user would expect. A better
+ * scheme would be to just fetch a submodule by its name. This requires two
+ * steps:
+ * - Create a function that behaves like repo_submodule_init(), but accepts a
+ * submodule name instead of treeish_name and path. This should be easy
+ * because repo_submodule_init() internally uses the submodule's name.
+ *
+ * - Replace most instances of 'struct submodule' (which is the .gitmodules
+ * config) with just the submodule name. This is OK because we expect
+ * submodule settings to be stored in .git/config (via "git submodule init"),
+ * not .gitmodules. This also lets us delete get_non_gitmodules_submodule(),
+ * which constructs a bogus 'struct submodule' for the sake of giving a
+ * placeholder name to a gitlink.
+ */
+struct changed_submodule_data {
+ /*
+ * The first superproject commit in the rev walk that points to
+ * the submodule.
+ */
+ const struct object_id *super_oid;
+ /*
+ * Path to the submodule in the superproject commit referenced
+ * by 'super_oid'.
+ */
+ char *path;
+ /* The submodule commits that have changed in the rev walk. */
+ struct oid_array new_commits;
+};
+
+static void changed_submodule_data_clear(struct changed_submodule_data *cs_data)
+{
+ oid_array_clear(&cs_data->new_commits);
+ free(cs_data->path);
+}
+
static void collect_changed_submodules_cb(struct diff_queue_struct *q,
struct diff_options *options,
void *data)
@@ -823,9 +844,10 @@ static void collect_changed_submodules_cb(struct diff_queue_struct *q,
for (i = 0; i < q->nr; i++) {
struct diff_filepair *p = q->queue[i];
- struct oid_array *commits;
const struct submodule *submodule;
const char *name;
+ struct string_list_item *item;
+ struct changed_submodule_data *cs_data;
if (!S_ISGITLINK(p->two->mode))
continue;
@@ -852,8 +874,16 @@ static void collect_changed_submodules_cb(struct diff_queue_struct *q,
if (!name)
continue;
- commits = submodule_commits(changed, name);
- oid_array_append(commits, &p->two->oid);
+ item = string_list_insert(changed, name);
+ if (item->util)
+ cs_data = item->util;
+ else {
+ item->util = xcalloc(1, sizeof(struct changed_submodule_data));
+ cs_data = item->util;
+ cs_data->super_oid = commit_oid;
+ cs_data->path = xstrdup(p->two->path);
+ }
+ oid_array_append(&cs_data->new_commits, &p->two->oid);
}
}
@@ -900,11 +930,12 @@ static void collect_changed_submodules(struct repository *r,
reset_revision_walk();
}
-static void free_submodules_oids(struct string_list *submodules)
+static void free_submodules_data(struct string_list *submodules)
{
struct string_list_item *item;
for_each_string_list_item(item, submodules)
- oid_array_clear((struct oid_array *) item->util);
+ changed_submodule_data_clear(item->util);
+
string_list_clear(submodules, 1);
}
@@ -925,6 +956,7 @@ struct has_commit_data {
struct repository *repo;
int result;
const char *path;
+ const struct object_id *super_oid;
};
static int check_has_commit(const struct object_id *oid, void *data)
@@ -933,9 +965,10 @@ static int check_has_commit(const struct object_id *oid, void *data)
struct repository subrepo;
enum object_type type;
- if (repo_submodule_init(&subrepo, cb->repo, cb->path, null_oid())) {
+ if (repo_submodule_init(&subrepo, cb->repo, cb->path, cb->super_oid)) {
cb->result = 0;
- goto cleanup;
+ /* subrepo failed to init, so don't clean it up. */
+ return 0;
}
type = oid_object_info(&subrepo, oid, NULL);
@@ -961,21 +994,15 @@ cleanup:
static int submodule_has_commits(struct repository *r,
const char *path,
+ const struct object_id *super_oid,
struct oid_array *commits)
{
- struct has_commit_data has_commit = { r, 1, path };
-
- /*
- * Perform a cheap, but incorrect check for the existence of 'commits'.
- * This is done by adding the submodule's object store to the in-core
- * object store, and then querying for each commit's existence. If we
- * do not have the commit object anywhere, there is no chance we have
- * it in the object store of the correct submodule and have it
- * reachable from a ref, so we can fail early without spawning rev-list
- * which is expensive.
- */
- if (add_submodule_odb(path))
- return 0;
+ struct has_commit_data has_commit = {
+ .repo = r,
+ .result = 1,
+ .path = path,
+ .super_oid = super_oid
+ };
oid_array_for_each_unique(commits, check_has_commit, &has_commit);
@@ -1010,7 +1037,7 @@ static int submodule_needs_pushing(struct repository *r,
const char *path,
struct oid_array *commits)
{
- if (!submodule_has_commits(r, path, commits))
+ if (!submodule_has_commits(r, path, null_oid(), commits))
/*
* NOTE: We do consider it safe to return "no" here. The
* correct answer would be "We do not know" instead of
@@ -1070,7 +1097,7 @@ int find_unpushed_submodules(struct repository *r,
collect_changed_submodules(r, &submodules, &argv);
for_each_string_list_item(name, &submodules) {
- struct oid_array *commits = name->util;
+ struct changed_submodule_data *cs_data = name->util;
const struct submodule *submodule;
const char *path = NULL;
@@ -1083,11 +1110,11 @@ int find_unpushed_submodules(struct repository *r,
if (!path)
continue;
- if (submodule_needs_pushing(r, path, commits))
+ if (submodule_needs_pushing(r, path, &cs_data->new_commits))
string_list_insert(needs_pushing, path);
}
- free_submodules_oids(&submodules);
+ free_submodules_data(&submodules);
strvec_clear(&argv);
return needs_pushing->nr;
@@ -1233,14 +1260,36 @@ void check_for_new_submodule_commits(struct object_id *oid)
oid_array_append(&ref_tips_after_fetch, oid);
}
+/*
+ * Returns 1 if there is at least one submodule gitdir in
+ * $GIT_DIR/modules and 0 otherwise. This follows
+ * submodule_name_to_gitdir(), which looks for submodules in
+ * $GIT_DIR/modules, not $GIT_COMMON_DIR.
+ *
+ * A submodule can be moved to $GIT_DIR/modules manually by running "git
+ * submodule absorbgitdirs", or it may be initialized there by "git
+ * submodule update".
+ */
+static int repo_has_absorbed_submodules(struct repository *r)
+{
+ int ret;
+ struct strbuf buf = STRBUF_INIT;
+
+ strbuf_repo_git_path(&buf, r, "modules/");
+ ret = file_exists(buf.buf) && !is_empty_dir(buf.buf);
+ strbuf_release(&buf);
+ return ret;
+}
+
static void calculate_changed_submodule_paths(struct repository *r,
struct string_list *changed_submodule_names)
{
struct strvec argv = STRVEC_INIT;
struct string_list_item *name;
- /* No need to check if there are no submodules configured */
- if (!submodule_from_path(r, NULL, NULL))
+ /* No need to check if no submodules would be fetched */
+ if (!submodule_from_path(r, NULL, NULL) &&
+ !repo_has_absorbed_submodules(r))
return;
strvec_push(&argv, "--"); /* argv[0] program name */
@@ -1257,7 +1306,7 @@ static void calculate_changed_submodule_paths(struct repository *r,
collect_changed_submodules(r, changed_submodule_names, &argv);
for_each_string_list_item(name, changed_submodule_names) {
- struct oid_array *commits = name->util;
+ struct changed_submodule_data *cs_data = name->util;
const struct submodule *submodule;
const char *path = NULL;
@@ -1270,8 +1319,8 @@ static void calculate_changed_submodule_paths(struct repository *r,
if (!path)
continue;
- if (submodule_has_commits(r, path, commits)) {
- oid_array_clear(commits);
+ if (submodule_has_commits(r, path, null_oid(), &cs_data->new_commits)) {
+ changed_submodule_data_clear(cs_data);
*name->string = '\0';
}
}
@@ -1308,12 +1357,21 @@ int submodule_touches_in_range(struct repository *r,
strvec_clear(&args);
- free_submodules_oids(&subs);
+ free_submodules_data(&subs);
return ret;
}
struct submodule_parallel_fetch {
- int count;
+ /*
+ * The index of the last index entry processed by
+ * get_fetch_task_from_index().
+ */
+ int index_count;
+ /*
+ * The index of the last string_list entry processed by
+ * get_fetch_task_from_changed().
+ */
+ int changed_count;
struct strvec args;
struct repository *r;
const char *prefix;
@@ -1322,7 +1380,16 @@ struct submodule_parallel_fetch {
int quiet;
int result;
+ /*
+ * Names of submodules that have new commits. Generated by
+ * walking the newly fetched superproject commits.
+ */
struct string_list changed_submodule_names;
+ /*
+ * Names of submodules that have already been processed. Lets us
+ * avoid fetching the same submodule more than once.
+ */
+ struct string_list seen_submodule_names;
/* Pending fetches by OIDs */
struct fetch_task **oid_fetch_tasks;
@@ -1333,6 +1400,7 @@ struct submodule_parallel_fetch {
#define SPF_INIT { \
.args = STRVEC_INIT, \
.changed_submodule_names = STRING_LIST_INIT_DUP, \
+ .seen_submodule_names = STRING_LIST_INIT_DUP, \
.submodules_with_errors = STRBUF_INIT, \
}
@@ -1369,6 +1437,8 @@ struct fetch_task {
struct repository *repo;
const struct submodule *sub;
unsigned free_sub : 1; /* Do we need to free the submodule? */
+ const char *default_argv; /* The default fetch mode. */
+ struct strvec git_args; /* Args for the child git process. */
struct oid_array *commits; /* Ensure these commits are fetched */
};
@@ -1394,31 +1464,6 @@ static const struct submodule *get_non_gitmodules_submodule(const char *path)
return (const struct submodule *) ret;
}
-static struct fetch_task *fetch_task_create(struct repository *r,
- const char *path)
-{
- struct fetch_task *task = xmalloc(sizeof(*task));
- memset(task, 0, sizeof(*task));
-
- task->sub = submodule_from_path(r, null_oid(), path);
- if (!task->sub) {
- /*
- * No entry in .gitmodules? Technically not a submodule,
- * but historically we supported repositories that happen to be
- * in-place where a gitlink is. Keep supporting them.
- */
- task->sub = get_non_gitmodules_submodule(path);
- if (!task->sub) {
- free(task);
- return NULL;
- }
-
- task->free_sub = 1;
- }
-
- return task;
-}
-
static void fetch_task_release(struct fetch_task *p)
{
if (p->free_sub)
@@ -1429,14 +1474,17 @@ static void fetch_task_release(struct fetch_task *p)
if (p->repo)
repo_clear(p->repo);
FREE_AND_NULL(p->repo);
+
+ strvec_clear(&p->git_args);
}
static struct repository *get_submodule_repo_for(struct repository *r,
- const char *path)
+ const char *path,
+ const struct object_id *treeish_name)
{
struct repository *ret = xmalloc(sizeof(*ret));
- if (repo_submodule_init(ret, r, path, null_oid())) {
+ if (repo_submodule_init(ret, r, path, treeish_name)) {
free(ret);
return NULL;
}
@@ -1444,67 +1492,83 @@ static struct repository *get_submodule_repo_for(struct repository *r,
return ret;
}
-static int get_next_submodule(struct child_process *cp,
- struct strbuf *err, void *data, void **task_cb)
+static struct fetch_task *fetch_task_create(struct submodule_parallel_fetch *spf,
+ const char *path,
+ const struct object_id *treeish_name)
{
- struct submodule_parallel_fetch *spf = data;
+ struct fetch_task *task = xmalloc(sizeof(*task));
+ memset(task, 0, sizeof(*task));
+
+ task->sub = submodule_from_path(spf->r, treeish_name, path);
+
+ if (!task->sub) {
+ /*
+ * No entry in .gitmodules? Technically not a submodule,
+ * but historically we supported repositories that happen to be
+ * in-place where a gitlink is. Keep supporting them.
+ */
+ task->sub = get_non_gitmodules_submodule(path);
+ if (!task->sub)
+ goto cleanup;
- for (; spf->count < spf->r->index->cache_nr; spf->count++) {
- const struct cache_entry *ce = spf->r->index->cache[spf->count];
- const char *default_argv;
+ task->free_sub = 1;
+ }
+
+ if (string_list_lookup(&spf->seen_submodule_names, task->sub->name))
+ goto cleanup;
+
+ switch (get_fetch_recurse_config(task->sub, spf))
+ {
+ default:
+ case RECURSE_SUBMODULES_DEFAULT:
+ case RECURSE_SUBMODULES_ON_DEMAND:
+ if (!task->sub ||
+ !string_list_lookup(
+ &spf->changed_submodule_names,
+ task->sub->name))
+ goto cleanup;
+ task->default_argv = "on-demand";
+ break;
+ case RECURSE_SUBMODULES_ON:
+ task->default_argv = "yes";
+ break;
+ case RECURSE_SUBMODULES_OFF:
+ goto cleanup;
+ }
+
+ task->repo = get_submodule_repo_for(spf->r, path, treeish_name);
+
+ return task;
+
+ cleanup:
+ fetch_task_release(task);
+ free(task);
+ return NULL;
+}
+
+static struct fetch_task *
+get_fetch_task_from_index(struct submodule_parallel_fetch *spf,
+ struct strbuf *err)
+{
+ for (; spf->index_count < spf->r->index->cache_nr; spf->index_count++) {
+ const struct cache_entry *ce =
+ spf->r->index->cache[spf->index_count];
struct fetch_task *task;
if (!S_ISGITLINK(ce->ce_mode))
continue;
- task = fetch_task_create(spf->r, ce->name);
+ task = fetch_task_create(spf, ce->name, null_oid());
if (!task)
continue;
- switch (get_fetch_recurse_config(task->sub, spf))
- {
- default:
- case RECURSE_SUBMODULES_DEFAULT:
- case RECURSE_SUBMODULES_ON_DEMAND:
- if (!task->sub ||
- !string_list_lookup(
- &spf->changed_submodule_names,
- task->sub->name))
- continue;
- default_argv = "on-demand";
- break;
- case RECURSE_SUBMODULES_ON:
- default_argv = "yes";
- break;
- case RECURSE_SUBMODULES_OFF:
- continue;
- }
-
- task->repo = get_submodule_repo_for(spf->r, task->sub->path);
if (task->repo) {
- struct strbuf submodule_prefix = STRBUF_INIT;
- child_process_init(cp);
- cp->dir = task->repo->gitdir;
- prepare_submodule_repo_env_in_gitdir(&cp->env_array);
- cp->git_cmd = 1;
if (!spf->quiet)
strbuf_addf(err, _("Fetching submodule %s%s\n"),
spf->prefix, ce->name);
- strvec_init(&cp->args);
- strvec_pushv(&cp->args, spf->args.v);
- strvec_push(&cp->args, default_argv);
- strvec_push(&cp->args, "--submodule-prefix");
-
- strbuf_addf(&submodule_prefix, "%s%s/",
- spf->prefix,
- task->sub->path);
- strvec_push(&cp->args, submodule_prefix.buf);
-
- spf->count++;
- *task_cb = task;
- strbuf_release(&submodule_prefix);
- return 1;
+ spf->index_count++;
+ return task;
} else {
struct strbuf empty_submodule_path = STRBUF_INIT;
@@ -1528,6 +1592,111 @@ static int get_next_submodule(struct child_process *cp,
strbuf_release(&empty_submodule_path);
}
}
+ return NULL;
+}
+
+static struct fetch_task *
+get_fetch_task_from_changed(struct submodule_parallel_fetch *spf,
+ struct strbuf *err)
+{
+ for (; spf->changed_count < spf->changed_submodule_names.nr;
+ spf->changed_count++) {
+ struct string_list_item item =
+ spf->changed_submodule_names.items[spf->changed_count];
+ struct changed_submodule_data *cs_data = item.util;
+ struct fetch_task *task;
+
+ if (!is_tree_submodule_active(spf->r, cs_data->super_oid, cs_data->path))
+ continue;
+
+ task = fetch_task_create(spf, cs_data->path,
+ cs_data->super_oid);
+ if (!task)
+ continue;
+
+ if (!task->repo) {
+ strbuf_addf(err, _("Could not access submodule '%s' at commit %s\n"),
+ cs_data->path,
+ find_unique_abbrev(cs_data->super_oid, DEFAULT_ABBREV));
+
+ fetch_task_release(task);
+ free(task);
+ continue;
+ }
+
+ if (!spf->quiet)
+ strbuf_addf(err,
+ _("Fetching submodule %s%s at commit %s\n"),
+ spf->prefix, task->sub->path,
+ find_unique_abbrev(cs_data->super_oid,
+ DEFAULT_ABBREV));
+
+ spf->changed_count++;
+ /*
+ * NEEDSWORK: Submodules set/unset a value for
+ * core.worktree when they are populated/unpopulated by
+ * "git checkout" (and similar commands, see
+ * submodule_move_head() and
+ * connect_work_tree_and_git_dir()), but if the
+ * submodule is unpopulated in another way (e.g. "git
+ * rm", "rm -r"), core.worktree will still be set even
+ * though the directory doesn't exist, and the child
+ * process will crash while trying to chdir into the
+ * nonexistent directory.
+ *
+ * In this case, we know that the submodule has no
+ * working tree, so we can work around this by
+ * setting "--work-tree=." (--bare does not work because
+ * worktree settings take precedence over bare-ness).
+ * However, this is not necessarily true in other cases,
+ * so a generalized solution is still necessary.
+ *
+ * Possible solutions:
+ * - teach "git [add|rm]" to unset core.worktree and
+ * discourage users from removing submodules without
+ * using a Git command.
+ * - teach submodule child processes to ignore stale
+ * core.worktree values.
+ */
+ strvec_push(&task->git_args, "--work-tree=.");
+ return task;
+ }
+ return NULL;
+}
+
+static int get_next_submodule(struct child_process *cp, struct strbuf *err,
+ void *data, void **task_cb)
+{
+ struct submodule_parallel_fetch *spf = data;
+ struct fetch_task *task =
+ get_fetch_task_from_index(spf, err);
+ if (!task)
+ task = get_fetch_task_from_changed(spf, err);
+
+ if (task) {
+ struct strbuf submodule_prefix = STRBUF_INIT;
+
+ child_process_init(cp);
+ cp->dir = task->repo->gitdir;
+ prepare_submodule_repo_env_in_gitdir(&cp->env_array);
+ cp->git_cmd = 1;
+ strvec_init(&cp->args);
+ if (task->git_args.nr)
+ strvec_pushv(&cp->args, task->git_args.v);
+ strvec_pushv(&cp->args, spf->args.v);
+ strvec_push(&cp->args, task->default_argv);
+ strvec_push(&cp->args, "--submodule-prefix");
+
+ strbuf_addf(&submodule_prefix, "%s%s/",
+ spf->prefix,
+ task->sub->path);
+ strvec_push(&cp->args, submodule_prefix.buf);
+ *task_cb = task;
+
+ strbuf_release(&submodule_prefix);
+ string_list_insert(&spf->seen_submodule_names, task->sub->name);
+ return 1;
+ }
if (spf->oid_fetch_tasks_nr) {
struct fetch_task *task =
@@ -1590,7 +1759,7 @@ static int fetch_finish(int retvalue, struct strbuf *err,
struct fetch_task *task = task_cb;
struct string_list_item *it;
- struct oid_array *commits;
+ struct changed_submodule_data *cs_data;
if (!task || !task->sub)
BUG("callback cookie bogus");
@@ -1618,14 +1787,14 @@ static int fetch_finish(int retvalue, struct strbuf *err,
/* Could be an unchanged submodule, not contained in the list */
goto out;
- commits = it->util;
- oid_array_filter(commits,
+ cs_data = it->util;
+ oid_array_filter(&cs_data->new_commits,
commit_missing_in_sub,
task->repo);
/* Are there commits we want, but do not exist? */
- if (commits->nr) {
- task->commits = commits;
+ if (cs_data->new_commits.nr) {
+ task->commits = &cs_data->new_commits;
ALLOC_GROW(spf->oid_fetch_tasks,
spf->oid_fetch_tasks_nr + 1,
spf->oid_fetch_tasks_alloc);
@@ -1640,11 +1809,11 @@ out:
return 0;
}
-int fetch_populated_submodules(struct repository *r,
- const struct strvec *options,
- const char *prefix, int command_line_option,
- int default_option,
- int quiet, int max_parallel_jobs)
+int fetch_submodules(struct repository *r,
+ const struct strvec *options,
+ const char *prefix, int command_line_option,
+ int default_option,
+ int quiet, int max_parallel_jobs)
{
int i;
struct submodule_parallel_fetch spf = SPF_INIT;
@@ -1683,7 +1852,7 @@ int fetch_populated_submodules(struct repository *r,
strvec_clear(&spf.args);
out:
- free_submodules_oids(&spf.changed_submodule_names);
+ free_submodules_data(&spf.changed_submodule_names);
return spf.result;
}
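
Taken together, the submodule.c changes above make "git fetch" consider two sources of submodules to recurse into: gitlinks in the index, and submodules recorded by newly fetched superproject commits whose gitdirs are absorbed under $GIT_DIR/modules (see repo_has_absorbed_submodules() and get_fetch_task_from_changed()). A rough shell sketch of the user-visible effect follows; the repository URL, branch name and submodule path are hypothetical, and this illustrates the intent of the series rather than reproducing one of its tests:

	# superproject "super" with a submodule at path "sub" (hypothetical names)
	git clone --recurse-submodules https://example.com/super.git
	cd super
	# switch to a branch that does not contain "sub"; the working tree copy
	# goes away but its gitdir stays absorbed in .git/modules/sub
	git checkout --recurse-submodules branch-without-sub
	# newly fetched superproject commits that record new commits in "sub"
	# now also trigger a fetch of "sub", instead of being skipped because
	# the submodule has no index entry in the current checkout
	git fetch --recurse-submodules=on-demand
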
diff --git a/submodule.h b/submodule.h
index 6bd2c99..40c1445 100644
--- a/submodule.h
+++ b/submodule.h
@@ -54,6 +54,9 @@ int git_default_submodule_config(const char *var, const char *value, void *cb);
struct option;
int option_parse_recurse_submodules_worktree_updater(const struct option *opt,
const char *arg, int unset);
+int is_tree_submodule_active(struct repository *repo,
+ const struct object_id *treeish_name,
+ const char *path);
int is_submodule_active(struct repository *repo, const char *path);
/*
* Determine if a submodule has been populated at a given 'path' by checking if
@@ -85,12 +88,12 @@ int should_update_submodules(void);
*/
const struct submodule *submodule_from_ce(const struct cache_entry *ce);
void check_for_new_submodule_commits(struct object_id *oid);
-int fetch_populated_submodules(struct repository *r,
- const struct strvec *options,
- const char *prefix,
- int command_line_option,
- int default_option,
- int quiet, int max_parallel_jobs);
+int fetch_submodules(struct repository *r,
+ const struct strvec *options,
+ const char *prefix,
+ int command_line_option,
+ int default_option,
+ int quiet, int max_parallel_jobs);
unsigned is_submodule_modified(const char *path, int ignore_untracked);
int submodule_uses_gitfile(const char *path);
@@ -100,12 +103,11 @@ int submodule_uses_gitfile(const char *path);
int bad_to_remove_submodule(const char *path, unsigned flags);
/*
- * Call add_submodule_odb() to add the submodule at the given path to a list.
- * When register_all_submodule_odb_as_alternates() is called, the object stores
- * of all submodules in that list will be added as alternates in
- * the_repository.
+ * Call add_submodule_odb_by_path() to add the submodule at the given
+ * path to a list. When register_all_submodule_odb_as_alternates() is
+ * called, the object stores of all submodules in that list will be
+ * added as alternates in the_repository.
*/
-int add_submodule_odb(const char *path);
void add_submodule_odb_by_path(const char *path);
int register_all_submodule_odb_as_alternates(void);
diff --git a/t/Makefile b/t/Makefile
index 46cd5fc..056ce55 100644
--- a/t/Makefile
+++ b/t/Makefile
@@ -1,3 +1,6 @@
+# Import tree-wide shared Makefile behavior and libraries
+include ../shared.mak
+
# Run tests
#
# Copyright (c) 2005 Junio C Hamano
diff --git a/t/README b/t/README
index f48e054..9ffea1d 100644
--- a/t/README
+++ b/t/README
@@ -405,8 +405,8 @@ every 'git commit-graph write', as if the `--changed-paths` option was
passed in.
GIT_TEST_FSMONITOR=$PWD/t7519/fsmonitor-all exercises the fsmonitor
-code path for utilizing a file system monitor to speed up detecting
-new or changed files.
+code paths for utilizing a (hook based) file system monitor to speed up
+detecting new or changed files.
GIT_TEST_INDEX_VERSION=<n> exercises the index read/write code path
for the index version specified. Can be set to any valid version
diff --git a/t/helper/test-chmtime.c b/t/helper/test-chmtime.c
index 524b55c..dc28890 100644
--- a/t/helper/test-chmtime.c
+++ b/t/helper/test-chmtime.c
@@ -134,6 +134,21 @@ int cmd__chmtime(int argc, const char **argv)
}
if (utb.modtime != sb.st_mtime && utime(argv[i], &utb) < 0) {
+#ifdef GIT_WINDOWS_NATIVE
+ if (S_ISDIR(sb.st_mode)) {
+ /*
+ * NEEDSWORK: The Windows version of `utime()`
+ * (aka `mingw_utime()`) does not correctly
+ * handle directory arguments, since it uses
+ * `_wopen()`. Ignore it for now since this
+ * is just a test.
+ */
+ fprintf(stderr,
+ ("Failed to modify time on directory %s. "
+ "Skipping\n"), argv[i]);
+ continue;
+ }
+#endif
fprintf(stderr, "Failed to modify time on %s: %s\n",
argv[i], strerror(errno));
return 1;
diff --git a/t/helper/test-csprng.c b/t/helper/test-csprng.c
new file mode 100644
index 0000000..65d1497
--- /dev/null
+++ b/t/helper/test-csprng.c
@@ -0,0 +1,29 @@
+#include "test-tool.h"
+#include "git-compat-util.h"
+
+
+int cmd__csprng(int argc, const char **argv)
+{
+ unsigned long count;
+ unsigned char buf[1024];
+
+ if (argc > 2) {
+ fprintf(stderr, "usage: %s [<size>]\n", argv[0]);
+ return 2;
+ }
+
+ count = (argc == 2) ? strtoul(argv[1], NULL, 0) : -1L;
+
+ while (count) {
+ unsigned long chunk = count < sizeof(buf) ? count : sizeof(buf);
+ if (csprng_bytes(buf, chunk) < 0) {
+ perror("failed to read");
+ return 5;
+ }
+ if (fwrite(buf, chunk, 1, stdout) != 1)
+ return 1;
+ count -= chunk;
+ }
+
+ return 0;
+}
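
The new helper is a thin wrapper around csprng_bytes(): it writes the requested number of random bytes to stdout, or keeps writing indefinitely when no size is given. A usage sketch (the sizes are arbitrary; in the test suite the binary is invoked as "test-tool", i.e. t/helper/test-tool):

	test-tool csprng 1048576 >random.bin           # exactly 1 MiB of CSPRNG output
	test-tool csprng | head -c 16 | od -An -tx1    # no size: streams until the pipe closes
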
diff --git a/t/helper/test-date.c b/t/helper/test-date.c
index 099eff4..45951b1 100644
--- a/t/helper/test-date.c
+++ b/t/helper/test-date.c
@@ -1,5 +1,6 @@
#include "test-tool.h"
#include "cache.h"
+#include "date.h"
static const char *usage_msg = "\n"
" test-tool date relative [time_t]...\n"
@@ -34,7 +35,7 @@ static void show_human_dates(const char **argv)
static void show_dates(const char **argv, const char *format)
{
- struct date_mode mode;
+ struct date_mode mode = DATE_MODE_INIT;
parse_date_format(format, &mode);
for (; *argv; argv++) {
@@ -53,6 +54,8 @@ static void show_dates(const char **argv, const char *format)
printf("%s -> %s\n", *argv, show_date(t, tz, &mode));
}
+
+ date_mode_release(&mode);
}
static void parse_dates(const char **argv)
diff --git a/t/helper/test-fsmonitor-client.c b/t/helper/test-fsmonitor-client.c
new file mode 100644
index 0000000..3062c8a
--- /dev/null
+++ b/t/helper/test-fsmonitor-client.c
@@ -0,0 +1,116 @@
+/*
+ * test-fsmonitor-client.c: client code to send commands/requests to
+ * a `git fsmonitor--daemon` daemon.
+ */
+
+#include "test-tool.h"
+#include "cache.h"
+#include "parse-options.h"
+#include "fsmonitor-ipc.h"
+
+#ifndef HAVE_FSMONITOR_DAEMON_BACKEND
+int cmd__fsmonitor_client(int argc, const char **argv)
+{
+ die("fsmonitor--daemon not available on this platform");
+}
+#else
+
+/*
+ * Read the `.git/index` to get the last token written to the
+ * FSMonitor Index Extension.
+ */
+static const char *get_token_from_index(void)
+{
+ struct index_state *istate = the_repository->index;
+
+ if (do_read_index(istate, the_repository->index_file, 0) < 0)
+ die("unable to read index file");
+ if (!istate->fsmonitor_last_update)
+ die("index file does not have fsmonitor extension");
+
+ return istate->fsmonitor_last_update;
+}
+
+/*
+ * Send an IPC query to a `git-fsmonitor--daemon` daemon and
+ * ask for the changes since the given token or from the last
+ * token in the index extension.
+ *
+ * This will implicitly start a daemon process if necessary. The
+ * daemon process will persist after we exit.
+ */
+static int do_send_query(const char *token)
+{
+ struct strbuf answer = STRBUF_INIT;
+ int ret;
+
+ if (!token || !*token)
+ token = get_token_from_index();
+
+ ret = fsmonitor_ipc__send_query(token, &answer);
+ if (ret < 0)
+ die("could not query fsmonitor--daemon");
+
+ write_in_full(1, answer.buf, answer.len);
+ strbuf_release(&answer);
+
+ return 0;
+}
+
+/*
+ * Send a "flush" command to the `git-fsmonitor--daemon` (if running)
+ * and tell it to flush its cache.
+ *
+ * This feature is primarily used by the test suite to simulate a loss of
+ * sync with the filesystem where we miss kernel events.
+ */
+static int do_send_flush(void)
+{
+ struct strbuf answer = STRBUF_INIT;
+ int ret;
+
+ ret = fsmonitor_ipc__send_command("flush", &answer);
+ if (ret)
+ return ret;
+
+ write_in_full(1, answer.buf, answer.len);
+ strbuf_release(&answer);
+
+ return 0;
+}
+
+int cmd__fsmonitor_client(int argc, const char **argv)
+{
+ const char *subcmd;
+ const char *token = NULL;
+
+ const char * const fsmonitor_client_usage[] = {
+ "test-tool fsmonitor-client query [<token>]",
+ "test-tool fsmonitor-client flush",
+ NULL,
+ };
+
+ struct option options[] = {
+ OPT_STRING(0, "token", &token, "token",
+ "command token to send to the server"),
+ OPT_END()
+ };
+
+ argc = parse_options(argc, argv, NULL, options, fsmonitor_client_usage, 0);
+
+ if (argc != 1)
+ usage_with_options(fsmonitor_client_usage, options);
+
+ subcmd = argv[0];
+
+ setup_git_directory();
+
+ if (!strcmp(subcmd, "query"))
+ return !!do_send_query(token);
+
+ if (!strcmp(subcmd, "flush"))
+ return !!do_send_flush();
+
+ die("Unhandled subcommand: '%s'", subcmd);
+}
+#endif
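
The helper exposes exactly the two subcommands listed in its usage array, and only does real work on platforms built with a fsmonitor--daemon backend. A usage sketch, run from inside a repository; the --token value is a placeholder, since real tokens are opaque strings handed out by the daemon or stored in the index extension:

	test-tool fsmonitor-client query                      # token read from the index extension
	test-tool fsmonitor-client query --token "sometoken"  # hypothetical explicit token
	test-tool fsmonitor-client flush                      # ask the daemon to resync with the filesystem
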
diff --git a/t/helper/test-progress.c b/t/helper/test-progress.c
index 5d05cbe..6cc9735 100644
--- a/t/helper/test-progress.c
+++ b/t/helper/test-progress.c
@@ -3,6 +3,9 @@
*
* Reads instructions from standard input, one instruction per line:
*
+ * "start <total>[ <title>]" - Call start_progress(title, total),
+ * Uses the default title of "Working hard"
+ * if the " <title>" is omitted.
* "progress <items>" - Call display_progress() with the given item count
* as parameter.
* "throughput <bytes> <millis> - Call display_throughput() with the given
@@ -10,6 +13,7 @@
* specify the time elapsed since the
* start_progress() call.
* "update" - Set the 'progress_update' flag.
+ * "stop" - Call stop_progress().
*
* See 't0500-progress-display.sh' for examples.
*/
@@ -19,34 +23,50 @@
#include "parse-options.h"
#include "progress.h"
#include "strbuf.h"
+#include "string-list.h"
int cmd__progress(int argc, const char **argv)
{
- int total = 0;
- const char *title;
+ const char *const default_title = "Working hard";
+ struct string_list titles = STRING_LIST_INIT_DUP;
struct strbuf line = STRBUF_INIT;
- struct progress *progress;
+ struct progress *progress = NULL;
const char *usage[] = {
- "test-tool progress [--total=<n>] <progress-title>",
+ "test-tool progress <stdin",
NULL
};
struct option options[] = {
- OPT_INTEGER(0, "total", &total, "total number of items"),
OPT_END(),
};
argc = parse_options(argc, argv, NULL, options, usage, 0);
- if (argc != 1)
- die("need a title for the progress output");
- title = argv[0];
+ if (argc)
+ usage_with_options(usage, options);
progress_testing = 1;
- progress = start_progress(title, total);
while (strbuf_getline(&line, stdin) != EOF) {
char *end;
- if (skip_prefix(line.buf, "progress ", (const char **) &end)) {
+ if (skip_prefix(line.buf, "start ", (const char **) &end)) {
+ uint64_t total = strtoull(end, &end, 10);
+ const char *title;
+
+ /*
+ * We can't use "end + 1" as an argument to
+ * start_progress(), because it doesn't xstrdup() its
+ * "title" argument. We need to hold onto a
+ * valid "char *" for it until the end.
+ */
+ if (!*end)
+ title = default_title;
+ else if (*end == ' ')
+ title = string_list_insert(&titles, end + 1)->string;
+ else
+ die("invalid input: '%s'\n", line.buf);
+
+ progress = start_progress(title, total);
+ } else if (skip_prefix(line.buf, "progress ", (const char **) &end)) {
uint64_t item_count = strtoull(end, &end, 10);
if (*end != '\0')
die("invalid input: '%s'\n", line.buf);
@@ -63,12 +83,16 @@ int cmd__progress(int argc, const char **argv)
die("invalid input: '%s'\n", line.buf);
progress_test_ns = test_ms * 1000 * 1000;
display_throughput(progress, byte_count);
- } else if (!strcmp(line.buf, "update"))
+ } else if (!strcmp(line.buf, "update")) {
progress_test_force_update();
- else
+ } else if (!strcmp(line.buf, "stop")) {
+ stop_progress(&progress);
+ } else {
die("invalid input: '%s'\n", line.buf);
+ }
}
- stop_progress(&progress);
+ strbuf_release(&line);
+ string_list_clear(&titles, 0);
return 0;
}
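
With the scripted "start"/"stop" instructions the helper no longer takes a title or --total on the command line; the whole progress sequence is driven from stdin. A sketch of the new input format, following the instruction list in the header comment (the title and counts are arbitrary):

	test-tool progress <<-\EOF
	start 1000 Frobnicating widgets
	progress 250
	throughput 102400 1000
	update
	progress 1000
	stop
	EOF
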
diff --git a/t/helper/test-read-graph.c b/t/helper/test-read-graph.c
index 75927b2..98b73bb 100644
--- a/t/helper/test-read-graph.c
+++ b/t/helper/test-read-graph.c
@@ -3,6 +3,7 @@
#include "commit-graph.h"
#include "repository.h"
#include "object-store.h"
+#include "bloom.h"
int cmd__read_graph(int argc, const char **argv)
{
@@ -45,6 +46,18 @@ int cmd__read_graph(int argc, const char **argv)
printf(" bloom_data");
printf("\n");
+ printf("options:");
+ if (graph->bloom_filter_settings)
+ printf(" bloom(%"PRIu32",%"PRIu32",%"PRIu32")",
+ graph->bloom_filter_settings->hash_version,
+ graph->bloom_filter_settings->bits_per_entry,
+ graph->bloom_filter_settings->num_hashes);
+ if (graph->read_generation_data)
+ printf(" read_generation_data");
+ if (graph->topo_levels)
+ printf(" topo_levels");
+ printf("\n");
+
UNLEAK(graph);
return 0;
diff --git a/t/helper/test-ref-store.c b/t/helper/test-ref-store.c
index 3e4ddae..9646d85 100644
--- a/t/helper/test-ref-store.c
+++ b/t/helper/test-ref-store.c
@@ -180,10 +180,9 @@ static int cmd_resolve_ref(struct ref_store *refs, const char **argv)
int resolve_flags = arg_flags(*argv++, "resolve-flags", empty_flags);
int flags;
const char *ref;
- int ignore_errno;
ref = refs_resolve_ref_unsafe(refs, refname, resolve_flags,
- &oid, &flags, &ignore_errno);
+ &oid, &flags);
printf("%s %s 0x%x\n", oid_to_hex(&oid), ref ? ref : "(null)", flags);
return ref ? 0 : 1;
}
diff --git a/t/helper/test-reftable.c b/t/helper/test-reftable.c
index 26b03d7..1f0a28c 100644
--- a/t/helper/test-reftable.c
+++ b/t/helper/test-reftable.c
@@ -3,15 +3,16 @@
int cmd__reftable(int argc, const char **argv)
{
+ /* test from simple to complex. */
basics_test_main(argc, argv);
+ record_test_main(argc, argv);
block_test_main(argc, argv);
- merged_test_main(argc, argv);
+ tree_test_main(argc, argv);
pq_test_main(argc, argv);
- record_test_main(argc, argv);
- refname_test_main(argc, argv);
readwrite_test_main(argc, argv);
+ merged_test_main(argc, argv);
stack_test_main(argc, argv);
- tree_test_main(argc, argv);
+ refname_test_main(argc, argv);
return 0;
}
diff --git a/t/helper/test-run-command.c b/t/helper/test-run-command.c
index 913775a..f3b90aa 100644
--- a/t/helper/test-run-command.c
+++ b/t/helper/test-run-command.c
@@ -19,7 +19,6 @@
#include "thread-utils.h"
#include "wildmatch.h"
#include "gettext.h"
-#include "parse-options.h"
static int number_callbacks;
static int parallel_next(struct child_process *cp,
@@ -180,15 +179,16 @@ static int testsuite(int argc, const char **argv)
if (max_jobs > suite.tests.nr)
max_jobs = suite.tests.nr;
- fprintf(stderr, "Running %d tests (%d at a time)\n",
- suite.tests.nr, max_jobs);
+ fprintf(stderr, "Running %"PRIuMAX" tests (%d at a time)\n",
+ (uintmax_t)suite.tests.nr, max_jobs);
ret = run_processes_parallel(max_jobs, next_test, test_failed,
test_finished, &suite);
if (suite.failed.nr > 0) {
ret = 1;
- fprintf(stderr, "%d tests failed:\n\n", suite.failed.nr);
+ fprintf(stderr, "%"PRIuMAX" tests failed:\n\n",
+ (uintmax_t)suite.failed.nr);
for (i = 0; i < suite.failed.nr; i++)
fprintf(stderr, "\t%s\n", suite.failed.items[i].string);
}
@@ -221,9 +221,9 @@ static int quote_stress_test(int argc, const char **argv)
struct strbuf out = STRBUF_INIT;
struct strvec args = STRVEC_INIT;
struct option options[] = {
- OPT_INTEGER('n', "trials", &trials, "Number of trials"),
- OPT_INTEGER('s', "skip", &skip, "Skip <n> trials"),
- OPT_BOOL('m', "msys2", &msys2, "Test quoting for MSYS2's sh"),
+ OPT_INTEGER('n', "trials", &trials, "number of trials"),
+ OPT_INTEGER('s', "skip", &skip, "skip <n> trials"),
+ OPT_BOOL('m', "msys2", &msys2, "test quoting for MSYS2's sh"),
OPT_END()
};
const char * const usage[] = {
diff --git a/t/helper/test-tool.c b/t/helper/test-tool.c
index 338a57b..0424f7a 100644
--- a/t/helper/test-tool.c
+++ b/t/helper/test-tool.c
@@ -20,6 +20,7 @@ static struct test_cmd cmds[] = {
{ "chmtime", cmd__chmtime },
{ "config", cmd__config },
{ "crontab", cmd__crontab },
+ { "csprng", cmd__csprng },
{ "ctype", cmd__ctype },
{ "date", cmd__date },
{ "delta", cmd__delta },
@@ -31,6 +32,7 @@ static struct test_cmd cmds[] = {
{ "dump-untracked-cache", cmd__dump_untracked_cache },
{ "example-decorate", cmd__example_decorate },
{ "fast-rebase", cmd__fast_rebase },
+ { "fsmonitor-client", cmd__fsmonitor_client },
{ "genrandom", cmd__genrandom },
{ "genzeros", cmd__genzeros },
{ "getcwd", cmd__getcwd },
diff --git a/t/helper/test-tool.h b/t/helper/test-tool.h
index 48cee1f..c876e82 100644
--- a/t/helper/test-tool.h
+++ b/t/helper/test-tool.h
@@ -10,6 +10,7 @@ int cmd__bloom(int argc, const char **argv);
int cmd__chmtime(int argc, const char **argv);
int cmd__config(int argc, const char **argv);
int cmd__crontab(int argc, const char **argv);
+int cmd__csprng(int argc, const char **argv);
int cmd__ctype(int argc, const char **argv);
int cmd__date(int argc, const char **argv);
int cmd__delta(int argc, const char **argv);
@@ -22,6 +23,7 @@ int cmd__dump_untracked_cache(int argc, const char **argv);
int cmd__dump_reftable(int argc, const char **argv);
int cmd__example_decorate(int argc, const char **argv);
int cmd__fast_rebase(int argc, const char **argv);
+int cmd__fsmonitor_client(int argc, const char **argv);
int cmd__genrandom(int argc, const char **argv);
int cmd__genzeros(int argc, const char **argv);
int cmd__getcwd(int argc, const char **argv);
diff --git a/t/interop/Makefile b/t/interop/Makefile
index 31a4bbc..6911c29 100644
--- a/t/interop/Makefile
+++ b/t/interop/Makefile
@@ -1,3 +1,6 @@
+# Import tree-wide shared Makefile behavior and libraries
+include ../../shared.mak
+
-include ../../config.mak
export GIT_TEST_OPTIONS
diff --git a/t/lib-bitmap.sh b/t/lib-bitmap.sh
index 21d0392..a95537e 100644
--- a/t/lib-bitmap.sh
+++ b/t/lib-bitmap.sh
@@ -1,6 +1,9 @@
# Helpers for scripts testing bitmap functionality; see t5310 for
# example usage.
+objdir=.git/objects
+midx=$objdir/pack/multi-pack-index
+
# Compare a file containing rev-list bitmap traversal output to its non-bitmap
# counterpart. You can't just use test_cmp for this, because the two produce
# subtly different output:
@@ -264,3 +267,185 @@ have_delta () {
midx_checksum () {
test-tool read-midx --checksum "$1"
}
+
+# midx_pack_source <obj>
+midx_pack_source () {
+ test-tool read-midx --show-objects .git/objects | grep "^$1 " | cut -f2
+}
+
+test_rev_exists () {
+ commit="$1"
+ kind="$2"
+
+ test_expect_success "reverse index exists ($kind)" '
+ GIT_TRACE2_EVENT=$(pwd)/event.trace \
+ git rev-list --test-bitmap "$commit" &&
+
+ if test "rev" = "$kind"
+ then
+ test_path_is_file $midx-$(midx_checksum $objdir).rev
+ fi &&
+ grep "\"category\":\"load_midx_revindex\",\"key\":\"source\",\"value\":\"$kind\"" event.trace
+ '
+}
+
+midx_bitmap_core () {
+ rev_kind="${1:-midx}"
+
+ setup_bitmap_history
+
+ test_expect_success 'create single-pack midx with bitmaps' '
+ git repack -ad &&
+ git multi-pack-index write --bitmap &&
+ test_path_is_file $midx &&
+ test_path_is_file $midx-$(midx_checksum $objdir).bitmap
+ '
+
+ test_rev_exists HEAD "$rev_kind"
+
+ basic_bitmap_tests
+
+ test_expect_success 'create new additional packs' '
+ for i in $(test_seq 1 16)
+ do
+ test_commit "$i" &&
+ git repack -d || return 1
+ done &&
+
+ git checkout -b other2 HEAD~8 &&
+ for i in $(test_seq 1 8)
+ do
+ test_commit "side-$i" &&
+ git repack -d || return 1
+ done &&
+ git checkout second
+ '
+
+ test_expect_success 'create multi-pack midx with bitmaps' '
+ git multi-pack-index write --bitmap &&
+
+ ls $objdir/pack/pack-*.pack >packs &&
+ test_line_count = 25 packs &&
+
+ test_path_is_file $midx &&
+ test_path_is_file $midx-$(midx_checksum $objdir).bitmap
+ '
+
+ test_rev_exists HEAD "$rev_kind"
+
+ basic_bitmap_tests
+
+ test_expect_success '--no-bitmap is respected when bitmaps exist' '
+ git multi-pack-index write --bitmap &&
+
+ test_commit respect--no-bitmap &&
+ git repack -d &&
+
+ test_path_is_file $midx &&
+ test_path_is_file $midx-$(midx_checksum $objdir).bitmap &&
+
+ git multi-pack-index write --no-bitmap &&
+
+ test_path_is_file $midx &&
+ test_path_is_missing $midx-$(midx_checksum $objdir).bitmap &&
+ test_path_is_missing $midx-$(midx_checksum $objdir).rev
+ '
+
+ test_expect_success 'setup midx with base from later pack' '
+ # Write a and b so that "a" is a delta on top of base "b", since Git
+ # prefers to delete contents out of a base rather than add to a shorter
+ # object.
+ test_seq 1 128 >a &&
+ test_seq 1 130 >b &&
+
+ git add a b &&
+ git commit -m "initial commit" &&
+
+ a=$(git rev-parse HEAD:a) &&
+ b=$(git rev-parse HEAD:b) &&
+
+ # In the first pack, "a" is stored as a delta to "b".
+ p1=$(git pack-objects .git/objects/pack/pack <<-EOF
+ $a
+ $b
+ EOF
+ ) &&
+
+ # In the second pack, "a" is missing, and "b" is not a delta nor base to
+ # any other object.
+ p2=$(git pack-objects .git/objects/pack/pack <<-EOF
+ $b
+ $(git rev-parse HEAD)
+ $(git rev-parse HEAD^{tree})
+ EOF
+ ) &&
+
+ git prune-packed &&
+ # Use the second pack as the preferred source, so that "b" occurs
+ # earlier in the MIDX object order, rendering "a" unusable for pack
+ # reuse.
+ git multi-pack-index write --bitmap --preferred-pack=pack-$p2.idx &&
+
+ have_delta $a $b &&
+ test $(midx_pack_source $a) != $(midx_pack_source $b)
+ '
+
+ rev_list_tests 'full bitmap with backwards delta'
+
+ test_expect_success 'clone with bitmaps enabled' '
+ git clone --no-local --bare . clone-reverse-delta.git &&
+ test_when_finished "rm -fr clone-reverse-delta.git" &&
+
+ git rev-parse HEAD >expect &&
+ git --git-dir=clone-reverse-delta.git rev-parse HEAD >actual &&
+ test_cmp expect actual
+ '
+
+ test_expect_success 'changing the preferred pack does not corrupt bitmaps' '
+ rm -fr repo &&
+ git init repo &&
+ test_when_finished "rm -fr repo" &&
+ (
+ cd repo &&
+
+ test_commit A &&
+ test_commit B &&
+
+ git rev-list --objects --no-object-names HEAD^ >A.objects &&
+ git rev-list --objects --no-object-names HEAD^.. >B.objects &&
+
+ A=$(git pack-objects $objdir/pack/pack <A.objects) &&
+ B=$(git pack-objects $objdir/pack/pack <B.objects) &&
+
+ cat >indexes <<-EOF &&
+ pack-$A.idx
+ pack-$B.idx
+ EOF
+
+ git multi-pack-index write --bitmap --stdin-packs \
+ --preferred-pack=pack-$A.pack <indexes &&
+ git rev-list --test-bitmap A &&
+
+ git multi-pack-index write --bitmap --stdin-packs \
+ --preferred-pack=pack-$B.pack <indexes &&
+ git rev-list --test-bitmap A
+ )
+ '
+}
+
+midx_bitmap_partial_tests () {
+ rev_kind="${1:-midx}"
+
+ test_expect_success 'setup partial bitmaps' '
+ test_commit packed &&
+ git repack &&
+ test_commit loose &&
+ git multi-pack-index write --bitmap 2>err &&
+ test_path_is_file $midx &&
+ test_path_is_file $midx-$(midx_checksum $objdir).bitmap
+ '
+
+ test_rev_exists HEAD~ "$rev_kind"
+
+ basic_bitmap_tests HEAD~
+}
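
These helpers are meant to be sourced by an individual test script, which decides via the optional argument whether the reverse index is expected to come from a *.rev file or from inside the MIDX. A hedged sketch of a caller, using the calling convention implied by the defaults above (the script itself is hypothetical):

	#!/bin/sh
	test_description='multi-pack-index bitmaps'
	. ./test-lib.sh
	. "$TEST_DIRECTORY"/lib-bitmap.sh

	midx_bitmap_core              # expects the MIDX-embedded reverse index
	midx_bitmap_partial_tests
	test_done
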
diff --git a/t/lib-commit-graph.sh b/t/lib-commit-graph.sh
new file mode 100755
index 0000000..5d79e1a
--- /dev/null
+++ b/t/lib-commit-graph.sh
@@ -0,0 +1,58 @@
+#!/bin/sh
+
+# Helper functions for testing commit-graphs.
+
+# Initialize OID cache with oid_version
+test_oid_cache <<-EOF
+oid_version sha1:1
+oid_version sha256:2
+EOF
+
+graph_git_two_modes() {
+ git -c core.commitGraph=true $1 >output &&
+ git -c core.commitGraph=false $1 >expect &&
+ test_cmp expect output
+}
+
+graph_git_behavior() {
+ MSG=$1
+ DIR=$2
+ BRANCH=$3
+ COMPARE=$4
+ test_expect_success "check normal git operations: $MSG" '
+ cd "$TRASH_DIRECTORY/$DIR" &&
+ graph_git_two_modes "log --oneline $BRANCH" &&
+ graph_git_two_modes "log --topo-order $BRANCH" &&
+ graph_git_two_modes "log --graph $COMPARE..$BRANCH" &&
+ graph_git_two_modes "branch -vv" &&
+ graph_git_two_modes "merge-base -a $BRANCH $COMPARE"
+ '
+}
+
+graph_read_expect() {
+ OPTIONAL=""
+ NUM_CHUNKS=3
+ if test -n "$2"
+ then
+ OPTIONAL=" $2"
+ NUM_CHUNKS=$((3 + $(echo "$2" | wc -w)))
+ fi
+ GENERATION_VERSION=2
+ if test -n "$3"
+ then
+ GENERATION_VERSION=$3
+ fi
+ OPTIONS=
+ if test $GENERATION_VERSION -gt 1
+ then
+ OPTIONS=" read_generation_data"
+ fi
+ cat >expect <<- EOF
+ header: 43475048 1 $(test_oid oid_version) $NUM_CHUNKS 0
+ num_commits: $1
+ chunks: oid_fanout oid_lookup commit_metadata$OPTIONAL
+ options:$OPTIONS
+ EOF
+ test-tool read-graph >output &&
+ test_cmp expect output
+}
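
graph_read_expect() builds the expected `test-tool read-graph` output from a commit count, an optional list of extra chunk names, and a generation-number version. A sketch of how a commit-graph test that has sourced this library might use it; the commit count and the extra_edges chunk are placeholders that depend on the history the test created earlier:

	test_expect_success 'write and verify commit-graph' '
		git commit-graph write --reachable &&
		graph_read_expect 12 extra_edges
	'
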
diff --git a/t/lib-gpg.sh b/t/lib-gpg.sh
index 3e7ee13..1147855 100644
--- a/t/lib-gpg.sh
+++ b/t/lib-gpg.sh
@@ -40,7 +40,7 @@ test_lazy_prereq GPG '
# > lib-gpg/ownertrust
mkdir "$GNUPGHOME" &&
chmod 0700 "$GNUPGHOME" &&
- (gpgconf --kill gpg-agent || : ) &&
+ (gpgconf --kill all || : ) &&
gpg --homedir "${GNUPGHOME}" --import \
"$TEST_DIRECTORY"/lib-gpg/keyring.gpg &&
gpg --homedir "${GNUPGHOME}" --import-ownertrust \
@@ -72,12 +72,11 @@ test_lazy_prereq GPGSM '
--passphrase-fd 0 --pinentry-mode loopback \
--import "$TEST_DIRECTORY"/lib-gpg/gpgsm_cert.p12 &&
- gpgsm --homedir "${GNUPGHOME}" -K |
- grep fingerprint: |
- cut -d" " -f4 |
- tr -d "\\n" >"${GNUPGHOME}/trustlist.txt" &&
+ gpgsm --homedir "${GNUPGHOME}" -K --with-colons |
+ awk -F ":" "/^fpr:/ {printf \"%s S relax\\n\", \$10}" \
+ >"${GNUPGHOME}/trustlist.txt" &&
+ (gpgconf --reload all || : ) &&
- echo " S relax" >>"${GNUPGHOME}/trustlist.txt" &&
echo hello | gpgsm --homedir "${GNUPGHOME}" >/dev/null \
-u committer@example.com -o /dev/null --sign -
'
diff --git a/t/lib-read-tree-m-3way.sh b/t/lib-read-tree-m-3way.sh
index 168329a..2da25b3 100644
--- a/t/lib-read-tree-m-3way.sh
+++ b/t/lib-read-tree-m-3way.sh
@@ -3,21 +3,21 @@
mkdir Z
for a in N D M
do
- for b in N D M
- do
- p=$a$b
+ for b in N D M
+ do
+ p=$a$b
echo This is $p from the original tree. >$p
echo This is Z/$p from the original tree. >Z/$p
- test_expect_success \
- "adding test file $p and Z/$p" \
- 'git update-index --add $p &&
- git update-index --add Z/$p'
+ test_expect_success "adding test file $p and Z/$p" '
+ git update-index --add $p &&
+ git update-index --add Z/$p
+ '
done
done
echo This is SS from the original tree. >SS
-test_expect_success \
- 'adding test file SS' \
- 'git update-index --add SS'
+test_expect_success 'adding test file SS' '
+ git update-index --add SS
+'
cat >TT <<\EOF
This is a trivial merge sample text.
Branch A is expected to upcase this word, here.
@@ -30,12 +30,12 @@ At the very end, here comes another line, that is
the word, expected to be upcased by Branch B.
This concludes the trivial merge sample file.
EOF
-test_expect_success \
- 'adding test file TT' \
- 'git update-index --add TT'
-test_expect_success \
- 'prepare initial tree' \
- 'tree_O=$(git write-tree)'
+test_expect_success 'adding test file TT' '
+ git update-index --add TT
+'
+test_expect_success 'prepare initial tree' '
+ tree_O=$(git write-tree)
+'
################################################################
# Branch A and B makes the changes according to the above matrix.
@@ -45,48 +45,48 @@ test_expect_success \
to_remove=$(echo D? Z/D?)
rm -f $to_remove
-test_expect_success \
- 'change in branch A (removal)' \
- 'git update-index --remove $to_remove'
+test_expect_success 'change in branch A (removal)' '
+ git update-index --remove $to_remove
+'
for p in M? Z/M?
do
- echo This is modified $p in the branch A. >$p
- test_expect_success \
- 'change in branch A (modification)' \
- "git update-index $p"
+ echo This is modified $p in the branch A. >$p
+ test_expect_success 'change in branch A (modification)' '
+ git update-index $p
+ '
done
for p in AN AA Z/AN Z/AA
do
- echo This is added $p in the branch A. >$p
- test_expect_success \
- 'change in branch A (addition)' \
- "git update-index --add $p"
+ echo This is added $p in the branch A. >$p
+ test_expect_success 'change in branch A (addition)' '
+ git update-index --add $p
+ '
done
echo This is SS from the modified tree. >SS
echo This is LL from the modified tree. >LL
-test_expect_success \
- 'change in branch A (addition)' \
- 'git update-index --add LL &&
- git update-index SS'
+test_expect_success 'change in branch A (addition)' '
+ git update-index --add LL &&
+ git update-index SS
+'
mv TT TT-
sed -e '/Branch A/s/word/WORD/g' <TT- >TT
rm -f TT-
-test_expect_success \
- 'change in branch A (edit)' \
- 'git update-index TT'
+test_expect_success 'change in branch A (edit)' '
+ git update-index TT
+'
mkdir DF
echo Branch A makes a file at DF/DF, creating a directory DF. >DF/DF
-test_expect_success \
- 'change in branch A (change file to directory)' \
- 'git update-index --add DF/DF'
+test_expect_success 'change in branch A (change file to directory)' '
+ git update-index --add DF/DF
+'
-test_expect_success \
- 'recording branch A tree' \
- 'tree_A=$(git write-tree)'
+test_expect_success 'recording branch A tree' '
+ tree_A=$(git write-tree)
+'
################################################################
# Branch B
@@ -94,65 +94,65 @@ test_expect_success \
rm -rf [NDMASLT][NDMASLT] Z DF
mkdir Z
-test_expect_success \
- 'reading original tree and checking out' \
- 'git read-tree $tree_O &&
- git checkout-index -a'
+test_expect_success 'reading original tree and checking out' '
+ git read-tree $tree_O &&
+ git checkout-index -a
+'
to_remove=$(echo ?D Z/?D)
rm -f $to_remove
-test_expect_success \
- 'change in branch B (removal)' \
- "git update-index --remove $to_remove"
+test_expect_success 'change in branch B (removal)' '
+ git update-index --remove $to_remove
+'
for p in ?M Z/?M
do
- echo This is modified $p in the branch B. >$p
- test_expect_success \
- 'change in branch B (modification)' \
- "git update-index $p"
+ echo This is modified $p in the branch B. >$p
+ test_expect_success 'change in branch B (modification)' '
+ git update-index $p
+ '
done
for p in NA AA Z/NA Z/AA
do
- echo This is added $p in the branch B. >$p
- test_expect_success \
- 'change in branch B (addition)' \
- "git update-index --add $p"
+ echo This is added $p in the branch B. >$p
+ test_expect_success 'change in branch B (addition)' '
+ git update-index --add $p
+ '
done
echo This is SS from the modified tree. >SS
echo This is LL from the modified tree. >LL
-test_expect_success \
- 'change in branch B (addition and modification)' \
- 'git update-index --add LL &&
- git update-index SS'
+test_expect_success 'change in branch B (addition and modification)' '
+ git update-index --add LL &&
+ git update-index SS
+'
mv TT TT-
sed -e '/Branch B/s/word/WORD/g' <TT- >TT
rm -f TT-
-test_expect_success \
- 'change in branch B (modification)' \
- 'git update-index TT'
+test_expect_success 'change in branch B (modification)' '
+ git update-index TT
+'
echo Branch B makes a file at DF. >DF
-test_expect_success \
- 'change in branch B (addition of a file to conflict with directory)' \
- 'git update-index --add DF'
-
-test_expect_success \
- 'recording branch B tree' \
- 'tree_B=$(git write-tree)'
-
-test_expect_success \
- 'keep contents of 3 trees for easy access' \
- 'rm -f .git/index &&
- git read-tree $tree_O &&
- mkdir .orig-O &&
- git checkout-index --prefix=.orig-O/ -f -q -a &&
- rm -f .git/index &&
- git read-tree $tree_A &&
- mkdir .orig-A &&
- git checkout-index --prefix=.orig-A/ -f -q -a &&
- rm -f .git/index &&
- git read-tree $tree_B &&
- mkdir .orig-B &&
- git checkout-index --prefix=.orig-B/ -f -q -a'
+test_expect_success 'change in branch B (addition of a file to conflict with directory)' '
+ git update-index --add DF
+'
+
+test_expect_success 'recording branch B tree' '
+ tree_B=$(git write-tree)
+'
+
+test_expect_success 'keep contents of 3 trees for easy access' '
+ rm -f .git/index &&
+ git read-tree $tree_O &&
+ mkdir .orig-O &&
+ git checkout-index --prefix=.orig-O/ -f -q -a &&
+ rm -f .git/index &&
+ git read-tree $tree_A &&
+ mkdir .orig-A &&
+ git checkout-index --prefix=.orig-A/ -f -q -a &&
+ rm -f .git/index &&
+ git read-tree $tree_B &&
+ mkdir .orig-B &&
+ git checkout-index --prefix=.orig-B/ -f -q -a
+'
diff --git a/t/perf/Makefile b/t/perf/Makefile
index 2465770..e4808ae 100644
--- a/t/perf/Makefile
+++ b/t/perf/Makefile
@@ -1,3 +1,6 @@
+# Import tree-wide shared Makefile behavior and libraries
+include ../../shared.mak
+
-include ../../config.mak
export GIT_TEST_OPTIONS
diff --git a/t/perf/p1006-cat-file.sh b/t/perf/p1006-cat-file.sh
new file mode 100755
index 0000000..dcd8015
--- /dev/null
+++ b/t/perf/p1006-cat-file.sh
@@ -0,0 +1,12 @@
+#!/bin/sh
+
+test_description='Tests listing object info performance'
+. ./perf-lib.sh
+
+test_perf_large_repo
+
+test_perf 'cat-file --batch-check' '
+ git cat-file --batch-all-objects --batch-check
+'
+
+test_done
diff --git a/t/perf/p2000-sparse-operations.sh b/t/perf/p2000-sparse-operations.sh
index cb777c7..382716c 100755
--- a/t/perf/p2000-sparse-operations.sh
+++ b/t/perf/p2000-sparse-operations.sh
@@ -117,5 +117,8 @@ test_perf_on_all git diff
test_perf_on_all git diff --cached
test_perf_on_all git blame $SPARSE_CONE/a
test_perf_on_all git blame $SPARSE_CONE/f3/a
+test_perf_on_all git read-tree -mu HEAD
+test_perf_on_all git checkout-index -f --all
+test_perf_on_all git update-index --add --remove $SPARSE_CONE/a
test_done
diff --git a/t/perf/p7519-fsmonitor.sh b/t/perf/p7519-fsmonitor.sh
index c8be58f..0b9129c 100755
--- a/t/perf/p7519-fsmonitor.sh
+++ b/t/perf/p7519-fsmonitor.sh
@@ -72,7 +72,7 @@ then
fi
fi
-trace_start() {
+trace_start () {
if test -n "$GIT_PERF_7519_TRACE"
then
name="$1"
@@ -91,13 +91,20 @@ trace_start() {
fi
}
-trace_stop() {
+trace_stop () {
if test -n "$GIT_PERF_7519_TRACE"
then
unset GIT_TRACE2_PERF
fi
}
+touch_files () {
+ n=$1 &&
+ d="$n"_files &&
+
+ (cd $d && test_seq 1 $n | xargs touch )
+}
+
test_expect_success "one time repo setup" '
# set untrackedCache depending on the environment
if test -n "$GIT_PERF_7519_UNTRACKED_CACHE"
@@ -119,10 +126,11 @@ test_expect_success "one time repo setup" '
fi &&
mkdir 1_file 10_files 100_files 1000_files 10000_files &&
- for i in $(test_seq 1 10); do touch 10_files/$i || return 1; done &&
- for i in $(test_seq 1 100); do touch 100_files/$i || return 1; done &&
- for i in $(test_seq 1 1000); do touch 1000_files/$i || return 1; done &&
- for i in $(test_seq 1 10000); do touch 10000_files/$i || return 1; done &&
+ : 1_file directory should be left empty &&
+ touch_files 10 &&
+ touch_files 100 &&
+ touch_files 1000 &&
+ touch_files 10000 &&
git add 1_file 10_files 100_files 1000_files 10000_files &&
git commit -qm "Add files" &&
@@ -133,7 +141,7 @@ test_expect_success "one time repo setup" '
fi
'
-setup_for_fsmonitor() {
+setup_for_fsmonitor_hook () {
# set INTEGRATION_SCRIPT depending on the environment
if test -n "$INTEGRATION_PATH"
then
@@ -173,8 +181,12 @@ test_perf_w_drop_caches () {
test_perf "$@"
}
-test_fsmonitor_suite() {
- if test -n "$INTEGRATION_SCRIPT"; then
+test_fsmonitor_suite () {
+ if test -n "$USE_FSMONITOR_DAEMON"
+ then
+ DESC="builtin fsmonitor--daemon"
+ elif test -n "$INTEGRATION_SCRIPT"
+ then
DESC="fsmonitor=$(basename $INTEGRATION_SCRIPT)"
else
DESC="fsmonitor=disabled"
@@ -199,15 +211,15 @@ test_fsmonitor_suite() {
# Update the mtimes on up to 100k files to make status think
# that they are dirty. For simplicity, omit any files with
- # LFs (i.e. anything that ls-files thinks it needs to dquote).
- # Then fully backslash-quote the paths to capture any
- # whitespace so that they pass thru xargs properly.
+ # LFs (i.e. anything that ls-files thinks it needs to dquote)
+ # and any files with whitespace so that they pass thru xargs
+ # properly.
#
test_perf_w_drop_caches "status (dirty) ($DESC)" '
git ls-files | \
head -100000 | \
grep -v \" | \
- sed '\''s/\(.\)/\\\1/g'\'' | \
+ grep -v " ." | \
xargs test-tool chmtime -300 &&
git status
'
@@ -253,11 +265,11 @@ test_fsmonitor_suite() {
trace_start fsmonitor-watchman
if test -n "$GIT_PERF_7519_FSMONITOR"; then
for INTEGRATION_PATH in $GIT_PERF_7519_FSMONITOR; do
- test_expect_success "setup for fsmonitor $INTEGRATION_PATH" 'setup_for_fsmonitor'
+ test_expect_success "setup for fsmonitor $INTEGRATION_PATH" 'setup_for_fsmonitor_hook'
test_fsmonitor_suite
done
else
- test_expect_success "setup for fsmonitor" 'setup_for_fsmonitor'
+ test_expect_success "setup for fsmonitor hook" 'setup_for_fsmonitor_hook'
test_fsmonitor_suite
fi
@@ -285,4 +297,30 @@ test_expect_success "setup without fsmonitor" '
test_fsmonitor_suite
trace_stop
+#
+# Run a full set of perf tests using the built-in fsmonitor--daemon.
+# It does not use the Hook API, so it has a different setup.
+# Explicitly start the daemon here (and before we start client commands)
+# so that we can later add custom tracing.
+#
+if test_have_prereq FSMONITOR_DAEMON
+then
+ USE_FSMONITOR_DAEMON=t
+
+ test_expect_success "setup for builtin fsmonitor" '
+ trace_start fsmonitor--daemon--server &&
+ git fsmonitor--daemon start &&
+
+ trace_start fsmonitor--daemon--client &&
+
+ git config core.fsmonitor true &&
+ git update-index --fsmonitor
+ '
+
+ test_fsmonitor_suite
+
+ git fsmonitor--daemon stop
+ trace_stop
+fi
+
test_done
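
Running the script is unchanged; the new daemon block is entered automatically when the build advertises the FSMONITOR_DAEMON prereq. A hedged invocation sketch using only the knobs referenced above (the values are examples):

	cd t/perf &&
	GIT_PERF_7519_UNTRACKED_CACHE=true \
	GIT_PERF_7519_TRACE=1 \
	./p7519-fsmonitor.sh
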
diff --git a/t/perf/perf-lib.sh b/t/perf/perf-lib.sh
index 407252b..932105c 100644
--- a/t/perf/perf-lib.sh
+++ b/t/perf/perf-lib.sh
@@ -78,7 +78,7 @@ test_perf_copy_repo_contents () {
for stuff in "$1"/*
do
case "$stuff" in
- */objects|*/hooks|*/config|*/commondir|*/gitdir|*/worktrees)
+ */objects|*/hooks|*/config|*/commondir|*/gitdir|*/worktrees|*/fsmonitor--daemon*)
;;
*)
cp -R "$stuff" "$repo/.git/" || exit 1
diff --git a/t/t0000-basic.sh b/t/t0000-basic.sh
index b007f0e..17a268c 100755
--- a/t/t0000-basic.sh
+++ b/t/t0000-basic.sh
@@ -101,6 +101,19 @@ test_expect_success 'subtest: 2/3 tests passing' '
EOF
'
+test_expect_success 'subtest: --immediate' '
+ run_sub_test_lib_test_err partial-pass \
+ --immediate &&
+ check_sub_test_lib_test_err partial-pass \
+ <<-\EOF_OUT 3<<-EOF_ERR
+ > ok 1 - passing test #1
+ > not ok 2 - failing test #2
+ > # false
+ > 1..2
+ EOF_OUT
+ EOF_ERR
+'
+
test_expect_success 'subtest: a failing TODO test' '
write_and_run_sub_test_lib_test failing-todo <<-\EOF &&
test_expect_success "passing test" "true"
@@ -1089,7 +1102,8 @@ test_expect_success 'update-index D/F conflict' '
mv path2 path0 &&
mv tmp path2 &&
git update-index --add --replace path2 path0/file2 &&
- numpath0=$(git ls-files path0 | wc -l) &&
+ git ls-files path0 >tmp &&
+ numpath0=$(wc -l <tmp) &&
test $numpath0 = 1
'
@@ -1103,13 +1117,14 @@ test_expect_success 'very long name in the index handled sanely' '
>path4 &&
git update-index --add path4 &&
+ git ls-files -s path4 >tmp &&
(
- git ls-files -s path4 |
- sed -e "s/ .*/ /" |
+ sed -e "s/ .*/ /" tmp |
tr -d "\012" &&
echo "$a"
) | git update-index --index-info &&
- len=$(git ls-files "a*" | wc -c) &&
+ git ls-files "a*" >tmp &&
+ len=$(wc -c <tmp) &&
test $len = 4098
'
diff --git a/t/t0001-init.sh b/t/t0001-init.sh
index 3235ab4..d479303 100755
--- a/t/t0001-init.sh
+++ b/t/t0001-init.sh
@@ -6,7 +6,8 @@ TEST_PASSES_SANITIZE_LEAK=true
. ./test-lib.sh
check_config () {
- if test -d "$1" && test -f "$1/config" && test -d "$1/refs"
+ if test_path_is_dir "$1" &&
+ test_path_is_file "$1/config" && test_path_is_dir "$1/refs"
then
: happy
else
diff --git a/t/t0002-gitfile.sh b/t/t0002-gitfile.sh
index 76052cb..f6356db 100755
--- a/t/t0002-gitfile.sh
+++ b/t/t0002-gitfile.sh
@@ -65,9 +65,11 @@ test_expect_success 'check commit-tree' '
test_path_is_file "$REAL/objects/$(objpath $SHA)"
'
-test_expect_success 'check rev-list' '
+test_expect_success !SANITIZE_LEAK 'check rev-list' '
git update-ref "HEAD" "$SHA" &&
- test "$SHA" = "$(git rev-list HEAD)"
+ git rev-list HEAD >actual &&
+ echo $SHA >expected &&
+ test_cmp expected actual
'
test_expect_success 'setup_git_dir twice in subdir' '
diff --git a/t/t0003-attributes.sh b/t/t0003-attributes.sh
index b9ed612..143f100 100755
--- a/t/t0003-attributes.sh
+++ b/t/t0003-attributes.sh
@@ -205,15 +205,18 @@ test_expect_success 'attribute test: read paths from stdin' '
test_expect_success 'attribute test: --all option' '
grep -v unspecified <expect-all | sort >specified-all &&
sed -e "s/:.*//" <expect-all | uniq >stdin-all &&
- git check-attr --stdin --all <stdin-all | sort >actual &&
+ git check-attr --stdin --all <stdin-all >tmp &&
+ sort tmp >actual &&
test_cmp specified-all actual
'
test_expect_success 'attribute test: --cached option' '
- git check-attr --cached --stdin --all <stdin-all | sort >actual &&
+ git check-attr --cached --stdin --all <stdin-all >tmp &&
+ sort tmp >actual &&
test_must_be_empty actual &&
git add .gitattributes a/.gitattributes a/b/.gitattributes &&
- git check-attr --cached --stdin --all <stdin-all | sort >actual &&
+ git check-attr --cached --stdin --all <stdin-all >tmp &&
+ sort tmp >actual &&
test_cmp specified-all actual
'
diff --git a/t/t0006-date.sh b/t/t0006-date.sh
index 7941869..2490162 100755
--- a/t/t0006-date.sh
+++ b/t/t0006-date.sh
@@ -1,6 +1,8 @@
#!/bin/sh
test_description='test date parsing and printing'
+
+TEST_PASSES_SANITIZE_LEAK=true
. ./test-lib.sh
# arbitrary reference time: 2009-08-30 19:20:00
diff --git a/t/t0012-help.sh b/t/t0012-help.sh
index 91b68c7..6c3e1f7 100755
--- a/t/t0012-help.sh
+++ b/t/t0012-help.sh
@@ -35,6 +35,9 @@ test_expect_success 'basic help commands' '
'
test_expect_success 'invalid usage' '
+ test_expect_code 129 git help -a add &&
+ test_expect_code 129 git help --all add &&
+
test_expect_code 129 git help -g add &&
test_expect_code 129 git help -a -c &&
@@ -46,6 +49,29 @@ test_expect_success 'invalid usage' '
test_expect_code 129 git help --config-sections-for-completion add
'
+for opt in '-a' '-g' '-c' '--config-for-completion' '--config-sections-for-completion'
+do
+ test_expect_success "invalid usage of '$opt' with [-i|-m|-w]" '
+ git help $opt &&
+ test_expect_code 129 git help $opt -i &&
+ test_expect_code 129 git help $opt -m &&
+ test_expect_code 129 git help $opt -w
+ '
+
+ if test "$opt" = "-a"
+ then
+ continue
+ fi
+
+ test_expect_success "invalid usage of '$opt' with --no-external-commands" '
+ test_expect_code 129 git help $opt --no-external-commands
+ '
+
+ test_expect_success "invalid usage of '$opt' with --no-aliases" '
+ test_expect_code 129 git help $opt --no-aliases
+ '
+done
+
test_expect_success "works for commands and guides by default" '
configure_help &&
git help status &&
@@ -138,14 +164,87 @@ test_expect_success 'git help --config-sections-for-completion' '
test_cmp human.munged sections
'
+test_section_spacing () {
+ cat >expect &&
+ "$@" >out &&
+ grep -E "(^[^ ]|^$)" out >actual
+}
+
+test_section_spacing_trailer () {
+ test_section_spacing "$@" &&
+ test_expect_code 1 git >out &&
+ sed -n '/list available subcommands/,$p' <out >>expect
+}
+
+
+for cmd in git "git help"
+do
+ test_expect_success "'$cmd' section spacing" '
+ test_section_spacing_trailer git help <<-\EOF &&
+ usage: git [--version] [--help] [-C <path>] [-c <name>=<value>]
+
+ These are common Git commands used in various situations:
+
+ start a working area (see also: git help tutorial)
+
+ work on the current change (see also: git help everyday)
+
+ examine the history and state (see also: git help revisions)
+
+ grow, mark and tweak your common history
+
+ collaborate (see also: git help workflows)
+
+ EOF
+ test_cmp expect actual
+ '
+done
+
+test_expect_success "'git help -a' section spacing" '
+ test_section_spacing \
+ git help -a --no-external-commands --no-aliases <<-\EOF &&
+ See '\''git help <command>'\'' to read about a specific subcommand
+
+ Main Porcelain Commands
+
+ Ancillary Commands / Manipulators
+
+ Ancillary Commands / Interrogators
+
+ Interacting with Others
+
+ Low-level Commands / Manipulators
+
+ Low-level Commands / Interrogators
+
+ Low-level Commands / Syncing Repositories
+
+ Low-level Commands / Internal Helpers
+ EOF
+ test_cmp expect actual
+'
+
+test_expect_success "'git help -g' section spacing" '
+ test_section_spacing_trailer git help -g <<-\EOF &&
+ The Git concept guides are:
+
+ EOF
+ test_cmp expect actual
+'
+
test_expect_success 'generate builtin list' '
+ mkdir -p sub &&
git --list-cmds=builtins >builtins
'
while read builtin
do
test_expect_success "$builtin can handle -h" '
- test_expect_code 129 git $builtin -h >output 2>&1 &&
+ (
+ GIT_CEILING_DIRECTORIES=$(pwd) &&
+ export GIT_CEILING_DIRECTORIES &&
+ test_expect_code 129 git -C sub $builtin -h >output 2>&1
+ ) &&
test_i18ngrep usage output
'
done <builtins
diff --git a/t/t0015-hash.sh b/t/t0015-hash.sh
index 291e906..086822f 100755
--- a/t/t0015-hash.sh
+++ b/t/t0015-hash.sh
@@ -15,7 +15,7 @@ test_expect_success 'test basic SHA-1 hash values' '
grep c12252ceda8be8994d5fa0290a47231c1d16aae3 actual &&
printf "abcdefghijklmnopqrstuvwxyz" | test-tool sha1 >actual &&
grep 32d10c7b8cf96570ca04ce37f2a19d84240d3a89 actual &&
- perl -e "$| = 1; print q{aaaaaaaaaa} for 1..100000;" | \
+ perl -e "$| = 1; print q{aaaaaaaaaa} for 1..100000;" |
test-tool sha1 >actual &&
grep 34aa973cd4c4daa4f61eeb2bdbad27316534016f actual &&
printf "blob 0\0" | test-tool sha1 >actual &&
@@ -38,10 +38,10 @@ test_expect_success 'test basic SHA-256 hash values' '
printf "abcdefghijklmnopqrstuvwxyz" | test-tool sha256 >actual &&
grep 71c480df93d6ae2f1efad1447c66c9525e316218cf51fc8d9ed832f2daf18b73 actual &&
# Try to exercise the chunking code by turning autoflush on.
- perl -e "$| = 1; print q{aaaaaaaaaa} for 1..100000;" | \
+ perl -e "$| = 1; print q{aaaaaaaaaa} for 1..100000;" |
test-tool sha256 >actual &&
grep cdc76e5c9914fb9281a1c7e284d73e67f1809a48a497200e046d39ccc7112cd0 actual &&
- perl -e "$| = 1; print q{abcdefghijklmnopqrstuvwxyz} for 1..100000;" | \
+ perl -e "$| = 1; print q{abcdefghijklmnopqrstuvwxyz} for 1..100000;" |
test-tool sha256 >actual &&
grep e406ba321ca712ad35a698bf0af8d61fc4dc40eca6bdcea4697962724ccbde35 actual &&
printf "blob 0\0" | test-tool sha256 >actual &&
diff --git a/t/t0022-crlf-rename.sh b/t/t0022-crlf-rename.sh
index c1a331e..9fe9891 100755
--- a/t/t0022-crlf-rename.sh
+++ b/t/t0022-crlf-rename.sh
@@ -24,8 +24,8 @@ test_expect_success setup '
test_expect_success 'diff -M' '
- git diff-tree -M -r --name-status HEAD^ HEAD |
- sed -e "s/R[0-9]*/RNUM/" >actual &&
+ git diff-tree -M -r --name-status HEAD^ HEAD >tmp &&
+ sed -e "s/R[0-9]*/RNUM/" tmp >actual &&
echo "RNUM sample elpmas" >expect &&
test_cmp expect actual
diff --git a/t/t0025-crlf-renormalize.sh b/t/t0025-crlf-renormalize.sh
index 8144797..f7202c1 100755
--- a/t/t0025-crlf-renormalize.sh
+++ b/t/t0025-crlf-renormalize.sh
@@ -22,8 +22,8 @@ test_expect_success 'renormalize CRLF in repo' '
i/lf w/lf attr/text=auto LF.txt
i/lf w/mixed attr/text=auto CRLF_mix_LF.txt
EOF
- git ls-files --eol |
- sed -e "s/ / /g" -e "s/ */ /g" |
+ git ls-files --eol >tmp &&
+ sed -e "s/ / /g" -e "s/ */ /g" tmp |
sort >actual &&
test_cmp expect actual
'
diff --git a/t/t0027-auto-crlf.sh b/t/t0027-auto-crlf.sh
index 4a5c5c6..0feb41a 100755
--- a/t/t0027-auto-crlf.sh
+++ b/t/t0027-auto-crlf.sh
@@ -311,8 +311,8 @@ checkout_files () {
i/-text w/$(stats_ascii $crlfnul) attr/$(attr_ascii $attr $aeol) crlf_false_attr__CRLF_nul.txt
i/-text w/$(stats_ascii $crlfnul) attr/$(attr_ascii $attr $aeol) crlf_false_attr__LF_nul.txt
EOF
- git ls-files --eol crlf_false_attr__* |
- sed -e "s/ / /g" -e "s/ */ /g" |
+ git ls-files --eol crlf_false_attr__* >tmp &&
+ sed -e "s/ / /g" -e "s/ */ /g" tmp |
sort >actual &&
test_cmp expect actual
'
@@ -359,12 +359,12 @@ test_expect_success 'ls-files --eol -o Text/Binary' '
i/ w/crlf TeBi_126_CL
i/ w/-text TeBi_126_CLC
EOF
- git ls-files --eol -o |
+ git ls-files --eol -o >tmp &&
sed -n -e "/TeBi_/{s!attr/[ ]*!!g
s! ! !g
s! *! !g
p
- }" | sort >actual &&
+ }" tmp | sort >actual &&
test_cmp expect actual
'
@@ -597,6 +597,12 @@ do
# auto: core.autocrlf=false and core.eol unset(or native) uses native eol
checkout_files auto "$id" "" false "" $NL CRLF CRLF_mix_LF LF_mix_CR LF_nul
checkout_files auto "$id" "" false native $NL CRLF CRLF_mix_LF LF_mix_CR LF_nul
+ # core.autocrlf false, .gitattributes sets eol
+ checkout_files "" "$id" "lf" false "" LF CRLF CRLF_mix_LF LF_mix_CR LF_nul
+ checkout_files "" "$id" "crlf" false "" CRLF CRLF CRLF CRLF_mix_CR CRLF_nul
+ # core.autocrlf true, .gitattributes sets eol
+ checkout_files "" "$id" "lf" true "" LF CRLF CRLF_mix_LF LF_mix_CR LF_nul
+ checkout_files "" "$id" "crlf" true "" CRLF CRLF CRLF CRLF_mix_CR CRLF_nul
done
# The rest of the tests are unique; do the usual linting.
@@ -611,8 +617,8 @@ test_expect_success 'ls-files --eol -d -z' '
i/lf w/ crlf_false_attr__LF.txt
i/mixed w/ crlf_false_attr__CRLF_mix_LF.txt
EOF
- git ls-files --eol -d |
- sed -e "s!attr/[^ ]*!!g" -e "s/ / /g" -e "s/ */ /g" |
+ git ls-files --eol -d >tmp &&
+ sed -e "s!attr/[^ ]*!!g" -e "s/ / /g" -e "s/ */ /g" tmp |
sort >actual &&
test_cmp expect actual
'
diff --git a/t/t0029-core-unsetenvvars.sh b/t/t0029-core-unsetenvvars.sh
index b138e1d..4e8e90d 100755
--- a/t/t0029-core-unsetenvvars.sh
+++ b/t/t0029-core-unsetenvvars.sh
@@ -12,8 +12,7 @@ then
fi
test_expect_success 'setup' '
- mkdir -p "$TRASH_DIRECTORY/.git/hooks" &&
- write_script "$TRASH_DIRECTORY/.git/hooks/pre-commit" <<-\EOF
+ test_hook --setup pre-commit <<-\EOF
echo $HOBBES >&2
EOF
'
diff --git a/t/t0030-stripspace.sh b/t/t0030-stripspace.sh
index ae1ca38..0a5713c 100755
--- a/t/t0030-stripspace.sh
+++ b/t/t0030-stripspace.sh
@@ -13,6 +13,10 @@ s40=' '
sss="$s40$s40$s40$s40$s40$s40$s40$s40$s40$s40" # 400
ttt="$t40$t40$t40$t40$t40$t40$t40$t40$t40$t40" # 400
+printf_git_stripspace () {
+ printf "$1" | git stripspace
+}
+
test_expect_success \
'long lines without spaces should be unchanged' '
echo "$ttt" >expect &&
@@ -225,32 +229,38 @@ test_expect_success \
test_expect_success \
'text without newline at end should end with newline' '
- test $(printf "$ttt" | git stripspace | wc -l) -gt 0 &&
- test $(printf "$ttt$ttt" | git stripspace | wc -l) -gt 0 &&
- test $(printf "$ttt$ttt$ttt" | git stripspace | wc -l) -gt 0 &&
- test $(printf "$ttt$ttt$ttt$ttt" | git stripspace | wc -l) -gt 0
+ test_stdout_line_count -gt 0 printf_git_stripspace "$ttt" &&
+ test_stdout_line_count -gt 0 printf_git_stripspace "$ttt$ttt" &&
+ test_stdout_line_count -gt 0 printf_git_stripspace "$ttt$ttt$ttt" &&
+ test_stdout_line_count -gt 0 printf_git_stripspace "$ttt$ttt$ttt$ttt"
'
# text plus spaces at the end:
test_expect_success \
'text plus spaces without newline at end should end with newline' '
- test $(printf "$ttt$sss" | git stripspace | wc -l) -gt 0 &&
- test $(printf "$ttt$ttt$sss" | git stripspace | wc -l) -gt 0 &&
- test $(printf "$ttt$ttt$ttt$sss" | git stripspace | wc -l) -gt 0 &&
- test $(printf "$ttt$sss$sss" | git stripspace | wc -l) -gt 0 &&
- test $(printf "$ttt$ttt$sss$sss" | git stripspace | wc -l) -gt 0 &&
- test $(printf "$ttt$sss$sss$sss" | git stripspace | wc -l) -gt 0
+ test_stdout_line_count -gt 0 printf_git_stripspace "$ttt$sss" &&
+ test_stdout_line_count -gt 0 printf_git_stripspace "$ttt$ttt$sss" &&
+ test_stdout_line_count -gt 0 printf_git_stripspace "$ttt$ttt$ttt$sss" &&
+ test_stdout_line_count -gt 0 printf_git_stripspace "$ttt$sss$sss" &&
+ test_stdout_line_count -gt 0 printf_git_stripspace "$ttt$ttt$sss$sss" &&
+ test_stdout_line_count -gt 0 printf_git_stripspace "$ttt$sss$sss$sss"
'
test_expect_success \
'text plus spaces without newline at end should not show spaces' '
- ! (printf "$ttt$sss" | git stripspace | grep " " >/dev/null) &&
- ! (printf "$ttt$ttt$sss" | git stripspace | grep " " >/dev/null) &&
- ! (printf "$ttt$ttt$ttt$sss" | git stripspace | grep " " >/dev/null) &&
- ! (printf "$ttt$sss$sss" | git stripspace | grep " " >/dev/null) &&
- ! (printf "$ttt$ttt$sss$sss" | git stripspace | grep " " >/dev/null) &&
- ! (printf "$ttt$sss$sss$sss" | git stripspace | grep " " >/dev/null)
+ printf "$ttt$sss" | git stripspace >tmp &&
+ ! grep " " tmp >/dev/null &&
+ printf "$ttt$ttt$sss" | git stripspace >tmp &&
+ ! grep " " tmp >/dev/null &&
+ printf "$ttt$ttt$ttt$sss" | git stripspace >tmp &&
+ ! grep " " tmp >/dev/null &&
+ printf "$ttt$sss$sss" | git stripspace >tmp &&
+ ! grep " " tmp >/dev/null &&
+ printf "$ttt$ttt$sss$sss" | git stripspace >tmp &&
+ ! grep " " tmp >/dev/null &&
+ printf "$ttt$sss$sss$sss" | git stripspace >tmp &&
+ ! grep " " tmp >/dev/null
'
test_expect_success \
@@ -282,12 +292,18 @@ test_expect_success \
test_expect_success \
'text plus spaces at end should not show spaces' '
- ! (echo "$ttt$sss" | git stripspace | grep " " >/dev/null) &&
- ! (echo "$ttt$ttt$sss" | git stripspace | grep " " >/dev/null) &&
- ! (echo "$ttt$ttt$ttt$sss" | git stripspace | grep " " >/dev/null) &&
- ! (echo "$ttt$sss$sss" | git stripspace | grep " " >/dev/null) &&
- ! (echo "$ttt$ttt$sss$sss" | git stripspace | grep " " >/dev/null) &&
- ! (echo "$ttt$sss$sss$sss" | git stripspace | grep " " >/dev/null)
+ echo "$ttt$sss" | git stripspace >tmp &&
+ ! grep " " tmp >/dev/null &&
+ echo "$ttt$ttt$sss" | git stripspace >tmp &&
+ ! grep " " tmp >/dev/null &&
+ echo "$ttt$ttt$ttt$sss" | git stripspace >tmp &&
+ ! grep " " tmp >/dev/null &&
+ echo "$ttt$sss$sss" | git stripspace >tmp &&
+ ! grep " " tmp >/dev/null &&
+ echo "$ttt$ttt$sss$sss" | git stripspace >tmp &&
+ ! grep " " tmp >/dev/null &&
+ echo "$ttt$sss$sss$sss" | git stripspace >tmp &&
+ ! grep " " tmp >/dev/null
'
test_expect_success \
@@ -339,11 +355,16 @@ test_expect_success \
test_expect_success \
'spaces without newline at end should not show spaces' '
- ! (printf "" | git stripspace | grep " " >/dev/null) &&
- ! (printf "$sss" | git stripspace | grep " " >/dev/null) &&
- ! (printf "$sss$sss" | git stripspace | grep " " >/dev/null) &&
- ! (printf "$sss$sss$sss" | git stripspace | grep " " >/dev/null) &&
- ! (printf "$sss$sss$sss$sss" | git stripspace | grep " " >/dev/null)
+ printf "" | git stripspace >tmp &&
+ ! grep " " tmp >/dev/null &&
+ printf "$sss" | git stripspace >tmp &&
+ ! grep " " tmp >/dev/null &&
+ printf "$sss$sss" | git stripspace >tmp &&
+ ! grep " " tmp >/dev/null &&
+ printf "$sss$sss$sss" | git stripspace >tmp &&
+ ! grep " " tmp >/dev/null &&
+ printf "$sss$sss$sss$sss" | git stripspace >tmp &&
+ ! grep " " tmp >/dev/null
'
test_expect_success \
diff --git a/t/t0050-filesystem.sh b/t/t0050-filesystem.sh
index afc343c..5c9dc90 100755
--- a/t/t0050-filesystem.sh
+++ b/t/t0050-filesystem.sh
@@ -104,7 +104,8 @@ test_expect_failure CASE_INSENSITIVE_FS 'add (with different case)' '
rm camelcase &&
echo 1 >CamelCase &&
git add CamelCase &&
- camel=$(git ls-files | grep -i camelcase) &&
+ git ls-files >tmp &&
+ camel=$(grep -i camelcase tmp) &&
test $(echo "$camel" | wc -l) = 1 &&
test "z$(git cat-file blob :$camel)" = z1
'
diff --git a/t/t0051-windows-named-pipe.sh b/t/t0051-windows-named-pipe.sh
index 10ac92d..412f413 100755
--- a/t/t0051-windows-named-pipe.sh
+++ b/t/t0051-windows-named-pipe.sh
@@ -3,8 +3,13 @@
test_description='Windows named pipes'
. ./test-lib.sh
+if ! test_have_prereq MINGW
+then
+ skip_all='skipping Windows-specific tests'
+ test_done
+fi
-test_expect_success MINGW 'o_append write to named pipe' '
+test_expect_success 'o_append write to named pipe' '
GIT_TRACE="$(pwd)/expect" git status >/dev/null 2>&1 &&
{ test-tool windows-named-pipe t0051 >actual 2>&1 & } &&
pid=$! &&
diff --git a/t/t0091-bugreport.sh b/t/t0091-bugreport.sh
index eeedbfa..08f5fe9 100755
--- a/t/t0091-bugreport.sh
+++ b/t/t0091-bugreport.sh
@@ -60,18 +60,22 @@ test_expect_success 'can create leading directories outside of a git dir' '
test_expect_success 'indicates populated hooks' '
test_when_finished rm git-bugreport-hooks.txt &&
- test_when_finished rm -fr .git/hooks &&
- rm -fr .git/hooks &&
- mkdir .git/hooks &&
- for hook in applypatch-msg prepare-commit-msg.sample
- do
- write_script ".git/hooks/$hook" <<-EOF || return 1
- echo "hook $hook exists"
- EOF
- done &&
+
+ test_hook applypatch-msg <<-\EOF &&
+ true
+ EOF
+ test_hook unknown-hook <<-\EOF &&
+ true
+ EOF
git bugreport -s hooks &&
- grep applypatch-msg git-bugreport-hooks.txt &&
- ! grep prepare-commit-msg git-bugreport-hooks.txt
+
+ sort >expect <<-\EOF &&
+ [Enabled Hooks]
+ applypatch-msg
+ EOF
+
+ sed -ne "/^\[Enabled Hooks\]$/,/^$/p" <git-bugreport-hooks.txt >actual &&
+ test_cmp expect actual
'
test_done
diff --git a/t/t0211/scrub_perf.perl b/t/t0211/scrub_perf.perl
index d164b750..299999f 100644
--- a/t/t0211/scrub_perf.perl
+++ b/t/t0211/scrub_perf.perl
@@ -59,6 +59,10 @@ while (<>) {
# and highly variable. Just omit them.
goto SKIP_LINE;
}
+ if ($tokens[$col_category] =~ m/fsync/) {
+ # fsync events aren't interesting for the test
+ goto SKIP_LINE;
+ }
}
# t_abs and t_rel are either blank or a float. Replace the float
diff --git a/t/t0410-partial-clone.sh b/t/t0410-partial-clone.sh
index f17abd2..1e864cf 100755
--- a/t/t0410-partial-clone.sh
+++ b/t/t0410-partial-clone.sh
@@ -618,6 +618,25 @@ test_expect_success 'do not fetch when checking existence of tree we construct o
git -C repo cherry-pick side1
'
+test_expect_success 'exact rename does not need to fetch the blob lazily' '
+ rm -rf repo partial.git &&
+ test_create_repo repo &&
+ content="some dummy content" &&
+ test_commit -C repo create-a-file file.txt "$content" &&
+ git -C repo mv file.txt new-file.txt &&
+ git -C repo commit -m rename-the-file &&
+ FILE_HASH=$(git -C repo rev-parse HEAD:new-file.txt) &&
+ test_config -C repo uploadpack.allowfilter 1 &&
+ test_config -C repo uploadpack.allowanysha1inwant 1 &&
+
+ git clone --filter=blob:none --bare "file://$(pwd)/repo" partial.git &&
+ git -C partial.git rev-list --objects --missing=print HEAD >out &&
+ grep "[?]$FILE_HASH" out &&
+ git -C partial.git log --follow -- new-file.txt &&
+ git -C partial.git rev-list --objects --missing=print HEAD >out &&
+ grep "[?]$FILE_HASH" out
+'
+
test_expect_success 'lazy-fetch when accessing object not in the_repository' '
rm -rf full partial.git &&
test_create_repo full &&
diff --git a/t/t0500-progress-display.sh b/t/t0500-progress-display.sh
index 22058b5..1eb3a83 100755
--- a/t/t0500-progress-display.sh
+++ b/t/t0500-progress-display.sh
@@ -2,6 +2,7 @@
test_description='progress display'
+TEST_PASSES_SANITIZE_LEAK=true
. ./test-lib.sh
show_cr () {
@@ -17,6 +18,7 @@ test_expect_success 'simple progress display' '
EOF
cat >in <<-\EOF &&
+ start 0
update
progress 1
update
@@ -25,8 +27,9 @@ test_expect_success 'simple progress display' '
progress 4
update
progress 5
+ stop
EOF
- test-tool progress "Working hard" <in 2>stderr &&
+ test-tool progress <in 2>stderr &&
show_cr <stderr >out &&
test_cmp expect out
@@ -41,11 +44,13 @@ test_expect_success 'progress display with total' '
EOF
cat >in <<-\EOF &&
+ start 3
progress 1
progress 2
progress 3
+ stop
EOF
- test-tool progress --total=3 "Working hard" <in 2>stderr &&
+ test-tool progress <in 2>stderr &&
show_cr <stderr >out &&
test_cmp expect out
@@ -62,14 +67,14 @@ Working hard.......2.........3.........4.........5.........6:
EOF
cat >in <<-\EOF &&
+ start 100000 Working hard.......2.........3.........4.........5.........6
progress 100
progress 1000
progress 10000
progress 100000
+ stop
EOF
- test-tool progress --total=100000 \
- "Working hard.......2.........3.........4.........5.........6" \
- <in 2>stderr &&
+ test-tool progress <in 2>stderr &&
show_cr <stderr >out &&
test_cmp expect out
@@ -88,16 +93,16 @@ Working hard.......2.........3.........4.........5.........6:
EOF
cat >in <<-\EOF &&
+ start 100000 Working hard.......2.........3.........4.........5.........6
update
progress 1
update
progress 2
progress 10000
progress 100000
+ stop
EOF
- test-tool progress --total=100000 \
- "Working hard.......2.........3.........4.........5.........6" \
- <in 2>stderr &&
+ test-tool progress <in 2>stderr &&
show_cr <stderr >out &&
test_cmp expect out
@@ -116,14 +121,14 @@ Working hard.......2.........3.........4.........5.........6:
EOF
cat >in <<-\EOF &&
+ start 100000 Working hard.......2.........3.........4.........5.........6
progress 25000
progress 50000
progress 75000
progress 100000
+ stop
EOF
- test-tool progress --total=100000 \
- "Working hard.......2.........3.........4.........5.........6" \
- <in 2>stderr &&
+ test-tool progress <in 2>stderr &&
show_cr <stderr >out &&
test_cmp expect out
@@ -140,14 +145,14 @@ Working hard.......2.........3.........4.........5.........6.........7.........:
EOF
cat >in <<-\EOF &&
+ start 100000 Working hard.......2.........3.........4.........5.........6.........7.........
progress 25000
progress 50000
progress 75000
progress 100000
+ stop
EOF
- test-tool progress --total=100000 \
- "Working hard.......2.........3.........4.........5.........6.........7........." \
- <in 2>stderr &&
+ test-tool progress <in 2>stderr &&
show_cr <stderr >out &&
test_cmp expect out
@@ -164,12 +169,14 @@ test_expect_success 'progress shortens - crazy caller' '
EOF
cat >in <<-\EOF &&
+ start 1000
progress 100
progress 200
progress 1
progress 1000
+ stop
EOF
- test-tool progress --total=1000 "Working hard" <in 2>stderr &&
+ test-tool progress <in 2>stderr &&
show_cr <stderr >out &&
test_cmp expect out
@@ -185,6 +192,7 @@ test_expect_success 'progress display with throughput' '
EOF
cat >in <<-\EOF &&
+ start 0
throughput 102400 1000
update
progress 10
@@ -197,8 +205,9 @@ test_expect_success 'progress display with throughput' '
throughput 409600 4000
update
progress 40
+ stop
EOF
- test-tool progress "Working hard" <in 2>stderr &&
+ test-tool progress <in 2>stderr &&
show_cr <stderr >out &&
test_cmp expect out
@@ -214,6 +223,7 @@ test_expect_success 'progress display with throughput and total' '
EOF
cat >in <<-\EOF &&
+ start 40
throughput 102400 1000
progress 10
throughput 204800 2000
@@ -222,8 +232,9 @@ test_expect_success 'progress display with throughput and total' '
progress 30
throughput 409600 4000
progress 40
+ stop
EOF
- test-tool progress --total=40 "Working hard" <in 2>stderr &&
+ test-tool progress <in 2>stderr &&
show_cr <stderr >out &&
test_cmp expect out
@@ -239,6 +250,7 @@ test_expect_success 'cover up after throughput shortens' '
EOF
cat >in <<-\EOF &&
+ start 0
throughput 409600 1000
update
progress 1
@@ -251,8 +263,9 @@ test_expect_success 'cover up after throughput shortens' '
throughput 1638400 4000
update
progress 4
+ stop
EOF
- test-tool progress "Working hard" <in 2>stderr &&
+ test-tool progress <in 2>stderr &&
show_cr <stderr >out &&
test_cmp expect out
@@ -267,6 +280,7 @@ test_expect_success 'cover up after throughput shortens a lot' '
EOF
cat >in <<-\EOF &&
+ start 0
throughput 1 1000
update
progress 1
@@ -276,8 +290,9 @@ test_expect_success 'cover up after throughput shortens a lot' '
throughput 3145728 3000
update
progress 3
+ stop
EOF
- test-tool progress "Working hard" <in 2>stderr &&
+ test-tool progress <in 2>stderr &&
show_cr <stderr >out &&
test_cmp expect out
@@ -285,6 +300,7 @@ test_expect_success 'cover up after throughput shortens a lot' '
test_expect_success 'progress generates traces' '
cat >in <<-\EOF &&
+ start 40
throughput 102400 1000
update
progress 10
@@ -297,10 +313,11 @@ test_expect_success 'progress generates traces' '
throughput 409600 4000
update
progress 40
+ stop
EOF
- GIT_TRACE2_EVENT="$(pwd)/trace.event" test-tool progress --total=40 \
- "Working hard" <in 2>stderr &&
+ GIT_TRACE2_EVENT="$(pwd)/trace.event" test-tool progress \
+ <in 2>stderr &&
# t0212/parse_events.perl intentionally omits regions and data.
test_region progress "Working hard" trace.event &&
@@ -308,4 +325,54 @@ test_expect_success 'progress generates traces' '
grep "\"key\":\"total_bytes\",\"value\":\"409600\"" trace.event
'
+test_expect_success 'progress generates traces: stop / start' '
+ cat >in <<-\EOF &&
+ start 0
+ stop
+ EOF
+
+ GIT_TRACE2_EVENT="$PWD/trace-startstop.event" test-tool progress \
+ <in 2>stderr &&
+ test_region progress "Working hard" trace-startstop.event
+'
+
+test_expect_success 'progress generates traces: start without stop' '
+ cat >in <<-\EOF &&
+ start 0
+ EOF
+
+ GIT_TRACE2_EVENT="$PWD/trace-start.event" \
+ LSAN_OPTIONS=detect_leaks=0 \
+ test-tool progress \
+ <in 2>stderr &&
+ grep region_enter.*progress trace-start.event &&
+ ! grep region_leave.*progress trace-start.event
+'
+
+test_expect_success 'progress generates traces: stop without start' '
+ cat >in <<-\EOF &&
+ stop
+ EOF
+
+ GIT_TRACE2_EVENT="$PWD/trace-stop.event" test-tool progress \
+ <in 2>stderr &&
+ ! grep region_enter.*progress trace-stop.event &&
+ ! grep region_leave.*progress trace-stop.event
+'
+
+test_expect_success 'progress generates traces: start with active progress bar (no stops)' '
+ cat >in <<-\EOF &&
+ start 0 One
+ start 0 Two
+ EOF
+
+ GIT_TRACE2_EVENT="$PWD/trace-2start.event" \
+ LSAN_OPTIONS=detect_leaks=0 \
+ test-tool progress \
+ <in 2>stderr &&
+ grep region_enter.*progress.*One trace-2start.event &&
+ grep region_enter.*progress.*Two trace-2start.event &&
+ ! grep region_leave trace-2start.event
+'
+
test_done
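
For orientation, the t0500 changes above switch the `test-tool progress` helper from --total/title arguments to a small stdin command protocol (start/progress/update/throughput/stop). A minimal sketch of driving it, for illustration only and not part of the patch (the file name progress.err is just an example):

# feed the stdin command protocol; the progress display goes to stderr
test-tool progress 2>progress.err <<\EOF
start 100 Working hard
progress 25
progress 100
stop
EOF
cat progress.err
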
diff --git a/t/t1001-read-tree-m-2way.sh b/t/t1001-read-tree-m-2way.sh
index d111552..0710b1f 100755
--- a/t/t1001-read-tree-m-2way.sh
+++ b/t/t1001-read-tree-m-2way.sh
@@ -21,7 +21,6 @@ In the test, these paths are used:
yomin - not in H or M
'
-TEST_PASSES_SANITIZE_LEAK=true
. ./test-lib.sh
. "$TEST_DIRECTORY"/lib-read-tree.sh
@@ -38,11 +37,12 @@ compare_change () {
}
check_cache_at () {
- clean_if_empty=$(git diff-files -- "$1")
+ git diff-files -- "$1" >out &&
+ clean_if_empty=$(cat out) &&
case "$clean_if_empty" in
'') echo "$1: clean" ;;
?*) echo "$1: dirty" ;;
- esac
+ esac &&
case "$2,$clean_if_empty" in
clean,) : ;;
clean,?*) false ;;
diff --git a/t/t1002-read-tree-m-u-2way.sh b/t/t1002-read-tree-m-u-2way.sh
index ca5c551..46cbd55 100755
--- a/t/t1002-read-tree-m-u-2way.sh
+++ b/t/t1002-read-tree-m-u-2way.sh
@@ -9,7 +9,6 @@ This is identical to t1001, but uses -u to update the work tree as well.
'
-TEST_PASSES_SANITIZE_LEAK=true
. ./test-lib.sh
. "$TEST_DIRECTORY"/lib-read-tree.sh
@@ -23,11 +22,12 @@ compare_change () {
}
check_cache_at () {
- clean_if_empty=$(git diff-files -- "$1")
+ git diff-files -- "$1" >out &&
+ clean_if_empty=$(cat out) &&
case "$clean_if_empty" in
'') echo "$1: clean" ;;
?*) echo "$1: dirty" ;;
- esac
+ esac &&
case "$2,$clean_if_empty" in
clean,) : ;;
clean,?*) false ;;
diff --git a/t/t1003-read-tree-prefix.sh b/t/t1003-read-tree-prefix.sh
index e0db206..c860c08 100755
--- a/t/t1003-read-tree-prefix.sh
+++ b/t/t1003-read-tree-prefix.sh
@@ -25,4 +25,14 @@ test_expect_success 'read-tree --prefix' '
cmp expect actual
'
+test_expect_success 'read-tree --prefix with leading slash exits with error' '
+ git rm -rf . &&
+ test_must_fail git read-tree --prefix=/two/ $tree &&
+ git read-tree --prefix=two/ $tree &&
+
+ git rm -rf . &&
+ test_must_fail git read-tree --prefix=/ $tree &&
+ git read-tree --prefix= $tree
+'
+
test_done
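
The new t1003 test above checks that a leading slash in --prefix is rejected. As a quick reference, and purely as an illustration rather than part of the patch, grafting a tree under a subdirectory looks like this (sub/ is an example prefix, and the sketch assumes nothing in the index already lives under it):

# read HEAD's tree into the index under sub/; "--prefix=/sub/" would now error out
git read-tree --prefix=sub/ HEAD &&
git ls-files sub/
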
diff --git a/t/t1006-cat-file.sh b/t/t1006-cat-file.sh
index 39382fa..1b85207 100755
--- a/t/t1006-cat-file.sh
+++ b/t/t1006-cat-file.sh
@@ -4,6 +4,98 @@ test_description='git cat-file'
. ./test-lib.sh
+test_cmdmode_usage () {
+ test_expect_code 129 "$@" 2>err &&
+ grep "^error:.*is incompatible with" err
+}
+
+for switches in \
+ '-e -p' \
+ '-p -t' \
+ '-t -s' \
+ '-s --textconv' \
+ '--textconv --filters' \
+ '--batch-all-objects -e'
+do
+ test_expect_success "usage: cmdmode $switches" '
+ test_cmdmode_usage git cat-file $switches
+ '
+done
+
+test_incompatible_usage () {
+ test_expect_code 129 "$@" 2>err &&
+ grep -E "^(fatal|error):.*(requires|incompatible with|needs)" err
+}
+
+for opt in --batch --batch-check
+do
+ test_expect_success "usage: incompatible options: --path with $opt" '
+ test_incompatible_usage git cat-file --path=foo $opt
+ '
+done
+
+test_missing_usage () {
+ test_expect_code 129 "$@" 2>err &&
+ grep -E "^fatal:.*required" err
+}
+
+short_modes="-e -p -t -s"
+cw_modes="--textconv --filters"
+
+for opt in $cw_modes
+do
+ test_expect_success "usage: $opt requires another option" '
+ test_missing_usage git cat-file $opt
+ '
+done
+
+for opt in $short_modes
+do
+ test_expect_success "usage: $opt requires another option" '
+ test_missing_usage git cat-file $opt
+ '
+
+ for opt2 in --batch \
+ --batch-check \
+ --follow-symlinks \
+ "--path=foo HEAD:some-path.txt"
+ do
+ test_expect_success "usage: incompatible options: $opt and $opt2" '
+ test_incompatible_usage git cat-file $opt $opt2
+ '
+ done
+done
+
+test_too_many_arguments () {
+ test_expect_code 129 "$@" 2>err &&
+ grep -E "^fatal: too many arguments$" err
+}
+
+for opt in $short_modes $cw_modes
+do
+ args="one two three"
+ test_expect_success "usage: too many arguments: $opt $args" '
+ test_too_many_arguments git cat-file $opt $args
+ '
+
+ for opt2 in --buffer --follow-symlinks
+ do
+ test_expect_success "usage: incompatible arguments: $opt with batch option $opt2" '
+ test_incompatible_usage git cat-file $opt $opt2
+ '
+ done
+done
+
+for opt in --buffer \
+ --follow-symlinks \
+ --batch-all-objects
+do
+ test_expect_success "usage: bad option combination: $opt without batch mode" '
+ test_incompatible_usage git cat-file $opt &&
+ test_incompatible_usage git cat-file $opt commit HEAD
+ '
+done
+
echo_without_newline () {
printf '%s' "$*"
}
@@ -13,13 +105,18 @@ strlen () {
}
maybe_remove_timestamp () {
- if test -z "$2"; then
- echo_without_newline "$1"
- else
- echo_without_newline "$(printf '%s\n' "$1" | sed -e 's/ [0-9][0-9]* [-+][0-9][0-9][0-9][0-9]$//')"
- fi
+ if test -z "$2"; then
+ echo_without_newline "$1"
+ else
+ echo_without_newline "$(printf '%s\n' "$1" | remove_timestamp)"
+ fi
+}
+
+remove_timestamp () {
+ sed -e 's/ [0-9][0-9]* [-+][0-9][0-9][0-9][0-9]$//'
}
+
run_tests () {
type=$1
sha1=$2
@@ -85,12 +182,36 @@ $content"
test_cmp expect actual
'
+ for opt in --buffer --no-buffer
+ do
+ test -z "$content" ||
+ test_expect_success "--batch-command $opt output of $type content is correct" '
+ maybe_remove_timestamp "$batch_output" $no_ts >expect &&
+ maybe_remove_timestamp "$(test_write_lines "contents $sha1" |
+ git cat-file --batch-command $opt)" $no_ts >actual &&
+ test_cmp expect actual
+ '
+
+ test_expect_success "--batch-command $opt output of $type info is correct" '
+ echo "$sha1 $type $size" >expect &&
+ test_write_lines "info $sha1" |
+ git cat-file --batch-command $opt >actual &&
+ test_cmp expect actual
+ '
+ done
+
test_expect_success "custom --batch-check format" '
echo "$type $sha1" >expect &&
echo $sha1 | git cat-file --batch-check="%(objecttype) %(objectname)" >actual &&
test_cmp expect actual
'
+ test_expect_success "custom --batch-command format" '
+ echo "$type $sha1" >expect &&
+ echo "info $sha1" | git cat-file --batch-command="%(objecttype) %(objectname)" >actual &&
+ test_cmp expect actual
+ '
+
test_expect_success '--batch-check with %(rest)' '
echo "$type this is some extra content" >expect &&
echo "$sha1 this is some extra content" |
@@ -132,6 +253,22 @@ test_expect_success "setup" '
run_tests 'blob' $hello_sha1 $hello_size "$hello_content" "$hello_content"
+test_expect_success '--batch-command --buffer with flush for blob info' '
+ echo "$hello_sha1 blob $hello_size" >expect &&
+ test_write_lines "info $hello_sha1" "flush" |
+ GIT_TEST_CAT_FILE_NO_FLUSH_ON_EXIT=1 \
+ git cat-file --batch-command --buffer >actual &&
+ test_cmp expect actual
+'
+
+test_expect_success '--batch-command --buffer without flush for blob info' '
+ touch output &&
+ test_write_lines "info $hello_sha1" |
+ GIT_TEST_CAT_FILE_NO_FLUSH_ON_EXIT=1 \
+ git cat-file --batch-command --buffer >>output &&
+ test_must_be_empty output
+'
+
test_expect_success '--batch-check without %(rest) considers whole line' '
echo "$hello_sha1 blob $hello_size" >expect &&
git update-index --add --cacheinfo 100644 $hello_sha1 "white space" &&
@@ -175,7 +312,7 @@ test_expect_success \
"Reach a blob from a tag pointing to it" \
"test '$hello_content' = \"\$(git cat-file blob $tag_sha1)\""
-for batch in batch batch-check
+for batch in batch batch-check batch-command
do
for opt in t s e p
do
@@ -281,6 +418,49 @@ test_expect_success "--batch-check with multiple sha1s gives correct format" '
"$(echo_without_newline "$batch_check_input" | git cat-file --batch-check)"
'
+test_expect_success '--batch-command with multiple info calls gives correct format' '
+ cat >expect <<-EOF &&
+ $hello_sha1 blob $hello_size
+ $tree_sha1 tree $tree_size
+ $commit_sha1 commit $commit_size
+ $tag_sha1 tag $tag_size
+ deadbeef missing
+ EOF
+
+ git cat-file --batch-command --buffer >actual <<-EOF &&
+ info $hello_sha1
+ info $tree_sha1
+ info $commit_sha1
+ info $tag_sha1
+ info deadbeef
+ EOF
+
+ test_cmp expect actual
+'
+
+test_expect_success '--batch-command with multiple command calls gives correct format' '
+ remove_timestamp >expect <<-EOF &&
+ $hello_sha1 blob $hello_size
+ $hello_content
+ $commit_sha1 commit $commit_size
+ $commit_content
+ $tag_sha1 tag $tag_size
+ $tag_content
+ deadbeef missing
+ EOF
+
+ git cat-file --batch-command --buffer >actual_raw <<-EOF &&
+ contents $hello_sha1
+ contents $commit_sha1
+ contents $tag_sha1
+ contents deadbeef
+ flush
+ EOF
+
+ remove_timestamp <actual_raw >actual &&
+ test_cmp expect actual
+'
+
test_expect_success 'setup blobs which are likely to delta' '
test-tool genrandom foo 10240 >foo &&
{ cat foo && echo plus; } >foo-plus &&
@@ -871,5 +1051,40 @@ test_expect_success 'cat-file --batch-all-objects --batch-check ignores replace'
echo "$orig commit $orig_size" >expect &&
test_cmp expect actual
'
+test_expect_success 'batch-command empty command' '
+ echo "" >cmd &&
+ test_expect_code 128 git cat-file --batch-command <cmd 2>err &&
+ grep "^fatal:.*empty command in input.*" err
+'
+
+test_expect_success 'batch-command whitespace before command' '
+ echo " info deadbeef" >cmd &&
+ test_expect_code 128 git cat-file --batch-command <cmd 2>err &&
+ grep "^fatal:.*whitespace before command.*" err
+'
+
+test_expect_success 'batch-command unknown command' '
+ echo unknown_command >cmd &&
+ test_expect_code 128 git cat-file --batch-command <cmd 2>err &&
+ grep "^fatal:.*unknown command.*" err
+'
+
+test_expect_success 'batch-command missing arguments' '
+ echo "info" >cmd &&
+ test_expect_code 128 git cat-file --batch-command <cmd 2>err &&
+ grep "^fatal:.*info requires arguments.*" err
+'
+
+test_expect_success 'batch-command flush with arguments' '
+ echo "flush arg" >cmd &&
+ test_expect_code 128 git cat-file --batch-command --buffer <cmd 2>err &&
+ grep "^fatal:.*flush takes no arguments.*" err
+'
+
+test_expect_success 'batch-command flush without --buffer' '
+ echo "flush" >cmd &&
+ test_expect_code 128 git cat-file --batch-command <cmd 2>err &&
+ grep "^fatal:.*flush is only for --buffer mode.*" err
+'
test_done
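
As a quick reference for the mode the new t1006 tests exercise, `git cat-file --batch-command` reads `info <object>` and `contents <object>` requests from stdin, plus `flush` when `--buffer` is in use. A minimal sketch, illustration only and not part of the patch (HEAD stands in for any resolvable object name):

# info prints "<oid> <type> <size>", contents prints the whole object,
# and flush (only meaningful with --buffer) forces buffered output out
git cat-file --batch-command --buffer <<\EOF
info HEAD
contents HEAD
flush
EOF
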
diff --git a/t/t1007-hash-object.sh b/t/t1007-hash-object.sh
index 64b340f..ac5ad8c 100755
--- a/t/t1007-hash-object.sh
+++ b/t/t1007-hash-object.sh
@@ -2,6 +2,7 @@
test_description="git hash-object"
+TEST_PASSES_SANITIZE_LEAK=true
. ./test-lib.sh
echo_without_newline() {
diff --git a/t/t1011-read-tree-sparse-checkout.sh b/t/t1011-read-tree-sparse-checkout.sh
index 24092c0..dd957be 100755
--- a/t/t1011-read-tree-sparse-checkout.sh
+++ b/t/t1011-read-tree-sparse-checkout.sh
@@ -187,11 +187,32 @@ test_expect_success 'read-tree updates worktree, absent case' '
test ! -f init.t
'
+test_expect_success 'read-tree will not throw away dirty changes, non-sparse' '
+ echo "/*" >.git/info/sparse-checkout &&
+ read_tree_u_must_succeed -m -u HEAD &&
+
+ echo dirty >init.t &&
+ read_tree_u_must_fail -m -u HEAD^ &&
+ test_path_is_file init.t &&
+ grep -q dirty init.t
+'
+
+test_expect_success 'read-tree will not throw away dirty changes, sparse' '
+ echo "/*" >.git/info/sparse-checkout &&
+ read_tree_u_must_succeed -m -u HEAD &&
+
+ echo dirty >init.t &&
+ echo sub/added >.git/info/sparse-checkout &&
+ read_tree_u_must_fail -m -u HEAD^ &&
+ test_path_is_file init.t &&
+ grep -q dirty init.t
+'
+
test_expect_success 'read-tree updates worktree, dirty case' '
echo sub/added >.git/info/sparse-checkout &&
git checkout -f top &&
echo dirty >init.t &&
- read_tree_u_must_succeed -m -u HEAD^ &&
+ read_tree_u_must_fail -m -u HEAD^ &&
grep -q dirty init.t &&
rm init.t
'
diff --git a/t/t1090-sparse-checkout-scope.sh b/t/t1090-sparse-checkout-scope.sh
index 3deb490..d1833c0 100755
--- a/t/t1090-sparse-checkout-scope.sh
+++ b/t/t1090-sparse-checkout-scope.sh
@@ -52,6 +52,25 @@ test_expect_success 'return to full checkout of main' '
test "$(cat b)" = "modified"
'
+test_expect_success 'skip-worktree on files outside sparse patterns' '
+ git sparse-checkout disable &&
+ git sparse-checkout set --no-cone "a*" &&
+ git checkout-index --all --ignore-skip-worktree-bits &&
+
+ git ls-files -t >output &&
+ ! grep ^S output >actual &&
+ test_must_be_empty actual &&
+
+ test_config sparse.expectFilesOutsideOfPatterns true &&
+ cat <<-\EOF >expect &&
+ S b
+ S c
+ EOF
+ git ls-files -t >output &&
+ grep ^S output >actual &&
+ test_cmp expect actual
+'
+
test_expect_success 'in partial clone, sparse checkout only fetches needed blobs' '
test_create_repo server &&
git clone "file://$(pwd)/server" client &&
diff --git a/t/t1091-sparse-checkout-builtin.sh b/t/t1091-sparse-checkout-builtin.sh
index 4277698..9a90031 100755
--- a/t/t1091-sparse-checkout-builtin.sh
+++ b/t/t1091-sparse-checkout-builtin.sh
@@ -5,6 +5,9 @@ test_description='sparse checkout builtin tests'
GIT_TEST_DEFAULT_INITIAL_BRANCH_NAME=main
export GIT_TEST_DEFAULT_INITIAL_BRANCH_NAME
+GIT_TEST_SPLIT_INDEX=false
+export GIT_TEST_SPLIT_INDEX
+
. ./test-lib.sh
list_files() {
@@ -79,6 +82,12 @@ test_expect_success 'git sparse-checkout init' '
check_files repo a
'
+test_expect_success 'git sparse-checkout init in empty repo' '
+ test_when_finished rm -rf empty-repo blank-template &&
+ git init --template= empty-repo &&
+ git -C empty-repo sparse-checkout init
+'
+
test_expect_success 'git sparse-checkout list after init' '
git -C repo sparse-checkout list >actual &&
cat >expect <<-\EOF &&
@@ -117,7 +126,7 @@ test_expect_success 'switching to cone mode with non-cone mode patterns' '
cd bad-patterns &&
git sparse-checkout init &&
git sparse-checkout add dir &&
- git config core.sparseCheckoutCone true &&
+ git config --worktree core.sparseCheckoutCone true &&
test_must_fail git sparse-checkout add dir 2>err &&
grep "existing sparse-checkout patterns do not use cone mode" err
)
@@ -146,9 +155,9 @@ test_expect_success 'interaction with clone --no-checkout (unborn index)' '
'
test_expect_success 'set enables config' '
- git init empty-config &&
+ git init worktree-config &&
(
- cd empty-config &&
+ cd worktree-config &&
test_commit test file &&
test_path_is_missing .git/config.worktree &&
git sparse-checkout set nothing &&
@@ -201,6 +210,21 @@ test_expect_success 'add to sparse-checkout' '
check_files repo "a folder1 folder2"
'
+test_expect_success 'worktree: add copies sparse-checkout patterns' '
+ cat repo/.git/info/sparse-checkout >old &&
+ test_when_finished cp old repo/.git/info/sparse-checkout &&
+ test_when_finished git -C repo worktree remove ../worktree &&
+ git -C repo sparse-checkout set --no-cone "/*" &&
+ git -C repo worktree add --quiet ../worktree 2>err &&
+ test_must_be_empty err &&
+ new="$(git -C worktree rev-parse --git-path info/sparse-checkout)" &&
+ test_path_is_file "$new" &&
+ test_cmp repo/.git/info/sparse-checkout "$new" &&
+ git -C worktree sparse-checkout set --cone &&
+ test_cmp_config -C worktree true core.sparseCheckoutCone &&
+ test_must_fail git -C repo core.sparseCheckoutCone
+'
+
test_expect_success 'cone mode: match patterns' '
git -C repo config --worktree core.sparseCheckoutCone true &&
rm -rf repo/a repo/folder1 repo/folder2 &&
@@ -228,36 +252,31 @@ test_expect_success 'sparse-checkout disable' '
'
test_expect_success 'sparse-index enabled and disabled' '
- (
- sane_unset GIT_TEST_SPLIT_INDEX &&
- git -C repo update-index --no-split-index &&
-
- git -C repo sparse-checkout init --cone --sparse-index &&
- test_cmp_config -C repo true index.sparse &&
- git -C repo ls-files --sparse >sparse &&
- git -C repo sparse-checkout disable &&
- git -C repo ls-files --sparse >full &&
-
- cat >expect <<-\EOF &&
- @@ -1,4 +1,7 @@
- a
- -deep/
- -folder1/
- -folder2/
- +deep/a
- +deep/deeper1/a
- +deep/deeper1/deepest/a
- +deep/deeper2/a
- +folder1/a
- +folder2/a
- EOF
+ git -C repo sparse-checkout init --cone --sparse-index &&
+ test_cmp_config -C repo true index.sparse &&
+ git -C repo ls-files --sparse >sparse &&
+ git -C repo sparse-checkout disable &&
+ git -C repo ls-files --sparse >full &&
- diff -u sparse full | tail -n +3 >actual &&
- test_cmp expect actual &&
+ cat >expect <<-\EOF &&
+ @@ -1,4 +1,7 @@
+ a
+ -deep/
+ -folder1/
+ -folder2/
+ +deep/a
+ +deep/deeper1/a
+ +deep/deeper1/deepest/a
+ +deep/deeper2/a
+ +folder1/a
+ +folder2/a
+ EOF
+
+ diff -u sparse full | tail -n +3 >actual &&
+ test_cmp expect actual &&
- git -C repo config --list >config &&
- ! grep index.sparse config
- )
+ git -C repo config --list >config &&
+ test_cmp_config -C repo false index.sparse
'
test_expect_success 'cone mode: init and set' '
@@ -491,6 +510,37 @@ test_expect_failure 'sparse-checkout reapply' '
git -C tweak sparse-checkout disable
'
+test_expect_success 'reapply can handle config options' '
+ git -C repo sparse-checkout init --cone --no-sparse-index &&
+ git -C repo config --worktree --list >actual &&
+ cat >expect <<-\EOF &&
+ core.sparsecheckout=true
+ core.sparsecheckoutcone=true
+ index.sparse=false
+ EOF
+ test_cmp expect actual &&
+
+ git -C repo sparse-checkout reapply --no-cone --no-sparse-index &&
+ git -C repo config --worktree --list >actual &&
+ cat >expect <<-\EOF &&
+ core.sparsecheckout=true
+ core.sparsecheckoutcone=false
+ index.sparse=false
+ EOF
+ test_cmp expect actual &&
+
+ git -C repo sparse-checkout reapply --cone --sparse-index &&
+ git -C repo config --worktree --list >actual &&
+ cat >expect <<-\EOF &&
+ core.sparsecheckout=true
+ core.sparsecheckoutcone=true
+ index.sparse=true
+ EOF
+ test_cmp expect actual &&
+
+ git -C repo sparse-checkout disable
+'
+
test_expect_success 'cone mode: set with core.ignoreCase=true' '
rm repo/.git/info/sparse-checkout &&
git -C repo sparse-checkout init --cone &&
@@ -520,17 +570,17 @@ test_expect_success 'interaction with submodules' '
'
test_expect_success 'different sparse-checkouts with worktrees' '
+ git -C repo sparse-checkout set --cone deep folder1 &&
git -C repo worktree add --detach ../worktree &&
- check_files worktree "a deep folder1 folder2" &&
- git -C worktree sparse-checkout init --cone &&
- git -C repo sparse-checkout set folder1 &&
- git -C worktree sparse-checkout set deep/deeper1 &&
- check_files repo a folder1 &&
- check_files worktree a deep
+ check_files worktree "a deep folder1" &&
+ git -C repo sparse-checkout set --cone folder1 &&
+ git -C worktree sparse-checkout set --cone deep/deeper1 &&
+ check_files repo "a folder1" &&
+ check_files worktree "a deep"
'
test_expect_success 'set using filename keeps file on-disk' '
- git -C repo sparse-checkout set a deep &&
+ git -C repo sparse-checkout set --skip-checks a deep &&
cat >expect <<-\EOF &&
/*
!/*/
@@ -641,7 +691,7 @@ test_expect_success BSLASHPSPEC 'pattern-checks: escaped characters' '
git -C escaped reset --hard $COMMIT &&
check_files escaped "a deep folder1 folder2 zbad\\dir zdoes*exist" zglob[!a]? &&
git -C escaped sparse-checkout init --cone &&
- git -C escaped sparse-checkout set zbad\\dir/bogus "zdoes*not*exist" "zdoes*exist" "zglob[!a]?" &&
+ git -C escaped sparse-checkout set --skip-checks zbad\\dir/bogus "zdoes*not*exist" "zdoes*exist" "zglob[!a]?" &&
cat >expect <<-\EOF &&
/*
!/*/
@@ -766,4 +816,59 @@ test_expect_success 'malformed cone-mode patterns' '
grep "warning: disabling cone pattern matching" err
'
+test_expect_success 'set from subdir pays attention to prefix' '
+ git -C repo sparse-checkout disable &&
+ git -C repo/deep sparse-checkout set --cone deeper2 ../folder1 &&
+
+ git -C repo sparse-checkout list >actual &&
+
+ cat >expect <<-\EOF &&
+ deep/deeper2
+ folder1
+ EOF
+ test_cmp expect actual
+'
+
+test_expect_success 'add from subdir pays attention to prefix' '
+ git -C repo sparse-checkout set --cone deep/deeper2 &&
+ git -C repo/deep sparse-checkout add deeper1/deepest ../folder1 &&
+
+ git -C repo sparse-checkout list >actual &&
+
+ cat >expect <<-\EOF &&
+ deep/deeper1/deepest
+ deep/deeper2
+ folder1
+ EOF
+ test_cmp expect actual
+'
+
+test_expect_success 'set from subdir in non-cone mode throws an error' '
+ git -C repo sparse-checkout disable &&
+ test_must_fail git -C repo/deep sparse-checkout set --no-cone deeper2 ../folder1 2>error &&
+
+ grep "run from the toplevel directory in non-cone mode" error
+'
+
+test_expect_success 'set from subdir in non-cone mode throws an error' '
+ git -C repo sparse-checkout set --no-cone deep/deeper2 &&
+ test_must_fail git -C repo/deep sparse-checkout add deeper1/deepest ../folder1 2>error &&
+
+ grep "run from the toplevel directory in non-cone mode" error
+'
+
+test_expect_success 'by default, cone mode will error out when passed files' '
+ git -C repo sparse-checkout reapply --cone &&
+ test_must_fail git -C repo sparse-checkout add .gitignore 2>error &&
+
+ grep ".gitignore.*is not a directory" error
+'
+
+test_expect_success 'by default, non-cone mode will warn on individual files' '
+ git -C repo sparse-checkout reapply --no-cone &&
+ git -C repo sparse-checkout add .gitignore 2>warning &&
+
+ grep "pass a leading slash before paths.*if you want a single file" warning
+'
+
test_done
diff --git a/t/t1092-sparse-checkout-compatibility.sh b/t/t1092-sparse-checkout-compatibility.sh
index 4ba1617..236ab53 100755
--- a/t/t1092-sparse-checkout-compatibility.sh
+++ b/t/t1092-sparse-checkout-compatibility.sh
@@ -16,7 +16,9 @@ test_expect_success 'setup' '
echo "after deep" >e &&
echo "after folder1" >g &&
echo "after x" >z &&
- mkdir folder1 folder2 deep x &&
+ mkdir folder1 folder2 deep before x &&
+ echo "before deep" >before/a &&
+ echo "before deep again" >before/b &&
mkdir deep/deeper1 deep/deeper2 deep/before deep/later &&
mkdir deep/deeper1/deepest &&
mkdir deep/deeper1/deepest2 &&
@@ -244,6 +246,25 @@ test_expect_success 'expanded in-memory index matches full index' '
test_sparse_match git ls-files --stage
'
+test_expect_success 'root directory cannot be sparse' '
+ init_repos &&
+
+ # Remove all in-cone files and directories from the index, collapse index
+ # with `git sparse-checkout reapply`
+ git -C sparse-index rm -r . &&
+ git -C sparse-index sparse-checkout reapply &&
+
+ # Verify sparse directories still present, root directory is not sparse
+ cat >expect <<-EOF &&
+ before/
+ folder1/
+ folder2/
+ x/
+ EOF
+ git -C sparse-index ls-files --sparse >actual &&
+ test_cmp expect actual
+'
+
test_expect_success 'status with options' '
init_repos &&
test_sparse_match ls &&
@@ -260,6 +281,13 @@ test_expect_success 'status with options' '
test_all_match git status --porcelain=v2 -uno
'
+test_expect_success 'status with diff in unexpanded sparse directory' '
+ init_repos &&
+ test_all_match git checkout rename-base &&
+ test_all_match git reset --soft rename-out-to-out &&
+ test_all_match git status --porcelain=v2
+'
+
test_expect_success 'status reports sparse-checkout' '
init_repos &&
git -C sparse-checkout status >full &&
@@ -367,7 +395,7 @@ test_expect_success 'status/add: outside sparse cone' '
write_script edit-contents <<-\EOF &&
echo text >>$1
EOF
- run_on_sparse ../edit-contents folder1/a &&
+ run_on_all ../edit-contents folder1/a &&
run_on_all ../edit-contents folder1/new &&
test_sparse_match git status --porcelain=v2 &&
@@ -376,8 +404,8 @@ test_expect_success 'status/add: outside sparse cone' '
test_sparse_match test_must_fail git add folder1/a &&
grep "Disable or modify the sparsity rules" sparse-checkout-err &&
test_sparse_unstaged folder1/a &&
- test_sparse_match test_must_fail git add --refresh folder1/a &&
- grep "Disable or modify the sparsity rules" sparse-checkout-err &&
+ test_all_match git add --refresh folder1/a &&
+ test_must_be_empty sparse-checkout-err &&
test_sparse_unstaged folder1/a &&
test_sparse_match test_must_fail git add folder1/new &&
grep "Disable or modify the sparsity rules" sparse-checkout-err &&
@@ -593,13 +621,11 @@ test_expect_success 'reset with pathspecs outside sparse definition' '
test_sparse_match git reset update-folder1 -- folder1 &&
git -C full-checkout reset update-folder1 -- folder1 &&
- test_sparse_match git status --porcelain=v2 &&
- test_all_match git rev-parse HEAD:folder1 &&
+ test_all_match git ls-files -s -- folder1 &&
test_sparse_match git reset update-folder2 -- folder2/a &&
git -C full-checkout reset update-folder2 -- folder2/a &&
- test_sparse_match git status --porcelain=v2 &&
- test_all_match git rev-parse HEAD:folder2/a
+ test_all_match git ls-files -s -- folder2/a
'
test_expect_success 'reset with wildcard pathspec' '
@@ -629,6 +655,260 @@ test_expect_success 'reset with wildcard pathspec' '
test_all_match git ls-files -s -- folder1
'
+test_expect_success 'update-index modify outside sparse definition' '
+ init_repos &&
+
+ write_script edit-contents <<-\EOF &&
+ echo text >>$1
+ EOF
+
+ # Create & modify folder1/a
+ # Note that this setup is a manual way of reaching the erroneous
+ # condition in which a `skip-worktree` enabled, outside-of-cone file
+ # exists on disk. It is used here to ensure `update-index` is stable
+ # and behaves predictably if such a condition occurs.
+ run_on_sparse mkdir -p folder1 &&
+ run_on_sparse cp ../initial-repo/folder1/a folder1/a &&
+ run_on_all ../edit-contents folder1/a &&
+
+ # If file has skip-worktree enabled, but the file is present, it is
+ # treated the same as if skip-worktree is disabled
+ test_all_match git status --porcelain=v2 &&
+ test_all_match git update-index folder1/a &&
+ test_all_match git status --porcelain=v2 &&
+
+ # When skip-worktree is disabled (even on files outside sparse cone), file
+ # is updated in the index
+ test_sparse_match git update-index --no-skip-worktree folder1/a &&
+ test_all_match git status --porcelain=v2 &&
+ test_all_match git update-index folder1/a &&
+ test_all_match git status --porcelain=v2
+'
+
+test_expect_success 'update-index --add outside sparse definition' '
+ init_repos &&
+
+ write_script edit-contents <<-\EOF &&
+ echo text >>$1
+ EOF
+
+ # Create folder1, add new file
+ run_on_sparse mkdir -p folder1 &&
+ run_on_all ../edit-contents folder1/b &&
+
+ # The *untracked* out-of-cone file is added to the index because it does
+ # not have a `skip-worktree` bit to signal that it should be ignored
+ # (unlike in `git add`, which will fail due to the file being outside
+ # the sparse checkout definition).
+ test_all_match git update-index --add folder1/b &&
+ test_all_match git status --porcelain=v2
+'
+
+# NEEDSWORK: `--remove`, unlike the rest of `update-index`, does not ignore
+# `skip-worktree` entries by default and will remove them from the index.
+# The `--ignore-skip-worktree-entries` flag must be used in conjunction with
+# `--remove` to ignore the `skip-worktree` entries and prevent their removal
+# from the index.
+test_expect_success 'update-index --remove outside sparse definition' '
+ init_repos &&
+
+ # When --ignore-skip-worktree-entries is _not_ specified:
+ # out-of-cone, not-on-disk files are removed from the index
+ test_sparse_match git update-index --remove folder1/a &&
+ cat >expect <<-EOF &&
+ D folder1/a
+ EOF
+ test_sparse_match git diff --cached --name-status &&
+ test_cmp expect sparse-checkout-out &&
+
+ # Reset the state
+ test_all_match git reset --hard &&
+
+ # When --ignore-skip-worktree-entries is specified, out-of-cone
+ # (skip-worktree) files are ignored
+ test_sparse_match git update-index --remove --ignore-skip-worktree-entries folder1/a &&
+ test_sparse_match git diff --cached --name-status &&
+ test_must_be_empty sparse-checkout-out &&
+
+ # Reset the state
+ test_all_match git reset --hard &&
+
+ # --force-remove supersedes --ignore-skip-worktree-entries, removing
+ # a skip-worktree file from the index (and disk) when both are specified
+ # with --remove
+ test_sparse_match git update-index --force-remove --ignore-skip-worktree-entries folder1/a &&
+ cat >expect <<-EOF &&
+ D folder1/a
+ EOF
+ test_sparse_match git diff --cached --name-status &&
+ test_cmp expect sparse-checkout-out
+'
+
+test_expect_success 'update-index with directories' '
+ init_repos &&
+
+ # update-index will exit silently when provided with a directory name
+ # containing a trailing slash
+ test_all_match git update-index deep/ folder1/ &&
+ grep "Ignoring path deep/" sparse-checkout-err &&
+ grep "Ignoring path folder1/" sparse-checkout-err &&
+
+ # When update-index is given a directory name WITHOUT a trailing slash, it will
+ # behave in different ways depending on the status of the directory on disk:
+ # * if it exists, the command exits with an error ("add individual files instead")
+ # * if it does NOT exist (e.g., in a sparse-checkout), it is assumed to be a
+ # file and either triggers an error ("does not exist and --remove not passed")
+ # or is ignored completely (when using --remove)
+ test_all_match test_must_fail git update-index deep &&
+ run_on_all test_must_fail git update-index folder1 &&
+ test_must_fail git -C full-checkout update-index --remove folder1 &&
+ test_sparse_match git update-index --remove folder1 &&
+ test_all_match git status --porcelain=v2
+'
+
+test_expect_success 'update-index --again file outside sparse definition' '
+ init_repos &&
+
+ test_all_match git checkout -b test-reupdate &&
+
+ # Update HEAD without modifying the index to introduce a difference in
+ # folder1/a
+ test_sparse_match git reset --soft update-folder1 &&
+
+ # Because folder1/a differs in the index vs HEAD,
+ # `git update-index --no-skip-worktree --again` will effectively perform
+ # `git update-index --no-skip-worktree folder1/a` and remove the skip-worktree
+ # flag from folder1/a
+ test_sparse_match git update-index --no-skip-worktree --again &&
+ test_sparse_match git status --porcelain=v2 &&
+
+ cat >expect <<-EOF &&
+ D folder1/a
+ EOF
+ test_sparse_match git diff --name-status &&
+ test_cmp expect sparse-checkout-out
+'
+
+test_expect_success 'update-index --cacheinfo' '
+ init_repos &&
+
+ deep_a_oid=$(git -C full-checkout rev-parse update-deep:deep/a) &&
+ folder2_oid=$(git -C full-checkout rev-parse update-folder2:folder2) &&
+ folder1_a_oid=$(git -C full-checkout rev-parse update-folder1:folder1/a) &&
+
+ test_all_match git update-index --cacheinfo 100644 $deep_a_oid deep/a &&
+ test_all_match git status --porcelain=v2 &&
+
+ # Cannot add sparse directory, even in sparse index case
+ test_all_match test_must_fail git update-index --add --cacheinfo 040000 $folder2_oid folder2/ &&
+
+ # Sparse match only: the new outside-of-cone entry is added *without* skip-worktree,
+ # so `git status` reports it as "deleted" in the worktree
+ test_sparse_match git update-index --add --cacheinfo 100644 $folder1_a_oid folder1/a &&
+ test_sparse_match git status --porcelain=v2 &&
+ cat >expect <<-EOF &&
+ MD folder1/a
+ EOF
+ test_sparse_match git status --short -- folder1/a &&
+ test_cmp expect sparse-checkout-out &&
+
+ # To return folder1/a to "normal" for a sparse checkout (ignored &
+ # outside-of-cone), add the skip-worktree flag.
+ test_sparse_match git update-index --skip-worktree folder1/a &&
+ cat >expect <<-EOF &&
+ S folder1/a
+ EOF
+ test_sparse_match git ls-files -t -- folder1/a &&
+ test_cmp expect sparse-checkout-out
+'
+
+for MERGE_TREES in "base HEAD update-folder2" \
+ "update-folder1 update-folder2" \
+ "update-folder2"
+do
+ test_expect_success "'read-tree -mu $MERGE_TREES' with files outside sparse definition" '
+ init_repos &&
+
+ # Although the index matches, without --no-sparse-checkout, outside-of-
+ # definition files will not exist on disk for sparse checkouts
+ test_all_match git read-tree -mu $MERGE_TREES &&
+ test_all_match git status --porcelain=v2 &&
+ test_path_is_missing sparse-checkout/folder2 &&
+ test_path_is_missing sparse-index/folder2 &&
+
+ test_all_match git read-tree --reset -u HEAD &&
+ test_all_match git status --porcelain=v2 &&
+
+ test_all_match git read-tree -mu --no-sparse-checkout $MERGE_TREES &&
+ test_all_match git status --porcelain=v2 &&
+ test_cmp sparse-checkout/folder2/a sparse-index/folder2/a &&
+ test_cmp sparse-checkout/folder2/a full-checkout/folder2/a
+
+ '
+done
+
+test_expect_success 'read-tree --merge with edit/edit conflicts in sparse directories' '
+ init_repos &&
+
+ # Merge of multiple changes to same directory (but not same files) should
+ # succeed
+ test_all_match git read-tree -mu base rename-base update-folder1 &&
+ test_all_match git status --porcelain=v2 &&
+
+ test_all_match git reset --hard &&
+
+ test_all_match git read-tree -mu rename-base update-folder2 &&
+ test_all_match git status --porcelain=v2 &&
+
+ test_all_match git reset --hard &&
+
+ test_all_match test_must_fail git read-tree -mu base update-folder1 rename-out-to-in &&
+ test_all_match test_must_fail git read-tree -mu rename-out-to-in update-folder1
+'
+
+test_expect_success 'read-tree --prefix' '
+ init_repos &&
+
+ # If files differing between the index and target <commit-ish> exist
+ # inside the prefix, `read-tree --prefix` should fail
+ test_all_match test_must_fail git read-tree --prefix=deep/ deepest &&
+ test_all_match test_must_fail git read-tree --prefix=folder1/ update-folder1 &&
+
+ # If no differing index entries exist matching the prefix,
+ # `read-tree --prefix` updates the index successfully
+ test_all_match git rm -rf deep/deeper1/deepest/ &&
+ test_all_match git read-tree --prefix=deep/deeper1/deepest -u deepest &&
+ test_all_match git status --porcelain=v2 &&
+
+ test_all_match git rm -rf --sparse folder1/ &&
+ test_all_match git read-tree --prefix=folder1/ -u update-folder1 &&
+ test_all_match git status --porcelain=v2 &&
+
+ test_all_match git rm -rf --sparse folder2/0 &&
+ test_all_match git read-tree --prefix=folder2/0/ -u rename-out-to-out &&
+ test_all_match git status --porcelain=v2
+'
+
+test_expect_success 'read-tree --merge with directory-file conflicts' '
+ init_repos &&
+
+ test_all_match git checkout -b test-branch rename-base &&
+
+ # Although the index matches, without --no-sparse-checkout, outside-of-
+ # definition files will not exist on disk for sparse checkouts
+ test_sparse_match git read-tree -mu rename-out-to-out &&
+ test_sparse_match git status --porcelain=v2 &&
+ test_path_is_missing sparse-checkout/folder2 &&
+ test_path_is_missing sparse-index/folder2 &&
+
+ test_sparse_match git read-tree --reset -u HEAD &&
+ test_sparse_match git status --porcelain=v2 &&
+
+ test_sparse_match git read-tree -mu --no-sparse-checkout rename-out-to-out &&
+ test_sparse_match git status --porcelain=v2 &&
+ test_cmp sparse-checkout/folder2/0/1 sparse-index/folder2/0/1
+'
+
test_expect_success 'merge, cherry-pick, and rebase' '
init_repos &&
@@ -754,6 +1034,74 @@ test_expect_success 'cherry-pick with conflicts' '
test_all_match test_must_fail git cherry-pick to-cherry-pick
'
+test_expect_success 'checkout-index inside sparse definition' '
+ init_repos &&
+
+ run_on_all rm -f deep/a &&
+ test_all_match git checkout-index -- deep/a &&
+ test_all_match git status --porcelain=v2 &&
+
+ echo test >>new-a &&
+ run_on_all cp ../new-a a &&
+ test_all_match test_must_fail git checkout-index -- a &&
+ test_all_match git checkout-index -f -- a &&
+ test_all_match git status --porcelain=v2
+'
+
+test_expect_success 'checkout-index outside sparse definition' '
+ init_repos &&
+
+ # Without --ignore-skip-worktree-bits, outside-of-cone files will trigger
+ # an error
+ test_sparse_match test_must_fail git checkout-index -- folder1/a &&
+ test_i18ngrep "folder1/a has skip-worktree enabled" sparse-checkout-err &&
+ test_path_is_missing folder1/a &&
+
+ # With --ignore-skip-worktree-bits, outside-of-cone files are checked out
+ test_sparse_match git checkout-index --ignore-skip-worktree-bits -- folder1/a &&
+ test_cmp sparse-checkout/folder1/a sparse-index/folder1/a &&
+ test_cmp sparse-checkout/folder1/a full-checkout/folder1/a &&
+
+ run_on_sparse rm -rf folder1 &&
+ echo test >new-a &&
+ run_on_sparse mkdir -p folder1 &&
+ run_on_all cp ../new-a folder1/a &&
+
+ test_all_match test_must_fail git checkout-index --ignore-skip-worktree-bits -- folder1/a &&
+ test_all_match git checkout-index -f --ignore-skip-worktree-bits -- folder1/a &&
+ test_cmp sparse-checkout/folder1/a sparse-index/folder1/a &&
+ test_cmp sparse-checkout/folder1/a full-checkout/folder1/a
+'
+
+test_expect_success 'checkout-index with folders' '
+ init_repos &&
+
+ # Inside checkout definition
+ test_all_match test_must_fail git checkout-index -f -- deep/ &&
+
+ # Outside checkout definition
+ # Note: although all tests fail (as expected), the messaging differs. For
+ # non-sparse-index checkouts, the error is that the "file" does not appear
+ # in the index; for sparse-index checkouts, the error is explicitly that the
+ # entry is a sparse directory.
+ run_on_all test_must_fail git checkout-index -f -- folder1/ &&
+ test_cmp full-checkout-err sparse-checkout-err &&
+ ! test_cmp full-checkout-err sparse-index-err &&
+ grep "is a sparse directory" sparse-index-err
+'
+
+test_expect_success 'checkout-index --all' '
+ init_repos &&
+
+ test_all_match git checkout-index --all &&
+ test_sparse_match test_path_is_missing folder1 &&
+
+ # --ignore-skip-worktree-bits will cause `skip-worktree` files to be
+ # checked out, causing the outside-of-cone `folder1` to exist on-disk
+ test_all_match git checkout-index --ignore-skip-worktree-bits --all &&
+ test_all_match test_path_exists folder1
+'
+
test_expect_success 'clean' '
init_repos &&
@@ -763,23 +1111,42 @@ test_expect_success 'clean' '
test_all_match git commit -m "ignore bogus files" &&
run_on_sparse mkdir folder1 &&
+ run_on_all mkdir -p deep/untracked-deep &&
run_on_all touch folder1/bogus &&
+ run_on_all touch folder1/untracked &&
+ run_on_all touch deep/untracked-deep/bogus &&
+ run_on_all touch deep/untracked-deep/untracked &&
test_all_match git status --porcelain=v2 &&
test_all_match git clean -f &&
test_all_match git status --porcelain=v2 &&
test_sparse_match ls &&
test_sparse_match ls folder1 &&
+ run_on_all test_path_exists folder1/bogus &&
+ run_on_all test_path_is_missing folder1/untracked &&
+ run_on_all test_path_exists deep/untracked-deep/bogus &&
+ run_on_all test_path_exists deep/untracked-deep/untracked &&
+
+ test_all_match git clean -fd &&
+ test_all_match git status --porcelain=v2 &&
+ test_sparse_match ls &&
+ test_sparse_match ls folder1 &&
+ run_on_all test_path_exists folder1/bogus &&
+ run_on_all test_path_exists deep/untracked-deep/bogus &&
+ run_on_all test_path_is_missing deep/untracked-deep/untracked &&
test_all_match git clean -xf &&
test_all_match git status --porcelain=v2 &&
test_sparse_match ls &&
test_sparse_match ls folder1 &&
+ run_on_all test_path_is_missing folder1/bogus &&
+ run_on_all test_path_exists deep/untracked-deep/bogus &&
test_all_match git clean -xdf &&
test_all_match git status --porcelain=v2 &&
test_sparse_match ls &&
test_sparse_match ls folder1 &&
+ run_on_all test_path_is_missing deep/untracked-deep/bogus &&
test_sparse_match test_path_is_dir folder1
'
@@ -898,6 +1265,8 @@ test_expect_success 'sparse-index is not expanded' '
echo >>sparse-index/untracked.txt &&
ensure_not_expanded add . &&
+ ensure_not_expanded checkout-index -f a &&
+ ensure_not_expanded checkout-index -f --all &&
for ref in update-deep update-folder1 update-folder2 update-deep
do
echo >>sparse-index/README.md &&
@@ -926,6 +1295,8 @@ test_expect_success 'sparse-index is not expanded' '
# Wildcard identifies only full sparse directories, no index expansion
ensure_not_expanded reset deepest -- folder\* &&
+ ensure_not_expanded clean -fd &&
+
ensure_not_expanded checkout -f update-deep &&
test_config -C sparse-index pull.twohead ort &&
(
@@ -1001,6 +1372,24 @@ test_expect_success 'sparse index is not expanded: diff' '
ensure_not_expanded diff --cached
'
+test_expect_success 'sparse index is not expanded: update-index' '
+ init_repos &&
+
+ deep_a_oid=$(git -C full-checkout rev-parse update-deep:deep/a) &&
+ ensure_not_expanded update-index --cacheinfo 100644 $deep_a_oid deep/a &&
+
+ echo "test" >sparse-index/README.md &&
+ echo "test2" >sparse-index/a &&
+ rm -f sparse-index/deep/a &&
+
+ ensure_not_expanded update-index --add README.md &&
+ ensure_not_expanded update-index a &&
+ ensure_not_expanded update-index --remove deep/a &&
+
+ ensure_not_expanded reset --soft update-deep &&
+ ensure_not_expanded update-index --add --remove --again
+'
+
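The ensure_not_expanded calls above wrap ordinary plumbing invocations; a standalone sketch of the same update-index operations follows (the object-id lookup and the paths are illustrative assumptions, not taken from the fixture repositories):

    # register a blob at a path without touching the working tree
    oid=$(git rev-parse HEAD:deep/a)
    git update-index --cacheinfo 100644 "$oid" deep/a
    # stage a new file, then record a deletion
    git update-index --add README.md
    git update-index --remove deep/a
    # re-run update-index on paths whose index entries differ from HEAD
    git update-index --add --remove --again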
test_expect_success 'sparse index is not expanded: blame' '
init_repos &&
@@ -1023,6 +1412,27 @@ test_expect_success 'sparse index is not expanded: fetch/pull' '
ensure_not_expanded pull full base
'
+test_expect_success 'sparse index is not expanded: read-tree' '
+ init_repos &&
+
+ ensure_not_expanded checkout -b test-branch update-folder1 &&
+ for MERGE_TREES in "base HEAD update-folder2" \
+ "base HEAD rename-base" \
+ "base update-folder2" \
+ "base rename-base" \
+ "update-folder2"
+ do
+ ensure_not_expanded read-tree -mu $MERGE_TREES &&
+ ensure_not_expanded reset --hard || return 1
+ done &&
+
+ rm -rf sparse-index/deep/deeper2 &&
+ ensure_not_expanded add . &&
+ ensure_not_expanded commit -m "test" &&
+
+ ensure_not_expanded read-tree --prefix=deep/deeper2 -u deepest
+'
+
test_expect_success 'ls-files' '
init_repos &&
@@ -1037,6 +1447,7 @@ test_expect_success 'ls-files' '
cat >expect <<-\EOF &&
a
+ before/
deep/
e
folder1-
@@ -1057,36 +1468,34 @@ test_expect_success 'ls-files' '
test_cmp dense sparse &&
# Set up a strange condition of having a file edit
- # outside of the sparse-checkout cone. This is just
- # to verify that sparse-checkout and sparse-index
- # behave the same in this case.
+ # outside of the sparse-checkout cone. We want to verify
+ # that all modes handle this the same, and detect the
+ # modification.
write_script edit-content <<-\EOF &&
- mkdir folder1 &&
+ mkdir -p folder1 &&
echo content >>folder1/a
EOF
- run_on_sparse ../edit-content &&
+ run_on_all ../edit-content &&
- # ls-files does not currently notice modified files whose
- # cache entries are marked SKIP_WORKTREE. This may change
- # in the future, but here we test that sparse index does
- # not accidentally create a change of behavior.
- test_sparse_match git ls-files --modified &&
- test_must_be_empty sparse-checkout-out &&
- test_must_be_empty sparse-index-out &&
+ test_all_match git ls-files --modified &&
git -C sparse-index ls-files --sparse --modified >sparse-index-out &&
- test_must_be_empty sparse-index-out &&
+ cat >expect <<-\EOF &&
+ folder1/a
+ EOF
+ test_cmp expect sparse-index-out &&
# Add folder1 to the sparse-checkout cone and
# check that ls-files shows the expanded files.
test_sparse_match git sparse-checkout add folder1 &&
- test_sparse_match git ls-files --modified &&
+ test_all_match git ls-files --modified &&
test_all_match git ls-files &&
git -C sparse-index ls-files --sparse >actual &&
cat >expect <<-\EOF &&
a
+ before/
deep/
e
folder1-
diff --git a/t/t1300-config.sh b/t/t1300-config.sh
index 78359f1..7dd9b32 100755
--- a/t/t1300-config.sh
+++ b/t/t1300-config.sh
@@ -2388,4 +2388,122 @@ test_expect_success '--get and --get-all with --fixed-value' '
test_must_fail git config --file=config --get-regexp --fixed-value fixed+ non-existent
'
+test_expect_success 'includeIf.hasconfig:remote.*.url' '
+ git init hasremoteurlTest &&
+ test_when_finished "rm -rf hasremoteurlTest" &&
+
+ cat >include-this <<-\EOF &&
+ [user]
+ this = this-is-included
+ EOF
+ cat >dont-include-that <<-\EOF &&
+ [user]
+ that = that-is-not-included
+ EOF
+ cat >>hasremoteurlTest/.git/config <<-EOF &&
+ [includeIf "hasconfig:remote.*.url:foourl"]
+ path = "$(pwd)/include-this"
+ [includeIf "hasconfig:remote.*.url:barurl"]
+ path = "$(pwd)/dont-include-that"
+ [remote "foo"]
+ url = foourl
+ EOF
+
+ echo this-is-included >expect-this &&
+ git -C hasremoteurlTest config --get user.this >actual-this &&
+ test_cmp expect-this actual-this &&
+
+ test_must_fail git -C hasremoteurlTest config --get user.that
+'
+
+test_expect_success 'includeIf.hasconfig:remote.*.url respects last-config-wins' '
+ git init hasremoteurlTest &&
+ test_when_finished "rm -rf hasremoteurlTest" &&
+
+ cat >include-two-three <<-\EOF &&
+ [user]
+ two = included-config
+ three = included-config
+ EOF
+ cat >>hasremoteurlTest/.git/config <<-EOF &&
+ [remote "foo"]
+ url = foourl
+ [user]
+ one = main-config
+ two = main-config
+ [includeIf "hasconfig:remote.*.url:foourl"]
+ path = "$(pwd)/include-two-three"
+ [user]
+ three = main-config
+ EOF
+
+ echo main-config >expect-main-config &&
+ echo included-config >expect-included-config &&
+
+ git -C hasremoteurlTest config --get user.one >actual &&
+ test_cmp expect-main-config actual &&
+
+ git -C hasremoteurlTest config --get user.two >actual &&
+ test_cmp expect-included-config actual &&
+
+ git -C hasremoteurlTest config --get user.three >actual &&
+ test_cmp expect-main-config actual
+'
+
+test_expect_success 'includeIf.hasconfig:remote.*.url globs' '
+ git init hasremoteurlTest &&
+ test_when_finished "rm -rf hasremoteurlTest" &&
+
+ printf "[user]\ndss = yes\n" >double-star-start &&
+ printf "[user]\ndse = yes\n" >double-star-end &&
+ printf "[user]\ndsm = yes\n" >double-star-middle &&
+ printf "[user]\nssm = yes\n" >single-star-middle &&
+ printf "[user]\nno = no\n" >no &&
+
+ cat >>hasremoteurlTest/.git/config <<-EOF &&
+ [remote "foo"]
+ url = https://foo/bar/baz
+ [includeIf "hasconfig:remote.*.url:**/baz"]
+ path = "$(pwd)/double-star-start"
+ [includeIf "hasconfig:remote.*.url:**/nomatch"]
+ path = "$(pwd)/no"
+ [includeIf "hasconfig:remote.*.url:https:/**"]
+ path = "$(pwd)/double-star-end"
+ [includeIf "hasconfig:remote.*.url:nomatch:/**"]
+ path = "$(pwd)/no"
+ [includeIf "hasconfig:remote.*.url:https:/**/baz"]
+ path = "$(pwd)/double-star-middle"
+ [includeIf "hasconfig:remote.*.url:https:/**/nomatch"]
+ path = "$(pwd)/no"
+ [includeIf "hasconfig:remote.*.url:https://*/bar/baz"]
+ path = "$(pwd)/single-star-middle"
+ [includeIf "hasconfig:remote.*.url:https://*/baz"]
+ path = "$(pwd)/no"
+ EOF
+
+ git -C hasremoteurlTest config --get user.dss &&
+ git -C hasremoteurlTest config --get user.dse &&
+ git -C hasremoteurlTest config --get user.dsm &&
+ git -C hasremoteurlTest config --get user.ssm &&
+ test_must_fail git -C hasremoteurlTest config --get user.no
+'
+
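The glob test above is the main record of the pattern rules: a single "*" stays within one URL component while "**" may span "/". As a rough user-level illustration (the URL and include path are placeholders, and the matching behaviour is inferred from the assertions above):

    cat >>"$HOME/.gitconfig" <<\EOF
    [includeIf "hasconfig:remote.*.url:https://example.com/**"]
    	# included for any repository with a remote URL under example.com
    	path = ~/.config/git/work.inc
    EOF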
+test_expect_success 'includeIf.hasconfig:remote.*.url forbids remote url in such included files' '
+ git init hasremoteurlTest &&
+ test_when_finished "rm -rf hasremoteurlTest" &&
+
+ cat >include-with-url <<-\EOF &&
+ [remote "bar"]
+ url = barurl
+ EOF
+ cat >>hasremoteurlTest/.git/config <<-EOF &&
+ [includeIf "hasconfig:remote.*.url:foourl"]
+ path = "$(pwd)/include-with-url"
+ EOF
+
+ # test with any Git command
+ test_must_fail git -C hasremoteurlTest status 2>err &&
+ grep "fatal: remote URLs cannot be configured in file directly or indirectly included by includeIf.hasconfig:remote.*.url" err
+'
+
test_done
diff --git a/t/t1350-config-hooks-path.sh b/t/t1350-config-hooks-path.sh
index fa9647a..f6dc83e 100755
--- a/t/t1350-config-hooks-path.sh
+++ b/t/t1350-config-hooks-path.sh
@@ -6,11 +6,11 @@ test_description='Test the core.hooksPath configuration variable'
test_expect_success 'set up a pre-commit hook in core.hooksPath' '
>actual &&
- mkdir -p .git/custom-hooks .git/hooks &&
+ mkdir -p .git/custom-hooks &&
write_script .git/custom-hooks/pre-commit <<-\EOF &&
echo CUSTOM >>actual
EOF
- write_script .git/hooks/pre-commit <<-\EOF
+ test_hook --setup pre-commit <<-\EOF
echo NORMAL >>actual
EOF
'
diff --git a/t/t1405-main-ref-store.sh b/t/t1405-main-ref-store.sh
index 1a3ee88..51f8291 100755
--- a/t/t1405-main-ref-store.sh
+++ b/t/t1405-main-ref-store.sh
@@ -40,6 +40,12 @@ test_expect_success 'delete_refs(FOO, refs/tags/new-tag)' '
test_must_fail git rev-parse refs/tags/new-tag --
'
+# In reftable, we keep the reflogs around for deleted refs.
+test_expect_success !REFFILES 'delete-reflog(FOO, refs/tags/new-tag)' '
+ $RUN delete-reflog FOO &&
+ $RUN delete-reflog refs/tags/new-tag
+'
+
test_expect_success 'rename_refs(main, new-main)' '
git rev-parse main >expected &&
$RUN rename-ref refs/heads/main refs/heads/new-main &&
@@ -105,7 +111,7 @@ test_expect_success 'delete_reflog(HEAD)' '
test_must_fail git reflog exists HEAD
'
-test_expect_success 'create-reflog(HEAD)' '
+test_expect_success REFFILES 'create-reflog(HEAD)' '
$RUN create-reflog HEAD &&
git reflog exists HEAD
'
diff --git a/t/t1410-reflog.sh b/t/t1410-reflog.sh
index d7ddf76..aa59954 100755
--- a/t/t1410-reflog.sh
+++ b/t/t1410-reflog.sh
@@ -106,6 +106,28 @@ test_expect_success setup '
test_line_count = 4 output
'
+test_expect_success 'correct usage on sub-command -h' '
+ test_expect_code 129 git reflog expire -h >err &&
+ grep "git reflog expire" err
+'
+
+test_expect_success 'correct usage on "git reflog show -h"' '
+ test_expect_code 129 git reflog show -h >err &&
+ grep -F "git reflog [show]" err
+'
+
+test_expect_success 'pass through -- to sub-command' '
+ test_when_finished "rm -rf repo" &&
+ git init repo &&
+ test_commit -C repo message --a-file contents dash-tag &&
+
+ git -C repo reflog show -- --does-not-exist >out &&
+ test_must_be_empty out &&
+ git -C repo reflog show >expect &&
+ git -C repo reflog show -- --a-file >actual &&
+ test_cmp expect actual
+'
+
test_expect_success rewind '
test_tick && git reset --hard HEAD~2 &&
test -f C &&
@@ -341,7 +363,7 @@ test_expect_success 'stale dirs do not cause d/f conflicts (reflogs off)' '
# Each line is 114 characters, so we need 75 to still have a few before the
# last 8K. The 89-character padding on the final entry lines up our
# newline exactly.
-test_expect_success SHA1 'parsing reverse reflogs at BUFSIZ boundaries' '
+test_expect_success REFFILES,SHA1 'parsing reverse reflogs at BUFSIZ boundaries' '
git checkout -b reflogskip &&
zf=$(test_oid zero_2) &&
ident="abc <xyz> 0000000001 +0000" &&
@@ -418,8 +440,18 @@ test_expect_success 'expire with multiple worktrees' '
test_commit -C link-wt foobar &&
test_tick &&
git reflog expire --verbose --all --expire=$test_tick &&
- test_must_be_empty .git/worktrees/link-wt/logs/HEAD
+ test-tool ref-store worktree:link-wt for-each-reflog-ent HEAD >actual &&
+ test_must_be_empty actual
)
'
+test_expect_success REFFILES 'empty reflog' '
+ test_when_finished "rm -rf empty" &&
+ git init empty &&
+ test_commit -C empty A &&
+ >empty/.git/logs/refs/heads/foo &&
+ git -C empty reflog expire --all 2>err &&
+ test_must_be_empty err
+'
+
test_done
diff --git a/t/t1411-reflog-show.sh b/t/t1411-reflog-show.sh
index 0bb319b..975c4ea 100755
--- a/t/t1411-reflog-show.sh
+++ b/t/t1411-reflog-show.sh
@@ -169,9 +169,4 @@ test_expect_success 'git log -g -p shows diffs vs. parents' '
test_cmp expect actual
'
-test_expect_success 'reflog exists works' '
- git reflog exists refs/heads/main &&
- ! git reflog exists refs/heads/nonexistent
-'
-
test_done
diff --git a/t/t1416-ref-transaction-hooks.sh b/t/t1416-ref-transaction-hooks.sh
index 6c94102..085a7a4 100755
--- a/t/t1416-ref-transaction-hooks.sh
+++ b/t/t1416-ref-transaction-hooks.sh
@@ -8,7 +8,6 @@ export GIT_TEST_DEFAULT_INITIAL_BRANCH_NAME
. ./test-lib.sh
test_expect_success setup '
- mkdir -p .git/hooks &&
test_commit PRE &&
PRE_OID=$(git rev-parse PRE) &&
test_commit POST &&
@@ -16,9 +15,8 @@ test_expect_success setup '
'
test_expect_success 'hook allows updating ref if successful' '
- test_when_finished "rm .git/hooks/reference-transaction" &&
git reset --hard PRE &&
- write_script .git/hooks/reference-transaction <<-\EOF &&
+ test_hook reference-transaction <<-\EOF &&
echo "$*" >>actual
EOF
cat >expect <<-EOF &&
@@ -30,9 +28,8 @@ test_expect_success 'hook allows updating ref if successful' '
'
test_expect_success 'hook aborts updating ref in prepared state' '
- test_when_finished "rm .git/hooks/reference-transaction" &&
git reset --hard PRE &&
- write_script .git/hooks/reference-transaction <<-\EOF &&
+ test_hook reference-transaction <<-\EOF &&
if test "$1" = prepared
then
exit 1
@@ -43,9 +40,9 @@ test_expect_success 'hook aborts updating ref in prepared state' '
'
test_expect_success 'hook gets all queued updates in prepared state' '
- test_when_finished "rm .git/hooks/reference-transaction actual" &&
+ test_when_finished "rm actual" &&
git reset --hard PRE &&
- write_script .git/hooks/reference-transaction <<-\EOF &&
+ test_hook reference-transaction <<-\EOF &&
if test "$1" = prepared
then
while read -r line
@@ -66,9 +63,9 @@ test_expect_success 'hook gets all queued updates in prepared state' '
'
test_expect_success 'hook gets all queued updates in committed state' '
- test_when_finished "rm .git/hooks/reference-transaction actual" &&
+ test_when_finished "rm actual" &&
git reset --hard PRE &&
- write_script .git/hooks/reference-transaction <<-\EOF &&
+ test_hook reference-transaction <<-\EOF &&
if test "$1" = committed
then
while read -r line
@@ -86,9 +83,9 @@ test_expect_success 'hook gets all queued updates in committed state' '
'
test_expect_success 'hook gets all queued updates in aborted state' '
- test_when_finished "rm .git/hooks/reference-transaction actual" &&
+ test_when_finished "rm actual" &&
git reset --hard PRE &&
- write_script .git/hooks/reference-transaction <<-\EOF &&
+ test_hook reference-transaction <<-\EOF &&
if test "$1" = aborted
then
while read -r line
@@ -115,11 +112,11 @@ test_expect_success 'interleaving hook calls succeed' '
git init --bare target-repo.git &&
- write_script target-repo.git/hooks/reference-transaction <<-\EOF &&
+ test_hook -C target-repo.git reference-transaction <<-\EOF &&
echo $0 "$@" >>actual
EOF
- write_script target-repo.git/hooks/update <<-\EOF &&
+ test_hook -C target-repo.git update <<-\EOF &&
echo $0 "$@" >>actual
EOF
@@ -136,4 +133,54 @@ test_expect_success 'interleaving hook calls succeed' '
test_cmp expect target-repo.git/actual
'
+test_expect_success 'hook does not get called on packing refs' '
+ # Pack references first such that we are in a known state.
+ git pack-refs --all &&
+
+ test_hook reference-transaction <<-\EOF &&
+ echo "$@" >>actual
+ cat >>actual
+ EOF
+ rm -f actual &&
+
+ git update-ref refs/heads/unpacked-ref $POST_OID &&
+ git pack-refs --all &&
+
+ # We only expect a single hook invocation, which is the call to
+ # git-update-ref(1).
+ cat >expect <<-EOF &&
+ prepared
+ $ZERO_OID $POST_OID refs/heads/unpacked-ref
+ committed
+ $ZERO_OID $POST_OID refs/heads/unpacked-ref
+ EOF
+
+ test_cmp expect actual
+'
+
+test_expect_success 'deleting packed ref calls hook once' '
+ # Create a reference and pack it.
+ git update-ref refs/heads/to-be-deleted $POST_OID &&
+ git pack-refs --all &&
+
+ test_hook reference-transaction <<-\EOF &&
+ echo "$@" >>actual
+ cat >>actual
+ EOF
+ rm -f actual &&
+
+ git update-ref -d refs/heads/to-be-deleted $POST_OID &&
+
+ # We only expect a single hook invocation, which is the logical
+ # deletion.
+ cat >expect <<-EOF &&
+ prepared
+ $POST_OID $ZERO_OID refs/heads/to-be-deleted
+ committed
+ $POST_OID $ZERO_OID refs/heads/to-be-deleted
+ EOF
+
+ test_cmp expect actual
+'
+
test_done
diff --git a/t/t1418-reflog-exists.sh b/t/t1418-reflog-exists.sh
new file mode 100755
index 0000000..d51ecd5
--- /dev/null
+++ b/t/t1418-reflog-exists.sh
@@ -0,0 +1,37 @@
+#!/bin/sh
+
+test_description='Test reflog display routines'
+GIT_TEST_DEFAULT_INITIAL_BRANCH_NAME=main
+export GIT_TEST_DEFAULT_INITIAL_BRANCH_NAME
+
+. ./test-lib.sh
+
+test_expect_success 'setup' '
+ test_commit A
+'
+
+test_expect_success 'usage' '
+ test_expect_code 129 git reflog exists &&
+ test_expect_code 129 git reflog exists -h
+'
+
+test_expect_success 'usage: unknown option' '
+ test_expect_code 129 git reflog exists --unknown-option
+'
+
+test_expect_success 'reflog exists works' '
+ git reflog exists refs/heads/main &&
+ test_must_fail git reflog exists refs/heads/nonexistent
+'
+
+test_expect_success 'reflog exists works with a "--" delimiter' '
+ git reflog exists -- refs/heads/main &&
+ test_must_fail git reflog exists -- refs/heads/nonexistent
+'
+
+test_expect_success 'reflog exists works with a "--end-of-options" delimiter' '
+ git reflog exists --end-of-options refs/heads/main &&
+ test_must_fail git reflog exists --end-of-options refs/heads/nonexistent
+'
+
+test_done
diff --git a/t/t1503-rev-parse-verify.sh b/t/t1503-rev-parse-verify.sh
index 94fe413..ba43168 100755
--- a/t/t1503-rev-parse-verify.sh
+++ b/t/t1503-rev-parse-verify.sh
@@ -132,8 +132,9 @@ test_expect_success 'use --default' '
test_must_fail git rev-parse --verify --default bar
'
-test_expect_success 'main@{n} for various n' '
- N=$(git reflog | wc -l) &&
+test_expect_success !SANITIZE_LEAK 'main@{n} for various n' '
+ git reflog >out &&
+ N=$(wc -l <out) &&
Nm1=$(($N-1)) &&
Np1=$(($N+1)) &&
git rev-parse --verify main@{0} &&
diff --git a/t/t1512-rev-parse-disambiguation.sh b/t/t1512-rev-parse-disambiguation.sh
index b0119bf..98cefe3 100755
--- a/t/t1512-rev-parse-disambiguation.sh
+++ b/t/t1512-rev-parse-disambiguation.sh
@@ -25,6 +25,87 @@ export GIT_TEST_DEFAULT_INITIAL_BRANCH_NAME
. ./test-lib.sh
+test_cmp_failed_rev_parse () {
+ dir=$1
+ rev=$2
+
+ cat >expect &&
+ test_must_fail git -C "$dir" rev-parse "$rev" 2>actual.raw &&
+ sed "s/\($rev\)[0-9a-f]*/\1.../" <actual.raw >actual &&
+ test_cmp expect actual
+}
+
+test_expect_success 'ambiguous blob output' '
+ git init --bare blob.prefix &&
+ (
+ cd blob.prefix &&
+
+ # Both start with "dead..", under both SHA-1 and SHA-256
+ echo brocdnra | git hash-object -w --stdin &&
+ echo brigddsv | git hash-object -w --stdin &&
+
+ # Both start with "beef.."
+ echo 1agllotbh | git hash-object -w --stdin &&
+ echo 1bbfctrkc | git hash-object -w --stdin
+ ) &&
+
+ test_must_fail git -C blob.prefix rev-parse dead &&
+ test_cmp_failed_rev_parse blob.prefix beef <<-\EOF
+ error: short object ID beef... is ambiguous
+ hint: The candidates are:
+ hint: beef... blob
+ hint: beef... blob
+ fatal: ambiguous argument '\''beef...'\'': unknown revision or path not in the working tree.
+ Use '\''--'\'' to separate paths from revisions, like this:
+ '\''git <command> [<revision>...] -- [<file>...]'\''
+ EOF
+'
+
+test_expect_success 'ambiguous loose bad object parsed as OBJ_BAD' '
+ git init --bare blob.bad &&
+ (
+ cd blob.bad &&
+
+ # Both have the prefix "bad0"
+ echo xyzfaowcoh | git hash-object -t bad -w --stdin --literally &&
+ echo xyzhjpyvwl | git hash-object -t bad -w --stdin --literally
+ ) &&
+
+ test_cmp_failed_rev_parse blob.bad bad0 <<-\EOF
+ error: short object ID bad0... is ambiguous
+ fatal: invalid object type
+ EOF
+'
+
+test_expect_success POSIXPERM 'ambiguous zlib corrupt loose blob' '
+ git init --bare blob.corrupt &&
+ (
+ cd blob.corrupt &&
+
+ # Both have the prefix "cafe"
+ echo bnkxmdwz | git hash-object -w --stdin &&
+ oid=$(echo bmwsjxzi | git hash-object -w --stdin) &&
+
+ oidf=objects/$(test_oid_to_path "$oid") &&
+ chmod 755 $oidf &&
+ echo broken >$oidf
+ ) &&
+
+ test_cmp_failed_rev_parse blob.corrupt cafe <<-\EOF
+ error: short object ID cafe... is ambiguous
+ error: inflate: data stream error (incorrect header check)
+ error: unable to unpack cafe... header
+ error: inflate: data stream error (incorrect header check)
+ error: unable to unpack cafe... header
+ hint: The candidates are:
+ hint: cafe... [bad object]
+ hint: cafe... blob
+ fatal: ambiguous argument '\''cafe...'\'': unknown revision or path not in the working tree.
+ Use '\''--'\'' to separate paths from revisions, like this:
+ '\''git <command> [<revision>...] -- [<file>...]'\''
+ EOF
+'
+
if ! test_have_prereq SHA1
then
skip_all='not using SHA-1 for objects'
diff --git a/t/t1800-hook.sh b/t/t1800-hook.sh
new file mode 100755
index 0000000..26ed5e1
--- /dev/null
+++ b/t/t1800-hook.sh
@@ -0,0 +1,123 @@
+#!/bin/sh
+
+test_description='git-hook command'
+
+TEST_PASSES_SANITIZE_LEAK=true
+. ./test-lib.sh
+
+test_expect_success 'git hook usage' '
+ test_expect_code 129 git hook &&
+ test_expect_code 129 git hook run &&
+ test_expect_code 129 git hook run -h &&
+ test_expect_code 129 git hook run --unknown 2>err &&
+ grep "unknown option" err
+'
+
+test_expect_success 'git hook run: nonexistent hook' '
+ cat >stderr.expect <<-\EOF &&
+ error: cannot find a hook named test-hook
+ EOF
+ test_expect_code 1 git hook run test-hook 2>stderr.actual &&
+ test_cmp stderr.expect stderr.actual
+'
+
+test_expect_success 'git hook run: nonexistent hook with --ignore-missing' '
+ git hook run --ignore-missing does-not-exist 2>stderr.actual &&
+ test_must_be_empty stderr.actual
+'
+
+test_expect_success 'git hook run: basic' '
+ test_hook test-hook <<-EOF &&
+ echo Test hook
+ EOF
+
+ cat >expect <<-\EOF &&
+ Test hook
+ EOF
+ git hook run test-hook 2>actual &&
+ test_cmp expect actual
+'
+
+test_expect_success 'git hook run: stdout and stderr both write to our stderr' '
+ test_hook test-hook <<-EOF &&
+ echo >&1 Will end up on stderr
+ echo >&2 Will end up on stderr
+ EOF
+
+ cat >stderr.expect <<-\EOF &&
+ Will end up on stderr
+ Will end up on stderr
+ EOF
+ git hook run test-hook >stdout.actual 2>stderr.actual &&
+ test_cmp stderr.expect stderr.actual &&
+ test_must_be_empty stdout.actual
+'
+
+for code in 1 2 128 129
+do
+ test_expect_success "git hook run: exit code $code is passed along" '
+ test_hook test-hook <<-EOF &&
+ exit $code
+ EOF
+
+ test_expect_code $code git hook run test-hook
+ '
+done
+
+test_expect_success 'git hook run arg u ments without -- is not allowed' '
+ test_expect_code 129 git hook run test-hook arg u ments
+'
+
+test_expect_success 'git hook run -- pass arguments' '
+ test_hook test-hook <<-\EOF &&
+ echo $1
+ echo $2
+ EOF
+
+ cat >expect <<-EOF &&
+ arg
+ u ments
+ EOF
+
+ git hook run test-hook -- arg "u ments" 2>actual &&
+ test_cmp expect actual
+'
+
+test_expect_success 'git hook run -- out-of-repo runs excluded' '
+ test_hook test-hook <<-EOF &&
+ echo Test hook
+ EOF
+
+ nongit test_must_fail git hook run test-hook
+'
+
+test_expect_success 'git -c core.hooksPath=<PATH> hook run' '
+ mkdir my-hooks &&
+ write_script my-hooks/test-hook <<-\EOF &&
+ echo Hook ran $1 >>actual
+ EOF
+
+ cat >expect <<-\EOF &&
+ Test hook
+ Hook ran one
+ Hook ran two
+ Hook ran three
+ Hook ran four
+ EOF
+
+ test_hook test-hook <<-EOF &&
+ echo Test hook
+ EOF
+
+ # Test various ways of specifying the path. See also
+ # t1350-config-hooks-path.sh
+ >actual &&
+ git hook run test-hook -- ignored 2>>actual &&
+ git -c core.hooksPath=my-hooks hook run test-hook -- one 2>>actual &&
+ git -c core.hooksPath=my-hooks/ hook run test-hook -- two 2>>actual &&
+ git -c core.hooksPath="$PWD/my-hooks" hook run test-hook -- three 2>>actual &&
+ git -c core.hooksPath="$PWD/my-hooks/" hook run test-hook -- four 2>>actual &&
+ test_cmp expect actual
+'
+
+test_done
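Taken together, the new t1800 tests outline the user-facing shape of the command. A condensed, illustrative session (the hook name and body are made up, and hook output lands on stderr as the tests assert):

    mkdir -p .git/hooks
    cat >.git/hooks/my-hook <<\EOF
    #!/bin/sh
    echo "ran with: $*"
    EOF
    chmod +x .git/hooks/my-hook

    git hook run my-hook -- one "two words"           # arguments follow the -- separator
    git hook run --ignore-missing no-such-hook        # absent hooks are tolerated with this flag
    git -c core.hooksPath="$PWD/.git/hooks" hook run my-hook  # core.hooksPath redirects the lookup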
diff --git a/t/t2012-checkout-last.sh b/t/t2012-checkout-last.sh
index 42601d5..1f6c4ed 100755
--- a/t/t2012-checkout-last.sh
+++ b/t/t2012-checkout-last.sh
@@ -21,14 +21,20 @@ test_expect_success 'first branch switch' '
git checkout other
'
+test_cmp_symbolic_HEAD_ref () {
+ echo refs/heads/"$1" >expect &&
+ git symbolic-ref HEAD >actual &&
+ test_cmp expect actual
+}
+
test_expect_success '"checkout -" switches back' '
git checkout - &&
- test "z$(git symbolic-ref HEAD)" = "zrefs/heads/main"
+ test_cmp_symbolic_HEAD_ref main
'
test_expect_success '"checkout -" switches forth' '
git checkout - &&
- test "z$(git symbolic-ref HEAD)" = "zrefs/heads/other"
+ test_cmp_symbolic_HEAD_ref other
'
test_expect_success 'detach HEAD' '
@@ -37,12 +43,16 @@ test_expect_success 'detach HEAD' '
test_expect_success '"checkout -" attaches again' '
git checkout - &&
- test "z$(git symbolic-ref HEAD)" = "zrefs/heads/other"
+ test_cmp_symbolic_HEAD_ref other
'
test_expect_success '"checkout -" detaches again' '
git checkout - &&
- test "z$(git rev-parse HEAD)" = "z$(git rev-parse other)" &&
+
+ git rev-parse other >expect &&
+ git rev-parse HEAD >actual &&
+ test_cmp expect actual &&
+
test_must_fail git symbolic-ref HEAD
'
@@ -63,31 +73,31 @@ more_switches () {
test_expect_success 'switch to the last' '
more_switches &&
git checkout @{-1} &&
- test "z$(git symbolic-ref HEAD)" = "zrefs/heads/branch2"
+ test_cmp_symbolic_HEAD_ref branch2
'
test_expect_success 'switch to second from the last' '
more_switches &&
git checkout @{-2} &&
- test "z$(git symbolic-ref HEAD)" = "zrefs/heads/branch3"
+ test_cmp_symbolic_HEAD_ref branch3
'
test_expect_success 'switch to third from the last' '
more_switches &&
git checkout @{-3} &&
- test "z$(git symbolic-ref HEAD)" = "zrefs/heads/branch4"
+ test_cmp_symbolic_HEAD_ref branch4
'
test_expect_success 'switch to fourth from the last' '
more_switches &&
git checkout @{-4} &&
- test "z$(git symbolic-ref HEAD)" = "zrefs/heads/branch5"
+ test_cmp_symbolic_HEAD_ref branch5
'
test_expect_success 'switch to twelfth from the last' '
more_switches &&
git checkout @{-12} &&
- test "z$(git symbolic-ref HEAD)" = "zrefs/heads/branch13"
+ test_cmp_symbolic_HEAD_ref branch13
'
test_expect_success 'merge base test setup' '
@@ -98,19 +108,28 @@ test_expect_success 'merge base test setup' '
test_expect_success 'another...main' '
git checkout another &&
git checkout another...main &&
- test "z$(git rev-parse --verify HEAD)" = "z$(git rev-parse --verify main^)"
+
+ git rev-parse --verify main^ >expect &&
+ git rev-parse --verify HEAD >actual &&
+ test_cmp expect actual
'
test_expect_success '...main' '
git checkout another &&
git checkout ...main &&
- test "z$(git rev-parse --verify HEAD)" = "z$(git rev-parse --verify main^)"
+
+ git rev-parse --verify main^ >expect &&
+ git rev-parse --verify HEAD >actual &&
+ test_cmp expect actual
'
test_expect_success 'main...' '
git checkout another &&
git checkout main... &&
- test "z$(git rev-parse --verify HEAD)" = "z$(git rev-parse --verify main^)"
+
+ git rev-parse --verify main^ >expect &&
+ git rev-parse --verify HEAD >actual &&
+ test_cmp expect actual
'
test_expect_success '"checkout -" works after a rebase A' '
@@ -118,7 +137,7 @@ test_expect_success '"checkout -" works after a rebase A' '
git checkout other &&
git rebase main &&
git checkout - &&
- test "z$(git symbolic-ref HEAD)" = "zrefs/heads/main"
+ test_cmp_symbolic_HEAD_ref main
'
test_expect_success '"checkout -" works after a rebase A B' '
@@ -127,7 +146,7 @@ test_expect_success '"checkout -" works after a rebase A B' '
git checkout other &&
git rebase main moodle &&
git checkout - &&
- test "z$(git symbolic-ref HEAD)" = "zrefs/heads/main"
+ test_cmp_symbolic_HEAD_ref main
'
test_expect_success '"checkout -" works after a rebase -i A' '
@@ -135,7 +154,7 @@ test_expect_success '"checkout -" works after a rebase -i A' '
git checkout other &&
git rebase -i main &&
git checkout - &&
- test "z$(git symbolic-ref HEAD)" = "zrefs/heads/main"
+ test_cmp_symbolic_HEAD_ref main
'
test_expect_success '"checkout -" works after a rebase -i A B' '
@@ -144,7 +163,7 @@ test_expect_success '"checkout -" works after a rebase -i A B' '
git checkout other &&
git rebase main foodle &&
git checkout - &&
- test "z$(git symbolic-ref HEAD)" = "zrefs/heads/main"
+ test_cmp_symbolic_HEAD_ref main
'
test_done
diff --git a/t/t2060-switch.sh b/t/t2060-switch.sh
index ebb961b..5a7caf9 100755
--- a/t/t2060-switch.sh
+++ b/t/t2060-switch.sh
@@ -32,6 +32,17 @@ test_expect_success 'switch and detach' '
test_must_fail git symbolic-ref HEAD
'
+test_expect_success 'suggestion to detach' '
+ test_must_fail git switch main^{commit} 2>stderr &&
+ grep "try again with the --detach option" stderr
+'
+
+test_expect_success 'suggestion to detach is suppressed with advice.suggestDetachingHead=false' '
+ test_config advice.suggestDetachingHead false &&
+ test_must_fail git switch main^{commit} 2>stderr &&
+ ! grep "try again with the --detach option" stderr
+'
+
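Outside the suite, the same advice key can be toggled by users; a minimal illustration (the branch name is a placeholder):

    git config advice.suggestDetachingHead false
    git switch main^{commit}           # still refuses, now without the --detach hint
    git switch --detach main^{commit}  # the explicit way to end up on a detached HEAD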
test_expect_success 'switch and detach current branch' '
test_when_finished git switch main &&
git switch main &&
diff --git a/t/t2108-update-index-refresh-racy.sh b/t/t2108-update-index-refresh-racy.sh
new file mode 100755
index 0000000..bc5f288
--- /dev/null
+++ b/t/t2108-update-index-refresh-racy.sh
@@ -0,0 +1,64 @@
+#!/bin/sh
+
+test_description='update-index refresh tests related to racy timestamps'
+
+TEST_PASSES_SANITIZE_LEAK=true
+. ./test-lib.sh
+
+reset_files () {
+ echo content >file &&
+ echo content >other &&
+ test_set_magic_mtime file &&
+ test_set_magic_mtime other
+}
+
+update_assert_changed () {
+ test_set_magic_mtime .git/index &&
+ test_might_fail git update-index "$1" &&
+ ! test_is_magic_mtime .git/index
+}
+
+test_expect_success 'setup' '
+ reset_files &&
+ # we are calling reset_files() a couple of times during tests;
+ # test-tool chmtime does not change the ctime; to not weaken
+ # or even break our tests, disable ctime-checks entirely
+ git config core.trustctime false &&
+ git add file other &&
+ git commit -m "initial import"
+'
+
+test_expect_success '--refresh has no racy timestamps to fix' '
+ reset_files &&
+	# set the index time far enough into the future;
+ # it must be at least 3 seconds for VFAT
+ test_set_magic_mtime .git/index +60 &&
+ git update-index --refresh &&
+ test_is_magic_mtime .git/index +60
+'
+
+test_expect_success '--refresh should fix racy timestamp' '
+ reset_files &&
+ update_assert_changed --refresh
+'
+
+test_expect_success '--really-refresh should fix racy timestamp' '
+ reset_files &&
+ update_assert_changed --really-refresh
+'
+
+test_expect_success '--refresh should fix racy timestamp if other file needs update' '
+ reset_files &&
+ echo content2 >other &&
+ test_set_magic_mtime other &&
+ update_assert_changed --refresh
+'
+
+test_expect_success '--refresh should fix racy timestamp if racy file needs update' '
+ reset_files &&
+ echo content2 >file &&
+ test_set_magic_mtime file &&
+ update_assert_changed --refresh
+'
+
+test_done
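The helpers above (test_set_magic_mtime, test_is_magic_mtime, update_assert_changed) are test-library specific; a rough manual equivalent of what the assertions check, under the assumption that plain commands stand in for the helpers, is:

    git config core.trustctime false   # the tests disable ctime checks for the same reason
    touch tracked-file                 # give a tracked file a fresh, possibly racy, timestamp
    git update-index --refresh         # the tests assert .git/index is rewritten when racy entries exist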
diff --git a/t/t2200-add-update.sh b/t/t2200-add-update.sh
index acd3650..0c38f8e 100755
--- a/t/t2200-add-update.sh
+++ b/t/t2200-add-update.sh
@@ -14,7 +14,6 @@ only the updates to dir/sub.
Also tested are "git add -u" without limiting, and "git add -u"
without contents changes, and other conditions'
-TEST_PASSES_SANITIZE_LEAK=true
. ./test-lib.sh
test_expect_success setup '
@@ -41,20 +40,28 @@ test_expect_success update '
'
test_expect_success 'update noticed a removal' '
- test "$(git ls-files dir1/sub1)" = ""
+ git ls-files dir1/sub1 >out &&
+ test_must_be_empty out
'
test_expect_success 'update touched correct path' '
- test "$(git diff-files --name-status dir2/sub3)" = ""
+ git diff-files --name-status dir2/sub3 >out &&
+ test_must_be_empty out
'
test_expect_success 'update did not touch other tracked files' '
- test "$(git diff-files --name-status check)" = "M check" &&
- test "$(git diff-files --name-status top)" = "M top"
+ echo "M check" >expect &&
+ git diff-files --name-status check >actual &&
+ test_cmp expect actual &&
+
+ echo "M top" >expect &&
+ git diff-files --name-status top >actual &&
+ test_cmp expect actual
'
test_expect_success 'update did not touch untracked files' '
- test "$(git ls-files dir2/other)" = ""
+ git ls-files dir2/other >out &&
+ test_must_be_empty out
'
test_expect_success 'cache tree has not been corrupted' '
@@ -76,9 +83,8 @@ test_expect_success 'update from a subdirectory' '
'
test_expect_success 'change gets noticed' '
-
- test "$(git diff-files --name-status dir1)" = ""
-
+ git diff-files --name-status dir1 >out &&
+ test_must_be_empty out
'
test_expect_success 'non-qualified update in subdir updates from the root' '
@@ -103,7 +109,8 @@ test_expect_success 'replace a file with a symlink' '
test_expect_success 'add everything changed' '
git add -u &&
- test -z "$(git diff-files)"
+ git diff-files >out &&
+ test_must_be_empty out
'
@@ -111,7 +118,8 @@ test_expect_success 'touch and then add -u' '
touch check &&
git add -u &&
- test -z "$(git diff-files)"
+ git diff-files >out &&
+ test_must_be_empty out
'
@@ -119,7 +127,8 @@ test_expect_success 'touch and then add explicitly' '
touch check &&
git add check &&
- test -z "$(git diff-files)"
+ git diff-files >out &&
+ test_must_be_empty out
'
diff --git a/t/t2400-worktree-add.sh b/t/t2400-worktree-add.sh
index 37ad794..2f564d5 100755
--- a/t/t2400-worktree-add.sh
+++ b/t/t2400-worktree-add.sh
@@ -165,8 +165,62 @@ test_expect_success '"add" default branch of a bare repo' '
(
git clone --bare . bare2 &&
cd bare2 &&
- git worktree add ../there3 main
- )
+ git worktree add ../there3 main &&
+ cd ../there3 &&
+ # Simple check that a Git command does not
+ # immediately fail with the current setup
+ git status
+ ) &&
+ cat >expect <<-EOF &&
+ init.t
+ EOF
+ ls there3 >actual &&
+ test_cmp expect actual
+'
+
+test_expect_success '"add" to bare repo with worktree config' '
+ (
+ git clone --bare . bare3 &&
+ cd bare3 &&
+ git config extensions.worktreeconfig true &&
+
+ # Add config values that are erroneous to have in
+ # a config.worktree file outside of the main
+ # working tree, to check that Git filters them out
+ # when copying config during "git worktree add".
+ git config --worktree core.bare true &&
+ git config --worktree core.worktree "$(pwd)" &&
+
+ # We want to check that bogus.key is copied
+ git config --worktree bogus.key value &&
+ git config --unset core.bare &&
+ git worktree add ../there4 main &&
+ cd ../there4 &&
+
+ # Simple check that a Git command does not
+ # immediately fail with the current setup
+ git status &&
+ git worktree add --detach ../there5 &&
+ cd ../there5 &&
+ git status
+ ) &&
+
+ # the worktree has the arbitrary value copied.
+ test_cmp_config -C there4 value bogus.key &&
+ test_cmp_config -C there5 value bogus.key &&
+
+ # however, core.bare and core.worktree were removed.
+ test_must_fail git -C there4 config core.bare &&
+ test_must_fail git -C there4 config core.worktree &&
+
+ cat >expect <<-EOF &&
+ init.t
+ EOF
+
+ ls there4 >actual &&
+ test_cmp expect actual &&
+ ls there5 >actual &&
+ test_cmp expect actual
'
test_expect_success 'checkout with grafts' '
@@ -505,10 +559,7 @@ test_expect_success 'git worktree --no-guess-remote option overrides config' '
'
post_checkout_hook () {
- gitdir=${1:-.git}
- test_when_finished "rm -f $gitdir/hooks/post-checkout" &&
- mkdir -p $gitdir/hooks &&
- write_script $gitdir/hooks/post-checkout <<-\EOF
+ test_hook -C "$1" post-checkout <<-\EOF
{
echo $*
git rev-parse --git-dir --show-toplevel
diff --git a/t/t2402-worktree-list.sh b/t/t2402-worktree-list.sh
index c8a5a0a..79e0fce 100755
--- a/t/t2402-worktree-list.sh
+++ b/t/t2402-worktree-list.sh
@@ -64,6 +64,25 @@ test_expect_success '"list" all worktrees --porcelain' '
test_cmp expect actual
'
+test_expect_success '"list" all worktrees --porcelain -z' '
+ test_when_finished "rm -rf here _actual actual expect &&
+ git worktree prune" &&
+ printf "worktree %sQHEAD %sQbranch %sQQ" \
+ "$(git rev-parse --show-toplevel)" \
+ $(git rev-parse HEAD --symbolic-full-name HEAD) >expect &&
+ git worktree add --detach here main &&
+ printf "worktree %sQHEAD %sQdetachedQQ" \
+ "$(git -C here rev-parse --show-toplevel)" \
+ "$(git rev-parse HEAD)" >>expect &&
+ git worktree list --porcelain -z >_actual &&
+ nul_to_q <_actual >actual &&
+ test_cmp expect actual
+'
+
+test_expect_success '"list" -z fails without --porcelain' '
+ test_must_fail git worktree list -z
+'
+
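For reference, the NUL-terminated listing exercised above can be inspected like this (the tr pipeline is only for human-readable display; plain -z without --porcelain is rejected, as the second test asserts):

    git worktree list --porcelain -z | tr '\0' '\n'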
test_expect_success '"list" all worktrees with locked annotation' '
test_when_finished "rm -rf locked unlocked out && git worktree prune" &&
git worktree add --detach locked main &&
diff --git a/t/t3007-ls-files-recurse-submodules.sh b/t/t3007-ls-files-recurse-submodules.sh
index 4a08000..dd7770e 100755
--- a/t/t3007-ls-files-recurse-submodules.sh
+++ b/t/t3007-ls-files-recurse-submodules.sh
@@ -34,6 +34,23 @@ test_expect_success 'ls-files correctly outputs files in submodule' '
test_cmp expect actual
'
+test_expect_success '--stage' '
+ GITMODULES_HASH=$(git rev-parse HEAD:.gitmodules) &&
+ A_HASH=$(git rev-parse HEAD:a) &&
+ B_HASH=$(git rev-parse HEAD:b/b) &&
+ C_HASH=$(git -C submodule rev-parse HEAD:c) &&
+
+ cat >expect <<-EOF &&
+ 100644 $GITMODULES_HASH 0 .gitmodules
+ 100644 $A_HASH 0 a
+ 100644 $B_HASH 0 b/b
+ 100644 $C_HASH 0 submodule/c
+ EOF
+
+ git ls-files --stage --recurse-submodules >actual &&
+ test_cmp expect actual
+'
+
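A short illustration of the newly allowed combination (note that --stage is dropped from the incompatible-options list later in this file's diff):

    # mode, object id, stage number and path for the superproject and each initialized submodule
    git ls-files --stage --recurse-submodules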
test_expect_success 'ls-files correctly outputs files in submodule with -z' '
lf_to_nul >expect <<-\EOF &&
.gitmodules
@@ -292,7 +309,6 @@ test_incompatible_with_recurse_submodules () {
test_incompatible_with_recurse_submodules --deleted
test_incompatible_with_recurse_submodules --modified
test_incompatible_with_recurse_submodules --others
-test_incompatible_with_recurse_submodules --stage
test_incompatible_with_recurse_submodules --killed
test_incompatible_with_recurse_submodules --unmerged
diff --git a/t/t3101-ls-tree-dirname.sh b/t/t3101-ls-tree-dirname.sh
index 05fde64..217006d 100755
--- a/t/t3101-ls-tree-dirname.sh
+++ b/t/t3101-ls-tree-dirname.sh
@@ -201,31 +201,34 @@ EOF
test_cmp expected check
'
-test_expect_success 'ls-tree --name-only' '
- git ls-tree --name-only $tree >current &&
- cat >expected <<\EOF &&
-1.txt
-2.txt
-path0
-path1
-path2
-path3
-EOF
- test_output
-'
-
-test_expect_success 'ls-tree --name-only -r' '
- git ls-tree --name-only -r $tree >current &&
- cat >expected <<\EOF &&
-1.txt
-2.txt
-path0/a/b/c/1.txt
-path1/b/c/1.txt
-path2/1.txt
-path3/1.txt
-path3/2.txt
-EOF
- test_output
-'
+for opt in --name-only --name-status
+do
+ test_expect_success "ls-tree $opt" '
+ git ls-tree $opt $tree >current &&
+ cat >expected <<-\EOF &&
+ 1.txt
+ 2.txt
+ path0
+ path1
+ path2
+ path3
+ EOF
+ test_output
+ '
+
+ test_expect_success "ls-tree $opt -r" '
+ git ls-tree $opt -r $tree >current &&
+ cat >expected <<-\EOF &&
+ 1.txt
+ 2.txt
+ path0/a/b/c/1.txt
+ path1/b/c/1.txt
+ path2/1.txt
+ path3/1.txt
+ path3/2.txt
+ EOF
+ test_output
+ '
+done
test_done
diff --git a/t/t3103-ls-tree-misc.sh b/t/t3103-ls-tree-misc.sh
index d18ba1b..81c6343 100755
--- a/t/t3103-ls-tree-misc.sh
+++ b/t/t3103-ls-tree-misc.sh
@@ -23,4 +23,19 @@ test_expect_success 'ls-tree fails with non-zero exit code on broken tree' '
test_must_fail git ls-tree -r HEAD
'
+for opts in \
+ "--long --name-only" \
+ "--name-only --name-status" \
+ "--name-status --object-only" \
+ "--object-only --long"
+do
+ test_expect_success "usage: incompatible options: $opts" '
+ test_expect_code 129 git ls-tree $opts $tree
+ '
+
+ one_opt=$(echo "$opts" | cut -d' ' -f1)
+ test_expect_success "usage: incompatible options: $one_opt and --format" '
+ test_expect_code 129 git ls-tree $one_opt --format=fmt $tree
+ '
+done
test_done
diff --git a/t/t3104-ls-tree-format.sh b/t/t3104-ls-tree-format.sh
new file mode 100755
index 0000000..0769a93
--- /dev/null
+++ b/t/t3104-ls-tree-format.sh
@@ -0,0 +1,76 @@
+#!/bin/sh
+
+test_description='ls-tree --format'
+
+TEST_PASSES_SANITIZE_LEAK=true
+. ./test-lib.sh
+
+test_expect_success 'ls-tree --format usage' '
+ test_expect_code 129 git ls-tree --format=fmt -l HEAD &&
+ test_expect_code 129 git ls-tree --format=fmt --name-only HEAD &&
+ test_expect_code 129 git ls-tree --format=fmt --name-status HEAD
+'
+
+test_expect_success 'setup' '
+ mkdir dir &&
+ test_commit dir/sub-file &&
+ test_commit top-file
+'
+
+test_ls_tree_format () {
+ format=$1 &&
+ opts=$2 &&
+ fmtopts=$3 &&
+ shift 2 &&
+
+ test_expect_success "ls-tree '--format=<$format>' is like options '$opts $fmtopts'" '
+ git ls-tree $opts -r HEAD >expect &&
+ git ls-tree --format="$format" -r $fmtopts HEAD >actual &&
+ test_cmp expect actual
+ '
+
+ test_expect_success "ls-tree '--format=<$format>' on optimized v.s. non-optimized path" '
+ git ls-tree --format="$format" -r $fmtopts HEAD >expect &&
+ git ls-tree --format="> $format" -r $fmtopts HEAD >actual.raw &&
+ sed "s/^> //" >actual <actual.raw &&
+ test_cmp expect actual
+ '
+}
+
+test_ls_tree_format \
+ "%(objectmode) %(objecttype) %(objectname)%x09%(path)" \
+ ""
+
+test_ls_tree_format \
+ "%(objectmode) %(objecttype) %(objectname) %(objectsize:padded)%x09%(path)" \
+ "--long"
+
+test_ls_tree_format \
+ "%(path)" \
+ "--name-only"
+
+test_ls_tree_format \
+ "%(objectname)" \
+ "--object-only"
+
+test_ls_tree_format \
+ "%(objectname)" \
+ "--object-only --abbrev" \
+ "--abbrev"
+
+test_ls_tree_format \
+ "%(objectmode) %(objecttype) %(objectname)%x09%(path)" \
+ "-t" \
+ "-t"
+
+test_ls_tree_format \
+ "%(objectmode) %(objecttype) %(objectname)%x09%(path)" \
+ "--full-name" \
+ "--full-name"
+
+test_ls_tree_format \
+ "%(objectmode) %(objecttype) %(objectname)%x09%(path)" \
+ "--full-tree" \
+ "--full-tree"
+
+test_done
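The format atoms covered by t3104 compose freely; two illustrative invocations follow (the tree-ish and the field mix are arbitrary choices, not taken from the tests):

    # mode, type, abbreviated object name and path, tab separated
    git ls-tree -r --abbrev --format='%(objectmode) %(objecttype) %(objectname)%x09%(path)' HEAD
    # paths only; per the tests above this matches --name-only output
    git ls-tree -r --format='%(path)' HEAD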
diff --git a/t/t3200-branch.sh b/t/t3200-branch.sh
index 1bc3795..e12db59 100755
--- a/t/t3200-branch.sh
+++ b/t/t3200-branch.sh
@@ -42,6 +42,23 @@ test_expect_success 'git branch abc should create a branch' '
git branch abc && test_path_is_file .git/refs/heads/abc
'
+test_expect_success 'git branch abc should fail when abc exists' '
+ test_must_fail git branch abc
+'
+
+test_expect_success 'git branch --force abc should fail when abc is checked out' '
+ test_when_finished git switch main &&
+ git switch abc &&
+ test_must_fail git branch --force abc HEAD~1
+'
+
+test_expect_success 'git branch --force abc should succeed when abc exists' '
+ git rev-parse HEAD~1 >expect &&
+ git branch --force abc HEAD~1 &&
+ git rev-parse abc >actual &&
+ test_cmp expect actual
+'
+
test_expect_success 'git branch a/b/c should create a branch' '
git branch a/b/c && test_path_is_file .git/refs/heads/a/b/c
'
@@ -1022,13 +1039,27 @@ test_expect_success 'checkout -b with -l makes reflog when core.logAllRefUpdates
git rev-parse --verify gamma@{0}
'
-test_expect_success 'avoid ambiguous track' '
+test_expect_success 'avoid ambiguous track and advise' '
git config branch.autosetupmerge true &&
git config remote.ambi1.url lalala &&
git config remote.ambi1.fetch refs/heads/lalala:refs/heads/main &&
git config remote.ambi2.url lilili &&
git config remote.ambi2.fetch refs/heads/lilili:refs/heads/main &&
- test_must_fail git branch all1 main &&
+ cat <<-EOF >expected &&
+ fatal: not tracking: ambiguous information for ref '\''refs/heads/main'\''
+ hint: There are multiple remotes whose fetch refspecs map to the remote
+ hint: tracking ref '\''refs/heads/main'\'':
+ hint: ambi1
+ hint: ambi2
+ hint: ''
+ hint: This is typically a configuration error.
+ hint: ''
+ hint: To support setting up tracking branches, ensure that
+ hint: different remotes'\'' fetch refspecs map into different
+ hint: tracking namespaces.
+ EOF
+ test_must_fail git branch all1 main 2>actual &&
+ test_cmp expected actual &&
test -z "$(git config branch.all1.merge)"
'
diff --git a/t/t3207-branch-submodule.sh b/t/t3207-branch-submodule.sh
new file mode 100755
index 0000000..cfde6b2
--- /dev/null
+++ b/t/t3207-branch-submodule.sh
@@ -0,0 +1,328 @@
+#!/bin/sh
+
+test_description='git branch submodule tests'
+
+GIT_TEST_DEFAULT_INITIAL_BRANCH_NAME=main
+export GIT_TEST_DEFAULT_INITIAL_BRANCH_NAME
+
+. ./test-lib.sh
+. "$TEST_DIRECTORY"/lib-rebase.sh
+
+pwd=$(pwd)
+
+# Creates a clean test environment in "pwd" by copying the repo setup
+# from test_dirs.
+reset_test () {
+ rm -fr super &&
+ rm -fr sub-sub-upstream &&
+ rm -fr sub-upstream &&
+ cp -r test_dirs/* .
+}
+
+# Tests that the expected branch does not exist
+test_no_branch () {
+ DIR=$1 &&
+ BRANCH_NAME=$2 &&
+ test_must_fail git -C "$DIR" rev-parse "$BRANCH_NAME" 2>err &&
+ grep "ambiguous argument .$BRANCH_NAME." err
+}
+
+test_expect_success 'setup superproject and submodule' '
+ mkdir test_dirs &&
+ (
+ cd test_dirs &&
+ git init super &&
+ test_commit -C super foo &&
+ git init sub-sub-upstream &&
+ test_commit -C sub-sub-upstream foo &&
+ git init sub-upstream &&
+ # Submodule in a submodule
+ git -C sub-upstream submodule add "${pwd}/test_dirs/sub-sub-upstream" sub-sub &&
+ git -C sub-upstream commit -m "add submodule" &&
+ # Regular submodule
+ git -C super submodule add "${pwd}/test_dirs/sub-upstream" sub &&
+ # Submodule in a subdirectory
+ git -C super submodule add "${pwd}/test_dirs/sub-sub-upstream" second/sub &&
+ git -C super commit -m "add submodule" &&
+ git -C super config submodule.propagateBranches true &&
+ git -C super/sub submodule update --init
+ ) &&
+ reset_test
+'
+
+# Test the argument parsing
+test_expect_success '--recurse-submodules should create branches' '
+ test_when_finished "reset_test" &&
+ (
+ cd super &&
+ git branch --recurse-submodules branch-a &&
+ git rev-parse branch-a &&
+ git -C sub rev-parse branch-a &&
+ git -C sub/sub-sub rev-parse branch-a &&
+ git -C second/sub rev-parse branch-a
+ )
+'
+
+test_expect_success '--recurse-submodules should die if submodule.propagateBranches is false' '
+ test_when_finished "reset_test" &&
+ (
+ cd super &&
+ echo "fatal: branch with --recurse-submodules can only be used if submodule.propagateBranches is enabled" >expected &&
+ test_must_fail git -c submodule.propagateBranches=false branch --recurse-submodules branch-a 2>actual &&
+ test_cmp expected actual
+ )
+'
+
+test_expect_success '--recurse-submodules should fail when not creating branches' '
+ test_when_finished "reset_test" &&
+ (
+ cd super &&
+ git branch --recurse-submodules branch-a &&
+ echo "fatal: --recurse-submodules can only be used to create branches" >expected &&
+ test_must_fail git branch --recurse-submodules -D branch-a 2>actual &&
+ test_cmp expected actual &&
+ # Assert that the branches were not deleted
+ git rev-parse branch-a &&
+ git -C sub rev-parse branch-a
+ )
+'
+
+test_expect_success 'should respect submodule.recurse when creating branches' '
+ test_when_finished "reset_test" &&
+ (
+ cd super &&
+ git -c submodule.recurse=true branch branch-a &&
+ git rev-parse branch-a &&
+ git -C sub rev-parse branch-a
+ )
+'
+
+test_expect_success 'should ignore submodule.recurse when not creating branches' '
+ test_when_finished "reset_test" &&
+ (
+ cd super &&
+ git branch --recurse-submodules branch-a &&
+ git -c submodule.recurse=true branch -D branch-a &&
+ test_no_branch . branch-a &&
+ git -C sub rev-parse branch-a
+ )
+'
+
+# Test branch creation behavior
+test_expect_success 'should create branches based off commit id in superproject' '
+ test_when_finished "reset_test" &&
+ (
+ cd super &&
+ git branch --recurse-submodules branch-a &&
+ git checkout --recurse-submodules branch-a &&
+ git -C sub rev-parse HEAD >expected &&
+ # Move the tip of sub:branch-a so that it no longer matches the commit in super:branch-a
+ git -C sub checkout branch-a &&
+ test_commit -C sub bar &&
+ # Create a new branch-b branch with start-point=branch-a
+ git branch --recurse-submodules branch-b branch-a &&
+ git rev-parse branch-b &&
+ git -C sub rev-parse branch-b >actual &&
+		# Assert that the commit id of sub:branch-b matches super:branch-a and not sub:branch-a
+ test_cmp expected actual
+ )
+'
+
+test_expect_success 'should not create any branches if branch is not valid for all repos' '
+ test_when_finished "reset_test" &&
+ (
+ cd super &&
+ git -C sub branch branch-a &&
+ test_must_fail git branch --recurse-submodules branch-a 2>actual &&
+ test_no_branch . branch-a &&
+ grep "submodule .sub.: fatal: a branch named .branch-a. already exists" actual
+ )
+'
+
+test_expect_success 'should create branches if branch exists and --force is given' '
+ test_when_finished "reset_test" &&
+ (
+ cd super &&
+ git -C sub rev-parse HEAD >expected &&
+ test_commit -C sub baz &&
+ # branch-a in sub now points to a newer commit.
+ git -C sub branch branch-a HEAD &&
+ git -C sub rev-parse branch-a >actual-old-branch-a &&
+ git branch --recurse-submodules --force branch-a &&
+ git rev-parse branch-a &&
+ git -C sub rev-parse branch-a >actual-new-branch-a &&
+ test_cmp expected actual-new-branch-a &&
+ # assert that branch --force actually moved the sub
+ # branch
+ ! test_cmp expected actual-old-branch-a
+ )
+'
+
+test_expect_success 'should create branch when submodule is not in HEAD:.gitmodules' '
+ test_when_finished "reset_test" &&
+ (
+ cd super &&
+ git branch branch-a &&
+ git checkout -b branch-b &&
+ git submodule add ../sub-upstream sub2 &&
+ git -C sub2 submodule update --init &&
+ # branch-b now has a committed submodule not in branch-a
+ git commit -m "add second submodule" &&
+ git checkout branch-a &&
+ git branch --recurse-submodules branch-c branch-b &&
+ git checkout --recurse-submodules branch-c &&
+ git -C sub2 rev-parse branch-c &&
+ git -C sub2/sub-sub rev-parse branch-c
+ )
+'
+
+test_expect_success 'should not create branches in inactive submodules' '
+ test_when_finished "reset_test" &&
+ test_config -C super submodule.sub.active false &&
+ (
+ cd super &&
+ git branch --recurse-submodules branch-a &&
+ git rev-parse branch-a &&
+ test_no_branch sub branch-a
+ )
+'
+
+test_expect_success 'should set up tracking of local branches with track=always' '
+ test_when_finished "reset_test" &&
+ (
+ cd super &&
+ git -c branch.autoSetupMerge=always branch --recurse-submodules branch-a main &&
+ git -C sub rev-parse main &&
+ test_cmp_config -C sub . branch.branch-a.remote &&
+ test_cmp_config -C sub refs/heads/main branch.branch-a.merge
+ )
+'
+
+test_expect_success 'should set up tracking of local branches with explicit track' '
+ test_when_finished "reset_test" &&
+ (
+ cd super &&
+ git branch --track --recurse-submodules branch-a main &&
+ git -C sub rev-parse main &&
+ test_cmp_config -C sub . branch.branch-a.remote &&
+ test_cmp_config -C sub refs/heads/main branch.branch-a.merge
+ )
+'
+
+test_expect_success 'should not set up unnecessary tracking of local branches' '
+ test_when_finished "reset_test" &&
+ (
+ cd super &&
+ git branch --recurse-submodules branch-a main &&
+ git -C sub rev-parse main &&
+ test_cmp_config -C sub "" --default "" branch.branch-a.remote &&
+ test_cmp_config -C sub "" --default "" branch.branch-a.merge
+ )
+'
+
+reset_remote_test () {
+ rm -fr super-clone &&
+ reset_test
+}
+
+test_expect_success 'setup tests with remotes' '
+ (
+ cd test_dirs &&
+ (
+ cd super &&
+ git branch branch-a &&
+ git checkout -b branch-b &&
+ git submodule add ../sub-upstream sub2 &&
+ # branch-b now has a committed submodule not in branch-a
+ git commit -m "add second submodule"
+ ) &&
+ git clone --branch main --recurse-submodules super super-clone &&
+ git -C super-clone config submodule.propagateBranches true
+ ) &&
+ reset_remote_test
+'
+
+test_expect_success 'should get fatal error upon branch creation when submodule is not in .git/modules' '
+ test_when_finished "reset_remote_test" &&
+ (
+ cd super-clone &&
+ # This should succeed because super-clone has sub in .git/modules
+ git branch --recurse-submodules branch-a origin/branch-a &&
+ # This should fail because super-clone does not have sub2 .git/modules
+ test_must_fail git branch --recurse-submodules branch-b origin/branch-b 2>actual &&
+ grep "fatal: submodule .sub2.: unable to find submodule" actual &&
+ test_no_branch . branch-b &&
+ test_no_branch sub branch-b &&
+ # User can fix themselves by initializing the submodule
+ git checkout origin/branch-b &&
+ git submodule update --init --recursive &&
+ git branch --recurse-submodules branch-b origin/branch-b
+ )
+'
+
+test_expect_success 'should set up tracking of remote-tracking branches by default' '
+ test_when_finished "reset_remote_test" &&
+ (
+ cd super-clone &&
+ git branch --recurse-submodules branch-a origin/branch-a &&
+ test_cmp_config origin branch.branch-a.remote &&
+ test_cmp_config refs/heads/branch-a branch.branch-a.merge &&
+ # "origin/branch-a" does not exist for "sub", but it matches the refspec
+ # so tracking should be set up
+ test_cmp_config -C sub origin branch.branch-a.remote &&
+ test_cmp_config -C sub refs/heads/branch-a branch.branch-a.merge &&
+ test_cmp_config -C sub/sub-sub origin branch.branch-a.remote &&
+ test_cmp_config -C sub/sub-sub refs/heads/branch-a branch.branch-a.merge
+ )
+'
+
+test_expect_success 'should not fail when unable to set up tracking in submodule' '
+ test_when_finished "reset_remote_test" &&
+ (
+ cd super-clone &&
+ git remote rename origin ex-origin &&
+ git branch --recurse-submodules branch-a ex-origin/branch-a &&
+ test_cmp_config ex-origin branch.branch-a.remote &&
+ test_cmp_config refs/heads/branch-a branch.branch-a.merge &&
+ test_cmp_config -C sub "" --default "" branch.branch-a.remote &&
+ test_cmp_config -C sub "" --default "" branch.branch-a.merge
+ )
+'
+
+test_expect_success '--track=inherit should set up tracking correctly' '
+ test_when_finished "reset_remote_test" &&
+ (
+ cd super-clone &&
+ git branch --recurse-submodules branch-a origin/branch-a &&
+ # Set this manually instead of using branch --set-upstream-to
+ # to circumvent the "nonexistent upstream" check.
+ git -C sub config branch.branch-a.remote origin &&
+ git -C sub config branch.branch-a.merge refs/heads/sub-branch-a &&
+ git -C sub/sub-sub config branch.branch-a.remote other &&
+ git -C sub/sub-sub config branch.branch-a.merge refs/heads/sub-sub-branch-a &&
+
+ git branch --recurse-submodules --track=inherit branch-b branch-a &&
+ test_cmp_config origin branch.branch-b.remote &&
+ test_cmp_config refs/heads/branch-a branch.branch-b.merge &&
+ test_cmp_config -C sub origin branch.branch-b.remote &&
+ test_cmp_config -C sub refs/heads/sub-branch-a branch.branch-b.merge &&
+ test_cmp_config -C sub/sub-sub other branch.branch-b.remote &&
+ test_cmp_config -C sub/sub-sub refs/heads/sub-sub-branch-a branch.branch-b.merge
+ )
+'
+
+test_expect_success '--no-track should not set up tracking' '
+ test_when_finished "reset_remote_test" &&
+ (
+ cd super-clone &&
+ git branch --recurse-submodules --no-track branch-a origin/branch-a &&
+ test_cmp_config "" --default "" branch.branch-a.remote &&
+ test_cmp_config "" --default "" branch.branch-a.merge &&
+ test_cmp_config -C sub "" --default "" branch.branch-a.remote &&
+ test_cmp_config -C sub "" --default "" branch.branch-a.merge &&
+ test_cmp_config -C sub/sub-sub "" --default "" branch.branch-a.remote &&
+ test_cmp_config -C sub/sub-sub "" --default "" branch.branch-a.merge
+ )
+'
+
+test_done
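In user terms, the workflow t3207 pins down is roughly the following (branch and remote names are illustrative, and submodule.propagateBranches must be enabled as in the setup above):

    git config submodule.propagateBranches true
    git branch --recurse-submodules topic origin/topic   # create topic in the superproject and each active submodule
    git checkout --recurse-submodules topic              # switch everything onto the new branch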
diff --git a/t/t3302-notes-index-expensive.sh b/t/t3302-notes-index-expensive.sh
index bc9d8ee..bb5fea0 100755
--- a/t/t3302-notes-index-expensive.sh
+++ b/t/t3302-notes-index-expensive.sh
@@ -8,7 +8,6 @@ test_description='Test commit notes index (expensive!)'
GIT_TEST_DEFAULT_INITIAL_BRANCH_NAME=main
export GIT_TEST_DEFAULT_INITIAL_BRANCH_NAME
-TEST_PASSES_SANITIZE_LEAK=true
. ./test-lib.sh
create_repo () {
@@ -65,7 +64,8 @@ create_repo () {
test_notes () {
count=$1 &&
git config core.notesRef refs/notes/commits &&
- git log | grep "^ " >output &&
+ git log >tmp &&
+ grep "^ " tmp >output &&
i=$count &&
while test $i -gt 0
do
@@ -90,7 +90,7 @@ write_script time_notes <<\EOF
unset GIT_NOTES_REF
;;
esac
- git log
+ git log || exit $?
i=$(($i+1))
done >/dev/null
EOF
diff --git a/t/t3303-notes-subtrees.sh b/t/t3303-notes-subtrees.sh
index 7e0a896..eac1937 100755
--- a/t/t3303-notes-subtrees.sh
+++ b/t/t3303-notes-subtrees.sh
@@ -5,7 +5,6 @@ test_description='Test commit notes organized in subtrees'
GIT_TEST_DEFAULT_INITIAL_BRANCH_NAME=main
export GIT_TEST_DEFAULT_INITIAL_BRANCH_NAME
-TEST_PASSES_SANITIZE_LEAK=true
. ./test-lib.sh
number_of_commits=100
@@ -79,7 +78,7 @@ test_sha1_based () {
(
start_note_commit &&
nr=$number_of_commits &&
- git rev-list refs/heads/main |
+ git rev-list refs/heads/main >out &&
while read sha1; do
note_path=$(echo "$sha1" | sed "$1")
cat <<INPUT_END &&
@@ -91,9 +90,9 @@ EOF
INPUT_END
nr=$(($nr-1))
- done
- ) |
- git fast-import --quiet
+ done <out
+ ) >gfi &&
+ git fast-import --quiet <gfi
}
test_expect_success 'test notes in 2/38-fanout' 'test_sha1_based "s|^..|&/|"'
diff --git a/t/t3305-notes-fanout.sh b/t/t3305-notes-fanout.sh
index 1f59648..9976d78 100755
--- a/t/t3305-notes-fanout.sh
+++ b/t/t3305-notes-fanout.sh
@@ -2,7 +2,6 @@
test_description='Test that adding/removing many notes triggers automatic fanout restructuring'
-TEST_PASSES_SANITIZE_LEAK=true
. ./test-lib.sh
path_has_fanout() {
@@ -24,7 +23,7 @@ touched_one_note_with_fanout() {
all_notes_have_fanout() {
notes_commit=$1 &&
fanout=$2 &&
- git ls-tree -r --name-only $notes_commit 2>/dev/null |
+ git ls-tree -r --name-only $notes_commit |
while read path
do
path_has_fanout $path $fanout || return 1
@@ -51,8 +50,9 @@ test_expect_success 'creating many notes with git-notes' '
done
'
-test_expect_success 'many notes created correctly with git-notes' '
- git log | grep "^ " > output &&
+test_expect_success !SANITIZE_LEAK 'many notes created correctly with git-notes' '
+ git log >output.raw &&
+ grep "^ " output.raw >output &&
i=$num_notes &&
while test $i -gt 0
do
@@ -91,13 +91,13 @@ test_expect_success 'stable fanout 0 is followed by stable fanout 1' '
test_expect_success 'deleting most notes with git-notes' '
remove_notes=285 &&
i=0 &&
- git rev-list HEAD |
+ git rev-list HEAD >revs &&
while test $i -lt $remove_notes && read sha1
do
i=$(($i + 1)) &&
test_tick &&
- git notes remove "$sha1" 2>/dev/null || return 1
- done
+ git notes remove "$sha1" || return 1
+ done <revs
'
test_expect_success 'most notes deleted correctly with git-notes' '
diff --git a/t/t3400-rebase.sh b/t/t3400-rebase.sh
index 71b1735..d5a8ee3 100755
--- a/t/t3400-rebase.sh
+++ b/t/t3400-rebase.sh
@@ -18,10 +18,7 @@ GIT_AUTHOR_EMAIL=bogus@email@address
export GIT_AUTHOR_NAME GIT_AUTHOR_EMAIL
test_expect_success 'prepare repository with topic branches' '
- git config core.logAllRefUpdates true &&
- echo First >A &&
- git update-index --add A &&
- git commit -m "Add A." &&
+ test_commit "Add A." A First First &&
git checkout -b force-3way &&
echo Dummy >Y &&
git update-index --add Y &&
@@ -32,9 +29,7 @@ test_expect_success 'prepare repository with topic branches' '
git mv A D/A &&
git commit -m "Move A." &&
git checkout -b my-topic-branch main &&
- echo Second >B &&
- git update-index --add B &&
- git commit -m "Add B." &&
+ test_commit "Add B." B Second Second &&
git checkout -f main &&
echo Third >>A &&
git update-index A &&
@@ -399,6 +394,15 @@ test_expect_success 'switch to branch not checked out' '
git rebase main other
'
+test_expect_success 'switch to non-branch detaches HEAD' '
+ git checkout main &&
+ old_main=$(git rev-parse HEAD) &&
+ git rebase First Second^0 &&
+ test_cmp_rev HEAD Second &&
+ test_cmp_rev main $old_main &&
+ test_must_fail git symbolic-ref HEAD
+'
+
test_expect_success 'refuse to switch to branch checked out elsewhere' '
git checkout main &&
git worktree add wt &&
diff --git a/t/t3404-rebase-interactive.sh b/t/t3404-rebase-interactive.sh
index a38f2da..f31afd4 100755
--- a/t/t3404-rebase-interactive.sh
+++ b/t/t3404-rebase-interactive.sh
@@ -619,9 +619,7 @@ test_expect_success 'rebase a detached HEAD' '
'
test_expect_success 'rebase a commit violating pre-commit' '
-
- mkdir -p .git/hooks &&
- write_script .git/hooks/pre-commit <<-\EOF &&
+ test_hook pre-commit <<-\EOF &&
test -z "$(git diff --cached --check)"
EOF
echo "monde! " >> file1 &&
@@ -636,8 +634,6 @@ test_expect_success 'rebase a commit violating pre-commit' '
'
test_expect_success 'rebase with a file named HEAD in worktree' '
-
- rm -fr .git/hooks &&
git reset --hard &&
git checkout -b branch3 A &&
@@ -1688,10 +1684,8 @@ test_expect_success 'valid author header when author contains single quote' '
'
test_expect_success 'post-commit hook is called' '
- test_when_finished "rm -f .git/hooks/post-commit" &&
>actual &&
- mkdir -p .git/hooks &&
- write_script .git/hooks/post-commit <<-\EOS &&
+ test_hook post-commit <<-\EOS &&
git rev-parse HEAD >>actual
EOS
(
diff --git a/t/t3406-rebase-message.sh b/t/t3406-rebase-message.sh
index 77a313f..d17b450 100755
--- a/t/t3406-rebase-message.sh
+++ b/t/t3406-rebase-message.sh
@@ -105,6 +105,29 @@ test_expect_success 'GIT_REFLOG_ACTION' '
test_cmp expect actual
'
+test_expect_success 'rebase --apply reflog' '
+ git checkout -b reflog-apply start &&
+ old_head_reflog="$(git log -g --format=%gs -1 HEAD)" &&
+
+ git rebase --apply Y &&
+
+ git log -g --format=%gs -4 HEAD >actual &&
+ cat >expect <<-EOF &&
+ rebase finished: returning to refs/heads/reflog-apply
+ rebase: Z
+ rebase: checkout Y
+ $old_head_reflog
+ EOF
+ test_cmp expect actual &&
+
+ git log -g --format=%gs -2 reflog-apply >actual &&
+ cat >expect <<-EOF &&
+ rebase finished: refs/heads/reflog-apply onto $(git rev-parse Y)
+ branch: Created from start
+ EOF
+ test_cmp expect actual
+'
+
test_expect_success 'rebase -i onto unrelated history' '
git init unrelated &&
test_commit -C unrelated 1 &&
diff --git a/t/t3412-rebase-root.sh b/t/t3412-rebase-root.sh
index 19c6f4a..58371d8 100755
--- a/t/t3412-rebase-root.sh
+++ b/t/t3412-rebase-root.sh
@@ -11,7 +11,7 @@ export GIT_TEST_DEFAULT_INITIAL_BRANCH_NAME
log_with_names () {
git rev-list --topo-order --parents --pretty="tformat:%s" HEAD |
- git name-rev --stdin --name-only --refs=refs/heads/$1
+ git name-rev --annotate-stdin --name-only --refs=refs/heads/$1
}
@@ -31,12 +31,9 @@ test_expect_success 'rebase --root fails with too many args' '
'
test_expect_success 'setup pre-rebase hook' '
- mkdir -p .git/hooks &&
- cat >.git/hooks/pre-rebase <<EOF &&
-#!$SHELL_PATH
-echo "\$1,\$2" >.git/PRE-REBASE-INPUT
-EOF
- chmod +x .git/hooks/pre-rebase
+ test_hook --setup pre-rebase <<-\EOF
+ echo "$1,$2" >.git/PRE-REBASE-INPUT
+ EOF
'
cat > expect <<EOF
4
@@ -141,12 +138,9 @@ commit work7~5
EOF
test_expect_success 'setup pre-rebase hook that fails' '
- mkdir -p .git/hooks &&
- cat >.git/hooks/pre-rebase <<EOF &&
-#!$SHELL_PATH
-false
-EOF
- chmod +x .git/hooks/pre-rebase
+ test_hook --setup --clobber pre-rebase <<-\EOF
+ false
+ EOF
'
test_expect_success 'pre-rebase hook stops rebase' '
diff --git a/t/t3413-rebase-hook.sh b/t/t3413-rebase-hook.sh
index b4acb3b..9fab0d7 100755
--- a/t/t3413-rebase-hook.sh
+++ b/t/t3413-rebase-hook.sh
@@ -41,12 +41,9 @@ test_expect_success 'rebase -i' '
'
test_expect_success 'setup pre-rebase hook' '
- mkdir -p .git/hooks &&
- cat >.git/hooks/pre-rebase <<EOF &&
-#!$SHELL_PATH
-echo "\$1,\$2" >.git/PRE-REBASE-INPUT
-EOF
- chmod +x .git/hooks/pre-rebase
+ test_hook --setup pre-rebase <<-\EOF
+ echo "$1,$2" >.git/PRE-REBASE-INPUT
+ EOF
'
test_expect_success 'pre-rebase hook gets correct input (1)' '
@@ -102,12 +99,9 @@ test_expect_success 'pre-rebase hook gets correct input (6)' '
'
test_expect_success 'setup pre-rebase hook that fails' '
- mkdir -p .git/hooks &&
- cat >.git/hooks/pre-rebase <<EOF &&
-#!$SHELL_PATH
-false
-EOF
- chmod +x .git/hooks/pre-rebase
+ test_hook --setup --clobber pre-rebase <<-\EOF
+ false
+ EOF
'
test_expect_success 'pre-rebase hook stops rebase (1)' '
diff --git a/t/t3418-rebase-continue.sh b/t/t3418-rebase-continue.sh
index 22eca73..130e2f9 100755
--- a/t/t3418-rebase-continue.sh
+++ b/t/t3418-rebase-continue.sh
@@ -308,4 +308,30 @@ test_expect_success 'there is no --no-reschedule-failed-exec in an ongoing rebas
test_expect_code 129 git rebase --edit-todo --no-reschedule-failed-exec
'
+test_orig_head_helper () {
+ test_when_finished 'git rebase --abort &&
+ git checkout topic &&
+ git reset --hard commit-new-file-F2-on-topic-branch' &&
+ git update-ref -d ORIG_HEAD &&
+ test_must_fail git rebase "$@" &&
+ test_cmp_rev ORIG_HEAD commit-new-file-F2-on-topic-branch
+}
+
+test_orig_head () {
+ type=$1
+ test_expect_success "rebase $type sets ORIG_HEAD correctly" '
+ git checkout topic &&
+ git reset --hard commit-new-file-F2-on-topic-branch &&
+ test_orig_head_helper $type main
+ '
+
+ test_expect_success "rebase $type <upstream> <branch> sets ORIG_HEAD correctly" '
+ git checkout main &&
+ test_orig_head_helper $type main topic
+ '
+}
+
+test_orig_head --apply
+test_orig_head --merge
+
test_done
diff --git a/t/t3430-rebase-merges.sh b/t/t3430-rebase-merges.sh
index 43c82d9..f351701 100755
--- a/t/t3430-rebase-merges.sh
+++ b/t/t3430-rebase-merges.sh
@@ -292,9 +292,9 @@ test_expect_success 'post-rewrite hook and fixups work for merges' '
git commit --fixup HEAD same2.t &&
fixup="$(git rev-parse HEAD)" &&
- mkdir -p .git/hooks &&
- test_when_finished "rm .git/hooks/post-rewrite" &&
- echo "cat >actual" | write_script .git/hooks/post-rewrite &&
+ test_hook post-rewrite <<-\EOF &&
+ cat >actual
+ EOF
test_tick &&
git rebase -i --autosquash -r HEAD^^^ &&
diff --git a/t/t3701-add-interactive.sh b/t/t3701-add-interactive.sh
index 2077146..94537a6 100755
--- a/t/t3701-add-interactive.sh
+++ b/t/t3701-add-interactive.sh
@@ -326,7 +326,9 @@ test_expect_success 'correct message when there is nothing to do' '
test_expect_success 'setup again' '
git reset --hard &&
test_chmod +x file &&
- echo content >>file
+ echo content >>file &&
+	test_write_lines A B C D >file2 &&
+ git add file2
'
# Write the patch file with a new line at the top and bottom
@@ -341,13 +343,27 @@ test_expect_success 'setup patch' '
content
+lastline
\ No newline at end of file
+ diff --git a/file2 b/file2
+ index 8422d40..35b930a 100644
+ --- a/file2
+ +++ b/file2
+ @@ -1,4 +1,5 @@
+ -A
+ +Z
+ B
+ +Y
+ C
+ -D
+ +X
EOF
'
# Expected output, diff is similar to the patch but w/ diff at the top
test_expect_success 'setup expected' '
echo diff --git a/file b/file >expected &&
- cat patch |sed "/^index/s/ 100644/ 100755/" >>expected &&
+ sed -e "/^index 180b47c/s/ 100644/ 100755/" \
+ -e /1,5/s//1,4/ \
+ -e /Y/d patch >>expected &&
cat >expected-output <<-\EOF
--- a/file
+++ b/file
@@ -366,6 +382,28 @@ test_expect_success 'setup expected' '
content
+lastline
\ No newline at end of file
+ --- a/file2
+ +++ b/file2
+ @@ -1,4 +1,5 @@
+ -A
+ +Z
+ B
+ +Y
+ C
+ -D
+ +X
+ @@ -1,2 +1,2 @@
+ -A
+ +Z
+ B
+ @@ -2,2 +2,3 @@
+ B
+ +Y
+ C
+ @@ -3,2 +4,2 @@
+ C
+ -D
+ +X
EOF
'
@@ -373,9 +411,9 @@ test_expect_success 'setup expected' '
test_expect_success 'add first line works' '
git commit -am "clear local changes" &&
git apply patch &&
- printf "%s\n" s y y | git add -p file 2>error |
- sed -n -e "s/^([1-2]\/[1-2]) Stage this hunk[^@]*\(@@ .*\)/\1/" \
-	       -e "/^[-+@ \\\\]/p" >output &&
+ test_write_lines s y y s y n y | git add -p 2>error >raw-output &&
+ sed -n -e "s/^([1-9]\/[1-9]) Stage this hunk[^@]*\(@@ .*\)/\1/" \
+	       -e "/^[-+@ \\\\]/p" raw-output >output &&
test_must_be_empty error &&
git diff --cached >diff &&
diff_cmp expected diff &&
diff --git a/t/t3705-add-sparse-checkout.sh b/t/t3705-add-sparse-checkout.sh
index 81f3384..9560904 100755
--- a/t/t3705-add-sparse-checkout.sh
+++ b/t/t3705-add-sparse-checkout.sh
@@ -19,6 +19,7 @@ setup_sparse_entry () {
fi &&
git add sparse_entry &&
git update-index --skip-worktree sparse_entry &&
+ git config core.sparseCheckout false &&
git commit --allow-empty -m "ensure sparse_entry exists at HEAD" &&
SPARSE_ENTRY_BLOB=$(git rev-parse :sparse_entry)
}
@@ -126,6 +127,7 @@ test_expect_success 'git add --chmod does not update sparse entries' '
'
test_expect_success 'git add --renormalize does not update sparse entries' '
+ test_when_finished rm .gitattributes &&
test_config core.autocrlf false &&
setup_sparse_entry "LINEONE\r\nLINETWO\r\n" &&
echo "sparse_entry text=auto" >.gitattributes &&
diff --git a/t/t3903-stash.sh b/t/t3903-stash.sh
index 686747e..4abbc8f 100755
--- a/t/t3903-stash.sh
+++ b/t/t3903-stash.sh
@@ -41,7 +41,7 @@ diff_cmp () {
rm -f "$1.compare" "$2.compare"
}
-test_expect_success 'stash some dirty working directory' '
+setup_stash() {
echo 1 >file &&
git add file &&
echo unrelated >other-file &&
@@ -55,6 +55,10 @@ test_expect_success 'stash some dirty working directory' '
git stash &&
git diff-files --quiet &&
git diff-index --cached --quiet HEAD
+}
+
+test_expect_success 'stash some dirty working directory' '
+ setup_stash
'
cat >expect <<EOF
@@ -185,6 +189,43 @@ test_expect_success 'drop middle stash by index' '
test 1 = $(git show HEAD:file)
'
+test_expect_success 'drop stash reflog updates refs/stash' '
+ git reset --hard &&
+ git rev-parse refs/stash >expect &&
+ echo 9 >file &&
+ git stash &&
+ git stash drop stash@{0} &&
+ git rev-parse refs/stash >actual &&
+ test_cmp expect actual
+'
+
+test_expect_success REFFILES 'drop stash reflog updates refs/stash with rewrite' '
+ git init repo &&
+ (
+ cd repo &&
+ setup_stash
+ ) &&
+ echo 9 >repo/file &&
+
+ old_oid="$(git -C repo rev-parse stash@{0})" &&
+ git -C repo stash &&
+ new_oid="$(git -C repo rev-parse stash@{0})" &&
+
+ cat >expect <<-EOF &&
+ $(test_oid zero) $old_oid
+ $old_oid $new_oid
+ EOF
+ cut -d" " -f1-2 repo/.git/logs/refs/stash >actual &&
+ test_cmp expect actual &&
+
+ git -C repo stash drop stash@{1} &&
+ cut -d" " -f1-2 repo/.git/logs/refs/stash >actual &&
+ cat >expect <<-EOF &&
+ $(test_oid zero) $new_oid
+ EOF
+ test_cmp expect actual
+'
+
test_expect_success 'stash pop' '
git reset --hard &&
git stash pop &&
@@ -261,6 +302,18 @@ test_expect_success 'apply -q is quiet' '
test_must_be_empty output.out
'
+test_expect_success 'apply --index -q is quiet' '
+ # Added file, deleted file, modified file all staged for commit
+ echo foo >new-file &&
+ echo test >file &&
+ git add new-file file &&
+ git rm other-file &&
+
+ git stash &&
+ git stash apply --index -q >output.out 2>&1 &&
+ test_must_be_empty output.out
+'
+
test_expect_success 'save -q is quiet' '
git stash save --quiet >output.out 2>&1 &&
test_must_be_empty output.out
@@ -291,6 +344,27 @@ test_expect_success 'drop -q is quiet' '
test_must_be_empty output.out
'
+test_expect_success 'stash push -q --staged refreshes the index' '
+ git reset --hard &&
+ echo test >file &&
+ git add file &&
+ git stash push -q --staged &&
+ git diff-files >output.out &&
+ test_must_be_empty output.out
+'
+
+test_expect_success 'stash apply -q --index refreshes the index' '
+ echo test >other-file &&
+ git add other-file &&
+ echo another-change >other-file &&
+ git diff-files >expect &&
+ git stash &&
+
+ git stash apply -q --index &&
+ git diff-files >actual &&
+ test_cmp expect actual
+'
+
test_expect_success 'stash -k' '
echo bar3 >file &&
echo bar4 >file2 &&
@@ -390,10 +464,11 @@ test_expect_success SYMLINKS 'stash file to symlink' '
rm file &&
ln -s file2 file &&
git stash save "file to symlink" &&
- test -f file &&
+ test_path_is_file_not_symlink file &&
test bar = "$(cat file)" &&
git stash apply &&
- case "$(ls -l file)" in *" file -> file2") :;; *) false;; esac
+ test_path_is_symlink file &&
+ test "$(test_readlink file)" = file2
'
test_expect_success SYMLINKS 'stash file to symlink (stage rm)' '
@@ -401,10 +476,11 @@ test_expect_success SYMLINKS 'stash file to symlink (stage rm)' '
git rm file &&
ln -s file2 file &&
git stash save "file to symlink (stage rm)" &&
- test -f file &&
+ test_path_is_file_not_symlink file &&
test bar = "$(cat file)" &&
git stash apply &&
- case "$(ls -l file)" in *" file -> file2") :;; *) false;; esac
+ test_path_is_symlink file &&
+ test "$(test_readlink file)" = file2
'
test_expect_success SYMLINKS 'stash file to symlink (full stage)' '
@@ -413,10 +489,11 @@ test_expect_success SYMLINKS 'stash file to symlink (full stage)' '
ln -s file2 file &&
git add file &&
git stash save "file to symlink (full stage)" &&
- test -f file &&
+ test_path_is_file_not_symlink file &&
test bar = "$(cat file)" &&
git stash apply &&
- case "$(ls -l file)" in *" file -> file2") :;; *) false;; esac
+ test_path_is_symlink file &&
+ test "$(test_readlink file)" = file2
'
# This test creates a commit with a symlink used for the following tests
@@ -487,7 +564,7 @@ test_expect_failure 'stash directory to file' '
rm -fr dir &&
echo bar >dir &&
git stash save "directory to file" &&
- test -d dir &&
+ test_path_is_dir dir &&
test foo = "$(cat dir/file)" &&
test_must_fail git stash apply &&
test bar = "$(cat dir)" &&
@@ -500,10 +577,10 @@ test_expect_failure 'stash file to directory' '
mkdir file &&
echo foo >file/file &&
git stash save "file to directory" &&
- test -f file &&
+ test_path_is_file file &&
test bar = "$(cat file)" &&
git stash apply &&
- test -f file/file &&
+ test_path_is_file file/file &&
test foo = "$(cat file/file)"
'
@@ -1042,6 +1119,17 @@ test_expect_success 'create stores correct message' '
test_cmp expect actual
'
+test_expect_success 'create when branch name has /' '
+ test_when_finished "git checkout main" &&
+ git checkout -b some/topic &&
+ >foo &&
+ git add foo &&
+ STASH_ID=$(git stash create "create test message") &&
+ echo "On some/topic: create test message" >expect &&
+ git show --pretty=%s -s ${STASH_ID} >actual &&
+ test_cmp expect actual
+'
+
test_expect_success 'create with multiple arguments for the message' '
>foo &&
git add foo &&
@@ -1272,7 +1360,6 @@ test_expect_success 'stash works when user.name and user.email are not set' '
>2 &&
git add 2 &&
test_config user.useconfigonly true &&
- test_config stash.usebuiltin true &&
(
sane_unset GIT_AUTHOR_NAME &&
sane_unset GIT_AUTHOR_EMAIL &&
@@ -1323,20 +1410,6 @@ test_expect_success 'stash handles skip-worktree entries nicely' '
git rev-parse --verify refs/stash:A.t
'
-test_expect_success 'stash -c stash.useBuiltin=false warning ' '
- expected="stash.useBuiltin support has been removed" &&
-
- git -c stash.useBuiltin=false stash 2>err &&
- test_i18ngrep "$expected" err &&
- env GIT_TEST_STASH_USE_BUILTIN=false git stash 2>err &&
- test_i18ngrep "$expected" err &&
-
- git -c stash.useBuiltin=true stash 2>err &&
- test_must_be_empty err &&
- env GIT_TEST_STASH_USE_BUILTIN=true git stash 2>err &&
- test_must_be_empty err
-'
-
test_expect_success 'git stash succeeds despite directory/file change' '
test_create_repo directory_file_switch_v1 &&
(
diff --git a/t/t4015-diff-whitespace.sh b/t/t4015-diff-whitespace.sh
index 9babf13..f3e20dd 100755
--- a/t/t4015-diff-whitespace.sh
+++ b/t/t4015-diff-whitespace.sh
@@ -6,6 +6,8 @@
test_description='Test special whitespace in diff engine.
'
+
+TEST_PASSES_SANITIZE_LEAK=true
. ./test-lib.sh
. "$TEST_DIRECTORY"/lib-diff.sh
@@ -1622,7 +1624,7 @@ test_expect_success 'cmd option assumes configured colored-moved' '
test_cmp expected actual
'
-test_expect_success 'no effect from --color-moved with --word-diff' '
+test_expect_success 'no effect on diff from --color-moved with --word-diff' '
cat <<-\EOF >text.txt &&
Lorem Ipsum is simply dummy text of the printing and typesetting industry.
EOF
@@ -1636,6 +1638,12 @@ test_expect_success 'no effect from --color-moved with --word-diff' '
test_cmp expect actual
'
+test_expect_success !SANITIZE_LEAK 'no effect on show from --color-moved with --word-diff' '
+ git show --color-moved --word-diff >actual &&
+ git show --word-diff >expect &&
+ test_cmp expect actual
+'
+
test_expect_success 'set up whitespace tests' '
git reset --hard &&
# Note that these lines have no leading or trailing whitespace.
@@ -2016,7 +2024,7 @@ test_expect_success '--color-moved rewinds for MIN_ALNUM_COUNT' '
test_cmp expected actual
'
-test_expect_success 'move detection with submodules' '
+test_expect_success !SANITIZE_LEAK 'move detection with submodules' '
test_create_repo bananas &&
echo ripe >bananas/recipe &&
git -C bananas add recipe &&
diff --git a/t/t4018/kotlin-class b/t/t4018/kotlin-class
new file mode 100644
index 0000000..bb864f2
--- /dev/null
+++ b/t/t4018/kotlin-class
@@ -0,0 +1,5 @@
+class RIGHT {
+ //comment
+ //comment
+ return ChangeMe
+}
diff --git a/t/t4018/kotlin-enum-class b/t/t4018/kotlin-enum-class
new file mode 100644
index 0000000..8885f90
--- /dev/null
+++ b/t/t4018/kotlin-enum-class
@@ -0,0 +1,5 @@
+enum class RIGHT{
+ // Left
+ // a comment
+ ChangeMe
+}
diff --git a/t/t4018/kotlin-fun b/t/t4018/kotlin-fun
new file mode 100644
index 0000000..2a60280
--- /dev/null
+++ b/t/t4018/kotlin-fun
@@ -0,0 +1,5 @@
+fun RIGHT(){
+ //a comment
+ //b comment
+ return ChangeMe()
+}
diff --git a/t/t4018/kotlin-inheritace-class b/t/t4018/kotlin-inheritace-class
new file mode 100644
index 0000000..77376c1
--- /dev/null
+++ b/t/t4018/kotlin-inheritace-class
@@ -0,0 +1,5 @@
+open class RIGHT{
+ // a comment
+ // b comment
+ // ChangeMe
+}
diff --git a/t/t4018/kotlin-inline-class b/t/t4018/kotlin-inline-class
new file mode 100644
index 0000000..7bf46dd
--- /dev/null
+++ b/t/t4018/kotlin-inline-class
@@ -0,0 +1,5 @@
+value class RIGHT(Args){
+ // a comment
+ // b comment
+ ChangeMe
+}
diff --git a/t/t4018/kotlin-interface b/t/t4018/kotlin-interface
new file mode 100644
index 0000000..f686ba7
--- /dev/null
+++ b/t/t4018/kotlin-interface
@@ -0,0 +1,5 @@
+interface RIGHT{
+ //another comment
+ //another comment
+ //ChangeMe
+}
diff --git a/t/t4018/kotlin-nested-fun b/t/t4018/kotlin-nested-fun
new file mode 100644
index 0000000..1218685
--- /dev/null
+++ b/t/t4018/kotlin-nested-fun
@@ -0,0 +1,9 @@
+class LEFT{
+ class CENTER{
+ fun RIGHT( a:Int){
+ //comment
+ //comment
+ ChangeMe
+ }
+ }
+}
diff --git a/t/t4018/kotlin-public-class b/t/t4018/kotlin-public-class
new file mode 100644
index 0000000..9433fcc
--- /dev/null
+++ b/t/t4018/kotlin-public-class
@@ -0,0 +1,5 @@
+public class RIGHT{
+ //comment1
+ //comment2
+ ChangeMe
+}
diff --git a/t/t4018/kotlin-sealed-class b/t/t4018/kotlin-sealed-class
new file mode 100644
index 0000000..0efa4a4
--- /dev/null
+++ b/t/t4018/kotlin-sealed-class
@@ -0,0 +1,5 @@
+sealed class RIGHT {
+ // a comment
+ // b comment
+ ChangeMe
+}
diff --git a/t/t4020-diff-external.sh b/t/t4020-diff-external.sh
index 54bb8ef..1219f8b 100755
--- a/t/t4020-diff-external.sh
+++ b/t/t4020-diff-external.sh
@@ -24,45 +24,38 @@ test_expect_success setup '
'
test_expect_success 'GIT_EXTERNAL_DIFF environment' '
-
- GIT_EXTERNAL_DIFF=echo git diff | {
- read path oldfile oldhex oldmode newfile newhex newmode &&
- test "z$path" = zfile &&
- test "z$oldmode" = z100644 &&
- test "z$newhex" = "z$ZERO_OID" &&
- test "z$newmode" = z100644 &&
- oh=$(git rev-parse --verify HEAD:file) &&
- test "z$oh" = "z$oldhex"
- }
+ cat >expect <<-EOF &&
+ file $(git rev-parse --verify HEAD:file) 100644 file $(test_oid zero) 100644
+ EOF
+ GIT_EXTERNAL_DIFF=echo git diff >out &&
+ cut -d" " -f1,3- <out >actual &&
+ test_cmp expect actual
'
-test_expect_success 'GIT_EXTERNAL_DIFF environment should apply only to diff' '
-
- GIT_EXTERNAL_DIFF=echo git log -p -1 HEAD |
- grep "^diff --git a/file b/file"
+test_expect_success !SANITIZE_LEAK 'GIT_EXTERNAL_DIFF environment should apply only to diff' '
+ GIT_EXTERNAL_DIFF=echo git log -p -1 HEAD >out &&
+ grep "^diff --git a/file b/file" out
'
test_expect_success 'GIT_EXTERNAL_DIFF environment and --no-ext-diff' '
-
- GIT_EXTERNAL_DIFF=echo git diff --no-ext-diff |
- grep "^diff --git a/file b/file"
+ GIT_EXTERNAL_DIFF=echo git diff --no-ext-diff >out &&
+ grep "^diff --git a/file b/file" out
'
test_expect_success SYMLINKS 'typechange diff' '
rm -f file &&
ln -s elif file &&
- GIT_EXTERNAL_DIFF=echo git diff | {
- read path oldfile oldhex oldmode newfile newhex newmode &&
- test "z$path" = zfile &&
- test "z$oldmode" = z100644 &&
- test "z$newhex" = "z$ZERO_OID" &&
- test "z$newmode" = z120000 &&
- oh=$(git rev-parse --verify HEAD:file) &&
- test "z$oh" = "z$oldhex"
- } &&
+
+ cat >expect <<-EOF &&
+ file $(git rev-parse --verify HEAD:file) 100644 $(test_oid zero) 120000
+ EOF
+ GIT_EXTERNAL_DIFF=echo git diff >out &&
+ cut -d" " -f1,3-4,6- <out >actual &&
+ test_cmp expect actual &&
+
GIT_EXTERNAL_DIFF=echo git diff --no-ext-diff >actual &&
git diff >expect &&
test_cmp expect actual
@@ -72,27 +65,25 @@ test_expect_success 'diff.external' '
git reset --hard &&
echo third >file &&
test_config diff.external echo &&
- git diff | {
- read path oldfile oldhex oldmode newfile newhex newmode &&
- test "z$path" = zfile &&
- test "z$oldmode" = z100644 &&
- test "z$newhex" = "z$ZERO_OID" &&
- test "z$newmode" = z100644 &&
- oh=$(git rev-parse --verify HEAD:file) &&
- test "z$oh" = "z$oldhex"
- }
+
+ cat >expect <<-EOF &&
+ file $(git rev-parse --verify HEAD:file) 100644 $(test_oid zero) 100644
+ EOF
+ git diff >out &&
+ cut -d" " -f1,3-4,6- <out >actual &&
+ test_cmp expect actual
'
-test_expect_success 'diff.external should apply only to diff' '
+test_expect_success !SANITIZE_LEAK 'diff.external should apply only to diff' '
test_config diff.external echo &&
- git log -p -1 HEAD |
- grep "^diff --git a/file b/file"
+ git log -p -1 HEAD >out &&
+ grep "^diff --git a/file b/file" out
'
test_expect_success 'diff.external and --no-ext-diff' '
test_config diff.external echo &&
- git diff --no-ext-diff |
- grep "^diff --git a/file b/file"
+ git diff --no-ext-diff >out &&
+ grep "^diff --git a/file b/file" out
'
test_expect_success 'diff attribute' '
@@ -103,29 +94,23 @@ test_expect_success 'diff attribute' '
echo >.gitattributes "file diff=parrot" &&
- git diff | {
- read path oldfile oldhex oldmode newfile newhex newmode &&
- test "z$path" = zfile &&
- test "z$oldmode" = z100644 &&
- test "z$newhex" = "z$ZERO_OID" &&
- test "z$newmode" = z100644 &&
- oh=$(git rev-parse --verify HEAD:file) &&
- test "z$oh" = "z$oldhex"
- }
-
+ cat >expect <<-EOF &&
+ file $(git rev-parse --verify HEAD:file) 100644 $(test_oid zero) 100644
+ EOF
+ git diff >out &&
+ cut -d" " -f1,3-4,6- <out >actual &&
+ test_cmp expect actual
'
-test_expect_success 'diff attribute should apply only to diff' '
-
- git log -p -1 HEAD |
- grep "^diff --git a/file b/file"
+test_expect_success !SANITIZE_LEAK 'diff attribute should apply only to diff' '
+ git log -p -1 HEAD >out &&
+ grep "^diff --git a/file b/file" out
'
test_expect_success 'diff attribute and --no-ext-diff' '
-
- git diff --no-ext-diff |
- grep "^diff --git a/file b/file"
+ git diff --no-ext-diff >out &&
+ grep "^diff --git a/file b/file" out
'
@@ -136,48 +121,55 @@ test_expect_success 'diff attribute' '
echo >.gitattributes "file diff=color" &&
- git diff | {
- read path oldfile oldhex oldmode newfile newhex newmode &&
- test "z$path" = zfile &&
- test "z$oldmode" = z100644 &&
- test "z$newhex" = "z$ZERO_OID" &&
- test "z$newmode" = z100644 &&
- oh=$(git rev-parse --verify HEAD:file) &&
- test "z$oh" = "z$oldhex"
- }
-
+ cat >expect <<-EOF &&
+ file $(git rev-parse --verify HEAD:file) 100644 $(test_oid zero) 100644
+ EOF
+ git diff >out &&
+ cut -d" " -f1,3-4,6- <out >actual &&
+ test_cmp expect actual
'
-test_expect_success 'diff attribute should apply only to diff' '
-
- git log -p -1 HEAD |
- grep "^diff --git a/file b/file"
+test_expect_success !SANITIZE_LEAK 'diff attribute should apply only to diff' '
+ git log -p -1 HEAD >out &&
+ grep "^diff --git a/file b/file" out
'
test_expect_success 'diff attribute and --no-ext-diff' '
-
- git diff --no-ext-diff |
- grep "^diff --git a/file b/file"
+ git diff --no-ext-diff >out &&
+ grep "^diff --git a/file b/file" out
'
test_expect_success 'GIT_EXTERNAL_DIFF trumps diff.external' '
>.gitattributes &&
test_config diff.external "echo ext-global" &&
- GIT_EXTERNAL_DIFF="echo ext-env" git diff | grep ext-env
+
+ cat >expect <<-EOF &&
+ ext-env file $(git rev-parse --verify HEAD:file) 100644 file $(test_oid zero) 100644
+ EOF
+ GIT_EXTERNAL_DIFF="echo ext-env" git diff >out &&
+ cut -d" " -f1-2,4- <out >actual &&
+ test_cmp expect actual
'
test_expect_success 'attributes trump GIT_EXTERNAL_DIFF and diff.external' '
test_config diff.foo.command "echo ext-attribute" &&
test_config diff.external "echo ext-global" &&
echo "file diff=foo" >.gitattributes &&
- GIT_EXTERNAL_DIFF="echo ext-env" git diff | grep ext-attribute
+
+ cat >expect <<-EOF &&
+ ext-attribute file $(git rev-parse --verify HEAD:file) 100644 file $(test_oid zero) 100644
+ EOF
+ GIT_EXTERNAL_DIFF="echo ext-env" git diff >out &&
+ cut -d" " -f1-2,4- <out >actual &&
+ test_cmp expect actual
'
test_expect_success 'no diff with -diff' '
echo >.gitattributes "file -diff" &&
- git diff | grep Binary
+ git diff >out &&
+ grep Binary out
'
echo NULZbetweenZwords | perl -pe 'y/Z/\000/' > file
@@ -217,7 +209,12 @@ test_expect_success 'GIT_EXTERNAL_DIFF generates pretty paths' '
touch file.ext &&
git add file.ext &&
echo with extension > file.ext &&
- GIT_EXTERNAL_DIFF=echo git diff file.ext | grep ......_file\.ext &&
+
+ cat >expect <<-EOF &&
+ file.ext file $(git rev-parse --verify HEAD:file) 100644 file.ext $(test_oid zero) 100644
+ EOF
+ GIT_EXTERNAL_DIFF=echo git diff file.ext >out &&
+ cut -d" " -f1,3- <out >actual &&
git update-index --force-remove file.ext &&
rm file.ext
'
diff --git a/t/t4027-diff-submodule.sh b/t/t4027-diff-submodule.sh
index 6cef0da..295da98 100755
--- a/t/t4027-diff-submodule.sh
+++ b/t/t4027-diff-submodule.sh
@@ -2,7 +2,6 @@
test_description='difference in submodules'
-TEST_PASSES_SANITIZE_LEAK=true
. ./test-lib.sh
. "$TEST_DIRECTORY"/lib-diff.sh
@@ -28,10 +27,8 @@ test_expect_success setup '
git commit -m "submodule #2"
) &&
- set x $(
- cd sub &&
- git rev-list HEAD
- ) &&
+ git -C sub rev-list HEAD >revs &&
+ set x $(cat revs) &&
echo ":160000 160000 $3 $ZERO_OID M sub" >expect &&
subtip=$3 subprev=$2
'
diff --git a/t/t4034-diff-words.sh b/t/t4034-diff-words.sh
index d5abcf4..15764ee 100755
--- a/t/t4034-diff-words.sh
+++ b/t/t4034-diff-words.sh
@@ -324,6 +324,7 @@ test_language_driver dts
test_language_driver fortran
test_language_driver html
test_language_driver java
+test_language_driver kotlin
test_language_driver matlab
test_language_driver objc
test_language_driver pascal
diff --git a/t/t4034/kotlin/expect b/t/t4034/kotlin/expect
new file mode 100644
index 0000000..7f76f75
--- /dev/null
+++ b/t/t4034/kotlin/expect
@@ -0,0 +1,43 @@
+<BOLD>diff --git a/pre b/post<RESET>
+<BOLD>index 11ea3de..2e1df4c 100644<RESET>
+<BOLD>--- a/pre<RESET>
+<BOLD>+++ b/post<RESET>
+<CYAN>@@ -1,30 +1,30 @@<RESET>
+println("Hello World<RED>!\n<RESET><GREEN>?<RESET>")
+<GREEN>(<RESET>1<GREEN>) (<RESET>-1e10<GREEN>) (<RESET>0xabcdef<GREEN>)<RESET> '<RED>x<RESET><GREEN>y<RESET>'
+[<RED>a<RESET><GREEN>x<RESET>] <RED>a<RESET><GREEN>x<RESET>-><RED>b a<RESET><GREEN>y x<RESET>.<RED>b<RESET><GREEN>y<RESET>
+!<RED>a a<RESET><GREEN>x x<RESET>.inv() <RED>a<RESET><GREEN>x<RESET>*<RED>b a<RESET><GREEN>y x<RESET>&<RED>b<RESET>
+<RED>a<RESET><GREEN>y<RESET>
+<GREEN>x<RESET>*<RED>b a<RESET><GREEN>y x<RESET>/<RED>b a<RESET><GREEN>y x<RESET>%<RED>b<RESET>
+<RED>a<RESET><GREEN>y<RESET>
+<GREEN>x<RESET>+<RED>b a<RESET><GREEN>y x<RESET>-<RED>b<RESET><GREEN>y<RESET>
+a <RED>shr<RESET><GREEN>shl<RESET> b
+<RED>a<RESET><GREEN>x<RESET><<RED>b a<RESET><GREEN>y x<RESET><=<RED>b a<RESET><GREEN>y x<RESET>><RED>b a<RESET><GREEN>y x<RESET>>=<RED>b<RESET>
+<RED>a<RESET><GREEN>y<RESET>
+<GREEN>x<RESET>==<RED>b a<RESET><GREEN>y x<RESET>!=<RED>b a<RESET><GREEN>y x<RESET>===<RED>b<RESET>
+<RED>a<RESET><GREEN>y<RESET>
+<GREEN>x<RESET> and <RED>b<RESET>
+<RED>a<RESET><GREEN>y<RESET>
+<GREEN>x<RESET>^<RED>b<RESET>
+<RED>a<RESET><GREEN>y<RESET>
+<GREEN>x<RESET> or <RED>b<RESET>
+<RED>a<RESET><GREEN>y<RESET>
+<GREEN>x<RESET>&&<RED>b a<RESET><GREEN>y x<RESET>||<RED>b<RESET>
+<RED>a<RESET><GREEN>y<RESET>
+<GREEN>x<RESET>=<RED>b a<RESET><GREEN>y x<RESET>+=<RED>b a<RESET><GREEN>y x<RESET>-=<RED>b a<RESET><GREEN>y x<RESET>*=<RED>b a<RESET><GREEN>y x<RESET>/=<RED>b a<RESET><GREEN>y x<RESET>%=<RED>b a<RESET><GREEN>y x<RESET><<=<RED>b a<RESET><GREEN>y x<RESET>>>=<RED>b a<RESET><GREEN>y x<RESET>&=<RED>b a<RESET><GREEN>y x<RESET>^=<RED>b a<RESET><GREEN>y x<RESET>|=<RED>b<RESET><GREEN>y<RESET>
+a<RED>=<RESET><GREEN>+=<RESET>b c<RED>+=<RESET><GREEN>=<RESET>d e<RED>-=<RESET><GREEN><=<RESET>f g<RED>*=<RESET><GREEN>>=<RESET>h i<RED>/=<RESET><GREEN>/<RESET>j k<RED>%=<RESET><GREEN>%<RESET>l m<RED><<=<RESET><GREEN><<<RESET>n o<RED>>>=<RESET><GREEN>>><RESET>p q<RED>&=<RESET><GREEN>&<RESET>r s<RED>^=<RESET><GREEN>^<RESET>t u<RED>|=<RESET><GREEN>|<RESET>v
+a<RED><<=<RESET><GREEN><=<RESET>b
+a<RED>||<RESET><GREEN>|<RESET>b a<RED>&&<RESET><GREEN>&<RESET>b
+<RED>a<RESET><GREEN>x<RESET>,y
+--a<RED>==<RESET><GREEN>!=<RESET>--b
+a++<RED>==<RESET><GREEN>!=<RESET>++b
+<RED>0xFF_EC_DE_5E 0b100_000 100_000<RESET><GREEN>0xFF_E1_DE_5E 0b100_100 200_000<RESET>
+a<RED>==<RESET><GREEN>===<RESET>b
+a<RED>!!<RESET><GREEN>!=<RESET>b
+<RED>_32<RESET><GREEN>_33<RESET>.find(arr)
+X.<RED>fill<RESET><GREEN>find<RESET>()
+X.<RED>u<RESET><GREEN>f<RESET>+1
+X.u<RED>-<RESET><GREEN>+<RESET>2
+a<RED>.<RESET><GREEN>..<RESET>b
+a<RED>?.<RESET><GREEN>?:<RESET>b
+<RED>.32_00_456<RESET><GREEN>.32_00_446<RESET>
diff --git a/t/t4034/kotlin/post b/t/t4034/kotlin/post
new file mode 100644
index 0000000..2e1df4c
--- /dev/null
+++ b/t/t4034/kotlin/post
@@ -0,0 +1,30 @@
+println("Hello World?")
+(1) (-1e10) (0xabcdef) 'y'
+[x] x->y x.y
+!x x.inv() x*y x&y
+x*y x/y x%y
+x+y x-y
+a shl b
+x<y x<=y x>y x>=y
+x==y x!=y x===y
+x and y
+x^y
+x or y
+x&&y x||y
+x=y x+=y x-=y x*=y x/=y x%=y x<<=y x>>=y x&=y x^=y x|=y
+a+=b c=d e<=f g>=h i/j k%l m<<n o>>p q&r s^t u|v
+a<=b
+a|b a&b
+x,y
+--a!=--b
+a++!=++b
+0xFF_E1_DE_5E 0b100_100 200_000
+a===b
+a!=b
+_33.find(arr)
+X.find()
+X.f+1
+X.u+2
+a..b
+a?:b
+.32_00_446
diff --git a/t/t4034/kotlin/pre b/t/t4034/kotlin/pre
new file mode 100644
index 0000000..11ea3de
--- /dev/null
+++ b/t/t4034/kotlin/pre
@@ -0,0 +1,30 @@
+println("Hello World!\n")
+1 -1e10 0xabcdef 'x'
+[a] a->b a.b
+!a a.inv() a*b a&b
+a*b a/b a%b
+a+b a-b
+a shr b
+a<b a<=b a>b a>=b
+a==b a!=b a===b
+a and b
+a^b
+a or b
+a&&b a||b
+a=b a+=b a-=b a*=b a/=b a%=b a<<=b a>>=b a&=b a^=b a|=b
+a=b c+=d e-=f g*=h i/=j k%=l m<<=n o>>=p q&=r s^=t u|=v
+a<<=b
+a||b a&&b
+a,y
+--a==--b
+a++==++b
+0xFF_EC_DE_5E 0b100_000 100_000
+a==b
+a!!b
+_32.find(arr)
+X.fill()
+X.u+1
+X.u-2
+a.b
+a?.b
+.32_00_456
diff --git a/t/t4069-remerge-diff.sh b/t/t4069-remerge-diff.sh
new file mode 100755
index 0000000..35f9495
--- /dev/null
+++ b/t/t4069-remerge-diff.sh
@@ -0,0 +1,291 @@
+#!/bin/sh
+
+test_description='remerge-diff handling'
+
+. ./test-lib.sh
+
+# This test is ort-specific
+if test "${GIT_TEST_MERGE_ALGORITHM}" != ort
+then
+ skip_all="GIT_TEST_MERGE_ALGORITHM != ort"
+ test_done
+fi
+
+test_expect_success 'setup basic merges' '
+ test_write_lines 1 2 3 4 5 6 7 8 9 >numbers &&
+ git add numbers &&
+ git commit -m base &&
+
+ git branch feature_a &&
+ git branch feature_b &&
+ git branch feature_c &&
+
+ git branch ab_resolution &&
+ git branch bc_resolution &&
+
+ git checkout feature_a &&
+ test_write_lines 1 2 three 4 5 6 7 eight 9 >numbers &&
+ git commit -a -m change_a &&
+
+ git checkout feature_b &&
+ test_write_lines 1 2 tres 4 5 6 7 8 9 >numbers &&
+ git commit -a -m change_b &&
+
+ git checkout feature_c &&
+ test_write_lines 1 2 3 4 5 6 7 8 9 10 >numbers &&
+ git commit -a -m change_c &&
+
+ git checkout bc_resolution &&
+ git merge --ff-only feature_b &&
+ # no conflict
+ git merge feature_c &&
+
+ git checkout ab_resolution &&
+ git merge --ff-only feature_a &&
+ # conflicts!
+ test_must_fail git merge feature_b &&
+ # Resolve conflict...and make another change elsewhere
+ test_write_lines 1 2 drei 4 5 6 7 acht 9 >numbers &&
+ git add numbers &&
+ git merge --continue
+'
+
+test_expect_success 'remerge-diff on a clean merge' '
+ git log -1 --oneline bc_resolution >expect &&
+ git show --oneline --remerge-diff bc_resolution >actual &&
+ test_cmp expect actual
+'
+
+test_expect_success 'remerge-diff with both a resolved conflict and an unrelated change' '
+ git log -1 --oneline ab_resolution >tmp &&
+ cat <<-EOF >>tmp &&
+ diff --git a/numbers b/numbers
+ remerge CONFLICT (content): Merge conflict in numbers
+ index a1fb731..6875544 100644
+ --- a/numbers
+ +++ b/numbers
+ @@ -1,13 +1,9 @@
+ 1
+ 2
+ -<<<<<<< b0ed5cb (change_a)
+ -three
+ -=======
+ -tres
+ ->>>>>>> 6cd3f82 (change_b)
+ +drei
+ 4
+ 5
+ 6
+ 7
+ -eight
+ +acht
+ 9
+ EOF
+ # Hashes above are sha1; rip them out so test works with sha256
+ sed -e "s/[0-9a-f]\{7,\}/HASH/g" tmp >expect &&
+
+ git show --oneline --remerge-diff ab_resolution >tmp &&
+ sed -e "s/[0-9a-f]\{7,\}/HASH/g" tmp >actual &&
+ test_cmp expect actual
+'
+
+test_expect_success 'setup non-content conflicts' '
+ git switch --orphan base &&
+
+ test_write_lines 1 2 3 4 5 6 7 8 9 >numbers &&
+ test_write_lines a b c d e f g h i >letters &&
+ test_write_lines in the way >content &&
+ git add numbers letters content &&
+ git commit -m base &&
+
+ git branch side1 &&
+ git branch side2 &&
+
+ git checkout side1 &&
+ test_write_lines 1 2 three 4 5 6 7 8 9 >numbers &&
+ git mv letters letters_side1 &&
+ git mv content file_or_directory &&
+ git add numbers &&
+ git commit -m side1 &&
+
+ git checkout side2 &&
+ git rm numbers &&
+ git mv letters letters_side2 &&
+ mkdir file_or_directory &&
+ echo hello >file_or_directory/world &&
+ git add file_or_directory/world &&
+ git commit -m side2 &&
+
+ git checkout -b resolution side1 &&
+ test_must_fail git merge side2 &&
+ test_write_lines 1 2 three 4 5 6 7 8 9 >numbers &&
+ git add numbers &&
+ git add letters_side1 &&
+ git rm letters &&
+ git rm letters_side2 &&
+ git add file_or_directory~HEAD &&
+ git mv file_or_directory~HEAD wanted_content &&
+ git commit -m resolved
+'
+
+test_expect_success 'remerge-diff with non-content conflicts' '
+ git log -1 --oneline resolution >tmp &&
+ cat <<-EOF >>tmp &&
+ diff --git a/file_or_directory~HASH (side1) b/wanted_content
+ similarity index 100%
+ rename from file_or_directory~HASH (side1)
+ rename to wanted_content
+ remerge CONFLICT (file/directory): directory in the way of file_or_directory from HASH (side1); moving it to file_or_directory~HASH (side1) instead.
+ diff --git a/letters b/letters
+ remerge CONFLICT (rename/rename): letters renamed to letters_side1 in HASH (side1) and to letters_side2 in HASH (side2).
+ diff --git a/letters_side2 b/letters_side2
+ deleted file mode 100644
+ index b236ae5..0000000
+ --- a/letters_side2
+ +++ /dev/null
+ @@ -1,9 +0,0 @@
+ -a
+ -b
+ -c
+ -d
+ -e
+ -f
+ -g
+ -h
+ -i
+ diff --git a/numbers b/numbers
+ remerge CONFLICT (modify/delete): numbers deleted in HASH (side2) and modified in HASH (side1). Version HASH (side1) of numbers left in tree.
+ EOF
+ # We still have some sha1 hashes above; rip them out so test works
+ # with sha256
+ sed -e "s/[0-9a-f]\{7,\}/HASH/g" tmp >expect &&
+
+ git show --oneline --remerge-diff resolution >tmp &&
+ sed -e "s/[0-9a-f]\{7,\}/HASH/g" tmp >actual &&
+ test_cmp expect actual
+'
+
+test_expect_success 'remerge-diff w/ diff-filter=U: all conflict headers, no diff content' '
+ git log -1 --oneline resolution >tmp &&
+ cat <<-EOF >>tmp &&
+ diff --git a/file_or_directory~HASH (side1) b/file_or_directory~HASH (side1)
+ remerge CONFLICT (file/directory): directory in the way of file_or_directory from HASH (side1); moving it to file_or_directory~HASH (side1) instead.
+ diff --git a/letters b/letters
+ remerge CONFLICT (rename/rename): letters renamed to letters_side1 in HASH (side1) and to letters_side2 in HASH (side2).
+ diff --git a/numbers b/numbers
+ remerge CONFLICT (modify/delete): numbers deleted in HASH (side2) and modified in HASH (side1). Version HASH (side1) of numbers left in tree.
+ EOF
+ # We still have some sha1 hashes above; rip them out so test works
+ # with sha256
+ sed -e "s/[0-9a-f]\{7,\}/HASH/g" tmp >expect &&
+
+ git show --oneline --remerge-diff --diff-filter=U resolution >tmp &&
+ sed -e "s/[0-9a-f]\{7,\}/HASH/g" tmp >actual &&
+ test_cmp expect actual
+'
+
+test_expect_success 'remerge-diff w/ diff-filter=R: relevant file + conflict header' '
+ git log -1 --oneline resolution >tmp &&
+ cat <<-EOF >>tmp &&
+ diff --git a/file_or_directory~HASH (side1) b/wanted_content
+ similarity index 100%
+ rename from file_or_directory~HASH (side1)
+ rename to wanted_content
+ remerge CONFLICT (file/directory): directory in the way of file_or_directory from HASH (side1); moving it to file_or_directory~HASH (side1) instead.
+ EOF
+ # We still have some sha1 hashes above; rip them out so test works
+ # with sha256
+ sed -e "s/[0-9a-f]\{7,\}/HASH/g" tmp >expect &&
+
+ git show --oneline --remerge-diff --diff-filter=R resolution >tmp &&
+ sed -e "s/[0-9a-f]\{7,\}/HASH/g" tmp >actual &&
+ test_cmp expect actual
+'
+
+test_expect_success 'remerge-diff w/ pathspec: limits to relevant file including conflict header' '
+ git log -1 --oneline resolution >tmp &&
+ cat <<-EOF >>tmp &&
+ diff --git a/letters b/letters
+ remerge CONFLICT (rename/rename): letters renamed to letters_side1 in HASH (side1) and to letters_side2 in HASH (side2).
+ diff --git a/letters_side2 b/letters_side2
+ deleted file mode 100644
+ index b236ae5..0000000
+ --- a/letters_side2
+ +++ /dev/null
+ @@ -1,9 +0,0 @@
+ -a
+ -b
+ -c
+ -d
+ -e
+ -f
+ -g
+ -h
+ -i
+ EOF
+ # We still have some sha1 hashes above; rip them out so test works
+ # with sha256
+ sed -e "s/[0-9a-f]\{7,\}/HASH/g" tmp >expect &&
+
+ git show --oneline --remerge-diff resolution -- "letters*" >tmp &&
+ sed -e "s/[0-9a-f]\{7,\}/HASH/g" tmp >actual &&
+ test_cmp expect actual
+'
+
+test_expect_success 'setup non-content conflicts' '
+ git switch --orphan newbase &&
+
+ test_write_lines 1 2 3 4 5 6 7 8 9 >numbers &&
+ git add numbers &&
+ git commit -m base &&
+
+ git branch newside1 &&
+ git branch newside2 &&
+
+ git checkout newside1 &&
+ test_write_lines 1 2 three 4 5 6 7 8 9 >numbers &&
+ git add numbers &&
+ git commit -m side1 &&
+
+ git checkout newside2 &&
+ test_write_lines 1 2 drei 4 5 6 7 8 9 >numbers &&
+ git add numbers &&
+ git commit -m side2 &&
+
+ git checkout -b newresolution newside1 &&
+ test_must_fail git merge newside2 &&
+ git checkout --theirs numbers &&
+ git add -u numbers &&
+ git commit -m resolved
+'
+
+test_expect_success 'remerge-diff turns off history simplification' '
+ git log -1 --oneline newresolution >tmp &&
+ cat <<-EOF >>tmp &&
+ diff --git a/numbers b/numbers
+ remerge CONFLICT (content): Merge conflict in numbers
+ index 070e9e7..5335e78 100644
+ --- a/numbers
+ +++ b/numbers
+ @@ -1,10 +1,6 @@
+ 1
+ 2
+ -<<<<<<< 96f1e45 (side1)
+ -three
+ -=======
+ drei
+ ->>>>>>> 4fd522f (side2)
+ 4
+ 5
+ 6
+ EOF
+ # We still have some sha1 hashes above; rip them out so test works
+ # with sha256
+ sed -e "s/[0-9a-f]\{7,\}/HASH/g" tmp >expect &&
+
+ git show --oneline --remerge-diff newresolution -- numbers >tmp &&
+ sed -e "s/[0-9a-f]\{7,\}/HASH/g" tmp >actual &&
+ test_cmp expect actual
+'
+
+test_done
diff --git a/t/t4123-apply-shrink.sh b/t/t4123-apply-shrink.sh
index dfa053f..3ef8461 100755
--- a/t/t4123-apply-shrink.sh
+++ b/t/t4123-apply-shrink.sh
@@ -2,8 +2,6 @@
test_description='apply a patch that is larger than the preimage'
-
-TEST_PASSES_SANITIZE_LEAK=true
. ./test-lib.sh
cat >F <<\EOF
@@ -41,20 +39,8 @@ test_expect_success setup '
'
test_expect_success 'apply should fail gracefully' '
-
- if git apply --index patch
- then
- echo Oops, should not have succeeded
- false
- else
- status=$? &&
- echo "Status was $status" &&
- if test -f .git/index.lock
- then
- echo Oops, should not have crashed
- false
- fi
- fi
+ test_must_fail git apply --index patch &&
+ test_path_is_missing .git/index.lock
'
test_done
diff --git a/t/t4128-apply-root.sh b/t/t4128-apply-root.sh
index cb3181e..f6db5a7 100755
--- a/t/t4128-apply-root.sh
+++ b/t/t4128-apply-root.sh
@@ -2,8 +2,6 @@
test_description='apply same filename'
-
-TEST_PASSES_SANITIZE_LEAK=true
. ./test-lib.sh
test_expect_success 'setup' '
@@ -26,10 +24,11 @@ diff a/bla/blub/dir/file b/bla/blub/dir/file
EOF
test_expect_success 'apply --directory -p (1)' '
-
git apply --directory=some/sub -p3 --index patch &&
- test Bello = $(git show :some/sub/dir/file) &&
- test Bello = $(cat some/sub/dir/file)
+ echo Bello >expect &&
+ git show :some/sub/dir/file >actual &&
+ test_cmp expect actual &&
+ test_cmp expect some/sub/dir/file
'
@@ -37,8 +36,10 @@ test_expect_success 'apply --directory -p (2) ' '
git reset --hard initial &&
git apply --directory=some/sub/ -p3 --index patch &&
- test Bello = $(git show :some/sub/dir/file) &&
- test Bello = $(cat some/sub/dir/file)
+ echo Bello >expect &&
+ git show :some/sub/dir/file >actual &&
+ test_cmp expect actual &&
+ test_cmp expect some/sub/dir/file
'
@@ -55,8 +56,10 @@ EOF
test_expect_success 'apply --directory (new file)' '
git reset --hard initial &&
git apply --directory=some/sub/dir/ --index patch &&
- test content = $(git show :some/sub/dir/newfile) &&
- test content = $(cat some/sub/dir/newfile)
+ echo content >expect &&
+ git show :some/sub/dir/newfile >actual &&
+ test_cmp expect actual &&
+ test_cmp expect some/sub/dir/newfile
'
cat > patch << EOF
@@ -72,8 +75,10 @@ EOF
test_expect_success 'apply --directory -p (new file)' '
git reset --hard initial &&
git apply -p2 --directory=some/sub/dir/ --index patch &&
- test content = $(git show :some/sub/dir/newfile2) &&
- test content = $(cat some/sub/dir/newfile2)
+ echo content >expect &&
+ git show :some/sub/dir/newfile2 >actual &&
+ test_cmp expect actual &&
+ test_cmp expect some/sub/dir/newfile2
'
cat > patch << EOF
@@ -91,7 +96,8 @@ test_expect_success 'apply --directory (delete file)' '
echo content >some/sub/dir/delfile &&
git add some/sub/dir/delfile &&
git apply --directory=some/sub/dir/ --index patch &&
- ! (git ls-files | grep delfile)
+ git ls-files >out &&
+ ! grep delfile out
'
cat > patch << 'EOF'
@@ -107,8 +113,10 @@ EOF
test_expect_success 'apply --directory (quoted filename)' '
git reset --hard initial &&
git apply --directory=some/sub/dir/ --index patch &&
- test content = $(git show :some/sub/dir/quotefile) &&
- test content = $(cat some/sub/dir/quotefile)
+ echo content >expect &&
+ git show :some/sub/dir/quotefile >actual &&
+ test_cmp expect actual &&
+ test_cmp expect some/sub/dir/quotefile
'
test_done
diff --git a/t/t4150-am.sh b/t/t4150-am.sh
index 6caff0c..cdad4b6 100755
--- a/t/t4150-am.sh
+++ b/t/t4150-am.sh
@@ -315,12 +315,10 @@ test_expect_success 'am --patch-format=hg applies hg patch' '
'
test_expect_success 'am with applypatch-msg hook' '
- test_when_finished "rm -f .git/hooks/applypatch-msg" &&
rm -fr .git/rebase-apply &&
git reset --hard &&
git checkout first &&
- mkdir -p .git/hooks &&
- write_script .git/hooks/applypatch-msg <<-\EOF &&
+ test_hook applypatch-msg <<-\EOF &&
cat "$1" >actual-msg &&
echo hook-message >"$1"
EOF
@@ -335,12 +333,10 @@ test_expect_success 'am with applypatch-msg hook' '
'
test_expect_success 'am with failing applypatch-msg hook' '
- test_when_finished "rm -f .git/hooks/applypatch-msg" &&
rm -fr .git/rebase-apply &&
git reset --hard &&
git checkout first &&
- mkdir -p .git/hooks &&
- write_script .git/hooks/applypatch-msg <<-\EOF &&
+ test_hook applypatch-msg <<-\EOF &&
exit 1
EOF
test_must_fail git am patch1 &&
@@ -350,12 +346,10 @@ test_expect_success 'am with failing applypatch-msg hook' '
'
test_expect_success 'am with pre-applypatch hook' '
- test_when_finished "rm -f .git/hooks/pre-applypatch" &&
rm -fr .git/rebase-apply &&
git reset --hard &&
git checkout first &&
- mkdir -p .git/hooks &&
- write_script .git/hooks/pre-applypatch <<-\EOF &&
+ test_hook pre-applypatch <<-\EOF &&
git diff first >diff.actual
exit 0
EOF
@@ -368,12 +362,10 @@ test_expect_success 'am with pre-applypatch hook' '
'
test_expect_success 'am with failing pre-applypatch hook' '
- test_when_finished "rm -f .git/hooks/pre-applypatch" &&
rm -fr .git/rebase-apply &&
git reset --hard &&
git checkout first &&
- mkdir -p .git/hooks &&
- write_script .git/hooks/pre-applypatch <<-\EOF &&
+ test_hook pre-applypatch <<-\EOF &&
exit 1
EOF
test_must_fail git am patch1 &&
@@ -383,12 +375,10 @@ test_expect_success 'am with failing pre-applypatch hook' '
'
test_expect_success 'am with post-applypatch hook' '
- test_when_finished "rm -f .git/hooks/post-applypatch" &&
rm -fr .git/rebase-apply &&
git reset --hard &&
git checkout first &&
- mkdir -p .git/hooks &&
- write_script .git/hooks/post-applypatch <<-\EOF &&
+ test_hook post-applypatch <<-\EOF &&
git rev-parse HEAD >head.actual
git diff second >diff.actual
exit 0
@@ -403,12 +393,10 @@ test_expect_success 'am with post-applypatch hook' '
'
test_expect_success 'am with failing post-applypatch hook' '
- test_when_finished "rm -f .git/hooks/post-applypatch" &&
rm -fr .git/rebase-apply &&
git reset --hard &&
git checkout first &&
- mkdir -p .git/hooks &&
- write_script .git/hooks/post-applypatch <<-\EOF &&
+ test_hook post-applypatch <<-\EOF &&
git rev-parse HEAD >head.actual
exit 1
EOF
@@ -1169,7 +1157,7 @@ test_expect_success 'invalid when passing the --empty option alone' '
test_when_finished "git am --abort || :" &&
git checkout empty-commit^ &&
test_must_fail git am --empty empty-commit.patch 2>err &&
- echo "error: Invalid value for --empty: empty-commit.patch" >expected &&
+ echo "error: invalid value for '\''--empty'\'': '\''empty-commit.patch'\''" >expected &&
test_cmp expected err
'
diff --git a/t/t4202-log.sh b/t/t4202-log.sh
index 5049559..be07407 100755
--- a/t/t4202-log.sh
+++ b/t/t4202-log.sh
@@ -142,6 +142,19 @@ test_expect_success 'diff-filter=R' '
'
+test_expect_success 'multiple --diff-filter bits' '
+
+ git log -M --pretty="format:%s" --diff-filter=R HEAD >expect &&
+ git log -M --pretty="format:%s" --diff-filter=Ra HEAD >actual &&
+ test_cmp expect actual &&
+ git log -M --pretty="format:%s" --diff-filter=aR HEAD >actual &&
+ test_cmp expect actual &&
+ git log -M --pretty="format:%s" \
+ --diff-filter=a --diff-filter=R HEAD >actual &&
+ test_cmp expect actual
+
+'
+
test_expect_success 'diff-filter=C' '
git log -C -C --pretty="format:%s" --diff-filter=C HEAD >actual &&
@@ -449,6 +462,29 @@ test_expect_success !FAIL_PREREQS 'log with various grep.patternType configurati
)
'
+for cmd in show whatchanged reflog format-patch
+do
+ case "$cmd" in
+ format-patch) myarg="HEAD~.." ;;
+ *) myarg= ;;
+ esac
+
+ test_expect_success "$cmd: understands grep.patternType, like 'log'" '
+ git init "pattern-type-$cmd" &&
+ (
+ cd "pattern-type-$cmd" &&
+ test_commit 1 file A &&
+ test_commit "(1|2)" file B 2 &&
+
+ git -c grep.patternType=fixed $cmd --grep="..." $myarg >actual &&
+ test_must_be_empty actual &&
+
+ git -c grep.patternType=basic $cmd --grep="..." $myarg >actual &&
+ test_file_not_empty actual
+ )
+ '
+done
+
test_expect_success 'log --author' '
cat >expect <<-\EOF &&
Author: <BOLD;RED>A U<RESET> Thor <author@example.com>
@@ -659,7 +695,7 @@ EOF
test_expect_success 'log --graph with full output' '
git log --graph --date-order --pretty=short |
- git name-rev --name-only --stdin |
+ git name-rev --name-only --annotate-stdin |
sed "s/Merge:.*/Merge: A B/;s/ *\$//" >actual &&
test_cmp expect actual
'
@@ -1671,6 +1707,75 @@ test_expect_success 'log --graph with --name-only' '
test_cmp_graph --name-only tangle..reach
'
+test_expect_success '--no-graph countermands --graph' '
+ git log >expect &&
+ git log --graph --no-graph >actual &&
+ test_cmp expect actual
+'
+
+test_expect_success '--graph countermands --no-graph' '
+ git log --graph >expect &&
+ git log --no-graph --graph >actual &&
+ test_cmp expect actual
+'
+
+test_expect_success '--no-graph does not unset --topo-order' '
+ git log --topo-order >expect &&
+ git log --topo-order --no-graph >actual &&
+ test_cmp expect actual
+'
+
+test_expect_success '--no-graph does not unset --parents' '
+ git log --parents >expect &&
+ git log --parents --no-graph >actual &&
+ test_cmp expect actual
+'
+
+test_expect_success '--reverse and --graph conflict' '
+ test_must_fail git log --reverse --graph 2>stderr &&
+ test_i18ngrep "cannot be used together" stderr
+'
+
+test_expect_success '--reverse --graph --no-graph works' '
+ git log --reverse >expect &&
+ git log --reverse --graph --no-graph >actual &&
+ test_cmp expect actual
+'
+
+test_expect_success '--show-linear-break and --graph conflict' '
+ test_must_fail git log --show-linear-break --graph 2>stderr &&
+ test_i18ngrep "cannot be used together" stderr
+'
+
+test_expect_success '--show-linear-break --graph --no-graph works' '
+ git log --show-linear-break >expect &&
+ git log --show-linear-break --graph --no-graph >actual &&
+ test_cmp expect actual
+'
+
+test_expect_success '--no-walk and --graph conflict' '
+ test_must_fail git log --no-walk --graph 2>stderr &&
+ test_i18ngrep "cannot be used together" stderr
+'
+
+test_expect_success '--no-walk --graph --no-graph works' '
+ git log --no-walk >expect &&
+ git log --no-walk --graph --no-graph >actual &&
+ test_cmp expect actual
+'
+
+test_expect_success '--walk-reflogs and --graph conflict' '
+ test_must_fail git log --walk-reflogs --graph 2>stderr &&
+ (test_i18ngrep "cannot combine" stderr ||
+ test_i18ngrep "cannot be used together" stderr)
+'
+
+test_expect_success '--walk-reflogs --graph --no-graph works' '
+ git log --walk-reflogs >expect &&
+ git log --walk-reflogs --graph --no-graph >actual &&
+ test_cmp expect actual
+'
+
test_expect_success 'dotdot is a parent directory' '
mkdir -p a/b &&
( echo sixth && echo fifth ) >expect &&
@@ -1931,7 +2036,8 @@ test_expect_success GPGSM 'log --graph --show-signature for merged tag x509 miss
git merge --no-ff -m msg signed_tag_x509_nokey &&
GNUPGHOME=. git log --graph --show-signature -n1 plain-x509-nokey >actual &&
grep "^|\\\ merged tag" actual &&
- grep "^| | gpgsm: certificate not found" actual
+ grep -e "^| | gpgsm: certificate not found" \
+ -e "^| | gpgsm: failed to find the certificate: Not found" actual
'
test_expect_success GPGSM 'log --graph --show-signature for merged tag x509 bad signature' '
diff --git a/t/t4204-patch-id.sh b/t/t4204-patch-id.sh
index 80f4a65..a730c0d 100755
--- a/t/t4204-patch-id.sh
+++ b/t/t4204-patch-id.sh
@@ -38,7 +38,7 @@ calc_patch_id () {
shift
git patch-id "$@" >patch-id.output &&
sed "s/ .*//" patch-id.output >patch-id_"$patch_name" &&
- test_line_count -gt 0 patch-id_"$patch_name"
+ test_line_count -eq 1 patch-id_"$patch_name"
}
get_top_diff () {
@@ -166,40 +166,67 @@ test_expect_success 'patch-id respects config from subdir' '
)
'
-cat >nonl <<\EOF
-diff --git i/a w/a
-index e69de29..2e65efe 100644
---- i/a
-+++ w/a
-@@ -0,0 +1 @@
-+a
-\ No newline at end of file
-diff --git i/b w/b
-index e69de29..6178079 100644
---- i/b
-+++ w/b
-@@ -0,0 +1 @@
-+b
-EOF
-
-cat >withnl <<\EOF
-diff --git i/a w/a
-index e69de29..7898192 100644
---- i/a
-+++ w/a
-@@ -0,0 +1 @@
-+a
-diff --git i/b w/b
-index e69de29..6178079 100644
---- i/b
-+++ w/b
-@@ -0,0 +1 @@
-+b
-EOF
-
test_expect_success 'patch-id handles no-nl-at-eof markers' '
- cat nonl | calc_patch_id nonl &&
- cat withnl | calc_patch_id withnl &&
+ cat >nonl <<-\EOF &&
+ diff --git i/a w/a
+ index e69de29..2e65efe 100644
+ --- i/a
+ +++ w/a
+ @@ -0,0 +1 @@
+ +a
+ \ No newline at end of file
+ diff --git i/b w/b
+ index e69de29..6178079 100644
+ --- i/b
+ +++ w/b
+ @@ -0,0 +1 @@
+ +b
+ EOF
+ cat >withnl <<-\EOF &&
+ diff --git i/a w/a
+ index e69de29..7898192 100644
+ --- i/a
+ +++ w/a
+ @@ -0,0 +1 @@
+ +a
+ diff --git i/b w/b
+ index e69de29..6178079 100644
+ --- i/b
+ +++ w/b
+ @@ -0,0 +1 @@
+ +b
+ EOF
+ calc_patch_id nonl <nonl &&
+ calc_patch_id withnl <withnl &&
test_cmp patch-id_nonl patch-id_withnl
'
+
+test_expect_success 'patch-id handles diffs with one line of before/after' '
+ cat >diffu1 <<-\EOF &&
+ diff --git a/bar b/bar
+ index bdaf90f..31051f6 100644
+ --- a/bar
+ +++ b/bar
+ @@ -2 +2,2 @@
+ b
+ +c
+ diff --git a/car b/car
+ index 00750ed..2ae5e34 100644
+ --- a/car
+ +++ b/car
+ @@ -1 +1,2 @@
+ 3
+ +d
+ diff --git a/foo b/foo
+ index e439850..7146eb8 100644
+ --- a/foo
+ +++ b/foo
+ @@ -2 +2,2 @@
+ a
+ +e
+ EOF
+ calc_patch_id diffu1 <diffu1 &&
+ test_config patchid.stable true &&
+ calc_patch_id diffu1stable <diffu1
+'
test_done
diff --git a/t/t4216-log-bloom.sh b/t/t4216-log-bloom.sh
index cc3cebf..fa9d32f 100755
--- a/t/t4216-log-bloom.sh
+++ b/t/t4216-log-bloom.sh
@@ -48,6 +48,7 @@ graph_read_expect () {
header: 43475048 1 $(test_oid oid_version) $NUM_CHUNKS 0
num_commits: $1
chunks: oid_fanout oid_lookup commit_metadata generation_data bloom_indexes bloom_data
+ options: bloom(1,10,7) read_generation_data
EOF
test-tool read-graph >actual &&
test_cmp expect actual
diff --git a/t/t5300-pack-object.sh b/t/t5300-pack-object.sh
index 2fd8451..a11d612 100755
--- a/t/t5300-pack-object.sh
+++ b/t/t5300-pack-object.sh
@@ -315,8 +315,10 @@ test_expect_success \
git index-pack -o tmp.idx test-3.pack &&
cmp tmp.idx test-1-${packname_1}.idx &&
- git index-pack test-3.pack &&
+ git index-pack --promisor=message test-3.pack &&
cmp test-3.idx test-1-${packname_1}.idx &&
+ echo message >expect &&
+ test_cmp expect test-3.promisor &&
cat test-2-${packname_2}.pack >test-3.pack &&
git index-pack -o tmp.idx test-2-${packname_2}.pack &&
diff --git a/t/t5302-pack-index.sh b/t/t5302-pack-index.sh
index 8ee67df..b0095ab 100755
--- a/t/t5302-pack-index.sh
+++ b/t/t5302-pack-index.sh
@@ -284,4 +284,12 @@ test_expect_success 'index-pack -v --stdin produces progress for both phases' '
test_i18ngrep "Resolving deltas" err
'
+test_expect_success 'too-large packs report the breach' '
+ pack=$(git pack-objects --all pack </dev/null) &&
+ sz="$(test_file_size pack-$pack.pack)" &&
+ test "$sz" -gt 20 &&
+ test_must_fail git index-pack --max-input-size=20 pack-$pack.pack 2>err &&
+ grep "maximum allowed size (20 bytes)" err
+'
+
test_done
diff --git a/t/t5310-pack-bitmaps.sh b/t/t5310-pack-bitmaps.sh
index d05ab71..f775fc1 100755
--- a/t/t5310-pack-bitmaps.sh
+++ b/t/t5310-pack-bitmaps.sh
@@ -397,4 +397,32 @@ test_expect_success 'pack.preferBitmapTips' '
)
'
+test_expect_success 'complains about multiple pack bitmaps' '
+ rm -fr repo &&
+ git init repo &&
+ test_when_finished "rm -fr repo" &&
+ (
+ cd repo &&
+
+ test_commit base &&
+
+ git repack -adb &&
+ bitmap="$(ls .git/objects/pack/pack-*.bitmap)" &&
+ mv "$bitmap" "$bitmap.bak" &&
+
+ test_commit other &&
+ git repack -ab &&
+
+ mv "$bitmap.bak" "$bitmap" &&
+
+ find .git/objects/pack -type f -name "*.pack" >packs &&
+ find .git/objects/pack -type f -name "*.bitmap" >bitmaps &&
+ test_line_count = 2 packs &&
+ test_line_count = 2 bitmaps &&
+
+ git rev-list --use-bitmap-index HEAD 2>err &&
+ grep "ignoring extra bitmap file" err
+ )
+'
+
test_done
diff --git a/t/t5312-prune-corruption.sh b/t/t5312-prune-corruption.sh
index ea889c0..9d8e249 100755
--- a/t/t5312-prune-corruption.sh
+++ b/t/t5312-prune-corruption.sh
@@ -22,8 +22,8 @@ test_expect_success 'disable reflogs' '
'
create_bogus_ref () {
- test_when_finished 'rm -f .git/refs/heads/bogus..name' &&
- echo $bogus >.git/refs/heads/bogus..name
+ test-tool ref-store main update-ref msg "refs/heads/bogus..name" $bogus $ZERO_OID REF_SKIP_REFNAME_VERIFICATION &&
+ test_when_finished "test-tool ref-store main delete-refs REF_NO_DEREF msg refs/heads/bogus..name"
}
test_expect_success 'create history reachable only from a bogus-named ref' '
@@ -113,7 +113,7 @@ test_expect_success 'pack-refs does not silently delete broken loose ref' '
# we do not want to count on running pack-refs to
# actually pack it, as it is perfectly reasonable to
# skip processing a broken ref
-test_expect_success 'create packed-refs file with broken ref' '
+test_expect_success REFFILES 'create packed-refs file with broken ref' '
rm -f .git/refs/heads/main &&
cat >.git/packed-refs <<-EOF &&
$missing refs/heads/main
@@ -124,13 +124,13 @@ test_expect_success 'create packed-refs file with broken ref' '
test_cmp expect actual
'
-test_expect_success 'pack-refs does not silently delete broken packed ref' '
+test_expect_success REFFILES 'pack-refs does not silently delete broken packed ref' '
git pack-refs --all --prune &&
git rev-parse refs/heads/main >actual &&
test_cmp expect actual
'
-test_expect_success 'pack-refs does not drop broken refs during deletion' '
+test_expect_success REFFILES 'pack-refs does not drop broken refs during deletion' '
git update-ref -d refs/heads/other &&
git rev-parse refs/heads/main >actual &&
test_cmp expect actual
diff --git a/t/t5316-pack-delta-depth.sh b/t/t5316-pack-delta-depth.sh
index df524f7..e904500 100755
--- a/t/t5316-pack-delta-depth.sh
+++ b/t/t5316-pack-delta-depth.sh
@@ -64,7 +64,11 @@ test_expect_success 'create series of packs' '
echo $cur &&
echo "$(git rev-parse :file) file"
} | git pack-objects --stdout >tmp &&
- git index-pack --stdin --fix-thin <tmp || return 1
+ GIT_TRACE2_EVENT=$PWD/trace \
+ git index-pack -v --stdin --fix-thin <tmp || return 1 &&
+ grep -c region_enter.*progress trace >enter &&
+ grep -c region_leave.*progress trace >leave &&
+ test_cmp enter leave &&
prev=$cur
done
'
diff --git a/t/t5318-commit-graph.sh b/t/t5318-commit-graph.sh
index edb728f..fbf0d64 100755
--- a/t/t5318-commit-graph.sh
+++ b/t/t5318-commit-graph.sh
@@ -29,12 +29,7 @@ test_expect_success 'setup full repo' '
cd "$TRASH_DIRECTORY/full" &&
git init &&
git config core.commitGraph true &&
- objdir=".git/objects" &&
-
- test_oid_cache <<-EOF
- oid_version sha1:1
- oid_version sha256:2
- EOF
+ objdir=".git/objects"
'
test_expect_success POSIXPERM 'tweak umask for modebit tests' '
@@ -69,46 +64,10 @@ test_expect_success 'create commits and repack' '
git repack
'
-graph_git_two_modes() {
- git -c core.commitGraph=true $1 >output &&
- git -c core.commitGraph=false $1 >expect &&
- test_cmp expect output
-}
-
-graph_git_behavior() {
- MSG=$1
- DIR=$2
- BRANCH=$3
- COMPARE=$4
- test_expect_success "check normal git operations: $MSG" '
- cd "$TRASH_DIRECTORY/$DIR" &&
- graph_git_two_modes "log --oneline $BRANCH" &&
- graph_git_two_modes "log --topo-order $BRANCH" &&
- graph_git_two_modes "log --graph $COMPARE..$BRANCH" &&
- graph_git_two_modes "branch -vv" &&
- graph_git_two_modes "merge-base -a $BRANCH $COMPARE"
- '
-}
+. "$TEST_DIRECTORY"/lib-commit-graph.sh
graph_git_behavior 'no graph' full commits/3 commits/1
-graph_read_expect() {
- OPTIONAL=""
- NUM_CHUNKS=3
- if test ! -z "$2"
- then
- OPTIONAL=" $2"
- NUM_CHUNKS=$((3 + $(echo "$2" | wc -w)))
- fi
- cat >expect <<- EOF
- header: 43475048 1 $(test_oid oid_version) $NUM_CHUNKS 0
- num_commits: $1
- chunks: oid_fanout oid_lookup commit_metadata$OPTIONAL
- EOF
- test-tool read-graph >output &&
- test_cmp expect output
-}
-
test_expect_success 'exit with correct error on bad input to --stdin-commits' '
cd "$TRASH_DIRECTORY/full" &&
# invalid, non-hex OID
@@ -466,10 +425,10 @@ test_expect_success 'warn on improper hash version' '
)
'
-test_expect_success 'lower layers have overflow chunk' '
+test_expect_success TIME_IS_64BIT,TIME_T_IS_64BIT 'lower layers have overflow chunk' '
cd "$TRASH_DIRECTORY/full" &&
UNIX_EPOCH_ZERO="@0 +0000" &&
- FUTURE_DATE="@2147483646 +0000" &&
+ FUTURE_DATE="@4147483646 +0000" &&
rm -f .git/objects/info/commit-graph &&
test_commit --date "$FUTURE_DATE" future-1 &&
test_commit --date "$UNIX_EPOCH_ZERO" old-1 &&
@@ -497,7 +456,7 @@ test_expect_success 'git commit-graph verify' '
cd "$TRASH_DIRECTORY/full" &&
git rev-parse commits/8 | git -c commitGraph.generationVersion=1 commit-graph write --stdin-commits &&
git commit-graph verify >output &&
- graph_read_expect 9 extra_edges
+ graph_read_expect 9 extra_edges 1
'
NUM_COMMITS=9
@@ -825,10 +784,6 @@ test_expect_success 'set up and verify repo with generation data overflow chunk'
objdir=".git/objects" &&
UNIX_EPOCH_ZERO="@0 +0000" &&
FUTURE_DATE="@2147483646 +0000" &&
- test_oid_cache <<-EOF &&
- oid_version sha1:1
- oid_version sha256:2
- EOF
cd "$TRASH_DIRECTORY" &&
mkdir repo &&
cd repo &&
diff --git a/t/t5324-split-commit-graph.sh b/t/t5324-split-commit-graph.sh
index 847b809..669ddc6 100755
--- a/t/t5324-split-commit-graph.sh
+++ b/t/t5324-split-commit-graph.sh
@@ -30,10 +30,16 @@ graph_read_expect() {
then
NUM_BASE=$2
fi
+ OPTIONS=
+ if test -z "$3"
+ then
+ OPTIONS=" read_generation_data"
+ fi
cat >expect <<- EOF
header: 43475048 1 $(test_oid oid_version) 4 $NUM_BASE
num_commits: $1
chunks: oid_fanout oid_lookup commit_metadata generation_data
+ options:$OPTIONS
EOF
test-tool read-graph >output &&
test_cmp expect output
@@ -508,6 +514,7 @@ test_expect_success 'setup repo for mixed generation commit-graph-chain' '
header: 43475048 1 $(test_oid oid_version) 4 1
num_commits: $NUM_SECOND_LAYER_COMMITS
chunks: oid_fanout oid_lookup commit_metadata
+ options:
EOF
test_cmp expect output &&
git commit-graph verify &&
@@ -540,6 +547,7 @@ test_expect_success 'do not write generation data chunk if not present on existi
header: 43475048 1 $(test_oid oid_version) 4 2
num_commits: $NUM_THIRD_LAYER_COMMITS
chunks: oid_fanout oid_lookup commit_metadata
+ options:
EOF
test_cmp expect output &&
git commit-graph verify
@@ -581,6 +589,7 @@ test_expect_success 'do not write generation data chunk if the topmost remaining
header: 43475048 1 $(test_oid oid_version) 4 2
num_commits: $(($NUM_THIRD_LAYER_COMMITS + $NUM_FOURTH_LAYER_COMMITS))
chunks: oid_fanout oid_lookup commit_metadata
+ options:
EOF
test_cmp expect output &&
git commit-graph verify
@@ -620,6 +629,7 @@ test_expect_success 'write generation data chunk if topmost remaining layer has
header: 43475048 1 $(test_oid oid_version) 5 1
num_commits: $(($NUM_SECOND_LAYER_COMMITS + $NUM_THIRD_LAYER_COMMITS + $NUM_FOURTH_LAYER_COMMITS + $NUM_FIFTH_LAYER_COMMITS))
chunks: oid_fanout oid_lookup commit_metadata generation_data
+ options: read_generation_data
EOF
test_cmp expect output
)
diff --git a/t/t5326-multi-pack-bitmaps.sh b/t/t5326-multi-pack-bitmaps.sh
index e187f90..4fe5741 100755
--- a/t/t5326-multi-pack-bitmaps.sh
+++ b/t/t5326-multi-pack-bitmaps.sh
@@ -9,125 +9,13 @@ test_description='exercise basic multi-pack bitmap functionality'
GIT_TEST_MULTI_PACK_INDEX=0
GIT_TEST_MULTI_PACK_INDEX_WRITE_BITMAP=0
-objdir=.git/objects
-midx=$objdir/pack/multi-pack-index
+# This test exercises multi-pack bitmap functionality where the object order is
+# stored and read from a special chunk within the MIDX, so use the default
+# behavior here.
+sane_unset GIT_TEST_MIDX_WRITE_REV
+sane_unset GIT_TEST_MIDX_READ_RIDX
-# midx_pack_source <obj>
-midx_pack_source () {
- test-tool read-midx --show-objects .git/objects | grep "^$1 " | cut -f2
-}
-
-setup_bitmap_history
-
-test_expect_success 'enable core.multiPackIndex' '
- git config core.multiPackIndex true
-'
-
-test_expect_success 'create single-pack midx with bitmaps' '
- git repack -ad &&
- git multi-pack-index write --bitmap &&
- test_path_is_file $midx &&
- test_path_is_file $midx-$(midx_checksum $objdir).bitmap &&
- test_path_is_file $midx-$(midx_checksum $objdir).rev
-'
-
-basic_bitmap_tests
-
-test_expect_success 'create new additional packs' '
- for i in $(test_seq 1 16)
- do
- test_commit "$i" &&
- git repack -d || return 1
- done &&
-
- git checkout -b other2 HEAD~8 &&
- for i in $(test_seq 1 8)
- do
- test_commit "side-$i" &&
- git repack -d || return 1
- done &&
- git checkout second
-'
-
-test_expect_success 'create multi-pack midx with bitmaps' '
- git multi-pack-index write --bitmap &&
-
- ls $objdir/pack/pack-*.pack >packs &&
- test_line_count = 25 packs &&
-
- test_path_is_file $midx &&
- test_path_is_file $midx-$(midx_checksum $objdir).bitmap &&
- test_path_is_file $midx-$(midx_checksum $objdir).rev
-'
-
-basic_bitmap_tests
-
-test_expect_success '--no-bitmap is respected when bitmaps exist' '
- git multi-pack-index write --bitmap &&
-
- test_commit respect--no-bitmap &&
- git repack -d &&
-
- test_path_is_file $midx &&
- test_path_is_file $midx-$(midx_checksum $objdir).bitmap &&
- test_path_is_file $midx-$(midx_checksum $objdir).rev &&
-
- git multi-pack-index write --no-bitmap &&
-
- test_path_is_file $midx &&
- test_path_is_missing $midx-$(midx_checksum $objdir).bitmap &&
- test_path_is_missing $midx-$(midx_checksum $objdir).rev
-'
-
-test_expect_success 'setup midx with base from later pack' '
- # Write a and b so that "a" is a delta on top of base "b", since Git
- # prefers to delete contents out of a base rather than add to a shorter
- # object.
- test_seq 1 128 >a &&
- test_seq 1 130 >b &&
-
- git add a b &&
- git commit -m "initial commit" &&
-
- a=$(git rev-parse HEAD:a) &&
- b=$(git rev-parse HEAD:b) &&
-
- # In the first pack, "a" is stored as a delta to "b".
- p1=$(git pack-objects .git/objects/pack/pack <<-EOF
- $a
- $b
- EOF
- ) &&
-
- # In the second pack, "a" is missing, and "b" is not a delta nor base to
- # any other object.
- p2=$(git pack-objects .git/objects/pack/pack <<-EOF
- $b
- $(git rev-parse HEAD)
- $(git rev-parse HEAD^{tree})
- EOF
- ) &&
-
- git prune-packed &&
- # Use the second pack as the preferred source, so that "b" occurs
- # earlier in the MIDX object order, rendering "a" unusable for pack
- # reuse.
- git multi-pack-index write --bitmap --preferred-pack=pack-$p2.idx &&
-
- have_delta $a $b &&
- test $(midx_pack_source $a) != $(midx_pack_source $b)
-'
-
-rev_list_tests 'full bitmap with backwards delta'
-
-test_expect_success 'clone with bitmaps enabled' '
- git clone --no-local --bare . clone-reverse-delta.git &&
- test_when_finished "rm -fr clone-reverse-delta.git" &&
-
- git rev-parse HEAD >expect &&
- git --git-dir=clone-reverse-delta.git rev-parse HEAD >actual &&
- test_cmp expect actual
-'
+midx_bitmap_core
bitmap_reuse_tests() {
from=$1
@@ -204,17 +92,7 @@ test_expect_success 'missing object closure fails gracefully' '
)
'
-test_expect_success 'setup partial bitmaps' '
- test_commit packed &&
- git repack &&
- test_commit loose &&
- git multi-pack-index write --bitmap 2>err &&
- test_path_is_file $midx &&
- test_path_is_file $midx-$(midx_checksum $objdir).bitmap &&
- test_path_is_file $midx-$(midx_checksum $objdir).rev
-'
-
-basic_bitmap_tests HEAD~
+midx_bitmap_partial_tests
test_expect_success 'removing a MIDX clears stale bitmaps' '
rm -fr repo &&
@@ -228,7 +106,6 @@ test_expect_success 'removing a MIDX clears stale bitmaps' '
# Write a MIDX and bitmap; remove the MIDX but leave the bitmap.
stale_bitmap=$midx-$(midx_checksum $objdir).bitmap &&
- stale_rev=$midx-$(midx_checksum $objdir).rev &&
rm $midx &&
# Then write a new MIDX.
@@ -238,9 +115,7 @@ test_expect_success 'removing a MIDX clears stale bitmaps' '
test_path_is_file $midx &&
test_path_is_file $midx-$(midx_checksum $objdir).bitmap &&
- test_path_is_file $midx-$(midx_checksum $objdir).rev &&
- test_path_is_missing $stale_bitmap &&
- test_path_is_missing $stale_rev
+ test_path_is_missing $stale_bitmap
)
'
@@ -261,7 +136,6 @@ test_expect_success 'pack.preferBitmapTips' '
git multi-pack-index write --bitmap &&
test_path_is_file $midx &&
test_path_is_file $midx-$(midx_checksum $objdir).bitmap &&
- test_path_is_file $midx-$(midx_checksum $objdir).rev &&
test-tool bitmap list-commits | sort >bitmaps &&
comm -13 bitmaps commits >before &&
@@ -271,7 +145,6 @@ test_expect_success 'pack.preferBitmapTips' '
<before | git update-ref --stdin &&
rm -fr $midx-$(midx_checksum $objdir).bitmap &&
- rm -fr $midx-$(midx_checksum $objdir).rev &&
rm -fr $midx &&
git -c pack.preferBitmapTips=refs/tags/include \
@@ -309,7 +182,6 @@ test_expect_success 'writing a bitmap with --refs-snapshot' '
grep "$(git rev-parse two)" bitmaps &&
rm -fr $midx-$(midx_checksum $objdir).bitmap &&
- rm -fr $midx-$(midx_checksum $objdir).rev &&
rm -fr $midx &&
# Then again, but with a refs snapshot which only sees
@@ -354,7 +226,6 @@ test_expect_success 'write a bitmap with --refs-snapshot (preferred tips)' '
) >snapshot &&
rm -fr $midx-$(midx_checksum $objdir).bitmap &&
- rm -fr $midx-$(midx_checksum $objdir).rev &&
rm -fr $midx &&
git multi-pack-index write --bitmap --refs-snapshot=snapshot &&
@@ -395,4 +266,45 @@ test_expect_success 'hash-cache values are propagated from pack bitmaps' '
)
'
+test_expect_success 'no .bitmap is written without any objects' '
+ rm -fr repo &&
+ git init repo &&
+ test_when_finished "rm -fr repo" &&
+ (
+ cd repo &&
+
+ empty="$(git pack-objects $objdir/pack/pack </dev/null)" &&
+ cat >packs <<-EOF &&
+ pack-$empty.idx
+ EOF
+
+ git multi-pack-index write --bitmap --stdin-packs \
+ <packs 2>err &&
+
+ grep "bitmap without any objects" err &&
+
+ test_path_is_file $midx &&
+ test_path_is_missing $midx-$(midx_checksum $objdir).bitmap
+ )
+'
+
+test_expect_success 'graceful fallback when missing reverse index' '
+ rm -fr repo &&
+ git init repo &&
+ test_when_finished "rm -fr repo" &&
+ (
+ cd repo &&
+
+ test_commit base &&
+
+ # write a pack and MIDX bitmap containing base
+ git repack -adb &&
+ git multi-pack-index write --bitmap &&
+
+ GIT_TEST_MIDX_READ_RIDX=0 \
+ git rev-list --use-bitmap-index HEAD 2>err &&
+ ! grep "ignoring extra bitmap file" err
+ )
+'
+
test_done
diff --git a/t/t5327-multi-pack-bitmaps-rev.sh b/t/t5327-multi-pack-bitmaps-rev.sh
new file mode 100755
index 0000000..d30ba63
--- /dev/null
+++ b/t/t5327-multi-pack-bitmaps-rev.sh
@@ -0,0 +1,23 @@
+#!/bin/sh
+
+test_description='exercise basic multi-pack bitmap functionality (.rev files)'
+
+. ./test-lib.sh
+. "${TEST_DIRECTORY}/lib-bitmap.sh"
+
+# We'll be writing our own midx and bitmaps, so avoid getting confused by the
+# automatic ones.
+GIT_TEST_MULTI_PACK_INDEX=0
+GIT_TEST_MULTI_PACK_INDEX_WRITE_BITMAP=0
+
+# Unlike t5326, this test exercises multi-pack bitmap functionality where the
+# object order is stored in a separate .rev file.
+GIT_TEST_MIDX_WRITE_REV=1
+GIT_TEST_MIDX_READ_RIDX=0
+export GIT_TEST_MIDX_WRITE_REV
+export GIT_TEST_MIDX_READ_RIDX
+
+midx_bitmap_core rev
+midx_bitmap_partial_tests rev
+
+test_done
diff --git a/t/t5328-commit-graph-64bit-time.sh b/t/t5328-commit-graph-64bit-time.sh
new file mode 100755
index 0000000..093f0c0
--- /dev/null
+++ b/t/t5328-commit-graph-64bit-time.sh
@@ -0,0 +1,66 @@
+#!/bin/sh
+
+test_description='commit graph with 64-bit timestamps'
+. ./test-lib.sh
+
+if ! test_have_prereq TIME_IS_64BIT || ! test_have_prereq TIME_T_IS_64BIT
+then
+ skip_all='skipping 64-bit timestamp tests'
+	# neither create the branch nor the tag when either of the two fails
+	# to update correctly.
+
+. "$TEST_DIRECTORY"/lib-commit-graph.sh
+
+UNIX_EPOCH_ZERO="@0 +0000"
+FUTURE_DATE="@4147483646 +0000"
+
+GIT_TEST_COMMIT_GRAPH_CHANGED_PATHS=0
+
+test_expect_success 'lower layers have overflow chunk' '
+ rm -f .git/objects/info/commit-graph &&
+ test_commit --date "$FUTURE_DATE" future-1 &&
+ test_commit --date "$UNIX_EPOCH_ZERO" old-1 &&
+ git commit-graph write --reachable &&
+ test_commit --date "$FUTURE_DATE" future-2 &&
+ test_commit --date "$UNIX_EPOCH_ZERO" old-2 &&
+ git commit-graph write --reachable --split=no-merge &&
+ test_commit extra &&
+ git commit-graph write --reachable --split=no-merge &&
+ git commit-graph write --reachable &&
+ graph_read_expect 5 "generation_data generation_data_overflow" &&
+ mv .git/objects/info/commit-graph commit-graph-upgraded &&
+ git commit-graph write --reachable &&
+ graph_read_expect 5 "generation_data generation_data_overflow" &&
+ test_cmp .git/objects/info/commit-graph commit-graph-upgraded
+'
+
+graph_git_behavior 'overflow' '' HEAD~2 HEAD
+
+test_expect_success 'set up and verify repo with generation data overflow chunk' '
+ mkdir repo &&
+ cd repo &&
+ git init &&
+ test_commit --date "$UNIX_EPOCH_ZERO" 1 &&
+ test_commit 2 &&
+ test_commit --date "$UNIX_EPOCH_ZERO" 3 &&
+ git commit-graph write --reachable &&
+ graph_read_expect 3 generation_data &&
+ test_commit --date "$FUTURE_DATE" 4 &&
+ test_commit 5 &&
+ test_commit --date "$UNIX_EPOCH_ZERO" 6 &&
+ git branch left &&
+ git reset --hard 3 &&
+ test_commit 7 &&
+ test_commit --date "$FUTURE_DATE" 8 &&
+ test_commit 9 &&
+ git branch right &&
+ git reset --hard 3 &&
+ test_merge M left right &&
+ git commit-graph write --reachable &&
+ graph_read_expect 10 "generation_data generation_data_overflow" &&
+ git commit-graph verify
+'
+
+graph_git_behavior 'overflow 2' repo left right
+
+test_done
diff --git a/t/t5401-update-hooks.sh b/t/t5401-update-hooks.sh
index 6012cc8..001b7a1 100755
--- a/t/t5401-update-hooks.sh
+++ b/t/t5401-update-hooks.sh
@@ -20,45 +20,37 @@ test_expect_success setup '
git clone --bare ./. victim.git &&
GIT_DIR=victim.git git update-ref refs/heads/tofail $commit1 &&
git update-ref refs/heads/main $commit1 &&
- git update-ref refs/heads/tofail $commit0
-'
+ git update-ref refs/heads/tofail $commit0 &&
-cat >victim.git/hooks/pre-receive <<'EOF'
-#!/bin/sh
-printf %s "$@" >>$GIT_DIR/pre-receive.args
-cat - >$GIT_DIR/pre-receive.stdin
-echo STDOUT pre-receive
-echo STDERR pre-receive >&2
-EOF
-chmod u+x victim.git/hooks/pre-receive
+ test_hook --setup -C victim.git pre-receive <<-\EOF &&
+ printf %s "$@" >>$GIT_DIR/pre-receive.args
+ cat - >$GIT_DIR/pre-receive.stdin
+ echo STDOUT pre-receive
+ echo STDERR pre-receive >&2
+ EOF
-cat >victim.git/hooks/update <<'EOF'
-#!/bin/sh
-echo "$@" >>$GIT_DIR/update.args
-read x; printf %s "$x" >$GIT_DIR/update.stdin
-echo STDOUT update $1
-echo STDERR update $1 >&2
-test "$1" = refs/heads/main || exit
-EOF
-chmod u+x victim.git/hooks/update
+ test_hook --setup -C victim.git update <<-\EOF &&
+ echo "$@" >>$GIT_DIR/update.args
+ read x; printf %s "$x" >$GIT_DIR/update.stdin
+ echo STDOUT update $1
+ echo STDERR update $1 >&2
+ test "$1" = refs/heads/main || exit
+ EOF
-cat >victim.git/hooks/post-receive <<'EOF'
-#!/bin/sh
-printf %s "$@" >>$GIT_DIR/post-receive.args
-cat - >$GIT_DIR/post-receive.stdin
-echo STDOUT post-receive
-echo STDERR post-receive >&2
-EOF
-chmod u+x victim.git/hooks/post-receive
+ test_hook --setup -C victim.git post-receive <<-\EOF &&
+ printf %s "$@" >>$GIT_DIR/post-receive.args
+ cat - >$GIT_DIR/post-receive.stdin
+ echo STDOUT post-receive
+ echo STDERR post-receive >&2
+ EOF
-cat >victim.git/hooks/post-update <<'EOF'
-#!/bin/sh
-echo "$@" >>$GIT_DIR/post-update.args
-read x; printf %s "$x" >$GIT_DIR/post-update.stdin
-echo STDOUT post-update
-echo STDERR post-update >&2
-EOF
-chmod u+x victim.git/hooks/post-update
+ test_hook --setup -C victim.git post-update <<-\EOF
+ echo "$@" >>$GIT_DIR/post-update.args
+ read x; printf %s "$x" >$GIT_DIR/post-update.stdin
+ echo STDOUT post-update
+ echo STDERR post-update >&2
+ EOF
+'
test_expect_success push '
test_must_fail git send-pack --force ./victim.git \
@@ -136,7 +128,7 @@ test_expect_success 'send-pack stderr contains hook messages' '
'
test_expect_success 'pre-receive hook that forgets to read its input' '
- write_script victim.git/hooks/pre-receive <<-\EOF &&
+ test_hook --clobber -C victim.git pre-receive <<-\EOF &&
exit 0
EOF
rm -f victim.git/hooks/update victim.git/hooks/post-update &&
diff --git a/t/t5402-post-merge-hook.sh b/t/t5402-post-merge-hook.sh
index 3e5e19c..915af2d 100755
--- a/t/t5402-post-merge-hook.sh
+++ b/t/t5402-post-merge-hook.sh
@@ -25,13 +25,15 @@ test_expect_success setup '
GIT_DIR=clone2/.git git update-index --add a
'
-for clone in 1 2; do
- cat >clone${clone}/.git/hooks/post-merge <<'EOF'
-#!/bin/sh
-echo $@ >> $GIT_DIR/post-merge.args
-EOF
- chmod u+x clone${clone}/.git/hooks/post-merge
-done
+test_expect_success 'setup clone hooks' '
+ test_when_finished "rm -f hook" &&
+ cat >hook <<-\EOF &&
+ echo $@ >>$GIT_DIR/post-merge.args
+ EOF
+
+ test_hook --setup -C clone1 post-merge <hook &&
+ test_hook --setup -C clone2 post-merge <hook
+'
test_expect_success 'post-merge does not run for up-to-date ' '
GIT_DIR=clone1/.git git merge $commit0 &&
diff --git a/t/t5403-post-checkout-hook.sh b/t/t5403-post-checkout-hook.sh
index 1ec9e23..978f240 100755
--- a/t/t5403-post-checkout-hook.sh
+++ b/t/t5403-post-checkout-hook.sh
@@ -10,8 +10,7 @@ export GIT_TEST_DEFAULT_INITIAL_BRANCH_NAME
. ./test-lib.sh
test_expect_success setup '
- mkdir -p .git/hooks &&
- write_script .git/hooks/post-checkout <<-\EOF &&
+ test_hook --setup post-checkout <<-\EOF &&
echo "$@" >.git/post-checkout.args
EOF
test_commit one &&
@@ -49,23 +48,60 @@ test_expect_success 'post-checkout receives the right args when not switching br
test $old = $new && test $flag = 0
'
-test_expect_success 'post-checkout is triggered on rebase' '
- test_when_finished "rm -f .git/post-checkout.args" &&
- git checkout -b rebase-test main &&
- rm -f .git/post-checkout.args &&
- git rebase rebase-on-me &&
- read old new flag <.git/post-checkout.args &&
- test $old != $new && test $flag = 1
-'
+test_rebase () {
+ args="$*" &&
+ test_expect_success "post-checkout is triggered on rebase $args" '
+ test_when_finished "rm -f .git/post-checkout.args" &&
+ git checkout -B rebase-test main &&
+ rm -f .git/post-checkout.args &&
+ git rebase $args rebase-on-me &&
+ read old new flag <.git/post-checkout.args &&
+ test_cmp_rev main $old &&
+ test_cmp_rev rebase-on-me $new &&
+ test $flag = 1
+ '
+
+ test_expect_success "post-checkout is triggered on rebase $args with fast-forward" '
+ test_when_finished "rm -f .git/post-checkout.args" &&
+ git checkout -B ff-rebase-test rebase-on-me^ &&
+ rm -f .git/post-checkout.args &&
+ git rebase $args rebase-on-me &&
+ read old new flag <.git/post-checkout.args &&
+ test_cmp_rev rebase-on-me^ $old &&
+ test_cmp_rev rebase-on-me $new &&
+ test $flag = 1
+ '
+
+ test_expect_success "rebase $args fast-forward branch checkout runs post-checkout hook" '
+ test_when_finished "test_might_fail git rebase --abort" &&
+ test_when_finished "rm -f .git/post-checkout.args" &&
+ git update-ref refs/heads/rebase-fast-forward three &&
+ git checkout two &&
+ rm -f .git/post-checkout.args &&
+ git rebase $args HEAD rebase-fast-forward &&
+ read old new flag <.git/post-checkout.args &&
+ test_cmp_rev two $old &&
+ test_cmp_rev three $new &&
+ test $flag = 1
+ '
+
+ test_expect_success "rebase $args checkout does not remove untracked files" '
+ test_when_finished "test_might_fail git rebase --abort" &&
+ test_when_finished "rm -f .git/post-checkout.args" &&
+ git update-ref refs/heads/rebase-fast-forward three &&
+ git checkout two &&
+ rm -f .git/post-checkout.args &&
+ echo untracked >three.t &&
+ test_when_finished "rm three.t" &&
+ test_must_fail git rebase $args HEAD rebase-fast-forward 2>err &&
+ grep "untracked working tree files would be overwritten by checkout" err &&
+ test_path_is_missing .git/post-checkout.args
-test_expect_success 'post-checkout is triggered on rebase with fast-forward' '
- test_when_finished "rm -f .git/post-checkout.args" &&
- git checkout -b ff-rebase-test rebase-on-me^ &&
- rm -f .git/post-checkout.args &&
- git rebase rebase-on-me &&
- read old new flag <.git/post-checkout.args &&
- test $old != $new && test $flag = 1
'
+}
+
+test_rebase --apply &&
+test_rebase --merge
test_expect_success 'post-checkout hook is triggered by clone' '
mkdir -p templates/hooks &&
diff --git a/t/t5406-remote-rejects.sh b/t/t5406-remote-rejects.sh
index 5c509db..dcbeb42 100755
--- a/t/t5406-remote-rejects.sh
+++ b/t/t5406-remote-rejects.sh
@@ -5,7 +5,7 @@ test_description='remote push rejects are reported by client'
. ./test-lib.sh
test_expect_success 'setup' '
- write_script .git/hooks/update <<-\EOF &&
+ test_hook update <<-\EOF &&
exit 1
EOF
echo 1 >file &&
diff --git a/t/t5407-post-rewrite-hook.sh b/t/t5407-post-rewrite-hook.sh
index 6da8d76..5f3ff05 100755
--- a/t/t5407-post-rewrite-hook.sh
+++ b/t/t5407-post-rewrite-hook.sh
@@ -17,15 +17,13 @@ test_expect_success 'setup' '
git checkout A^0 &&
test_commit E bar E &&
test_commit F foo F &&
- git checkout main
-'
+ git checkout main &&
-cat >.git/hooks/post-rewrite <<EOF
-#!/bin/sh
-echo \$@ > "$TRASH_DIRECTORY"/post-rewrite.args
-cat > "$TRASH_DIRECTORY"/post-rewrite.data
-EOF
-chmod u+x .git/hooks/post-rewrite
+ test_hook --setup post-rewrite <<-EOF
+ echo \$@ > "$TRASH_DIRECTORY"/post-rewrite.args
+ cat > "$TRASH_DIRECTORY"/post-rewrite.data
+ EOF
+'
clear_hook_input () {
rm -f post-rewrite.args post-rewrite.data
diff --git a/t/t5409-colorize-remote-messages.sh b/t/t5409-colorize-remote-messages.sh
index 9f1a483..fa5de45 100755
--- a/t/t5409-colorize-remote-messages.sh
+++ b/t/t5409-colorize-remote-messages.sh
@@ -5,7 +5,7 @@ test_description='remote messages are colorized on the client'
. ./test-lib.sh
test_expect_success 'setup' '
- write_script .git/hooks/update <<-\EOF &&
+ test_hook --setup update <<-\EOF &&
echo error: error
echo ERROR: also highlighted
echo hint: hint
diff --git a/t/t5411-proc-receive-hook.sh b/t/t5411-proc-receive-hook.sh
index 98b0e81..92cf52c 100755
--- a/t/t5411-proc-receive-hook.sh
+++ b/t/t5411-proc-receive-hook.sh
@@ -36,7 +36,7 @@ setup_upstream_and_workbench () {
TAG=$(git -C workbench rev-parse v123) &&
# setup pre-receive hook
- write_script upstream.git/hooks/pre-receive <<-\EOF &&
+ test_hook --setup -C upstream.git pre-receive <<-\EOF &&
exec >&2
echo "# pre-receive hook"
while read old new ref
@@ -46,7 +46,7 @@ setup_upstream_and_workbench () {
EOF
# setup post-receive hook
- write_script upstream.git/hooks/post-receive <<-\EOF &&
+ test_hook --setup -C upstream.git post-receive <<-\EOF &&
exec >&2
echo "# post-receive hook"
while read old new ref
diff --git a/t/t5411/once-0010-report-status-v1.sh b/t/t5411/once-0010-report-status-v1.sh
index 297b109..f9ffb01 100644
--- a/t/t5411/once-0010-report-status-v1.sh
+++ b/t/t5411/once-0010-report-status-v1.sh
@@ -3,7 +3,7 @@ test_expect_success "setup receive.procReceiveRefs" '
'
test_expect_success "setup proc-receive hook" '
- write_script "$upstream/hooks/proc-receive" <<-EOF
+ test_hook -C "$upstream" --clobber proc-receive <<-EOF
printf >&2 "# proc-receive hook\n"
test-tool proc-receive -v \
-r "ok refs/for/main/topic1" \
diff --git a/t/t5411/test-0002-pre-receive-declined.sh b/t/t5411/test-0002-pre-receive-declined.sh
index 0c3490c..98a9d13 100644
--- a/t/t5411/test-0002-pre-receive-declined.sh
+++ b/t/t5411/test-0002-pre-receive-declined.sh
@@ -1,6 +1,6 @@
test_expect_success "setup pre-receive hook ($PROTOCOL)" '
mv "$upstream/hooks/pre-receive" "$upstream/hooks/pre-receive.ok" &&
- write_script "$upstream/hooks/pre-receive" <<-EOF
+ test_hook -C "$upstream" --clobber pre-receive <<-\EOF
exit 1
EOF
'
@@ -21,7 +21,7 @@ test_expect_success "git-push is declined ($PROTOCOL)" '
EOF
test_cmp expect actual &&
- test_cmp_refs -C "$upstream" <<-EOF
+ test_cmp_refs -C "$upstream" <<-\EOF
<COMMIT-A> refs/heads/main
EOF
'
diff --git a/t/t5411/test-0003-pre-receive-declined--porcelain.sh b/t/t5411/test-0003-pre-receive-declined--porcelain.sh
index 2393b04..67ca6dc 100644
--- a/t/t5411/test-0003-pre-receive-declined--porcelain.sh
+++ b/t/t5411/test-0003-pre-receive-declined--porcelain.sh
@@ -1,6 +1,6 @@
test_expect_success "setup pre-receive hook ($PROTOCOL/porcelain)" '
mv "$upstream/hooks/pre-receive" "$upstream/hooks/pre-receive.ok" &&
- write_script "$upstream/hooks/pre-receive" <<-EOF
+ test_hook -C "$upstream" --clobber pre-receive <<-\EOF
exit 1
EOF
'
diff --git a/t/t5411/test-0013-bad-protocol.sh b/t/t5411/test-0013-bad-protocol.sh
index c08a00d..8d22e17 100644
--- a/t/t5411/test-0013-bad-protocol.sh
+++ b/t/t5411/test-0013-bad-protocol.sh
@@ -1,5 +1,5 @@
test_expect_success "setup proc-receive hook (unknown version, $PROTOCOL)" '
- write_script "$upstream/hooks/proc-receive" <<-EOF
+ test_hook -C "$upstream" --clobber proc-receive <<-\EOF
printf >&2 "# proc-receive hook\n"
test-tool proc-receive -v --version 2
EOF
@@ -40,7 +40,7 @@ test_expect_success "proc-receive: bad protocol (unknown version, $PROTOCOL)" '
'
test_expect_success "setup proc-receive hook (hook --die-read-version, $PROTOCOL)" '
- write_script "$upstream/hooks/proc-receive" <<-EOF
+ test_hook -C "$upstream" --clobber proc-receive <<-\EOF
printf >&2 "# proc-receive hook\n"
test-tool proc-receive -v --die-read-version
EOF
@@ -65,13 +65,13 @@ test_expect_success "proc-receive: bad protocol (hook --die-read-version, $PROTO
grep "remote: fatal: die with the --die-read-version option" out-$test_count &&
grep "remote: error: fail to negotiate version with proc-receive hook" out-$test_count &&
- test_cmp_refs -C "$upstream" <<-EOF
+ test_cmp_refs -C "$upstream" <<-\EOF
<COMMIT-A> refs/heads/main
EOF
'
test_expect_success "setup proc-receive hook (hook --die-write-version, $PROTOCOL)" '
- write_script "$upstream/hooks/proc-receive" <<-EOF
+ test_hook -C "$upstream" --clobber proc-receive <<-\EOF
printf >&2 "# proc-receive hook\n"
test-tool proc-receive -v --die-write-version
EOF
@@ -102,7 +102,7 @@ test_expect_success "proc-receive: bad protocol (hook --die-write-version, $PROT
'
test_expect_success "setup proc-receive hook (hook --die-read-commands, $PROTOCOL)" '
- write_script "$upstream/hooks/proc-receive" <<-EOF
+ test_hook -C "$upstream" --clobber proc-receive <<-\EOF
printf >&2 "# proc-receive hook\n"
test-tool proc-receive -v --die-read-commands
EOF
@@ -132,7 +132,7 @@ test_expect_success "proc-receive: bad protocol (hook --die-read-commands, $PROT
'
test_expect_success "setup proc-receive hook (hook --die-read-push-options, $PROTOCOL)" '
- write_script "$upstream/hooks/proc-receive" <<-EOF
+ test_hook -C "$upstream" --clobber proc-receive <<-\EOF
printf >&2 "# proc-receive hook\n"
test-tool proc-receive -v --die-read-push-options
EOF
@@ -164,7 +164,7 @@ test_expect_success "proc-receive: bad protocol (hook --die-read-push-options, $
'
test_expect_success "setup proc-receive hook (hook --die-write-report, $PROTOCOL)" '
- write_script "$upstream/hooks/proc-receive" <<-EOF
+ test_hook -C "$upstream" --clobber proc-receive <<-\EOF
printf >&2 "# proc-receive hook\n"
test-tool proc-receive -v --die-write-report
EOF
@@ -194,7 +194,7 @@ test_expect_success "proc-receive: bad protocol (hook --die-write-report, $PROTO
'
test_expect_success "setup proc-receive hook (no report, $PROTOCOL)" '
- write_script "$upstream/hooks/proc-receive" <<-EOF
+ test_hook -C "$upstream" --clobber proc-receive <<-\EOF
printf >&2 "# proc-receive hook\n"
test-tool proc-receive -v
EOF
@@ -236,7 +236,7 @@ test_expect_success "cleanup ($PROTOCOL)" '
'
test_expect_success "setup proc-receive hook (no ref, $PROTOCOL)" '
- write_script "$upstream/hooks/proc-receive" <<-EOF
+ test_hook -C "$upstream" --clobber proc-receive <<-\EOF
printf >&2 "# proc-receive hook\n"
test-tool proc-receive -v \
-r "ok"
@@ -269,7 +269,7 @@ test_expect_success "proc-receive: bad protocol (no ref, $PROTOCOL)" '
'
test_expect_success "setup proc-receive hook (unknown status, $PROTOCOL)" '
- write_script "$upstream/hooks/proc-receive" <<-EOF
+ test_hook -C "$upstream" --clobber proc-receive <<-\EOF
printf >&2 "# proc-receive hook\n"
test-tool proc-receive -v \
-r "xx refs/for/main/topic"
diff --git a/t/t5411/test-0014-bad-protocol--porcelain.sh b/t/t5411/test-0014-bad-protocol--porcelain.sh
index 3eaa597..298a3d1 100644
--- a/t/t5411/test-0014-bad-protocol--porcelain.sh
+++ b/t/t5411/test-0014-bad-protocol--porcelain.sh
@@ -1,5 +1,5 @@
test_expect_success "setup proc-receive hook (unknown version, $PROTOCOL/porcelain)" '
- write_script "$upstream/hooks/proc-receive" <<-EOF
+ test_hook -C "$upstream" --clobber proc-receive <<-\EOF
printf >&2 "# proc-receive hook\n"
test-tool proc-receive -v --version 2
EOF
@@ -40,7 +40,7 @@ test_expect_success "proc-receive: bad protocol (unknown version, $PROTOCOL/porc
'
test_expect_success "setup proc-receive hook (hook --die-read-version, $PROTOCOL/porcelain)" '
- write_script "$upstream/hooks/proc-receive" <<-EOF
+ test_hook -C "$upstream" --clobber proc-receive <<-EOF
printf >&2 "# proc-receive hook\n"
test-tool proc-receive -v --die-read-version
EOF
@@ -71,7 +71,7 @@ test_expect_success "proc-receive: bad protocol (hook --die-read-version, $PROTO
'
test_expect_success "setup proc-receive hook (hook --die-write-version, $PROTOCOL/porcelain)" '
- write_script "$upstream/hooks/proc-receive" <<-EOF
+ test_hook -C "$upstream" --clobber proc-receive <<-\EOF
printf >&2 "# proc-receive hook\n"
test-tool proc-receive -v --die-write-version
EOF
@@ -102,7 +102,7 @@ test_expect_success "proc-receive: bad protocol (hook --die-write-version, $PROT
'
test_expect_success "setup proc-receive hook (hook --die-read-commands, $PROTOCOL/porcelain)" '
- write_script "$upstream/hooks/proc-receive" <<-EOF
+ test_hook -C "$upstream" --clobber proc-receive <<-\EOF
printf >&2 "# proc-receive hook\n"
test-tool proc-receive -v --die-read-commands
EOF
@@ -132,7 +132,7 @@ test_expect_success "proc-receive: bad protocol (hook --die-read-commands, $PROT
'
test_expect_success "setup proc-receive hook (hook --die-read-push-options, $PROTOCOL/porcelain)" '
- write_script "$upstream/hooks/proc-receive" <<-EOF
+ test_hook -C "$upstream" --clobber proc-receive <<-\EOF
printf >&2 "# proc-receive hook\n"
test-tool proc-receive -v --die-read-push-options
EOF
@@ -164,7 +164,7 @@ test_expect_success "proc-receive: bad protocol (hook --die-read-push-options, $
'
test_expect_success "setup proc-receive hook (hook --die-write-report, $PROTOCOL/porcelain)" '
- write_script "$upstream/hooks/proc-receive" <<-EOF
+ test_hook -C "$upstream" --clobber proc-receive <<-\EOF
printf >&2 "# proc-receive hook\n"
test-tool proc-receive -v --die-write-report
EOF
@@ -194,7 +194,7 @@ test_expect_success "proc-receive: bad protocol (hook --die-write-report, $PROTO
'
test_expect_success "setup proc-receive hook (no report, $PROTOCOL/porcelain)" '
- write_script "$upstream/hooks/proc-receive" <<-EOF
+ test_hook -C "$upstream" --clobber proc-receive <<-\EOF
printf >&2 "# proc-receive hook\n"
test-tool proc-receive -v
EOF
@@ -236,7 +236,7 @@ test_expect_success "cleanup ($PROTOCOL/porcelain)" '
'
test_expect_success "setup proc-receive hook (no ref, $PROTOCOL/porcelain)" '
- write_script "$upstream/hooks/proc-receive" <<-EOF
+ test_hook -C "$upstream" --clobber proc-receive <<-\EOF
printf >&2 "# proc-receive hook\n"
test-tool proc-receive -v \
-r "ok"
@@ -270,7 +270,7 @@ test_expect_success "proc-receive: bad protocol (no ref, $PROTOCOL/porcelain)" '
'
test_expect_success "setup proc-receive hook (unknown status, $PROTOCOL/porcelain)" '
- write_script "$upstream/hooks/proc-receive" <<-EOF
+ test_hook -C "$upstream" --clobber proc-receive <<-\EOF
printf >&2 "# proc-receive hook\n"
test-tool proc-receive -v \
-r "xx refs/for/main/topic"
diff --git a/t/t5411/test-0020-report-ng.sh b/t/t5411/test-0020-report-ng.sh
index e915dbc..6347c96 100644
--- a/t/t5411/test-0020-report-ng.sh
+++ b/t/t5411/test-0020-report-ng.sh
@@ -1,5 +1,5 @@
test_expect_success "setup proc-receive hook (ng, no message, $PROTOCOL)" '
- write_script "$upstream/hooks/proc-receive" <<-EOF
+ test_hook -C "$upstream" --clobber proc-receive <<-\EOF
printf >&2 "# proc-receive hook\n"
test-tool proc-receive -v \
-r "ng refs/for/main/topic"
@@ -31,7 +31,7 @@ test_expect_success "proc-receive: fail to update (ng, no message, $PROTOCOL)" '
'
test_expect_success "setup proc-receive hook (ng message, $PROTOCOL)" '
- write_script "$upstream/hooks/proc-receive" <<-EOF
+ test_hook -C "$upstream" --clobber proc-receive <<-\EOF
printf >&2 "# proc-receive hook\n"
test-tool proc-receive -v \
-r "ng refs/for/main/topic error msg"
diff --git a/t/t5411/test-0021-report-ng--porcelain.sh b/t/t5411/test-0021-report-ng--porcelain.sh
index 2a392e0..502b34f 100644
--- a/t/t5411/test-0021-report-ng--porcelain.sh
+++ b/t/t5411/test-0021-report-ng--porcelain.sh
@@ -1,5 +1,5 @@
test_expect_success "setup proc-receive hook (ng, no message, $PROTOCOL/porcelain)" '
- write_script "$upstream/hooks/proc-receive" <<-EOF
+ test_hook -C "$upstream" --clobber proc-receive <<-\EOF
printf >&2 "# proc-receive hook\n"
test-tool proc-receive -v \
-r "ng refs/for/main/topic"
@@ -32,7 +32,7 @@ test_expect_success "proc-receive: fail to update (ng, no message, $PROTOCOL/por
'
test_expect_success "setup proc-receive hook (ng message, $PROTOCOL/porcelain)" '
- write_script "$upstream/hooks/proc-receive" <<-EOF
+ test_hook -C "$upstream" --clobber proc-receive <<-\EOF
printf >&2 "# proc-receive hook\n"
test-tool proc-receive -v \
-r "ng refs/for/main/topic error msg"
diff --git a/t/t5411/test-0022-report-unexpect-ref.sh b/t/t5411/test-0022-report-unexpect-ref.sh
index f7a494b..7744392 100644
--- a/t/t5411/test-0022-report-unexpect-ref.sh
+++ b/t/t5411/test-0022-report-unexpect-ref.sh
@@ -1,5 +1,5 @@
test_expect_success "setup proc-receive hook (unexpected ref, $PROTOCOL)" '
- write_script "$upstream/hooks/proc-receive" <<-EOF
+ test_hook -C "$upstream" --clobber proc-receive <<-\EOF
printf >&2 "# proc-receive hook\n"
test-tool proc-receive -v \
-r "ok refs/heads/main"
diff --git a/t/t5411/test-0023-report-unexpect-ref--porcelain.sh b/t/t5411/test-0023-report-unexpect-ref--porcelain.sh
index 63c479e..6d116ef 100644
--- a/t/t5411/test-0023-report-unexpect-ref--porcelain.sh
+++ b/t/t5411/test-0023-report-unexpect-ref--porcelain.sh
@@ -1,5 +1,5 @@
test_expect_success "setup proc-receive hook (unexpected ref, $PROTOCOL/porcelain)" '
- write_script "$upstream/hooks/proc-receive" <<-EOF
+ test_hook -C "$upstream" --clobber proc-receive <<-\EOF
printf >&2 "# proc-receive hook\n"
test-tool proc-receive -v \
-r "ok refs/heads/main"
diff --git a/t/t5411/test-0024-report-unknown-ref.sh b/t/t5411/test-0024-report-unknown-ref.sh
index af055aa..619ca2f 100644
--- a/t/t5411/test-0024-report-unknown-ref.sh
+++ b/t/t5411/test-0024-report-unknown-ref.sh
@@ -1,5 +1,5 @@
test_expect_success "setup proc-receive hook (unexpected ref, $PROTOCOL)" '
- write_script "$upstream/hooks/proc-receive" <<-EOF
+ test_hook -C "$upstream" --clobber proc-receive <<-\EOF
printf >&2 "# proc-receive hook\n"
test-tool proc-receive -v \
-r "ok refs/for/main/topic"
diff --git a/t/t5411/test-0025-report-unknown-ref--porcelain.sh b/t/t5411/test-0025-report-unknown-ref--porcelain.sh
index 99601ca..8b3f5d0 100644
--- a/t/t5411/test-0025-report-unknown-ref--porcelain.sh
+++ b/t/t5411/test-0025-report-unknown-ref--porcelain.sh
@@ -1,5 +1,5 @@
test_expect_success "setup proc-receive hook (unexpected ref, $PROTOCOL/porcelain)" '
- write_script "$upstream/hooks/proc-receive" <<-EOF
+ test_hook -C "$upstream" --clobber proc-receive <<-\EOF
printf >&2 "# proc-receive hook\n"
test-tool proc-receive -v \
-r "ok refs/for/main/topic"
diff --git a/t/t5411/test-0026-push-options.sh b/t/t5411/test-0026-push-options.sh
index fec5f95..6dfc7b1 100644
--- a/t/t5411/test-0026-push-options.sh
+++ b/t/t5411/test-0026-push-options.sh
@@ -1,6 +1,6 @@
test_expect_success "setup proc-receive hook and disable push-options ($PROTOCOL)" '
git -C "$upstream" config receive.advertisePushOptions false &&
- write_script "$upstream/hooks/proc-receive" <<-EOF
+ test_hook -C "$upstream" --clobber proc-receive <<-\EOF
printf >&2 "# proc-receive hook\n"
test-tool proc-receive -v \
-r "ok refs/for/main/topic"
@@ -31,7 +31,7 @@ test_expect_success "enable push options ($PROTOCOL)" '
'
test_expect_success "setup version=0 for proc-receive hook ($PROTOCOL)" '
- write_script "$upstream/hooks/proc-receive" <<-EOF
+ test_hook -C "$upstream" --clobber proc-receive <<-\EOF
printf >&2 "# proc-receive hook\n"
test-tool proc-receive -v \
--version 0 \
@@ -75,7 +75,7 @@ test_expect_success "proc-receive: ignore push-options for version 0 ($PROTOCOL)
'
test_expect_success "restore proc-receive hook ($PROTOCOL)" '
- write_script "$upstream/hooks/proc-receive" <<-EOF
+ test_hook -C "$upstream" --clobber proc-receive <<-\EOF
printf >&2 "# proc-receive hook\n"
test-tool proc-receive -v \
-r "ok refs/for/main/topic"
diff --git a/t/t5411/test-0027-push-options--porcelain.sh b/t/t5411/test-0027-push-options--porcelain.sh
index 8fb75a8..768880b 100644
--- a/t/t5411/test-0027-push-options--porcelain.sh
+++ b/t/t5411/test-0027-push-options--porcelain.sh
@@ -1,6 +1,6 @@
test_expect_success "setup proc-receive hook and disable push-options ($PROTOCOL/porcelain)" '
git -C "$upstream" config receive.advertisePushOptions false &&
- write_script "$upstream/hooks/proc-receive" <<-EOF
+ test_hook -C "$upstream" --clobber proc-receive <<-\EOF
printf >&2 "# proc-receive hook\n"
test-tool proc-receive -v \
-r "ok refs/for/main/topic"
@@ -32,7 +32,7 @@ test_expect_success "enable push options ($PROTOCOL/porcelain)" '
'
test_expect_success "setup version=0 for proc-receive hook ($PROTOCOL/porcelain)" '
- write_script "$upstream/hooks/proc-receive" <<-EOF
+ test_hook -C "$upstream" --clobber proc-receive <<-\EOF
printf >&2 "# proc-receive hook\n"
test-tool proc-receive -v \
--version 0 \
@@ -78,7 +78,7 @@ test_expect_success "proc-receive: ignore push-options for version 0 ($PROTOCOL/
'
test_expect_success "restore proc-receive hook ($PROTOCOL/porcelain)" '
- write_script "$upstream/hooks/proc-receive" <<-EOF
+ test_hook -C "$upstream" --clobber proc-receive <<-\EOF
printf >&2 "# proc-receive hook\n"
test-tool proc-receive -v \
-r "ok refs/for/main/topic"
diff --git a/t/t5411/test-0030-report-ok.sh b/t/t5411/test-0030-report-ok.sh
index a3a6278..0f190a6 100644
--- a/t/t5411/test-0030-report-ok.sh
+++ b/t/t5411/test-0030-report-ok.sh
@@ -1,5 +1,5 @@
test_expect_success "setup proc-receive hook (ok, $PROTOCOL)" '
- write_script "$upstream/hooks/proc-receive" <<-EOF
+ test_hook -C "$upstream" --clobber proc-receive <<-\EOF
printf >&2 "# proc-receive hook\n"
test-tool proc-receive -v \
-r "ok refs/for/main/topic"
diff --git a/t/t5411/test-0031-report-ok--porcelain.sh b/t/t5411/test-0031-report-ok--porcelain.sh
index 0e17538..7ec3981 100644
--- a/t/t5411/test-0031-report-ok--porcelain.sh
+++ b/t/t5411/test-0031-report-ok--porcelain.sh
@@ -1,5 +1,5 @@
test_expect_success "setup proc-receive hook (ok, $PROTOCOL/porcelain)" '
- write_script "$upstream/hooks/proc-receive" <<-EOF
+ test_hook -C "$upstream" --clobber proc-receive <<-\EOF
printf >&2 "# proc-receive hook\n"
test-tool proc-receive -v \
-r "ok refs/for/main/topic"
diff --git a/t/t5411/test-0032-report-with-options.sh b/t/t5411/test-0032-report-with-options.sh
index 988a430..07733b9 100644
--- a/t/t5411/test-0032-report-with-options.sh
+++ b/t/t5411/test-0032-report-with-options.sh
@@ -1,5 +1,5 @@
test_expect_success "setup proc-receive hook (option without matching ok, $PROTOCOL)" '
- write_script "$upstream/hooks/proc-receive" <<-EOF
+ test_hook -C "$upstream" --clobber proc-receive <<-EOF
printf >&2 "# proc-receive hook\n"
test-tool proc-receive -v \
-r "option refname refs/pull/123/head" \
@@ -30,7 +30,7 @@ test_expect_success "proc-receive: report option without matching ok ($PROTOCOL)
'
test_expect_success "setup proc-receive hook (option refname, $PROTOCOL)" '
- write_script "$upstream/hooks/proc-receive" <<-EOF
+ test_hook -C "$upstream" --clobber proc-receive <<-\EOF
printf >&2 "# proc-receive hook\n"
test-tool proc-receive -v \
-r "ok refs/for/main/topic" \
@@ -62,7 +62,7 @@ test_expect_success "proc-receive: report option refname ($PROTOCOL)" '
'
test_expect_success "setup proc-receive hook (option refname and forced-update, $PROTOCOL)" '
- write_script "$upstream/hooks/proc-receive" <<-EOF
+ test_hook -C "$upstream" --clobber proc-receive <<-\EOF
printf >&2 "# proc-receive hook\n"
test-tool proc-receive -v \
-r "ok refs/for/main/topic" \
@@ -95,7 +95,7 @@ test_expect_success "proc-receive: report option refname and forced-update ($PRO
'
test_expect_success "setup proc-receive hook (option refname and old-oid, $PROTOCOL)" '
- write_script "$upstream/hooks/proc-receive" <<-EOF
+ test_hook -C "$upstream" --clobber proc-receive <<-EOF
printf >&2 "# proc-receive hook\n"
test-tool proc-receive -v \
-r "ok refs/for/main/topic" \
@@ -129,7 +129,7 @@ test_expect_success "proc-receive: report option refname and old-oid ($PROTOCOL)
'
test_expect_success "setup proc-receive hook (option old-oid, $PROTOCOL)" '
- write_script "$upstream/hooks/proc-receive" <<-EOF
+ test_hook -C "$upstream" --clobber proc-receive <<-EOF
printf >&2 "# proc-receive hook\n"
test-tool proc-receive -v \
-r "ok refs/for/main/topic" \
@@ -161,7 +161,7 @@ test_expect_success "proc-receive: report option old-oid ($PROTOCOL)" '
'
test_expect_success "setup proc-receive hook (option old-oid and new-oid, $PROTOCOL)" '
- write_script "$upstream/hooks/proc-receive" <<-EOF
+ test_hook -C "$upstream" --clobber proc-receive <<-EOF
printf >&2 "# proc-receive hook\n"
test-tool proc-receive -v \
-r "ok refs/for/main/topic" \
@@ -195,7 +195,7 @@ test_expect_success "proc-receive: report option old-oid and new-oid ($PROTOCOL)
'
test_expect_success "setup proc-receive hook (report with multiple rewrites, $PROTOCOL)" '
- write_script "$upstream/hooks/proc-receive" <<-EOF
+ test_hook -C "$upstream" --clobber proc-receive <<-EOF
printf >&2 "# proc-receive hook\n"
test-tool proc-receive -v \
-r "ok refs/for/a/b/c/topic" \
diff --git a/t/t5411/test-0033-report-with-options--porcelain.sh b/t/t5411/test-0033-report-with-options--porcelain.sh
index daacb3d..2e1831b 100644
--- a/t/t5411/test-0033-report-with-options--porcelain.sh
+++ b/t/t5411/test-0033-report-with-options--porcelain.sh
@@ -1,5 +1,5 @@
test_expect_success "setup proc-receive hook (option without matching ok, $PROTOCOL/porcelain)" '
- write_script "$upstream/hooks/proc-receive" <<-EOF
+ test_hook -C "$upstream" --clobber proc-receive <<-EOF
printf >&2 "# proc-receive hook\n"
test-tool proc-receive -v \
-r "option refname refs/pull/123/head" \
@@ -31,7 +31,7 @@ test_expect_success "proc-receive: report option without matching ok ($PROTOCOL/
'
test_expect_success "setup proc-receive hook (option refname, $PROTOCOL/porcelain)" '
- write_script "$upstream/hooks/proc-receive" <<-EOF
+ test_hook -C "$upstream" --clobber proc-receive <<-\EOF
printf >&2 "# proc-receive hook\n"
test-tool proc-receive -v \
-r "ok refs/for/main/topic" \
@@ -64,7 +64,7 @@ test_expect_success "proc-receive: report option refname ($PROTOCOL/porcelain)"
'
test_expect_success "setup proc-receive hook (option refname and forced-update, $PROTOCOL/porcelain)" '
- write_script "$upstream/hooks/proc-receive" <<-EOF
+ test_hook -C "$upstream" --clobber proc-receive <<-\EOF
printf >&2 "# proc-receive hook\n"
test-tool proc-receive -v \
-r "ok refs/for/main/topic" \
@@ -99,7 +99,7 @@ test_expect_success "proc-receive: report option refname and forced-update ($PRO
'
test_expect_success "setup proc-receive hook (option refname and old-oid, $PROTOCOL/porcelain)" '
- write_script "$upstream/hooks/proc-receive" <<-EOF
+ test_hook -C "$upstream" --clobber proc-receive <<-EOF
printf >&2 "# proc-receive hook\n"
test-tool proc-receive -v \
-r "ok refs/for/main/topic" \
@@ -134,7 +134,7 @@ test_expect_success "proc-receive: report option refname and old-oid ($PROTOCOL/
'
test_expect_success "setup proc-receive hook (option old-oid, $PROTOCOL/porcelain)" '
- write_script "$upstream/hooks/proc-receive" <<-EOF
+ test_hook -C "$upstream" --clobber proc-receive <<-EOF
printf >&2 "# proc-receive hook\n"
test-tool proc-receive -v \
-r "ok refs/for/main/topic" \
@@ -167,7 +167,7 @@ test_expect_success "proc-receive: report option old-oid ($PROTOCOL/porcelain)"
'
test_expect_success "setup proc-receive hook (option old-oid and new-oid, $PROTOCOL/porcelain)" '
- write_script "$upstream/hooks/proc-receive" <<-EOF
+ test_hook -C "$upstream" --clobber proc-receive <<-EOF
printf >&2 "# proc-receive hook\n"
test-tool proc-receive -v \
-r "ok refs/for/main/topic" \
@@ -202,7 +202,7 @@ test_expect_success "proc-receive: report option old-oid and new-oid ($PROTOCOL/
'
test_expect_success "setup proc-receive hook (report with multiple rewrites, $PROTOCOL/porcelain)" '
- write_script "$upstream/hooks/proc-receive" <<-EOF
+ test_hook -C "$upstream" --clobber proc-receive <<-EOF
printf >&2 "# proc-receive hook\n"
test-tool proc-receive -v \
-r "ok refs/for/a/b/c/topic" \
diff --git a/t/t5411/test-0034-report-ft.sh b/t/t5411/test-0034-report-ft.sh
index 73a47d1..0e37535 100644
--- a/t/t5411/test-0034-report-ft.sh
+++ b/t/t5411/test-0034-report-ft.sh
@@ -1,5 +1,5 @@
test_expect_success "setup proc-receive hook (ft, $PROTOCOL)" '
- write_script "$upstream/hooks/proc-receive" <<-EOF
+ test_hook -C "$upstream" --clobber proc-receive <<-\EOF
printf >&2 "# proc-receive hook\n"
test-tool proc-receive -v \
-r "ok refs/for/main/topic" \
diff --git a/t/t5411/test-0035-report-ft--porcelain.sh b/t/t5411/test-0035-report-ft--porcelain.sh
index c350201..b9a0518 100644
--- a/t/t5411/test-0035-report-ft--porcelain.sh
+++ b/t/t5411/test-0035-report-ft--porcelain.sh
@@ -1,5 +1,5 @@
test_expect_success "setup proc-receive hook (fall-through, $PROTOCOL/porcelain)" '
- write_script "$upstream/hooks/proc-receive" <<-EOF
+ test_hook -C "$upstream" --clobber proc-receive <<-\EOF
printf >&2 "# proc-receive hook\n"
test-tool proc-receive -v \
-r "ok refs/for/main/topic" \
diff --git a/t/t5411/test-0036-report-multi-rewrite-for-one-ref.sh b/t/t5411/test-0036-report-multi-rewrite-for-one-ref.sh
index 8c8a6c1..889e970 100644
--- a/t/t5411/test-0036-report-multi-rewrite-for-one-ref.sh
+++ b/t/t5411/test-0036-report-multi-rewrite-for-one-ref.sh
@@ -14,7 +14,7 @@ test_expect_success "setup git config for remote-tracking of special refs" '
'
test_expect_success "setup proc-receive hook (multiple rewrites for one ref, no refname for the 1st rewrite, $PROTOCOL)" '
- write_script "$upstream/hooks/proc-receive" <<-EOF
+ test_hook -C "$upstream" --clobber proc-receive <<-EOF
printf >&2 "# proc-receive hook\n"
test-tool proc-receive -v \
-r "ok refs/for/main/topic" \
@@ -87,7 +87,7 @@ test_expect_success "proc-receive: check remote-tracking #1 ($PROTOCOL)" '
'
test_expect_success "setup proc-receive hook (multiple rewrites for one ref, no refname for the 2nd rewrite, $PROTOCOL)" '
- write_script "$upstream/hooks/proc-receive" <<-EOF
+ test_hook -C "$upstream" --clobber proc-receive <<-EOF
printf >&2 "# proc-receive hook\n"
test-tool proc-receive -v \
-r "ok refs/for/main/topic" \
@@ -162,7 +162,7 @@ test_expect_success "proc-receive: check remote-tracking #2 ($PROTOCOL)" '
'
test_expect_success "setup proc-receive hook (multiple rewrites for one ref, $PROTOCOL)" '
- write_script "$upstream/hooks/proc-receive" <<-EOF
+ test_hook -C "$upstream" --clobber proc-receive <<-EOF
printf >&2 "# proc-receive hook\n"
test-tool proc-receive -v \
-r "ok refs/for/main/topic" \
diff --git a/t/t5411/test-0037-report-multi-rewrite-for-one-ref--porcelain.sh b/t/t5411/test-0037-report-multi-rewrite-for-one-ref--porcelain.sh
index bc44810..1e523b1 100644
--- a/t/t5411/test-0037-report-multi-rewrite-for-one-ref--porcelain.sh
+++ b/t/t5411/test-0037-report-multi-rewrite-for-one-ref--porcelain.sh
@@ -1,5 +1,5 @@
test_expect_success "setup proc-receive hook (multiple rewrites for one ref, no refname for the 1st rewrite, $PROTOCOL/porcelain)" '
- write_script "$upstream/hooks/proc-receive" <<-EOF
+ test_hook -C "$upstream" --clobber proc-receive <<-EOF
printf >&2 "# proc-receive hook\n"
test-tool proc-receive -v \
-r "ok refs/for/main/topic" \
@@ -58,7 +58,7 @@ test_expect_success "proc-receive: multiple rewrite for one ref, no refname for
'
test_expect_success "setup proc-receive hook (multiple rewrites for one ref, no refname for the 2nd rewrite, $PROTOCOL/porcelain)" '
- write_script "$upstream/hooks/proc-receive" <<-EOF
+ test_hook -C "$upstream" --clobber proc-receive <<-EOF
printf >&2 "# proc-receive hook\n"
test-tool proc-receive -v \
-r "ok refs/for/main/topic" \
@@ -119,7 +119,7 @@ test_expect_success "proc-receive: multiple rewrites for one ref, no refname for
'
test_expect_success "setup proc-receive hook (multiple rewrites for one ref, $PROTOCOL/porcelain)" '
- write_script "$upstream/hooks/proc-receive" <<-EOF
+ test_hook -C "$upstream" --clobber proc-receive <<-EOF
printf >&2 "# proc-receive hook\n"
test-tool proc-receive -v \
-r "ok refs/for/main/topic" \
diff --git a/t/t5411/test-0038-report-mixed-refs.sh b/t/t5411/test-0038-report-mixed-refs.sh
index e63fe7b..4c70e84 100644
--- a/t/t5411/test-0038-report-mixed-refs.sh
+++ b/t/t5411/test-0038-report-mixed-refs.sh
@@ -1,5 +1,5 @@
test_expect_success "setup proc-receive hook ($PROTOCOL)" '
- write_script "$upstream/hooks/proc-receive" <<-EOF
+ test_hook -C "$upstream" --clobber proc-receive <<-EOF
printf >&2 "# proc-receive hook\n"
test-tool proc-receive -v \
-r "ok refs/for/next/topic2" \
diff --git a/t/t5411/test-0039-report-mixed-refs--porcelain.sh b/t/t5411/test-0039-report-mixed-refs--porcelain.sh
index 99d17b7..40f4c5b 100644
--- a/t/t5411/test-0039-report-mixed-refs--porcelain.sh
+++ b/t/t5411/test-0039-report-mixed-refs--porcelain.sh
@@ -1,5 +1,5 @@
test_expect_success "setup proc-receive hook ($PROTOCOL/porcelain)" '
- write_script "$upstream/hooks/proc-receive" <<-EOF
+ test_hook -C "$upstream" --clobber proc-receive <<-EOF
printf >&2 "# proc-receive hook\n"
test-tool proc-receive -v \
-r "ok refs/for/next/topic2" \
diff --git a/t/t5411/test-0040-process-all-refs.sh b/t/t5411/test-0040-process-all-refs.sh
index 2f405ad..7ae3851 100644
--- a/t/t5411/test-0040-process-all-refs.sh
+++ b/t/t5411/test-0040-process-all-refs.sh
@@ -17,7 +17,7 @@ test_expect_success "setup upstream branches ($PROTOCOL)" '
'
test_expect_success "setup proc-receive hook ($PROTOCOL)" '
- write_script "$upstream/hooks/proc-receive" <<-EOF
+ test_hook -C "$upstream" --clobber proc-receive <<-EOF
printf >&2 "# proc-receive hook\n"
test-tool proc-receive -v \
-r "ok refs/heads/main" \
diff --git a/t/t5411/test-0041-process-all-refs--porcelain.sh b/t/t5411/test-0041-process-all-refs--porcelain.sh
index c884057..02e1e08 100644
--- a/t/t5411/test-0041-process-all-refs--porcelain.sh
+++ b/t/t5411/test-0041-process-all-refs--porcelain.sh
@@ -17,7 +17,7 @@ test_expect_success "setup upstream branches ($PROTOCOL/porcelain)" '
'
test_expect_success "setup proc-receive hook ($PROTOCOL/porcelain)" '
- write_script "$upstream/hooks/proc-receive" <<-EOF
+ test_hook -C "$upstream" --clobber proc-receive <<-EOF
printf >&2 "# proc-receive hook\n"
test-tool proc-receive -v \
-r "ok refs/heads/main" \
diff --git a/t/t5411/test-0050-proc-receive-refs-with-modifiers.sh b/t/t5411/test-0050-proc-receive-refs-with-modifiers.sh
index 31989f0..7efdfe5 100644
--- a/t/t5411/test-0050-proc-receive-refs-with-modifiers.sh
+++ b/t/t5411/test-0050-proc-receive-refs-with-modifiers.sh
@@ -9,7 +9,7 @@ test_expect_success "config receive.procReceiveRefs with modifiers ($PROTOCOL)"
'
test_expect_success "setup proc-receive hook ($PROTOCOL)" '
- write_script "$upstream/hooks/proc-receive" <<-EOF
+ test_hook -C "$upstream" --clobber proc-receive <<-EOF
printf >&2 "# proc-receive hook\n"
test-tool proc-receive -v \
-r "ok refs/heads/main" \
@@ -70,7 +70,7 @@ test_expect_success "setup upstream: create tags/v123 ($PROTOCOL)" '
'
test_expect_success "setup proc-receive hook ($PROTOCOL)" '
- write_script "$upstream/hooks/proc-receive" <<-EOF
+ test_hook -C "$upstream" --clobber proc-receive <<-EOF
printf >&2 "# proc-receive hook\n"
test-tool proc-receive -v \
-r "ok refs/heads/main" \
diff --git a/t/t5500-fetch-pack.sh b/t/t5500-fetch-pack.sh
index f0dc4e6..ee6d2dd 100755
--- a/t/t5500-fetch-pack.sh
+++ b/t/t5500-fetch-pack.sh
@@ -927,7 +927,8 @@ test_expect_success 'fetching deepen' '
)
'
-test_expect_success 'use ref advertisement to prune "have" lines sent' '
+test_negotiation_algorithm_default () {
+ test_when_finished rm -rf clientv0 clientv2 &&
rm -rf server client &&
git init server &&
test_commit -C server both_have_1 &&
@@ -946,7 +947,7 @@ test_expect_success 'use ref advertisement to prune "have" lines sent' '
rm -f trace &&
cp -r client clientv0 &&
GIT_TRACE_PACKET="$(pwd)/trace" git -C clientv0 \
- fetch origin server_has both_have_2 &&
+ "$@" fetch origin server_has both_have_2 &&
grep "have $(git -C client rev-parse client_has)" trace &&
grep "have $(git -C client rev-parse both_have_2)" trace &&
! grep "have $(git -C client rev-parse both_have_2^)" trace &&
@@ -954,10 +955,27 @@ test_expect_success 'use ref advertisement to prune "have" lines sent' '
rm -f trace &&
cp -r client clientv2 &&
GIT_TRACE_PACKET="$(pwd)/trace" git -C clientv2 -c protocol.version=2 \
- fetch origin server_has both_have_2 &&
+ "$@" fetch origin server_has both_have_2 &&
grep "have $(git -C client rev-parse client_has)" trace &&
grep "have $(git -C client rev-parse both_have_2)" trace &&
! grep "have $(git -C client rev-parse both_have_2^)" trace
+}
+
+test_expect_success 'use ref advertisement to prune "have" lines sent' '
+ test_negotiation_algorithm_default
+'
+
+test_expect_success 'same as last but with config overrides' '
+ test_negotiation_algorithm_default \
+ -c feature.experimental=true \
+ -c fetch.negotiationAlgorithm=consecutive
+'
+
+test_expect_success 'ensure bogus fetch.negotiationAlgorithm yields error' '
+ test_when_finished rm -rf clientv0 &&
+ cp -r client clientv0 &&
+ test_must_fail git -C clientv0 --fetch.negotiationAlgorithm=bogus \
+ fetch origin server_has both_have_2
'
test_expect_success 'filtering by size' '
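The refactoring turns the former inline test into test_negotiation_algorithm_default, which splices its arguments between `git -C clientvN` and `fetch` via "$@". The configuration-override caller above therefore effectively runs (sketch, only the clientv0 invocation shown):

    GIT_TRACE_PACKET="$(pwd)/trace" git -C clientv0 \
        -c feature.experimental=true \
        -c fetch.negotiationAlgorithm=consecutive \
        fetch origin server_has both_have_2

with the same grep assertions on the recorded "have" lines applied afterwards.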
diff --git a/t/t5503-tagfollow.sh b/t/t5503-tagfollow.sh
index 195fc64..acdb731 100755
--- a/t/t5503-tagfollow.sh
+++ b/t/t5503-tagfollow.sh
@@ -160,4 +160,68 @@ test_expect_success 'new clone fetch main and tags' '
test_cmp expect actual
'
+test_expect_success 'atomic fetch with failing backfill' '
+ git init clone3 &&
+
+ # We want to test whether a failure when backfilling tags correctly
+ # aborts the complete transaction when `--atomic` is passed: we should
+ # create neither the branch nor the tag if either of them fails to
+ # update correctly.
+ #
+ # To trigger failure we simply abort when backfilling a tag.
+ test_hook -C clone3 reference-transaction <<-\EOF &&
+ while read oldrev newrev reference
+ do
+ if test "$reference" = refs/tags/tag1
+ then
+ exit 1
+ fi
+ done
+ EOF
+
+ test_must_fail git -C clone3 fetch --atomic .. $B:refs/heads/something &&
+ test_must_fail git -C clone3 rev-parse --verify refs/heads/something &&
+ test_must_fail git -C clone3 rev-parse --verify refs/tags/tag2
+'
+
+test_expect_success 'atomic fetch with backfill should use single transaction' '
+ git init clone4 &&
+
+ # Fetching with the `--atomic` flag should update all references in a
+ # single transaction, including backfilled tags. We thus expect to see
+ # a single reference transaction for the created branch and tags.
+ cat >expected <<-EOF &&
+ prepared
+ $ZERO_OID $B refs/heads/something
+ $ZERO_OID $S refs/tags/tag2
+ $ZERO_OID $T refs/tags/tag1
+ committed
+ $ZERO_OID $B refs/heads/something
+ $ZERO_OID $S refs/tags/tag2
+ $ZERO_OID $T refs/tags/tag1
+ EOF
+
+ test_hook -C clone4 reference-transaction <<-\EOF &&
+ ( echo "$*" && cat ) >>actual
+ EOF
+
+ git -C clone4 fetch --atomic .. $B:refs/heads/something &&
+ test_cmp expected clone4/actual
+'
+
+test_expect_success 'backfill failure causes command to fail' '
+ git init clone5 &&
+
+ # Create a tag that is nested below the tag we are about to fetch via
+ # the backfill mechanism. This causes a D/F conflict when backfilling
+ # and should thus cause the command to fail.
+ empty_blob=$(git -C clone5 hash-object -w --stdin </dev/null) &&
+ git -C clone5 update-ref refs/tags/tag1/nested $empty_blob &&
+
+ test_must_fail git -C clone5 fetch .. $B:refs/heads/something &&
+ test $B = $(git -C clone5 rev-parse --verify refs/heads/something) &&
+ test $S = $(git -C clone5 rev-parse --verify tag2) &&
+ test_must_fail git -C clone5 rev-parse --verify tag1
+'
+
test_done
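Both new t5503 tests rely on the reference-transaction hook: Git invokes it with the transaction state ("prepared", "committed" or "aborted") as its only argument and feeds one "<old-oid> <new-oid> <refname>" line per queued update on stdin, which is why the clone4 hook can simply append "$*" plus its stdin to a log. A slightly more explicit sketch of the same recording hook (equivalent in spirit, not taken from the patch):

    test_hook -C clone4 reference-transaction <<-\EOF
    # $1 is the transaction state; stdin lists the queued ref updates
    state=$1
    while read old new ref
    do
        printf "%s %s %s %s\n" "$state" "$old" "$new" "$ref"
    done >>actual
    EOF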
diff --git a/t/t5505-remote.sh b/t/t5505-remote.sh
index 9ab3154..c90cf47 100755
--- a/t/t5505-remote.sh
+++ b/t/t5505-remote.sh
@@ -753,7 +753,9 @@ test_expect_success 'rename a remote' '
(
cd four &&
git config branch.main.pushRemote origin &&
- git remote rename origin upstream &&
+ GIT_TRACE2_EVENT=$(pwd)/trace \
+ git remote rename --progress origin upstream &&
+ test_region progress "Renaming remote references" trace &&
grep "pushRemote" .git/config &&
test -z "$(git for-each-ref refs/remotes/origin)" &&
test "$(git symbolic-ref refs/remotes/upstream/HEAD)" = "refs/remotes/upstream/main" &&
diff --git a/t/t5510-fetch.sh b/t/t5510-fetch.sh
index 20f7110..6f38a69 100755
--- a/t/t5510-fetch.sh
+++ b/t/t5510-fetch.sh
@@ -164,6 +164,17 @@ test_expect_success 'fetch --prune --tags with refspec prunes based on refspec'
git rev-parse sometag
'
+test_expect_success REFFILES 'fetch --prune fails to delete branches' '
+ cd "$D" &&
+ git clone . prune-fail &&
+ cd prune-fail &&
+ git update-ref refs/remotes/origin/extrabranch main &&
+ : this will prevent --prune from locking packed-refs for deleting refs, but adding loose refs still succeeds &&
+ >.git/packed-refs.new &&
+
+ test_must_fail git fetch --prune origin
+'
+
test_expect_success 'fetch --atomic works with a single branch' '
test_when_finished "rm -rf \"$D\"/atomic" &&
@@ -262,7 +273,7 @@ test_expect_success 'fetch --atomic executes a single reference transaction only
EOF
rm -f atomic/actual &&
- write_script atomic/.git/hooks/reference-transaction <<-\EOF &&
+ test_hook -C atomic reference-transaction <<-\EOF &&
( echo "$*" && cat ) >>actual
EOF
@@ -295,7 +306,7 @@ test_expect_success 'fetch --atomic aborts all reference updates if hook aborts'
EOF
rm -f atomic/actual &&
- write_script atomic/.git/hooks/reference-transaction <<-\EOF &&
+ test_hook -C atomic/.git reference-transaction <<-\EOF &&
( echo "$*" && cat ) >>actual
exit 1
EOF
@@ -323,7 +334,7 @@ test_expect_success 'fetch --atomic --append appends to FETCH_HEAD' '
test_line_count = 2 atomic/.git/FETCH_HEAD &&
cp atomic/.git/FETCH_HEAD expected &&
- write_script atomic/.git/hooks/reference-transaction <<-\EOF &&
+ test_hook -C atomic reference-transaction <<-\EOF &&
exit 1
EOF
@@ -332,6 +343,35 @@ test_expect_success 'fetch --atomic --append appends to FETCH_HEAD' '
test_cmp expected atomic/.git/FETCH_HEAD
'
+test_expect_success 'fetch --atomic --prune executes a single reference transaction only' '
+ test_when_finished "rm -rf \"$D\"/atomic" &&
+
+ cd "$D" &&
+ git branch scheduled-for-deletion &&
+ git clone . atomic &&
+ git branch -D scheduled-for-deletion &&
+ git branch new-branch &&
+ head_oid=$(git rev-parse HEAD) &&
+
+ # Fetching with the `--atomic` flag should update all references in a
+ # single transaction.
+ cat >expected <<-EOF &&
+ prepared
+ $ZERO_OID $ZERO_OID refs/remotes/origin/scheduled-for-deletion
+ $ZERO_OID $head_oid refs/remotes/origin/new-branch
+ committed
+ $ZERO_OID $ZERO_OID refs/remotes/origin/scheduled-for-deletion
+ $ZERO_OID $head_oid refs/remotes/origin/new-branch
+ EOF
+
+ test_hook -C atomic reference-transaction <<-\EOF &&
+ ( echo "$*" && cat ) >>actual
+ EOF
+
+ git -C atomic fetch --atomic --prune origin &&
+ test_cmp expected atomic/actual
+'
+
test_expect_success '--refmap="" ignores configured refspec' '
cd "$TRASH_DIRECTORY" &&
git clone "$D" remote-refs &&
diff --git a/t/t5511-refspec.sh b/t/t5511-refspec.sh
index be025b9..fc55681 100755
--- a/t/t5511-refspec.sh
+++ b/t/t5511-refspec.sh
@@ -2,6 +2,7 @@
test_description='refspec parsing'
+TEST_PASSES_SANITIZE_LEAK=true
. ./test-lib.sh
test_refspec () {
diff --git a/t/t5516-fetch-push.sh b/t/t5516-fetch-push.sh
index 2f04cf9..4dfb080 100755
--- a/t/t5516-fetch-push.sh
+++ b/t/t5516-fetch-push.sh
@@ -23,14 +23,10 @@ D=$(pwd)
mk_empty () {
repo_name="$1"
- rm -fr "$repo_name" &&
- mkdir "$repo_name" &&
- (
- cd "$repo_name" &&
- git init &&
- git config receive.denyCurrentBranch warn &&
- mv .git/hooks .git/hooks-disabled
- )
+ test_when_finished "rm -rf \"$repo_name\"" &&
+ test_path_is_missing "$repo_name" &&
+ git init "$repo_name" &&
+ git -C "$repo_name" config receive.denyCurrentBranch warn
}
mk_test () {
@@ -59,40 +55,28 @@ mk_test () {
mk_test_with_hooks() {
repo_name=$1
mk_test "$@" &&
- (
- cd "$repo_name" &&
- mkdir .git/hooks &&
- cd .git/hooks &&
-
- cat >pre-receive <<-'EOF' &&
- #!/bin/sh
- cat - >>pre-receive.actual
- EOF
-
- cat >update <<-'EOF' &&
- #!/bin/sh
- printf "%s %s %s\n" "$@" >>update.actual
- EOF
-
- cat >post-receive <<-'EOF' &&
- #!/bin/sh
- cat - >>post-receive.actual
- EOF
-
- cat >post-update <<-'EOF' &&
- #!/bin/sh
- for ref in "$@"
- do
- printf "%s\n" "$ref" >>post-update.actual
- done
- EOF
-
- chmod +x pre-receive update post-receive post-update
- )
+ test_hook -C "$repo_name" pre-receive <<-'EOF' &&
+ cat - >>pre-receive.actual
+ EOF
+
+ test_hook -C "$repo_name" update <<-'EOF' &&
+ printf "%s %s %s\n" "$@" >>update.actual
+ EOF
+
+ test_hook -C "$repo_name" post-receive <<-'EOF' &&
+ cat - >>post-receive.actual
+ EOF
+
+ test_hook -C "$repo_name" post-update <<-'EOF'
+ for ref in "$@"
+ do
+ printf "%s\n" "$ref" >>post-update.actual
+ done
+ EOF
}
mk_child() {
- rm -rf "$2" &&
+ test_when_finished "rm -rf \"$2\"" &&
git clone "$1" "$2"
}
@@ -197,38 +181,50 @@ grep_wrote () {
grep 'write_pack_file/wrote.*"value":"'$1'"' $2
}
-test_expect_success 'push with negotiation' '
- # Without negotiation
+test_expect_success 'push without negotiation' '
mk_empty testrepo &&
git push testrepo $the_first_commit:refs/remotes/origin/first_commit &&
test_commit -C testrepo unrelated_commit &&
git -C testrepo config receive.hideRefs refs/remotes/origin/first_commit &&
- echo now pushing without negotiation &&
+ test_when_finished "rm event" &&
GIT_TRACE2_EVENT="$(pwd)/event" git -c protocol.version=2 push testrepo refs/heads/main:refs/remotes/origin/main &&
- grep_wrote 5 event && # 2 commits, 2 trees, 1 blob
+ grep_wrote 5 event # 2 commits, 2 trees, 1 blob
+'
- # Same commands, but with negotiation
- rm event &&
+test_expect_success 'push with negotiation' '
mk_empty testrepo &&
git push testrepo $the_first_commit:refs/remotes/origin/first_commit &&
test_commit -C testrepo unrelated_commit &&
git -C testrepo config receive.hideRefs refs/remotes/origin/first_commit &&
+ test_when_finished "rm event" &&
GIT_TRACE2_EVENT="$(pwd)/event" git -c protocol.version=2 -c push.negotiate=1 push testrepo refs/heads/main:refs/remotes/origin/main &&
grep_wrote 2 event # 1 commit, 1 tree
'
test_expect_success 'push with negotiation proceeds anyway even if negotiation fails' '
- rm event &&
mk_empty testrepo &&
git push testrepo $the_first_commit:refs/remotes/origin/first_commit &&
test_commit -C testrepo unrelated_commit &&
git -C testrepo config receive.hideRefs refs/remotes/origin/first_commit &&
+ test_when_finished "rm event" &&
GIT_TEST_PROTOCOL_VERSION=0 GIT_TRACE2_EVENT="$(pwd)/event" \
git -c push.negotiate=1 push testrepo refs/heads/main:refs/remotes/origin/main 2>err &&
grep_wrote 5 event && # 2 commits, 2 trees, 1 blob
test_i18ngrep "push negotiation failed" err
'
+test_expect_success 'push with negotiation does not attempt to fetch submodules' '
+ mk_empty submodule_upstream &&
+ test_commit -C submodule_upstream submodule_commit &&
+ git submodule add ./submodule_upstream submodule &&
+ mk_empty testrepo &&
+ git push testrepo $the_first_commit:refs/remotes/origin/first_commit &&
+ test_commit -C testrepo unrelated_commit &&
+ git -C testrepo config receive.hideRefs refs/remotes/origin/first_commit &&
+ git -c submodule.recurse=true -c protocol.version=2 -c push.negotiate=1 push testrepo refs/heads/main:refs/remotes/origin/main 2>err &&
+ ! grep "Fetching submodule" err
+'
+
test_expect_success 'push without wildcard' '
mk_empty testrepo &&
@@ -656,7 +652,6 @@ test_expect_success 'push does not update local refs on failure' '
mk_test testrepo heads/main &&
mk_child testrepo child &&
- mkdir testrepo/.git/hooks &&
echo "#!/no/frobnication/today" >testrepo/.git/hooks/pre-receive &&
chmod +x testrepo/.git/hooks/pre-receive &&
(
@@ -1318,7 +1313,7 @@ done
test_expect_success 'fetch follows tags by default' '
mk_test testrepo heads/main &&
- rm -fr src dst &&
+ test_when_finished "rm -rf src" &&
git init src &&
(
cd src &&
@@ -1328,6 +1323,7 @@ test_expect_success 'fetch follows tags by default' '
sed -n "p; s|refs/heads/main$|refs/remotes/origin/main|p" tmp1 |
sort -k 3 >../expect
) &&
+ test_when_finished "rm -rf dst" &&
git init dst &&
(
cd dst &&
@@ -1353,8 +1349,9 @@ test_expect_success 'peeled advertisements are not considered ref tips' '
test_expect_success 'pushing a specific ref applies remote.$name.push as refmap' '
mk_test testrepo heads/main &&
- rm -fr src dst &&
+ test_when_finished "rm -rf src" &&
git init src &&
+ test_when_finished "rm -rf dst" &&
git init --bare dst &&
(
cd src &&
@@ -1377,8 +1374,9 @@ test_expect_success 'pushing a specific ref applies remote.$name.push as refmap'
test_expect_success 'with no remote.$name.push, it is not used as refmap' '
mk_test testrepo heads/main &&
- rm -fr src dst &&
+ test_when_finished "rm -rf src" &&
git init src &&
+ test_when_finished "rm -rf dst" &&
git init --bare dst &&
(
cd src &&
@@ -1399,8 +1397,9 @@ test_expect_success 'with no remote.$name.push, it is not used as refmap' '
test_expect_success 'with no remote.$name.push, upstream mapping is used' '
mk_test testrepo heads/main &&
- rm -fr src dst &&
+ test_when_finished "rm -rf src" &&
git init src &&
+ test_when_finished "rm -rf dst" &&
git init --bare dst &&
(
cd src &&
@@ -1428,8 +1427,9 @@ test_expect_success 'with no remote.$name.push, upstream mapping is used' '
test_expect_success 'push does not follow tags by default' '
mk_test testrepo heads/main &&
- rm -fr src dst &&
+ test_when_finished "rm -rf src" &&
git init src &&
+ test_when_finished "rm -rf dst" &&
git init --bare dst &&
(
cd src &&
@@ -1451,8 +1451,9 @@ test_expect_success 'push does not follow tags by default' '
test_expect_success 'push --follow-tags only pushes relevant tags' '
mk_test testrepo heads/main &&
- rm -fr src dst &&
+ test_when_finished "rm -rf src" &&
git init src &&
+ test_when_finished "rm -rf dst" &&
git init --bare dst &&
(
cd src &&
@@ -1490,9 +1491,9 @@ EOF
'
test_expect_success 'pushing a tag pushes the tagged object' '
- rm -rf dst.git &&
blob=$(echo unreferenced | git hash-object -w --stdin) &&
git tag -m foo tag-of-blob $blob &&
+ test_when_finished "rm -rf dst.git" &&
git init --bare dst.git &&
git push dst.git tag-of-blob &&
# the receiving index-pack should have noticed
@@ -1503,7 +1504,7 @@ test_expect_success 'pushing a tag pushes the tagged object' '
'
test_expect_success 'push into bare respects core.logallrefupdates' '
- rm -rf dst.git &&
+ test_when_finished "rm -rf dst.git" &&
git init --bare dst.git &&
git -C dst.git config core.logallrefupdates true &&
@@ -1521,7 +1522,7 @@ test_expect_success 'push into bare respects core.logallrefupdates' '
'
test_expect_success 'fetch into bare respects core.logallrefupdates' '
- rm -rf dst.git &&
+ test_when_finished "rm -rf dst.git" &&
git init --bare dst.git &&
(
cd dst.git &&
@@ -1542,6 +1543,7 @@ test_expect_success 'fetch into bare respects core.logallrefupdates' '
'
test_expect_success 'receive.denyCurrentBranch = updateInstead' '
+ mk_empty testrepo &&
git push testrepo main &&
(
cd testrepo &&
@@ -1644,7 +1646,7 @@ test_expect_success 'receive.denyCurrentBranch = updateInstead' '
) &&
# (5) push into void
- rm -fr void &&
+ test_when_finished "rm -rf void" &&
git init void &&
(
cd void &&
@@ -1666,26 +1668,23 @@ test_expect_success 'receive.denyCurrentBranch = updateInstead' '
'
test_expect_success 'updateInstead with push-to-checkout hook' '
- rm -fr testrepo &&
+ test_when_finished "rm -rf testrepo" &&
git init testrepo &&
- (
- cd testrepo &&
- git pull .. main &&
- git reset --hard HEAD^^ &&
- git tag initial &&
- git config receive.denyCurrentBranch updateInstead &&
- write_script .git/hooks/push-to-checkout <<-\EOF
- echo >&2 updating from $(git rev-parse HEAD)
- echo >&2 updating to "$1"
-
- git update-index -q --refresh &&
- git read-tree -u -m HEAD "$1" || {
- status=$?
- echo >&2 read-tree failed
- exit $status
- }
- EOF
- ) &&
+ git -C testrepo pull .. main &&
+ git -C testrepo reset --hard HEAD^^ &&
+ git -C testrepo tag initial &&
+ git -C testrepo config receive.denyCurrentBranch updateInstead &&
+ test_hook -C testrepo push-to-checkout <<-\EOF &&
+ echo >&2 updating from $(git rev-parse HEAD)
+ echo >&2 updating to "$1"
+
+ git update-index -q --refresh &&
+ git read-tree -u -m HEAD "$1" || {
+ status=$?
+ echo >&2 read-tree failed
+ exit $status
+ }
+ EOF
# Try pushing into a pristine
git push testrepo main &&
@@ -1728,35 +1727,32 @@ test_expect_success 'updateInstead with push-to-checkout hook' '
) &&
# push into void
- rm -fr void &&
+ test_when_finished "rm -rf void" &&
git init void &&
- (
- cd void &&
- git config receive.denyCurrentBranch updateInstead &&
- write_script .git/hooks/push-to-checkout <<-\EOF
- if git rev-parse --quiet --verify HEAD
- then
- has_head=yes
- echo >&2 updating from $(git rev-parse HEAD)
- else
- has_head=no
- echo >&2 pushing into void
- fi
- echo >&2 updating to "$1"
-
- git update-index -q --refresh &&
- case "$has_head" in
- yes)
- git read-tree -u -m HEAD "$1" ;;
- no)
- git read-tree -u -m "$1" ;;
- esac || {
- status=$?
- echo >&2 read-tree failed
- exit $status
- }
- EOF
- ) &&
+ git -C void config receive.denyCurrentBranch updateInstead &&
+ test_hook -C void push-to-checkout <<-\EOF &&
+ if git rev-parse --quiet --verify HEAD
+ then
+ has_head=yes
+ echo >&2 updating from $(git rev-parse HEAD)
+ else
+ has_head=no
+ echo >&2 pushing into void
+ fi
+ echo >&2 updating to "$1"
+
+ git update-index -q --refresh &&
+ case "$has_head" in
+ yes)
+ git read-tree -u -m HEAD "$1" ;;
+ no)
+ git read-tree -u -m "$1" ;;
+ esac || {
+ status=$?
+ echo >&2 read-tree failed
+ exit $status
+ }
+ EOF
git push void main &&
(
@@ -1809,4 +1805,12 @@ test_expect_success 'refuse fetch to current branch of bare repository worktree'
git -C bare.git fetch -u .. HEAD:wt
'
+test_expect_success 'refuse to push a hidden ref, and make sure do not pollute the repository' '
+ mk_empty testrepo &&
+ git -C testrepo config receive.hiderefs refs/hidden &&
+ git -C testrepo config receive.unpackLimit 1 &&
+ test_must_fail git push testrepo HEAD:refs/hidden/foo &&
+ test_dir_is_empty testrepo/.git/objects/pack
+'
+
test_done
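The negotiation tests above quantify what went over the wire by grepping the trace2 event stream: grep_wrote, defined earlier in this file, looks for the write_pack_file/wrote counter and the expected object count. A hedged sketch of the same measurement done directly (counts taken from the tests above):

    GIT_TRACE2_EVENT="$(pwd)/event" \
        git -c protocol.version=2 -c push.negotiate=1 \
        push testrepo refs/heads/main:refs/remotes/origin/main &&
    # with negotiation, only 1 commit and 1 tree are written
    grep "write_pack_file/wrote.*\"value\":\"2\"" event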
diff --git a/t/t5520-pull.sh b/t/t5520-pull.sh
index 93ecfcd..0818080 100755
--- a/t/t5520-pull.sh
+++ b/t/t5520-pull.sh
@@ -330,6 +330,19 @@ test_expect_success '--rebase --autostash fast forward' '
test_cmp_rev HEAD to-rebase-ff
'
+test_expect_success '--rebase with rebase.autostash succeeds on ff' '
+ test_when_finished "rm -fr src dst actual" &&
+ git init src &&
+ test_commit -C src "initial" file "content" &&
+ git clone src dst &&
+ test_commit -C src --printf "more_content" file "more content\ncontent\n" &&
+ echo "dirty" >>dst/file &&
+ test_config -C dst rebase.autostash true &&
+ git -C dst pull --rebase >actual 2>&1 &&
+ grep -q "Fast-forward" actual &&
+ grep -q "Applied autostash." actual
+'
+
test_expect_success '--rebase with conflicts shows advice' '
test_when_finished "git rebase --abort; git checkout -f to-rebase" &&
git checkout -b seq &&
diff --git a/t/t5521-pull-options.sh b/t/t5521-pull-options.sh
index 66cfcb0..264de29 100755
--- a/t/t5521-pull-options.sh
+++ b/t/t5521-pull-options.sh
@@ -233,7 +233,7 @@ test_expect_success 'git pull --no-verify flag passed to merge' '
git init src &&
test_commit -C src one &&
git clone src dst &&
- write_script dst/.git/hooks/commit-msg <<-\EOF &&
+ test_hook -C dst commit-msg <<-\EOF &&
false
EOF
test_commit -C src two &&
@@ -245,7 +245,7 @@ test_expect_success 'git pull --no-verify --verify passed to merge' '
git init src &&
test_commit -C src one &&
git clone src dst &&
- write_script dst/.git/hooks/commit-msg <<-\EOF &&
+ test_hook -C dst commit-msg <<-\EOF &&
false
EOF
test_commit -C src two &&
diff --git a/t/t5526-fetch-submodules.sh b/t/t5526-fetch-submodules.sh
index 840c89c..43dada8 100755
--- a/t/t5526-fetch-submodules.sh
+++ b/t/t5526-fetch-submodules.sh
@@ -10,33 +10,122 @@ export GIT_TEST_FATAL_REGISTER_SUBMODULE_ODB
pwd=$(pwd)
-add_upstream_commit() {
+write_expected_sub () {
+ NEW_HEAD=$1 &&
+ SUPER_HEAD=$2 &&
+ cat >"$pwd/expect.err.sub" <<-EOF
+ Fetching submodule submodule${SUPER_HEAD:+ at commit $SUPER_HEAD}
+ From $pwd/submodule
+ OLD_HEAD..$NEW_HEAD sub -> origin/sub
+ EOF
+}
+
+write_expected_sub2 () {
+ NEW_HEAD=$1 &&
+ SUPER_HEAD=$2 &&
+ cat >"$pwd/expect.err.sub2" <<-EOF
+ Fetching submodule submodule2${SUPER_HEAD:+ at commit $SUPER_HEAD}
+ From $pwd/submodule2
+ OLD_HEAD..$NEW_HEAD sub2 -> origin/sub2
+ EOF
+}
+
+write_expected_deep () {
+ NEW_HEAD=$1 &&
+ SUB_HEAD=$2 &&
+ cat >"$pwd/expect.err.deep" <<-EOF
+ Fetching submodule submodule/subdir/deepsubmodule${SUB_HEAD:+ at commit $SUB_HEAD}
+ From $pwd/deepsubmodule
+ OLD_HEAD..$NEW_HEAD deep -> origin/deep
+ EOF
+}
+
+write_expected_super () {
+ NEW_HEAD=$1 &&
+ cat >"$pwd/expect.err.super" <<-EOF
+ From $pwd/.
+ OLD_HEAD..$NEW_HEAD super -> origin/super
+ EOF
+}
+
+# For each submodule in the test setup, this creates a commit and writes
+# a file that contains the expected err if that new commit were fetched.
+# These output files get concatenated in the right order by
+# verify_fetch_result().
+add_submodule_commits () {
(
cd submodule &&
- head1=$(git rev-parse --short HEAD) &&
echo new >> subfile &&
test_tick &&
git add subfile &&
git commit -m new subfile &&
- head2=$(git rev-parse --short HEAD) &&
- echo "Fetching submodule submodule" > ../expect.err &&
- echo "From $pwd/submodule" >> ../expect.err &&
- echo " $head1..$head2 sub -> origin/sub" >> ../expect.err
+ new_head=$(git rev-parse --short HEAD) &&
+ write_expected_sub $new_head
) &&
(
cd deepsubmodule &&
- head1=$(git rev-parse --short HEAD) &&
echo new >> deepsubfile &&
test_tick &&
git add deepsubfile &&
git commit -m new deepsubfile &&
- head2=$(git rev-parse --short HEAD) &&
- echo "Fetching submodule submodule/subdir/deepsubmodule" >> ../expect.err
- echo "From $pwd/deepsubmodule" >> ../expect.err &&
- echo " $head1..$head2 deep -> origin/deep" >> ../expect.err
+ new_head=$(git rev-parse --short HEAD) &&
+ write_expected_deep $new_head
)
}
+# For each superproject in the test setup, update its submodule, add the
+# submodule and create a new commit with the submodule change.
+#
+# This requires add_submodule_commits() to be called first, otherwise
+# the submodules will not have changed and cannot be "git add"-ed.
+add_superproject_commits () {
+ (
+ cd submodule &&
+ (
+ cd subdir/deepsubmodule &&
+ git fetch &&
+ git checkout -q FETCH_HEAD
+ ) &&
+ git add subdir/deepsubmodule &&
+ git commit -m "new deep submodule"
+ ) &&
+ git add submodule &&
+ git commit -m "new submodule" &&
+ super_head=$(git rev-parse --short HEAD) &&
+ sub_head=$(git -C submodule rev-parse --short HEAD) &&
+ write_expected_super $super_head &&
+ write_expected_sub $sub_head
+}
+
+# Verifies that the expected repositories were fetched. This is done by
+# concatenating the files expect.err.[super|sub|deep] in the correct
+# order and comparing it to the actual stderr.
+#
+# If a repo should not be fetched in the test, its corresponding
+# expect.err file should be rm-ed.
+verify_fetch_result () {
+ ACTUAL_ERR=$1 &&
+ rm -f expect.err.combined &&
+ if test -f expect.err.super
+ then
+ cat expect.err.super >>expect.err.combined
+ fi &&
+ if test -f expect.err.sub
+ then
+ cat expect.err.sub >>expect.err.combined
+ fi &&
+ if test -f expect.err.deep
+ then
+ cat expect.err.deep >>expect.err.combined
+ fi &&
+ if test -f expect.err.sub2
+ then
+ cat expect.err.sub2 >>expect.err.combined
+ fi &&
+ sed -e 's/[0-9a-f][0-9a-f]*\.\./OLD_HEAD\.\./' "$ACTUAL_ERR" >actual.err.cmp &&
+ test_cmp expect.err.combined actual.err.cmp
+}
+
test_expect_success setup '
mkdir deepsubmodule &&
(
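verify_fetch_result() can compare fixed expectation files against real output only because the old side of each "old..new" range is normalized first: the sed call rewrites the abbreviated old object name to the literal OLD_HEAD. The normalization in isolation (sample object names invented for illustration):

    echo "   abc1234..def5678  sub        -> origin/sub" |
    sed -e "s/[0-9a-f][0-9a-f]*\.\./OLD_HEAD\.\./"
    # prints:   OLD_HEAD..def5678  sub        -> origin/sub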
@@ -68,38 +157,38 @@ test_expect_success setup '
'
test_expect_success "fetch --recurse-submodules recurses into submodules" '
- add_upstream_commit &&
+ add_submodule_commits &&
(
cd downstream &&
git fetch --recurse-submodules >../actual.out 2>../actual.err
) &&
test_must_be_empty actual.out &&
- test_cmp expect.err actual.err
+ verify_fetch_result actual.err
'
test_expect_success "submodule.recurse option triggers recursive fetch" '
- add_upstream_commit &&
+ add_submodule_commits &&
(
cd downstream &&
git -c submodule.recurse fetch >../actual.out 2>../actual.err
) &&
test_must_be_empty actual.out &&
- test_cmp expect.err actual.err
+ verify_fetch_result actual.err
'
test_expect_success "fetch --recurse-submodules -j2 has the same output behaviour" '
- add_upstream_commit &&
+ add_submodule_commits &&
(
cd downstream &&
GIT_TRACE="$TRASH_DIRECTORY/trace.out" git fetch --recurse-submodules -j2 2>../actual.err
) &&
test_must_be_empty actual.out &&
- test_cmp expect.err actual.err &&
+ verify_fetch_result actual.err &&
grep "2 tasks" trace.out
'
test_expect_success "fetch alone only fetches superproject" '
- add_upstream_commit &&
+ add_submodule_commits &&
(
cd downstream &&
git fetch >../actual.out 2>../actual.err
@@ -124,11 +213,11 @@ test_expect_success "using fetchRecurseSubmodules=true in .gitmodules recurses i
git fetch >../actual.out 2>../actual.err
) &&
test_must_be_empty actual.out &&
- test_cmp expect.err actual.err
+ verify_fetch_result actual.err
'
test_expect_success "--no-recurse-submodules overrides .gitmodules config" '
- add_upstream_commit &&
+ add_submodule_commits &&
(
cd downstream &&
git fetch --no-recurse-submodules >../actual.out 2>../actual.err
@@ -155,7 +244,7 @@ test_expect_success "--recurse-submodules overrides fetchRecurseSubmodules setti
git config --unset submodule.submodule.fetchRecurseSubmodules
) &&
test_must_be_empty actual.out &&
- test_cmp expect.err actual.err
+ verify_fetch_result actual.err
'
test_expect_success "--quiet propagates to submodules" '
@@ -177,13 +266,13 @@ test_expect_success "--quiet propagates to parallel submodules" '
'
test_expect_success "--dry-run propagates to submodules" '
- add_upstream_commit &&
+ add_submodule_commits &&
(
cd downstream &&
git fetch --recurse-submodules --dry-run >../actual.out 2>../actual.err
) &&
test_must_be_empty actual.out &&
- test_cmp expect.err actual.err
+ verify_fetch_result actual.err
'
test_expect_success "Without --dry-run propagates to submodules" '
@@ -192,22 +281,22 @@ test_expect_success "Without --dry-run propagates to submodules" '
git fetch --recurse-submodules >../actual.out 2>../actual.err
) &&
test_must_be_empty actual.out &&
- test_cmp expect.err actual.err
+ verify_fetch_result actual.err
'
test_expect_success "recurseSubmodules=true propagates into submodules" '
- add_upstream_commit &&
+ add_submodule_commits &&
(
cd downstream &&
git config fetch.recurseSubmodules true &&
git fetch >../actual.out 2>../actual.err
) &&
test_must_be_empty actual.out &&
- test_cmp expect.err actual.err
+ verify_fetch_result actual.err
'
test_expect_success "--recurse-submodules overrides config in submodule" '
- add_upstream_commit &&
+ add_submodule_commits &&
(
cd downstream &&
(
@@ -217,11 +306,11 @@ test_expect_success "--recurse-submodules overrides config in submodule" '
git fetch --recurse-submodules >../actual.out 2>../actual.err
) &&
test_must_be_empty actual.out &&
- test_cmp expect.err actual.err
+ verify_fetch_result actual.err
'
test_expect_success "--no-recurse-submodules overrides config setting" '
- add_upstream_commit &&
+ add_submodule_commits &&
(
cd downstream &&
git config fetch.recurseSubmodules true &&
@@ -246,36 +335,34 @@ test_expect_success "Recursion doesn't happen when no new commits are fetched in
'
test_expect_success "Recursion stops when no new submodule commits are fetched" '
- head1=$(git rev-parse --short HEAD) &&
git add submodule &&
git commit -m "new submodule" &&
- head2=$(git rev-parse --short HEAD) &&
- echo "From $pwd/." > expect.err.sub &&
- echo " $head1..$head2 super -> origin/super" >>expect.err.sub &&
- head -3 expect.err >> expect.err.sub &&
+ new_head=$(git rev-parse --short HEAD) &&
+ write_expected_super $new_head &&
+ rm expect.err.deep &&
(
cd downstream &&
git fetch >../actual.out 2>../actual.err
) &&
- test_cmp expect.err.sub actual.err &&
+ verify_fetch_result actual.err &&
test_must_be_empty actual.out
'
test_expect_success "Recursion doesn't happen when new superproject commits don't change any submodules" '
- add_upstream_commit &&
- head1=$(git rev-parse --short HEAD) &&
+ add_submodule_commits &&
echo a > file &&
git add file &&
git commit -m "new file" &&
- head2=$(git rev-parse --short HEAD) &&
- echo "From $pwd/." > expect.err.file &&
- echo " $head1..$head2 super -> origin/super" >> expect.err.file &&
+ new_head=$(git rev-parse --short HEAD) &&
+ write_expected_super $new_head &&
+ rm expect.err.sub &&
+ rm expect.err.deep &&
(
cd downstream &&
git fetch >../actual.out 2>../actual.err
) &&
test_must_be_empty actual.out &&
- test_cmp expect.err.file actual.err
+ verify_fetch_result actual.err
'
test_expect_success "Recursion picks up config in submodule" '
@@ -287,14 +374,11 @@ test_expect_success "Recursion picks up config in submodule" '
git config fetch.recurseSubmodules true
)
) &&
- add_upstream_commit &&
- head1=$(git rev-parse --short HEAD) &&
+ add_submodule_commits &&
git add submodule &&
git commit -m "new submodule" &&
- head2=$(git rev-parse --short HEAD) &&
- echo "From $pwd/." > expect.err.sub &&
- echo " $head1..$head2 super -> origin/super" >> expect.err.sub &&
- cat expect.err >> expect.err.sub &&
+ new_head=$(git rev-parse --short HEAD) &&
+ write_expected_super $new_head &&
(
cd downstream &&
git fetch >../actual.out 2>../actual.err &&
@@ -303,60 +387,23 @@ test_expect_success "Recursion picks up config in submodule" '
git config --unset fetch.recurseSubmodules
)
) &&
- test_cmp expect.err.sub actual.err &&
+ verify_fetch_result actual.err &&
test_must_be_empty actual.out
'
test_expect_success "Recursion picks up all submodules when necessary" '
- add_upstream_commit &&
- (
- cd submodule &&
- (
- cd subdir/deepsubmodule &&
- git fetch &&
- git checkout -q FETCH_HEAD
- ) &&
- head1=$(git rev-parse --short HEAD^) &&
- git add subdir/deepsubmodule &&
- git commit -m "new deepsubmodule" &&
- head2=$(git rev-parse --short HEAD) &&
- echo "Fetching submodule submodule" > ../expect.err.sub &&
- echo "From $pwd/submodule" >> ../expect.err.sub &&
- echo " $head1..$head2 sub -> origin/sub" >> ../expect.err.sub
- ) &&
- head1=$(git rev-parse --short HEAD) &&
- git add submodule &&
- git commit -m "new submodule" &&
- head2=$(git rev-parse --short HEAD) &&
- echo "From $pwd/." > expect.err.2 &&
- echo " $head1..$head2 super -> origin/super" >> expect.err.2 &&
- cat expect.err.sub >> expect.err.2 &&
- tail -3 expect.err >> expect.err.2 &&
+ add_submodule_commits &&
+ add_superproject_commits &&
(
cd downstream &&
git fetch >../actual.out 2>../actual.err
) &&
- test_cmp expect.err.2 actual.err &&
+ verify_fetch_result actual.err &&
test_must_be_empty actual.out
'
test_expect_success "'--recurse-submodules=on-demand' doesn't recurse when no new commits are fetched in the superproject (and ignores config)" '
- add_upstream_commit &&
- (
- cd submodule &&
- (
- cd subdir/deepsubmodule &&
- git fetch &&
- git checkout -q FETCH_HEAD
- ) &&
- head1=$(git rev-parse --short HEAD^) &&
- git add subdir/deepsubmodule &&
- git commit -m "new deepsubmodule" &&
- head2=$(git rev-parse --short HEAD) &&
- echo Fetching submodule submodule > ../expect.err.sub &&
- echo "From $pwd/submodule" >> ../expect.err.sub &&
- echo " $head1..$head2 sub -> origin/sub" >> ../expect.err.sub
- ) &&
+ add_submodule_commits &&
(
cd downstream &&
git config fetch.recurseSubmodules true &&
@@ -368,15 +415,8 @@ test_expect_success "'--recurse-submodules=on-demand' doesn't recurse when no ne
'
test_expect_success "'--recurse-submodules=on-demand' recurses as deep as necessary (and ignores config)" '
- head1=$(git rev-parse --short HEAD) &&
- git add submodule &&
- git commit -m "new submodule" &&
- head2=$(git rev-parse --short HEAD) &&
- tail -3 expect.err > expect.err.deepsub &&
- echo "From $pwd/." > expect.err &&
- echo " $head1..$head2 super -> origin/super" >>expect.err &&
- cat expect.err.sub >> expect.err &&
- cat expect.err.deepsub >> expect.err &&
+ add_submodule_commits &&
+ add_superproject_commits &&
(
cd downstream &&
git config fetch.recurseSubmodules false &&
@@ -392,24 +432,165 @@ test_expect_success "'--recurse-submodules=on-demand' recurses as deep as necess
)
) &&
test_must_be_empty actual.out &&
- test_cmp expect.err actual.err
+ verify_fetch_result actual.err
+'
+
+# These tests verify that we can fetch submodules that aren't in the
+# index.
+#
+# First, test the simple case where the index is empty and we only fetch
+# submodules that are not in the index.
+test_expect_success 'setup downstream branch without submodules' '
+ (
+ cd downstream &&
+ git checkout --recurse-submodules -b no-submodules &&
+ git rm .gitmodules &&
+ git rm submodule &&
+ git commit -m "no submodules" &&
+ git checkout --recurse-submodules super
+ )
+'
+
+test_expect_success "'--recurse-submodules=on-demand' should fetch submodule commits if the submodule is changed but the index has no submodules" '
+ add_submodule_commits &&
+ add_superproject_commits &&
+ # Fetch the new superproject commit
+ (
+ cd downstream &&
+ git switch --recurse-submodules no-submodules &&
+ git fetch --recurse-submodules=on-demand >../actual.out 2>../actual.err
+ ) &&
+ super_head=$(git rev-parse --short HEAD) &&
+ sub_head=$(git -C submodule rev-parse --short HEAD) &&
+ deep_head=$(git -C submodule/subdir/deepsubmodule rev-parse --short HEAD) &&
+
+ # assert that these are fetched from commits, not the index
+ write_expected_sub $sub_head $super_head &&
+ write_expected_deep $deep_head $sub_head &&
+
+ test_must_be_empty actual.out &&
+ verify_fetch_result actual.err
+'
+
+test_expect_success "'--recurse-submodules' should fetch submodule commits if the submodule is changed but the index has no submodules" '
+ add_submodule_commits &&
+ add_superproject_commits &&
+ # Fetch the new superproject commit
+ (
+ cd downstream &&
+ git switch --recurse-submodules no-submodules &&
+ git fetch --recurse-submodules >../actual.out 2>../actual.err
+ ) &&
+ super_head=$(git rev-parse --short HEAD) &&
+ sub_head=$(git -C submodule rev-parse --short HEAD) &&
+ deep_head=$(git -C submodule/subdir/deepsubmodule rev-parse --short HEAD) &&
+
+ # assert that these are fetched from commits, not the index
+ write_expected_sub $sub_head $super_head &&
+ write_expected_deep $deep_head $sub_head &&
+
+ test_must_be_empty actual.out &&
+ verify_fetch_result actual.err
+'
+
+test_expect_success "'--recurse-submodules' should ignore changed, inactive submodules" '
+ add_submodule_commits &&
+ add_superproject_commits &&
+
+ # Fetch the new superproject commit
+ (
+ cd downstream &&
+ git switch --recurse-submodules no-submodules &&
+ git -c submodule.submodule.active=false fetch --recurse-submodules >../actual.out 2>../actual.err
+ ) &&
+ test_must_be_empty actual.out &&
+ super_head=$(git rev-parse --short HEAD) &&
+ write_expected_super $super_head &&
+ # Neither should be fetched because the submodule is inactive
+ rm expect.err.sub &&
+ rm expect.err.deep &&
+ verify_fetch_result actual.err
+'
+
+# Now that we know we can fetch submodules that are not in the index,
+# test that we can fetch index and non-index submodules in the same
+# operation.
+test_expect_success 'setup downstream branch with other submodule' '
+ mkdir submodule2 &&
+ (
+ cd submodule2 &&
+ git init &&
+ echo sub2content >sub2file &&
+ git add sub2file &&
+ git commit -a -m new &&
+ git branch -M sub2
+ ) &&
+ git checkout -b super-sub2-only &&
+ git submodule add "$pwd/submodule2" submodule2 &&
+ git commit -m "add sub2" &&
+ git checkout super &&
+ (
+ cd downstream &&
+ git fetch --recurse-submodules origin &&
+ git checkout super-sub2-only &&
+ # Explicitly run "git submodule update" because sub2 is new
+ # and has not been cloned.
+ git submodule update --init &&
+ git checkout --recurse-submodules super
+ )
+'
+
+test_expect_success "'--recurse-submodules' should fetch submodule commits in changed submodules and the index" '
+ test_when_finished "rm expect.err.sub2" &&
+ # Create new commit in origin/super
+ add_submodule_commits &&
+ add_superproject_commits &&
+
+ # Create new commit in origin/super-sub2-only
+ git checkout super-sub2-only &&
+ (
+ cd submodule2 &&
+ test_commit --no-tag foo
+ ) &&
+ git add submodule2 &&
+ git commit -m "new submodule2" &&
+
+ git checkout super &&
+ (
+ cd downstream &&
+ git fetch --recurse-submodules >../actual.out 2>../actual.err
+ ) &&
+ test_must_be_empty actual.out &&
+ sub2_head=$(git -C submodule2 rev-parse --short HEAD) &&
+ super_head=$(git rev-parse --short super) &&
+ super_sub2_only_head=$(git rev-parse --short super-sub2-only) &&
+ write_expected_sub2 $sub2_head $super_sub2_only_head &&
+
+ # write_expected_super cannot handle >1 branch. Since this is a
+ # one-off, construct expect.err.super manually.
+ cat >"$pwd/expect.err.super" <<-EOF &&
+ From $pwd/.
+ OLD_HEAD..$super_head super -> origin/super
+ OLD_HEAD..$super_sub2_only_head super-sub2-only -> origin/super-sub2-only
+ EOF
+ verify_fetch_result actual.err
'
test_expect_success "'--recurse-submodules=on-demand' stops when no new submodule commits are found in the superproject (and ignores config)" '
- add_upstream_commit &&
- head1=$(git rev-parse --short HEAD) &&
+ add_submodule_commits &&
echo a >> file &&
git add file &&
git commit -m "new file" &&
- head2=$(git rev-parse --short HEAD) &&
- echo "From $pwd/." > expect.err.file &&
- echo " $head1..$head2 super -> origin/super" >> expect.err.file &&
+ new_head=$(git rev-parse --short HEAD) &&
+ write_expected_super $new_head &&
+ rm expect.err.sub &&
+ rm expect.err.deep &&
(
cd downstream &&
git fetch --recurse-submodules=on-demand >../actual.out 2>../actual.err
) &&
test_must_be_empty actual.out &&
- test_cmp expect.err.file actual.err
+ verify_fetch_result actual.err
'
test_expect_success "'fetch.recurseSubmodules=on-demand' overrides global config" '
@@ -417,15 +598,13 @@ test_expect_success "'fetch.recurseSubmodules=on-demand' overrides global config
cd downstream &&
git fetch --recurse-submodules
) &&
- add_upstream_commit &&
+ add_submodule_commits &&
git config --global fetch.recurseSubmodules false &&
- head1=$(git rev-parse --short HEAD) &&
git add submodule &&
git commit -m "new submodule" &&
- head2=$(git rev-parse --short HEAD) &&
- echo "From $pwd/." > expect.err.2 &&
- echo " $head1..$head2 super -> origin/super" >>expect.err.2 &&
- head -3 expect.err >> expect.err.2 &&
+ new_head=$(git rev-parse --short HEAD) &&
+ write_expected_super $new_head &&
+ rm expect.err.deep &&
(
cd downstream &&
git config fetch.recurseSubmodules on-demand &&
@@ -437,7 +616,7 @@ test_expect_success "'fetch.recurseSubmodules=on-demand' overrides global config
git config --unset fetch.recurseSubmodules
) &&
test_must_be_empty actual.out &&
- test_cmp expect.err.2 actual.err
+ verify_fetch_result actual.err
'
test_expect_success "'submodule.<sub>.fetchRecurseSubmodules=on-demand' overrides fetch.recurseSubmodules" '
@@ -445,15 +624,13 @@ test_expect_success "'submodule.<sub>.fetchRecurseSubmodules=on-demand' override
cd downstream &&
git fetch --recurse-submodules
) &&
- add_upstream_commit &&
+ add_submodule_commits &&
git config fetch.recurseSubmodules false &&
- head1=$(git rev-parse --short HEAD) &&
git add submodule &&
git commit -m "new submodule" &&
- head2=$(git rev-parse --short HEAD) &&
- echo "From $pwd/." > expect.err.2 &&
- echo " $head1..$head2 super -> origin/super" >>expect.err.2 &&
- head -3 expect.err >> expect.err.2 &&
+ new_head=$(git rev-parse --short HEAD) &&
+ write_expected_super $new_head &&
+ rm expect.err.deep &&
(
cd downstream &&
git config submodule.submodule.fetchRecurseSubmodules on-demand &&
@@ -465,7 +642,7 @@ test_expect_success "'submodule.<sub>.fetchRecurseSubmodules=on-demand' override
git config --unset submodule.submodule.fetchRecurseSubmodules
) &&
test_must_be_empty actual.out &&
- test_cmp expect.err.2 actual.err
+ verify_fetch_result actual.err
'
test_expect_success "don't fetch submodule when newly recorded commits are already present" '
@@ -473,18 +650,19 @@ test_expect_success "don't fetch submodule when newly recorded commits are alrea
cd submodule &&
git checkout -q HEAD^^
) &&
- head1=$(git rev-parse --short HEAD) &&
git add submodule &&
git commit -m "submodule rewound" &&
- head2=$(git rev-parse --short HEAD) &&
- echo "From $pwd/." > expect.err &&
- echo " $head1..$head2 super -> origin/super" >> expect.err &&
+ new_head=$(git rev-parse --short HEAD) &&
+ write_expected_super $new_head &&
+ rm expect.err.sub &&
+ # This file does not exist, but rm -f for readability
+ rm -f expect.err.deep &&
(
cd downstream &&
git fetch >../actual.out 2>../actual.err
) &&
test_must_be_empty actual.out &&
- test_cmp expect.err actual.err &&
+ verify_fetch_result actual.err &&
(
cd submodule &&
git checkout -q sub
@@ -496,15 +674,13 @@ test_expect_success "'fetch.recurseSubmodules=on-demand' works also without .git
cd downstream &&
git fetch --recurse-submodules
) &&
- add_upstream_commit &&
- head1=$(git rev-parse --short HEAD) &&
+ add_submodule_commits &&
git add submodule &&
git rm .gitmodules &&
git commit -m "new submodule without .gitmodules" &&
- head2=$(git rev-parse --short HEAD) &&
- echo "From $pwd/." >expect.err.2 &&
- echo " $head1..$head2 super -> origin/super" >>expect.err.2 &&
- head -3 expect.err >>expect.err.2 &&
+ new_head=$(git rev-parse --short HEAD) &&
+ write_expected_super $new_head &&
+ rm expect.err.deep &&
(
cd downstream &&
rm .gitmodules &&
@@ -520,7 +696,7 @@ test_expect_success "'fetch.recurseSubmodules=on-demand' works also without .git
git reset --hard
) &&
test_must_be_empty actual.out &&
- test_cmp expect.err.2 actual.err &&
+ verify_fetch_result actual.err &&
git checkout HEAD^ -- .gitmodules &&
git add .gitmodules &&
git commit -m "new submodule restored .gitmodules"
@@ -842,4 +1018,111 @@ test_expect_success 'recursive fetch after deinit a submodule' '
test_cmp expect actual
'
+test_expect_success 'setup repo with upstreams that share a submodule name' '
+ mkdir same-name-1 &&
+ (
+ cd same-name-1 &&
+ git init -b main &&
+ test_commit --no-tag a
+ ) &&
+ git clone same-name-1 same-name-2 &&
+ # same-name-1 and same-name-2 both add a submodule with the
+ # name "submodule"
+ (
+ cd same-name-1 &&
+ mkdir submodule &&
+ git -C submodule init -b main &&
+ test_commit -C submodule --no-tag a1 &&
+ git submodule add "$pwd/same-name-1/submodule" &&
+ git add submodule &&
+ git commit -m "super-a1"
+ ) &&
+ (
+ cd same-name-2 &&
+ mkdir submodule &&
+ git -C submodule init -b main &&
+ test_commit -C submodule --no-tag a2 &&
+ git submodule add "$pwd/same-name-2/submodule" &&
+ git add submodule &&
+ git commit -m "super-a2"
+ ) &&
+ git clone same-name-1 -o same-name-1 same-name-downstream &&
+ (
+ cd same-name-downstream &&
+ git remote add same-name-2 ../same-name-2 &&
+ git fetch --all &&
+ # init downstream with same-name-1
+ git submodule update --init
+ )
+'
+
+test_expect_success 'fetch --recurse-submodules updates name-conflicted, populated submodule' '
+ test_when_finished "git -C same-name-downstream checkout main" &&
+ (
+ cd same-name-1 &&
+ test_commit -C submodule --no-tag b1 &&
+ git add submodule &&
+ git commit -m "super-b1"
+ ) &&
+ (
+ cd same-name-2 &&
+ test_commit -C submodule --no-tag b2 &&
+ git add submodule &&
+ git commit -m "super-b2"
+ ) &&
+ (
+ cd same-name-downstream &&
+ # even though the .gitmodules is correct, we cannot
+ # fetch from same-name-2
+ git checkout same-name-2/main &&
+ git fetch --recurse-submodules same-name-1 &&
+ test_must_fail git fetch --recurse-submodules same-name-2
+ ) &&
+ super_head1=$(git -C same-name-1 rev-parse HEAD) &&
+ git -C same-name-downstream cat-file -e $super_head1 &&
+
+ super_head2=$(git -C same-name-2 rev-parse HEAD) &&
+ git -C same-name-downstream cat-file -e $super_head2 &&
+
+ sub_head1=$(git -C same-name-1/submodule rev-parse HEAD) &&
+ git -C same-name-downstream/submodule cat-file -e $sub_head1 &&
+
+ sub_head2=$(git -C same-name-2/submodule rev-parse HEAD) &&
+ test_must_fail git -C same-name-downstream/submodule cat-file -e $sub_head2
+'
+
+test_expect_success 'fetch --recurse-submodules updates name-conflicted, unpopulated submodule' '
+ (
+ cd same-name-1 &&
+ test_commit -C submodule --no-tag c1 &&
+ git add submodule &&
+ git commit -m "super-c1"
+ ) &&
+ (
+ cd same-name-2 &&
+ test_commit -C submodule --no-tag c2 &&
+ git add submodule &&
+ git commit -m "super-c2"
+ ) &&
+ (
+ cd same-name-downstream &&
+ git checkout main &&
+ git rm .gitmodules &&
+ git rm submodule &&
+ git commit -m "no submodules" &&
+ git fetch --recurse-submodules same-name-1
+ ) &&
+ head1=$(git -C same-name-1/submodule rev-parse HEAD) &&
+ head2=$(git -C same-name-2/submodule rev-parse HEAD) &&
+ (
+ cd same-name-downstream/.git/modules/submodule &&
+ # The submodule has core.worktree pointing to the "git
+ # rm"-ed directory, overwrite the invalid value. See
+ # comment in get_fetch_task_from_changed() for more
+ # information.
+ git --work-tree=. cat-file -e $head1 &&
+ test_must_fail git --work-tree=. cat-file -e $head2
+ )
+'
+
test_done
diff --git a/t/t5534-push-signed.sh b/t/t5534-push-signed.sh
index 24d374a..7c0a148 100755
--- a/t/t5534-push-signed.sh
+++ b/t/t5534-push-signed.sh
@@ -35,8 +35,7 @@ test_expect_success setup '
test_expect_success 'unsigned push does not send push certificate' '
prepare_dst &&
- mkdir -p dst/.git/hooks &&
- write_script dst/.git/hooks/post-receive <<-\EOF &&
+ test_hook -C dst post-receive <<-\EOF &&
# discard the update list
cat >/dev/null
# record the push certificate
@@ -52,8 +51,7 @@ test_expect_success 'unsigned push does not send push certificate' '
test_expect_success 'talking with a receiver without push certificate support' '
prepare_dst &&
- mkdir -p dst/.git/hooks &&
- write_script dst/.git/hooks/post-receive <<-\EOF &&
+ test_hook -C dst post-receive <<-\EOF &&
# discard the update list
cat >/dev/null
# record the push certificate
@@ -69,22 +67,19 @@ test_expect_success 'talking with a receiver without push certificate support' '
test_expect_success 'push --signed fails with a receiver without push certificate support' '
prepare_dst &&
- mkdir -p dst/.git/hooks &&
test_must_fail git push --signed dst noop ff +noff 2>err &&
test_i18ngrep "the receiving end does not support" err
'
test_expect_success 'push --signed=1 is accepted' '
prepare_dst &&
- mkdir -p dst/.git/hooks &&
test_must_fail git push --signed=1 dst noop ff +noff 2>err &&
test_i18ngrep "the receiving end does not support" err
'
test_expect_success GPG 'no certificate for a signed push with no update' '
prepare_dst &&
- mkdir -p dst/.git/hooks &&
- write_script dst/.git/hooks/post-receive <<-\EOF &&
+ test_hook -C dst post-receive <<-\EOF &&
if test -n "${GIT_PUSH_CERT-}"
then
git cat-file blob $GIT_PUSH_CERT >../push-cert
@@ -96,9 +91,8 @@ test_expect_success GPG 'no certificate for a signed push with no update' '
test_expect_success GPG 'signed push sends push certificate' '
prepare_dst &&
- mkdir -p dst/.git/hooks &&
git -C dst config receive.certnonceseed sekrit &&
- write_script dst/.git/hooks/post-receive <<-\EOF &&
+ test_hook -C dst post-receive <<-\EOF &&
# discard the update list
cat >/dev/null
# record the push certificate
@@ -139,10 +133,9 @@ test_expect_success GPG 'signed push sends push certificate' '
test_expect_success GPGSSH 'ssh signed push sends push certificate' '
prepare_dst &&
- mkdir -p dst/.git/hooks &&
git -C dst config gpg.ssh.allowedSignersFile "${GPGSSH_ALLOWED_SIGNERS}" &&
git -C dst config receive.certnonceseed sekrit &&
- write_script dst/.git/hooks/post-receive <<-\EOF &&
+ test_hook -C dst post-receive <<-\EOF &&
# discard the update list
cat >/dev/null
# record the push certificate
@@ -223,9 +216,8 @@ test_expect_success GPG 'inconsistent push options in signed push not allowed' '
test_expect_success GPG 'fail without key and heed user.signingkey' '
prepare_dst &&
- mkdir -p dst/.git/hooks &&
git -C dst config receive.certnonceseed sekrit &&
- write_script dst/.git/hooks/post-receive <<-\EOF &&
+ test_hook -C dst post-receive <<-\EOF &&
# discard the update list
cat >/dev/null
# record the push certificate
@@ -273,9 +265,8 @@ test_expect_success GPG 'fail without key and heed user.signingkey' '
test_expect_success GPGSM 'fail without key and heed user.signingkey x509' '
test_config gpg.format x509 &&
prepare_dst &&
- mkdir -p dst/.git/hooks &&
git -C dst config receive.certnonceseed sekrit &&
- write_script dst/.git/hooks/post-receive <<-\EOF &&
+ test_hook -C dst post-receive <<-\EOF &&
# discard the update list
cat >/dev/null
# record the push certificate
@@ -326,10 +317,9 @@ test_expect_success GPGSM 'fail without key and heed user.signingkey x509' '
test_expect_success GPGSSH 'fail without key and heed user.signingkey ssh' '
test_config gpg.format ssh &&
prepare_dst &&
- mkdir -p dst/.git/hooks &&
git -C dst config gpg.ssh.allowedSignersFile "${GPGSSH_ALLOWED_SIGNERS}" &&
git -C dst config receive.certnonceseed sekrit &&
- write_script dst/.git/hooks/post-receive <<-\EOF &&
+ test_hook -C dst post-receive <<-\EOF &&
# discard the update list
cat >/dev/null
# record the push certificate
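Every converted hook in this file has the same shape: a post-receive hook that drains the ref-update list from stdin and, when a certificate was sent, records the blob that GIT_PUSH_CERT points at together with the nonce status. A condensed sketch of that body as installed through test_hook (output file names are illustrative):

    test_hook -C dst post-receive <<-\EOF
    # discard the update list
    cat >/dev/null
    if test -n "${GIT_PUSH_CERT-}"
    then
        git cat-file blob $GIT_PUSH_CERT >../push-cert
        echo "${GIT_PUSH_CERT_NONCE_STATUS-nononcestatus}" >../nonce-status
    fi
    EOF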
diff --git a/t/t5537-fetch-shallow.sh b/t/t5537-fetch-shallow.sh
index 11d5ea5..92948de 100755
--- a/t/t5537-fetch-shallow.sh
+++ b/t/t5537-fetch-shallow.sh
@@ -161,6 +161,15 @@ test_expect_success 'fetch --update-shallow' '
)
'
+test_expect_success 'fetch --update-shallow into a repo with submodules' '
+ git init a-submodule &&
+ test_commit -C a-submodule foo &&
+ git init repo-with-sub &&
+ git -C repo-with-sub submodule add ../a-submodule a-submodule &&
+ git -C repo-with-sub commit -m "added submodule" &&
+ git -C repo-with-sub fetch --update-shallow ../shallow/.git refs/heads/*:refs/remotes/shallow/*
+'
+
test_expect_success 'fetch --update-shallow (with fetch.writeCommitGraph)' '
(
cd shallow &&
diff --git a/t/t5540-http-push-webdav.sh b/t/t5540-http-push-webdav.sh
index b0dbacf..37db3de 100755
--- a/t/t5540-http-push-webdav.sh
+++ b/t/t5540-http-push-webdav.sh
@@ -42,7 +42,9 @@ test_expect_success 'setup remote repository' '
git clone --bare test_repo test_repo.git &&
cd test_repo.git &&
git --bare update-server-info &&
- mv hooks/post-update.sample hooks/post-update &&
+ test_hook --setup post-update <<-\EOF &&
+ exec git update-server-info
+ EOF
ORIG_HEAD=$(git rev-parse --verify HEAD) &&
cd - &&
mv test_repo.git "$HTTPD_DOCUMENT_ROOT_PATH"
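What the replaced sample hook provided is now spelled out: serving a repository over dumb HTTP requires info/refs and objects/info/packs to be regenerated after every update, which is what `git update-server-info` does, and the post-update hook keeps them fresh on later pushes. t5550 below factors the same steps into a setup_post_update_server_info_hook helper. A minimal sanity check of the generated files (path follows the t5550 setup below):

    git -C "$HTTPD_DOCUMENT_ROOT_PATH/repo.git" update-server-info &&
    test -f "$HTTPD_DOCUMENT_ROOT_PATH/repo.git/info/refs" &&
    test -f "$HTTPD_DOCUMENT_ROOT_PATH/repo.git/objects/info/packs"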
diff --git a/t/t5541-http-push-smart.sh b/t/t5541-http-push-smart.sh
index 8ca50f8..2f09ff4 100755
--- a/t/t5541-http-push-smart.sh
+++ b/t/t5541-http-push-smart.sh
@@ -96,18 +96,18 @@ test_expect_success 'create and delete remote branch' '
test_must_fail git show-ref --verify refs/remotes/origin/dev
'
-cat >"$HTTPD_DOCUMENT_ROOT_PATH/test_repo.git/hooks/update" <<EOF
-#!/bin/sh
-exit 1
-EOF
-chmod a+x "$HTTPD_DOCUMENT_ROOT_PATH/test_repo.git/hooks/update"
+test_expect_success 'setup rejected update hook' '
+ test_hook --setup -C "$HTTPD_DOCUMENT_ROOT_PATH/test_repo.git" update <<-\EOF &&
+ exit 1
+ EOF
-cat >exp <<EOF
-remote: error: hook declined to update refs/heads/dev2
-To http://127.0.0.1:$LIB_HTTPD_PORT/smart/test_repo.git
- ! [remote rejected] dev2 -> dev2 (hook declined)
-error: failed to push some refs to 'http://127.0.0.1:$LIB_HTTPD_PORT/smart/test_repo.git'
-EOF
+ cat >exp <<-EOF
+ remote: error: hook declined to update refs/heads/dev2
+ To http://127.0.0.1:$LIB_HTTPD_PORT/smart/test_repo.git
+ ! [remote rejected] dev2 -> dev2 (hook declined)
+ error: failed to push some refs to '\''http://127.0.0.1:$LIB_HTTPD_PORT/smart/test_repo.git'\''
+ EOF
+'
test_expect_success 'rejected update prints status' '
cd "$ROOT_PATH"/test_repo_clone &&
@@ -419,10 +419,7 @@ test_expect_success CMDLINE_LIMIT 'push 2000 tags over http' '
'
test_expect_success GPG 'push with post-receive to inspect certificate' '
- (
- cd "$HTTPD_DOCUMENT_ROOT_PATH"/test_repo.git &&
- mkdir -p hooks &&
- write_script hooks/post-receive <<-\EOF &&
+ test_hook -C "$HTTPD_DOCUMENT_ROOT_PATH"/test_repo.git post-receive <<-\EOF &&
# discard the update list
cat >/dev/null
# record the push certificate
@@ -437,8 +434,9 @@ test_expect_success GPG 'push with post-receive to inspect certificate' '
NONCE_STATUS=${GIT_PUSH_CERT_NONCE_STATUS-nononcestatus}
NONCE=${GIT_PUSH_CERT_NONCE-nononce}
E_O_F
- EOF
-
+ EOF
+ (
+ cd "$HTTPD_DOCUMENT_ROOT_PATH"/test_repo.git &&
git config receive.certnonceseed sekrit &&
git config receive.certnonceslop 30
) &&
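Because the expected-output here-doc now lives inside the single-quoted test body, the literal single quotes around the URL need the usual sh idiom: '\'' closes the surrounding quote, emits an escaped quote, and reopens it. Reduced to a standalone example (URL invented):

    test_expect_success 'quoting example' '
        echo "refs were not pushed to '\''http://example.com/repo.git'\''"
    '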
diff --git a/t/t5543-atomic-push.sh b/t/t5543-atomic-push.sh
index bfee461..7043112 100755
--- a/t/t5543-atomic-push.sh
+++ b/t/t5543-atomic-push.sh
@@ -162,16 +162,10 @@ test_expect_success 'atomic push obeys update hook preventing a branch to be pus
test_commit two &&
git push --mirror up
) &&
- (
- cd upstream &&
- HOOKDIR="$(git rev-parse --git-dir)/hooks" &&
- HOOK="$HOOKDIR/update" &&
- mkdir -p "$HOOKDIR" &&
- write_script "$HOOK" <<-\EOF
- # only allow update to main from now on
- test "$1" = "refs/heads/main"
- EOF
- ) &&
+ test_hook -C upstream update <<-\EOF &&
+ # only allow update to main from now on
+ test "$1" = "refs/heads/main"
+ EOF
(
cd workbench &&
git checkout main &&
diff --git a/t/t5547-push-quarantine.sh b/t/t5547-push-quarantine.sh
index faaa51c..1876fb3 100755
--- a/t/t5547-push-quarantine.sh
+++ b/t/t5547-push-quarantine.sh
@@ -5,7 +5,7 @@ test_description='check quarantine of objects during push'
test_expect_success 'create picky dest repo' '
git init --bare dest.git &&
- write_script dest.git/hooks/pre-receive <<-\EOF
+ test_hook --setup -C dest.git pre-receive <<-\EOF
while read old new ref; do
test "$(git log -1 --format=%s $new)" = reject && exit 1
done
@@ -60,7 +60,7 @@ test_expect_success 'push to repo path with path separator (colon)' '
test_expect_success 'updating a ref from quarantine is forbidden' '
git init --bare update.git &&
- write_script update.git/hooks/pre-receive <<-\EOF &&
+ test_hook -C update.git pre-receive <<-\EOF &&
read old new refname
git update-ref refs/heads/unrelated $new
exit 1
diff --git a/t/t5548-push-porcelain.sh b/t/t5548-push-porcelain.sh
index f11ff57..6282728 100755
--- a/t/t5548-push-porcelain.sh
+++ b/t/t5548-push-porcelain.sh
@@ -168,7 +168,7 @@ run_git_push_porcelain_output_test() {
'
test_expect_success "prepare pre-receive hook ($PROTOCOL)" '
- write_script "$upstream/hooks/pre-receive" <<-EOF
+ test_hook --setup -C "$upstream" pre-receive <<-EOF
exit 1
EOF
'
diff --git a/t/t5550-http-fetch-dumb.sh b/t/t5550-http-fetch-dumb.sh
index 2592039..f0d9cd5 100755
--- a/t/t5550-http-fetch-dumb.sh
+++ b/t/t5550-http-fetch-dumb.sh
@@ -25,16 +25,17 @@ test_expect_success 'setup repository' '
git commit -m two
'
+setup_post_update_server_info_hook () {
+ test_hook --setup -C "$1" post-update <<-\EOF &&
+ exec git update-server-info
+ EOF
+ git -C "$1" update-server-info
+}
+
test_expect_success 'create http-accessible bare repository with loose objects' '
cp -R .git "$HTTPD_DOCUMENT_ROOT_PATH/repo.git" &&
- (cd "$HTTPD_DOCUMENT_ROOT_PATH/repo.git" &&
- git config core.bare true &&
- mkdir -p hooks &&
- write_script "hooks/post-update" <<-\EOF &&
- exec git update-server-info
- EOF
- hooks/post-update
- ) &&
+ git -C "$HTTPD_DOCUMENT_ROOT_PATH/repo.git" config core.bare true &&
+ setup_post_update_server_info_hook "$HTTPD_DOCUMENT_ROOT_PATH/repo.git" &&
git remote add public "$HTTPD_DOCUMENT_ROOT_PATH/repo.git" &&
git push public main:main
'
@@ -62,13 +63,7 @@ test_expect_success 'create password-protected repository' '
test_expect_success 'create empty remote repository' '
git init --bare "$HTTPD_DOCUMENT_ROOT_PATH/empty.git" &&
- (cd "$HTTPD_DOCUMENT_ROOT_PATH/empty.git" &&
- mkdir -p hooks &&
- write_script "hooks/post-update" <<-\EOF &&
- exec git update-server-info
- EOF
- hooks/post-update
- )
+ setup_post_update_server_info_hook "$HTTPD_DOCUMENT_ROOT_PATH/empty.git"
'
test_expect_success 'empty dumb HTTP repository has default hash algorithm' '
diff --git a/t/t5571-pre-push-hook.sh b/t/t5571-pre-push-hook.sh
index 660f876..a11b20e 100755
--- a/t/t5571-pre-push-hook.sh
+++ b/t/t5571-pre-push-hook.sh
@@ -6,57 +6,66 @@ export GIT_TEST_DEFAULT_INITIAL_BRANCH_NAME
. ./test-lib.sh
-# Setup hook that always succeeds
-HOOKDIR="$(git rev-parse --git-dir)/hooks"
-HOOK="$HOOKDIR/pre-push"
-mkdir -p "$HOOKDIR"
-write_script "$HOOK" <<EOF
-cat >/dev/null
-exit 0
-EOF
-
test_expect_success 'setup' '
+ test_hook pre-push <<-\EOF &&
+ cat >actual
+ EOF
+
git config push.default upstream &&
git init --bare repo1 &&
git remote add parent1 repo1 &&
test_commit one &&
- git push parent1 HEAD:foreign
+ cat >expect <<-EOF &&
+ HEAD $(git rev-parse HEAD) refs/heads/foreign $(test_oid zero)
+ EOF
+
+ test_when_finished "rm actual" &&
+ git push parent1 HEAD:foreign &&
+ test_cmp expect actual
'
-write_script "$HOOK" <<EOF
-cat >/dev/null
-exit 1
-EOF
COMMIT1="$(git rev-parse HEAD)"
export COMMIT1
test_expect_success 'push with failing hook' '
+ test_hook pre-push <<-\EOF &&
+ cat >actual &&
+ exit 1
+ EOF
+
test_commit two &&
- test_must_fail git push parent1 HEAD
+ cat >expect <<-EOF &&
+ HEAD $(git rev-parse HEAD) refs/heads/main $(test_oid zero)
+ EOF
+
+ test_when_finished "rm actual" &&
+ test_must_fail git push parent1 HEAD &&
+ test_cmp expect actual
'
test_expect_success '--no-verify bypasses hook' '
- git push --no-verify parent1 HEAD
+ git push --no-verify parent1 HEAD &&
+ test_path_is_missing actual
'
COMMIT2="$(git rev-parse HEAD)"
export COMMIT2
-write_script "$HOOK" <<'EOF'
-echo "$1" >actual
-echo "$2" >>actual
-cat >>actual
-EOF
-
-cat >expected <<EOF
-parent1
-repo1
-refs/heads/main $COMMIT2 refs/heads/foreign $COMMIT1
-EOF
-
test_expect_success 'push with hook' '
+ test_hook --setup pre-push <<-\EOF &&
+ echo "$1" >actual
+ echo "$2" >>actual
+ cat >>actual
+ EOF
+
+ cat >expect <<-EOF &&
+ parent1
+ repo1
+ refs/heads/main $COMMIT2 refs/heads/foreign $COMMIT1
+ EOF
+
git push parent1 main:foreign &&
- diff expected actual
+ test_cmp expect actual
'
test_expect_success 'add a branch' '
@@ -67,49 +76,48 @@ test_expect_success 'add a branch' '
COMMIT3="$(git rev-parse HEAD)"
export COMMIT3
-cat >expected <<EOF
-parent1
-repo1
-refs/heads/other $COMMIT3 refs/heads/foreign $COMMIT2
-EOF
-
test_expect_success 'push to default' '
+ cat >expect <<-EOF &&
+ parent1
+ repo1
+ refs/heads/other $COMMIT3 refs/heads/foreign $COMMIT2
+ EOF
git push &&
- diff expected actual
+ test_cmp expect actual
'
-cat >expected <<EOF
-parent1
-repo1
-refs/tags/one $COMMIT1 refs/tags/tag1 $ZERO_OID
-HEAD~ $COMMIT2 refs/heads/prev $ZERO_OID
-EOF
-
test_expect_success 'push non-branches' '
+ cat >expect <<-EOF &&
+ parent1
+ repo1
+ refs/tags/one $COMMIT1 refs/tags/tag1 $ZERO_OID
+ HEAD~ $COMMIT2 refs/heads/prev $ZERO_OID
+ EOF
+
git push parent1 one:tag1 HEAD~:refs/heads/prev &&
- diff expected actual
+ test_cmp expect actual
'
-cat >expected <<EOF
-parent1
-repo1
-(delete) $ZERO_OID refs/heads/prev $COMMIT2
-EOF
-
test_expect_success 'push delete' '
+ cat >expect <<-EOF &&
+ parent1
+ repo1
+ (delete) $ZERO_OID refs/heads/prev $COMMIT2
+ EOF
+
git push parent1 :prev &&
- diff expected actual
+ test_cmp expect actual
'
-cat >expected <<EOF
-repo1
-repo1
-HEAD $COMMIT3 refs/heads/other $ZERO_OID
-EOF
-
test_expect_success 'push to URL' '
+ cat >expect <<-EOF &&
+ repo1
+ repo1
+ HEAD $COMMIT3 refs/heads/other $ZERO_OID
+ EOF
+
git push repo1 HEAD &&
- diff expected actual
+ test_cmp expect actual
'
test_expect_success 'set up many-ref tests' '
@@ -124,7 +132,9 @@ test_expect_success 'set up many-ref tests' '
'
test_expect_success 'sigpipe does not cause pre-push hook failure' '
- echo "exit 0" | write_script "$HOOK" &&
+ test_hook --clobber pre-push <<-\EOF &&
+ exit 0
+ EOF
git push parent1 "refs/heads/b/*:refs/heads/b/*"
'
diff --git a/t/t5601-clone.sh b/t/t5601-clone.sh
index 83c24fc..4a61f2c 100755
--- a/t/t5601-clone.sh
+++ b/t/t5601-clone.sh
@@ -79,12 +79,10 @@ test_expect_success 'clone from hooks' '
cd .. &&
git init r1 &&
cd r1 &&
- cat >.git/hooks/pre-commit <<-\EOF &&
- #!/bin/sh
+ test_hook pre-commit <<-\EOF &&
git clone ../r0 ../r2
exit 1
EOF
- chmod u+x .git/hooks/pre-commit &&
: >file &&
git add file &&
test_must_fail git commit -m invoke-hook &&
diff --git a/t/t5616-partial-clone.sh b/t/t5616-partial-clone.sh
index 34469b6..4a3778d 100755
--- a/t/t5616-partial-clone.sh
+++ b/t/t5616-partial-clone.sh
@@ -166,6 +166,85 @@ test_expect_success 'manual prefetch of missing objects' '
test_line_count = 0 observed.oids
'
+# create new commits in "src" repo to establish a history on file.4.txt
+# and push to "srv.bare".
+test_expect_success 'push new commits to server for file.4.txt' '
+ for x in a b c d e f
+ do
+ echo "Mod file.4.txt $x" >src/file.4.txt &&
+ if list_contains "a,b" "$x"; then
+ printf "%10000s" X >>src/file.4.txt
+ fi &&
+ if list_contains "c,d" "$x"; then
+ printf "%20000s" X >>src/file.4.txt
+ fi &&
+ git -C src add file.4.txt &&
+ git -C src commit -m "mod $x" || return 1
+ done &&
+ git -C src push -u srv main
+'
+
+# Do partial fetch to fetch smaller files; then verify that without --refetch
+# applying a new filter does not refetch missing large objects. Then use
+# --refetch to apply the new filter on existing commits. Test it under both
+# protocol v2 & v0.
+test_expect_success 'apply a different filter using --refetch' '
+ git -C pc1 fetch --filter=blob:limit=999 origin &&
+ git -C pc1 rev-list --quiet --objects --missing=print \
+ main..origin/main >observed &&
+ test_line_count = 4 observed &&
+
+ git -C pc1 fetch --filter=blob:limit=19999 --refetch origin &&
+ git -C pc1 rev-list --quiet --objects --missing=print \
+ main..origin/main >observed &&
+ test_line_count = 2 observed &&
+
+ git -c protocol.version=0 -C pc1 fetch --filter=blob:limit=29999 \
+ --refetch origin &&
+ git -C pc1 rev-list --quiet --objects --missing=print \
+ main..origin/main >observed &&
+ test_line_count = 0 observed
+'
+
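As the comment above explains, --refetch lets an existing partial clone re-apply a (new) filter to commits it already has instead of only to newly fetched history. A hedged sketch of the plain command-line usage implied by the test; the repository name and blob limit are illustrative:

    # widen an existing blob:none partial clone to also include blobs up to 1m;
    # --refetch re-requests objects the client already claims to have, so the
    # new filter takes effect for existing commits too
    git -C my-partial-clone fetch --refetch --filter=blob:limit=1m origin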
+test_expect_success 'fetch --refetch works with a shallow clone' '
+ git clone --no-checkout --depth=1 --filter=blob:none "file://$(pwd)/srv.bare" pc1s &&
+ git -C pc1s rev-list --objects --missing=print HEAD >observed &&
+ test_line_count = 6 observed &&
+
+ GIT_TRACE=1 git -C pc1s fetch --filter=blob:limit=999 --refetch origin &&
+ git -C pc1s rev-list --objects --missing=print HEAD >observed &&
+ test_line_count = 6 observed
+'
+
+test_expect_success 'fetch --refetch triggers repacking' '
+ GIT_TRACE2_CONFIG_PARAMS=gc.autoPackLimit,maintenance.incremental-repack.auto &&
+ export GIT_TRACE2_CONFIG_PARAMS &&
+
+ GIT_TRACE2_EVENT="$PWD/trace1.event" \
+ git -C pc1 fetch --refetch origin &&
+ test_subcommand git maintenance run --auto --no-quiet <trace1.event &&
+ grep \"param\":\"gc.autopacklimit\",\"value\":\"1\" trace1.event &&
+ grep \"param\":\"maintenance.incremental-repack.auto\",\"value\":\"-1\" trace1.event &&
+
+ GIT_TRACE2_EVENT="$PWD/trace2.event" \
+ git -c protocol.version=0 \
+ -c gc.autoPackLimit=0 \
+ -c maintenance.incremental-repack.auto=1234 \
+ -C pc1 fetch --refetch origin &&
+ test_subcommand git maintenance run --auto --no-quiet <trace2.event &&
+ grep \"param\":\"gc.autopacklimit\",\"value\":\"0\" trace2.event &&
+ grep \"param\":\"maintenance.incremental-repack.auto\",\"value\":\"-1\" trace2.event &&
+
+ GIT_TRACE2_EVENT="$PWD/trace3.event" \
+ git -c protocol.version=0 \
+ -c gc.autoPackLimit=1234 \
+ -c maintenance.incremental-repack.auto=0 \
+ -C pc1 fetch --refetch origin &&
+ test_subcommand git maintenance run --auto --no-quiet <trace3.event &&
+ grep \"param\":\"gc.autopacklimit\",\"value\":\"1\" trace3.event &&
+ grep \"param\":\"maintenance.incremental-repack.auto\",\"value\":\"0\" trace3.event
+'
+
test_expect_success 'partial clone with transfer.fsckobjects=1 works with submodules' '
test_create_repo submodule &&
test_commit -C submodule mycommit &&
@@ -225,7 +304,7 @@ test_expect_success 'use fsck before and after manually fetching a missing subtr
# Auto-fetch all remaining trees and blobs with --missing=error
git -C dst rev-list --missing=error --objects main >fetched_objects &&
- test_line_count = 70 fetched_objects &&
+ test_line_count = 88 fetched_objects &&
awk -f print_1.awk fetched_objects |
xargs -n1 git -C dst cat-file -t >fetched_types &&
diff --git a/t/t5617-clone-submodules-remote.sh b/t/t5617-clone-submodules-remote.sh
index e2dbb4e..ca8f800 100755
--- a/t/t5617-clone-submodules-remote.sh
+++ b/t/t5617-clone-submodules-remote.sh
@@ -28,6 +28,13 @@ test_expect_success 'setup' '
)
'
+# bare clone giving "srv.bare" for use as our server.
+test_expect_success 'setup bare clone for server' '
+ git clone --bare "file://$(pwd)/." srv.bare &&
+ git -C srv.bare config --local uploadpack.allowfilter 1 &&
+ git -C srv.bare config --local uploadpack.allowanysha1inwant 1
+'
+
test_expect_success 'clone with --no-remote-submodules' '
test_when_finished "rm -rf super_clone" &&
git clone --recurse-submodules --no-remote-submodules "file://$pwd/." super_clone &&
@@ -65,4 +72,38 @@ test_expect_success 'clone with --single-branch' '
)
'
+# do basic partial clone from "srv.bare"
+# confirm partial clone was registered in the local config for super and sub.
+test_expect_success 'clone with --filter' '
+ git clone --recurse-submodules \
+ --filter blob:none --also-filter-submodules \
+ "file://$pwd/srv.bare" super_clone &&
+ test_cmp_config -C super_clone true remote.origin.promisor &&
+ test_cmp_config -C super_clone blob:none remote.origin.partialclonefilter &&
+ test_cmp_config -C super_clone/sub true remote.origin.promisor &&
+ test_cmp_config -C super_clone/sub blob:none remote.origin.partialclonefilter
+'
+
+# check that clone.filterSubmodules works (--also-filter-submodules can be
+# omitted)
+test_expect_success 'filters applied with clone.filterSubmodules' '
+ test_config_global clone.filterSubmodules true &&
+ git clone --recurse-submodules --filter blob:none \
+ "file://$pwd/srv.bare" super_clone2 &&
+ test_cmp_config -C super_clone2 true remote.origin.promisor &&
+ test_cmp_config -C super_clone2 blob:none remote.origin.partialclonefilter &&
+ test_cmp_config -C super_clone2/sub true remote.origin.promisor &&
+ test_cmp_config -C super_clone2/sub blob:none remote.origin.partialclonefilter
+'
+
+test_expect_success '--no-also-filter-submodules overrides clone.filterSubmodules=true' '
+ test_config_global clone.filterSubmodules true &&
+ git clone --recurse-submodules --filter blob:none \
+ --no-also-filter-submodules \
+ "file://$pwd/srv.bare" super_clone3 &&
+ test_cmp_config -C super_clone3 true remote.origin.promisor &&
+ test_cmp_config -C super_clone3 blob:none remote.origin.partialclonefilter &&
+ test_cmp_config -C super_clone3/sub false --default false remote.origin.promisor
+'
+
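The three tests above cover the new submodule filtering controls for git clone. A brief sketch of the equivalent invocations outside the test harness; the URL is illustrative:

    # propagate the partial-clone filter into submodules as well
    git clone --recurse-submodules --filter=blob:none --also-filter-submodules \
    	https://example.com/super.git
    # or enable the behaviour once via config and drop the flag
    git config --global clone.filterSubmodules true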
test_done
diff --git a/t/t5700-protocol-v1.sh b/t/t5700-protocol-v1.sh
index 468bd3e..6c8d4c6 100755
--- a/t/t5700-protocol-v1.sh
+++ b/t/t5700-protocol-v1.sh
@@ -149,6 +149,21 @@ test_expect_success 'push with file:// using protocol v1' '
grep "push< version 1" log
'
+test_expect_success 'cloning branchless tagless but not refless remote' '
+ rm -rf server client &&
+
+ git -c init.defaultbranch=main init server &&
+ echo foo >server/foo.txt &&
+ git -C server add foo.txt &&
+ git -C server commit -m "message" &&
+ git -C server update-ref refs/notbranch/alsonottag HEAD &&
+ git -C server checkout --detach &&
+ git -C server branch -D main &&
+ git -C server symbolic-ref HEAD refs/heads/nonexistentbranch &&
+
+ git -c protocol.version=1 clone "file://$(pwd)/server" client
+'
+
# Test protocol v1 with 'ssh://' transport
#
test_expect_success 'setup ssh wrapper' '
diff --git a/t/t5702-protocol-v2.sh b/t/t5702-protocol-v2.sh
index 710f33e..00ce9ae 100755
--- a/t/t5702-protocol-v2.sh
+++ b/t/t5702-protocol-v2.sh
@@ -619,7 +619,7 @@ test_expect_success 'usage: --negotiate-only without --negotiation-tip' '
setup_negotiate_only "$SERVER" "$URI" &&
cat >err.expect <<-\EOF &&
- fatal: --negotiate-only needs one or more --negotiate-tip=*
+ fatal: --negotiate-only needs one or more --negotiation-tip=*
EOF
test_must_fail git -c protocol.version=2 -C client fetch \
@@ -628,6 +628,18 @@ test_expect_success 'usage: --negotiate-only without --negotiation-tip' '
test_cmp err.expect err.actual
'
+test_expect_success 'usage: --negotiate-only with --recurse-submodules' '
+ cat >err.expect <<-\EOF &&
+ fatal: options '\''--negotiate-only'\'' and '\''--recurse-submodules'\'' cannot be used together
+ EOF
+
+ test_must_fail git -c protocol.version=2 -C client fetch \
+ --negotiate-only \
+ --recurse-submodules \
+ origin 2>err.actual &&
+ test_cmp err.expect err.actual
+'
+
test_expect_success 'file:// --negotiate-only' '
SERVER="server" &&
URI="file://$(pwd)/server" &&
diff --git a/t/t6005-rev-list-count.sh b/t/t6005-rev-list-count.sh
index 86542c6..e960049 100755
--- a/t/t6005-rev-list-count.sh
+++ b/t/t6005-rev-list-count.sh
@@ -2,7 +2,6 @@
test_description='git rev-list --max-count and --skip test'
-TEST_PASSES_SANITIZE_LEAK=true
. ./test-lib.sh
test_expect_success 'setup' '
@@ -14,39 +13,39 @@ test_expect_success 'setup' '
'
test_expect_success 'no options' '
- test $(git rev-list HEAD | wc -l) = 5
+ test_stdout_line_count = 5 git rev-list HEAD
'
test_expect_success '--max-count' '
- test $(git rev-list HEAD --max-count=0 | wc -l) = 0 &&
- test $(git rev-list HEAD --max-count=3 | wc -l) = 3 &&
- test $(git rev-list HEAD --max-count=5 | wc -l) = 5 &&
- test $(git rev-list HEAD --max-count=10 | wc -l) = 5
+ test_stdout_line_count = 0 git rev-list HEAD --max-count=0 &&
+ test_stdout_line_count = 3 git rev-list HEAD --max-count=3 &&
+ test_stdout_line_count = 5 git rev-list HEAD --max-count=5 &&
+ test_stdout_line_count = 5 git rev-list HEAD --max-count=10
'
test_expect_success '--max-count all forms' '
- test $(git rev-list HEAD --max-count=1 | wc -l) = 1 &&
- test $(git rev-list HEAD -1 | wc -l) = 1 &&
- test $(git rev-list HEAD -n1 | wc -l) = 1 &&
- test $(git rev-list HEAD -n 1 | wc -l) = 1
+ test_stdout_line_count = 1 git rev-list HEAD --max-count=1 &&
+ test_stdout_line_count = 1 git rev-list HEAD -1 &&
+ test_stdout_line_count = 1 git rev-list HEAD -n1 &&
+ test_stdout_line_count = 1 git rev-list HEAD -n 1
'
test_expect_success '--skip' '
- test $(git rev-list HEAD --skip=0 | wc -l) = 5 &&
- test $(git rev-list HEAD --skip=3 | wc -l) = 2 &&
- test $(git rev-list HEAD --skip=5 | wc -l) = 0 &&
- test $(git rev-list HEAD --skip=10 | wc -l) = 0
+ test_stdout_line_count = 5 git rev-list HEAD --skip=0 &&
+ test_stdout_line_count = 2 git rev-list HEAD --skip=3 &&
+ test_stdout_line_count = 0 git rev-list HEAD --skip=5 &&
+ test_stdout_line_count = 0 git rev-list HEAD --skip=10
'
test_expect_success '--skip --max-count' '
- test $(git rev-list HEAD --skip=0 --max-count=0 | wc -l) = 0 &&
- test $(git rev-list HEAD --skip=0 --max-count=10 | wc -l) = 5 &&
- test $(git rev-list HEAD --skip=3 --max-count=0 | wc -l) = 0 &&
- test $(git rev-list HEAD --skip=3 --max-count=1 | wc -l) = 1 &&
- test $(git rev-list HEAD --skip=3 --max-count=2 | wc -l) = 2 &&
- test $(git rev-list HEAD --skip=3 --max-count=10 | wc -l) = 2 &&
- test $(git rev-list HEAD --skip=5 --max-count=10 | wc -l) = 0 &&
- test $(git rev-list HEAD --skip=10 --max-count=10 | wc -l) = 0
+ test_stdout_line_count = 0 git rev-list HEAD --skip=0 --max-count=0 &&
+ test_stdout_line_count = 5 git rev-list HEAD --skip=0 --max-count=10 &&
+ test_stdout_line_count = 0 git rev-list HEAD --skip=3 --max-count=0 &&
+ test_stdout_line_count = 1 git rev-list HEAD --skip=3 --max-count=1 &&
+ test_stdout_line_count = 2 git rev-list HEAD --skip=3 --max-count=2 &&
+ test_stdout_line_count = 2 git rev-list HEAD --skip=3 --max-count=10 &&
+ test_stdout_line_count = 0 git rev-list HEAD --skip=5 --max-count=10 &&
+ test_stdout_line_count = 0 git rev-list HEAD --skip=10 --max-count=10
'
test_done
diff --git a/t/t6007-rev-list-cherry-pick-file.sh b/t/t6007-rev-list-cherry-pick-file.sh
index aebe4b6..6f3e543 100755
--- a/t/t6007-rev-list-cherry-pick-file.sh
+++ b/t/t6007-rev-list-cherry-pick-file.sh
@@ -58,7 +58,7 @@ EOF
test_expect_success '--left-right' '
git rev-list --left-right B...C > actual &&
- git name-rev --stdin --name-only --refs="*tags/*" \
+ git name-rev --annotate-stdin --name-only --refs="*tags/*" \
< actual > actual.named &&
test_cmp expect actual.named
'
@@ -78,14 +78,14 @@ EOF
test_expect_success '--cherry-pick bar does not come up empty' '
git rev-list --left-right --cherry-pick B...C -- bar > actual &&
- git name-rev --stdin --name-only --refs="*tags/*" \
+ git name-rev --annotate-stdin --name-only --refs="*tags/*" \
< actual > actual.named &&
test_cmp expect actual.named
'
test_expect_success 'bar does not come up empty' '
git rev-list --left-right B...C -- bar > actual &&
- git name-rev --stdin --name-only --refs="*tags/*" \
+ git name-rev --annotate-stdin --name-only --refs="*tags/*" \
< actual > actual.named &&
test_cmp expect actual.named
'
@@ -97,14 +97,14 @@ EOF
test_expect_success '--cherry-pick bar does not come up empty (II)' '
git rev-list --left-right --cherry-pick F...E -- bar > actual &&
- git name-rev --stdin --name-only --refs="*tags/*" \
+ git name-rev --annotate-stdin --name-only --refs="*tags/*" \
< actual > actual.named &&
test_cmp expect actual.named
'
test_expect_success 'name-rev multiple --refs combine inclusive' '
git rev-list --left-right --cherry-pick F...E -- bar >actual &&
- git name-rev --stdin --name-only --refs="*tags/F" --refs="*tags/E" \
+ git name-rev --annotate-stdin --name-only --refs="*tags/F" --refs="*tags/E" \
<actual >actual.named &&
test_cmp expect actual.named
'
@@ -116,7 +116,7 @@ EOF
test_expect_success 'name-rev --refs excludes non-matched patterns' '
git rev-list --left-right --right-only --cherry-pick F...E -- bar >>expect &&
git rev-list --left-right --cherry-pick F...E -- bar >actual &&
- git name-rev --stdin --name-only --refs="*tags/F" \
+ git name-rev --annotate-stdin --name-only --refs="*tags/F" \
<actual >actual.named &&
test_cmp expect actual.named
'
@@ -128,14 +128,14 @@ EOF
test_expect_success 'name-rev --exclude excludes matched patterns' '
git rev-list --left-right --right-only --cherry-pick F...E -- bar >>expect &&
git rev-list --left-right --cherry-pick F...E -- bar >actual &&
- git name-rev --stdin --name-only --refs="*tags/*" --exclude="*E" \
+ git name-rev --annotate-stdin --name-only --refs="*tags/*" --exclude="*E" \
<actual >actual.named &&
test_cmp expect actual.named
'
test_expect_success 'name-rev --no-refs clears the refs list' '
git rev-list --left-right --cherry-pick F...E -- bar >expect &&
- git name-rev --stdin --name-only --refs="*tags/F" --refs="*tags/E" --no-refs --refs="*tags/G" \
+ git name-rev --annotate-stdin --name-only --refs="*tags/F" --refs="*tags/E" --no-refs --refs="*tags/G" \
<expect >actual &&
test_cmp expect actual
'
@@ -149,7 +149,7 @@ EOF
test_expect_success '--cherry-mark' '
git rev-list --cherry-mark F...E -- bar > actual &&
- git name-rev --stdin --name-only --refs="*tags/*" \
+ git name-rev --annotate-stdin --name-only --refs="*tags/*" \
< actual > actual.named &&
test_cmp expect actual.named
'
@@ -163,7 +163,7 @@ EOF
test_expect_success '--cherry-mark --left-right' '
git rev-list --cherry-mark --left-right F...E -- bar > actual &&
- git name-rev --stdin --name-only --refs="*tags/*" \
+ git name-rev --annotate-stdin --name-only --refs="*tags/*" \
< actual > actual.named &&
test_cmp expect actual.named
'
@@ -174,14 +174,14 @@ EOF
test_expect_success '--cherry-pick --right-only' '
git rev-list --cherry-pick --right-only F...E -- bar > actual &&
- git name-rev --stdin --name-only --refs="*tags/*" \
+ git name-rev --annotate-stdin --name-only --refs="*tags/*" \
< actual > actual.named &&
test_cmp expect actual.named
'
test_expect_success '--cherry-pick --left-only' '
git rev-list --cherry-pick --left-only E...F -- bar > actual &&
- git name-rev --stdin --name-only --refs="*tags/*" \
+ git name-rev --annotate-stdin --name-only --refs="*tags/*" \
< actual > actual.named &&
test_cmp expect actual.named
'
@@ -193,7 +193,7 @@ EOF
test_expect_success '--cherry' '
git rev-list --cherry F...E -- bar > actual &&
- git name-rev --stdin --name-only --refs="*tags/*" \
+ git name-rev --annotate-stdin --name-only --refs="*tags/*" \
< actual > actual.named &&
test_cmp expect actual.named
'
diff --git a/t/t6012-rev-list-simplify.sh b/t/t6012-rev-list-simplify.sh
index 4f7fa8b..de1e87f 100755
--- a/t/t6012-rev-list-simplify.sh
+++ b/t/t6012-rev-list-simplify.sh
@@ -12,17 +12,18 @@ note () {
}
unnote () {
- git name-rev --tags --stdin | sed -e "s|$OID_REGEX (tags/\([^)]*\)) |\1 |g"
+ test_when_finished "rm -f tmp" &&
+ git name-rev --tags --annotate-stdin >tmp &&
+ sed -e "s|$OID_REGEX (tags/\([^)]*\)) |\1 |g" <tmp
}
#
-# Create a test repo with interesting commit graph:
+# Create a test repo with an interesting commit graph:
#
-# A--B----------G--H--I--K--L
-# \ \ / /
-# \ \ / /
-# C------E---F J
-# \_/
+# A-----B-----G--H--I--K--L
+# \ \ / /
+# \ \ / /
+# C--D--E--F J
#
# The commits are laid out from left-to-right starting with
# the root commit A and terminating at the tip commit L.
@@ -112,8 +113,8 @@ check_outcome () {
shift &&
param="$*" &&
test_expect_$outcome "log $param" '
- git log --pretty="$FMT" --parents $param |
- unnote >actual &&
+ git log --pretty="$FMT" --parents $param >out &&
+ unnote >actual <out &&
sed -e "s/^.* \([^ ]*\) .*/\1/" >check <actual &&
test_cmp expect check
'
@@ -142,11 +143,18 @@ check_result 'I B A' --author-date-order -- file
check_result 'H' --first-parent -- another-file
check_result 'H' --first-parent --topo-order -- another-file
+check_result 'L K I H G B A' --first-parent L
+check_result 'F E D C' --exclude-first-parent-only F ^L
+check_result '' F ^L
+check_result 'L K I H G J' L ^F
+check_result 'L K I H G B J' --exclude-first-parent-only L ^F
+check_result 'L K I H G B' --exclude-first-parent-only --first-parent L ^F
+
check_result 'E C B A' --full-history E -- lost
test_expect_success 'full history simplification without parent' '
printf "%s\n" E C B A >expect &&
- git log --pretty="$FMT" --full-history E -- lost |
- unnote >actual &&
+ git log --pretty="$FMT" --full-history E -- lost >out &&
+ unnote >actual <out &&
sed -e "s/^.* \([^ ]*\) .*/\1/" >check <actual &&
test_cmp expect check
'
diff --git a/t/t6020-bundle-misc.sh b/t/t6020-bundle-misc.sh
index b13e8a5..8332051 100755
--- a/t/t6020-bundle-misc.sh
+++ b/t/t6020-bundle-misc.sh
@@ -122,6 +122,8 @@ format_and_save_expect () {
sed -e 's/Z$//' >expect
}
+HASH_MESSAGE="The bundle uses this hash algorithm: $GIT_DEFAULT_HASH"
+
# (C) (D, pull/1/head, topic/1)
# o --- o
# / \ (L)
@@ -194,11 +196,12 @@ test_expect_success 'create bundle from special rev: main^!' '
git bundle verify special-rev.bdl |
make_user_friendly_and_stable_output >actual &&
- format_and_save_expect <<-\EOF &&
+ format_and_save_expect <<-EOF &&
The bundle contains this ref:
<COMMIT-P> refs/heads/main
The bundle requires this ref:
<COMMIT-O> Z
+ $HASH_MESSAGE
EOF
test_cmp expect actual &&
@@ -215,12 +218,13 @@ test_expect_success 'create bundle with --max-count option' '
git bundle verify max-count.bdl |
make_user_friendly_and_stable_output >actual &&
- format_and_save_expect <<-\EOF &&
+ format_and_save_expect <<-EOF &&
The bundle contains these 2 refs:
<COMMIT-P> refs/heads/main
<TAG-1> refs/tags/v1
The bundle requires this ref:
<COMMIT-O> Z
+ $HASH_MESSAGE
EOF
test_cmp expect actual &&
@@ -240,7 +244,7 @@ test_expect_success 'create bundle with --since option' '
git bundle verify since.bdl |
make_user_friendly_and_stable_output >actual &&
- format_and_save_expect <<-\EOF &&
+ format_and_save_expect <<-EOF &&
The bundle contains these 5 refs:
<COMMIT-P> refs/heads/main
<COMMIT-N> refs/heads/release
@@ -250,6 +254,7 @@ test_expect_success 'create bundle with --since option' '
The bundle requires these 2 refs:
<COMMIT-M> Z
<COMMIT-K> Z
+ $HASH_MESSAGE
EOF
test_cmp expect actual &&
@@ -267,11 +272,12 @@ test_expect_success 'create bundle 1 - no prerequisites' '
EOF
git bundle create stdin-1.bdl --stdin <input &&
- cat >expect <<-\EOF &&
+ format_and_save_expect <<-EOF &&
The bundle contains these 2 refs:
<COMMIT-D> refs/heads/topic/1
<COMMIT-H> refs/heads/topic/2
The bundle records a complete history.
+ $HASH_MESSAGE
EOF
# verify bundle, which has no prerequisites
@@ -308,13 +314,14 @@ test_expect_success 'create bundle 2 - has prerequisites' '
--stdin \
release <input &&
- format_and_save_expect <<-\EOF &&
+ format_and_save_expect <<-EOF &&
The bundle contains this ref:
<COMMIT-N> refs/heads/release
The bundle requires these 3 refs:
<COMMIT-D> Z
<COMMIT-E> Z
<COMMIT-G> Z
+ $HASH_MESSAGE
EOF
git bundle verify 2.bdl |
@@ -367,13 +374,14 @@ test_expect_success 'create bundle 3 - two refs, same object' '
--stdin \
main HEAD <input &&
- format_and_save_expect <<-\EOF &&
+ format_and_save_expect <<-EOF &&
The bundle contains these 2 refs:
<COMMIT-P> refs/heads/main
<COMMIT-P> HEAD
The bundle requires these 2 refs:
<COMMIT-M> Z
<COMMIT-K> Z
+ $HASH_MESSAGE
EOF
git bundle verify 3.bdl |
@@ -409,12 +417,13 @@ test_expect_success 'create bundle 4 - with tags' '
--stdin \
--all <input &&
- cat >expect <<-\EOF &&
+ cat >expect <<-EOF &&
The bundle contains these 3 refs:
<TAG-1> refs/tags/v1
<TAG-2> refs/tags/v2
<TAG-3> refs/tags/v3
The bundle records a complete history.
+ $HASH_MESSAGE
EOF
git bundle verify 4.bdl |
@@ -475,4 +484,79 @@ test_expect_success 'clone from bundle' '
test_cmp expect actual
'
+test_expect_success 'unfiltered bundle with --objects' '
+ git bundle create all-objects.bdl \
+ --all --objects &&
+ git bundle create all.bdl \
+ --all &&
+
+ # Compare the headers of these files.
+ sed -n -e "/^$/q" -e "p" all.bdl >expect &&
+ sed -n -e "/^$/q" -e "p" all-objects.bdl >actual &&
+ test_cmp expect actual
+'
+
+for filter in "blob:none" "tree:0" "tree:1" "blob:limit=100"
+do
+ test_expect_success "filtered bundle: $filter" '
+ test_when_finished rm -rf .git/objects/pack cloned unbundled &&
+ git bundle create partial.bdl \
+ --all \
+ --filter=$filter &&
+
+ git bundle verify partial.bdl >unfiltered &&
+ make_user_friendly_and_stable_output <unfiltered >actual &&
+
+ cat >expect <<-EOF &&
+ The bundle contains these 10 refs:
+ <COMMIT-P> refs/heads/main
+ <COMMIT-N> refs/heads/release
+ <COMMIT-D> refs/heads/topic/1
+ <COMMIT-H> refs/heads/topic/2
+ <COMMIT-D> refs/pull/1/head
+ <COMMIT-G> refs/pull/2/head
+ <TAG-1> refs/tags/v1
+ <TAG-2> refs/tags/v2
+ <TAG-3> refs/tags/v3
+ <COMMIT-P> HEAD
+ The bundle records a complete history.
+ $HASH_MESSAGE
+ The bundle uses this filter: $filter
+ EOF
+ test_cmp expect actual &&
+
+ test_config uploadpack.allowfilter 1 &&
+ test_config uploadpack.allowanysha1inwant 1 &&
+ git clone --no-local --filter=$filter --bare "file://$(pwd)" cloned &&
+
+ git init unbundled &&
+ git -C unbundled bundle unbundle ../partial.bdl >ref-list.txt &&
+ ls unbundled/.git/objects/pack/pack-*.promisor >promisor &&
+ test_line_count = 1 promisor &&
+
+ # Count the same number of reachable objects.
+ reflist=$(git for-each-ref --format="%(objectname)") &&
+ git rev-list --objects --filter=$filter --missing=allow-any \
+ $reflist >expect &&
+ for repo in cloned unbundled
+ do
+ git -C $repo rev-list --objects --missing=allow-any \
+ $reflist >actual &&
+ test_cmp expect actual || return 1
+ done
+ '
+done
+
+# NEEDSWORK: 'git clone --bare' should be able to clone from a filtered
+# bundle, but that requires a change to promisor/filter config options.
+# For now, we fail gracefully with a helpful error. This behavior can be
+# changed in the future to succeed as much as possible.
+test_expect_success 'cloning from filtered bundle has useful error' '
+ git bundle create partial.bdl \
+ --all \
+ --filter=blob:none &&
+ test_must_fail git clone --bare partial.bdl partial 2>err &&
+ grep "cannot clone from filtered bundle" err
+'
+
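The filter loop and the NEEDSWORK test above exercise object filters on bundles. A minimal sketch of the workflow the loop verifies, with names taken from the tests themselves:

    # create a bundle that omits all blobs; verify reports the filter
    git bundle create partial.bdl --all --filter=blob:none
    git bundle verify partial.bdl
    # unbundling into a repository leaves a .promisor file next to the pack,
    # marking the missing objects as intentionally absent
    git init unbundled
    git -C unbundled bundle unbundle ../partial.bdl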
test_done
diff --git a/t/t6030-bisect-porcelain.sh b/t/t6030-bisect-porcelain.sh
index 1be85d0..5382e5d 100755
--- a/t/t6030-bisect-porcelain.sh
+++ b/t/t6030-bisect-porcelain.sh
@@ -278,6 +278,51 @@ test_expect_success '"git bisect run" with more complex "git bisect start"' '
git bisect reset
'
+test_expect_success 'bisect run accepts exit code 126 as bad' '
+ test_when_finished "git bisect reset" &&
+ write_script test_script.sh <<-\EOF &&
+ ! grep Another hello || exit 126 >/dev/null
+ EOF
+ git bisect start &&
+ git bisect good $HASH1 &&
+ git bisect bad $HASH4 &&
+ git bisect run ./test_script.sh >my_bisect_log.txt &&
+ grep "$HASH3 is the first bad commit" my_bisect_log.txt
+'
+
+test_expect_success POSIXPERM 'bisect run fails with non-executable test script' '
+ test_when_finished "git bisect reset" &&
+ >not-executable.sh &&
+ chmod -x not-executable.sh &&
+ git bisect start &&
+ git bisect good $HASH1 &&
+ git bisect bad $HASH4 &&
+ test_must_fail git bisect run ./not-executable.sh >my_bisect_log.txt &&
+ ! grep "is the first bad commit" my_bisect_log.txt
+'
+
+test_expect_success 'bisect run accepts exit code 127 as bad' '
+ test_when_finished "git bisect reset" &&
+ write_script test_script.sh <<-\EOF &&
+ ! grep Another hello || exit 127 >/dev/null
+ EOF
+ git bisect start &&
+ git bisect good $HASH1 &&
+ git bisect bad $HASH4 &&
+ git bisect run ./test_script.sh >my_bisect_log.txt &&
+ grep "$HASH3 is the first bad commit" my_bisect_log.txt
+'
+
+test_expect_success 'bisect run fails with missing test script' '
+ test_when_finished "git bisect reset" &&
+ rm -f does-not-exist.sh &&
+ git bisect start &&
+ git bisect good $HASH1 &&
+ git bisect bad $HASH4 &&
+ test_must_fail git bisect run ./does-not-exist.sh >my_bisect_log.txt &&
+ ! grep "is the first bad commit" my_bisect_log.txt
+'
+
# $HASH1 is good, $HASH5 is bad, we skip $HASH3
# but $HASH4 is good,
# so we should find $HASH5 as the first bad commit
diff --git a/t/t6102-rev-list-unexpected-objects.sh b/t/t6102-rev-list-unexpected-objects.sh
index 6f0902b..cf0195e 100755
--- a/t/t6102-rev-list-unexpected-objects.sh
+++ b/t/t6102-rev-list-unexpected-objects.sh
@@ -17,8 +17,13 @@ test_expect_success 'setup unexpected non-blob entry' '
broken_tree="$(git hash-object -w --literally -t tree broken-tree)"
'
-test_expect_failure 'traverse unexpected non-blob entry (lone)' '
- test_must_fail git rev-list --objects $broken_tree
+test_expect_success !SANITIZE_LEAK 'TODO (should fail!): traverse unexpected non-blob entry (lone)' '
+ sed "s/Z$//" >expect <<-EOF &&
+ $broken_tree Z
+ $tree foo
+ EOF
+ git rev-list --objects $broken_tree >actual &&
+ test_cmp expect actual
'
test_expect_success 'traverse unexpected non-blob entry (seen)' '
@@ -116,8 +121,8 @@ test_expect_success 'setup unexpected non-blob tag' '
tag=$(git hash-object -w --literally -t tag broken-tag)
'
-test_expect_failure 'traverse unexpected non-blob tag (lone)' '
- test_must_fail git rev-list --objects $tag
+test_expect_success !SANITIZE_LEAK 'TODO (should fail!): traverse unexpected non-blob tag (lone)' '
+ git rev-list --objects $tag
'
test_expect_success 'traverse unexpected non-blob tag (seen)' '
diff --git a/t/t6111-rev-list-treesame.sh b/t/t6111-rev-list-treesame.sh
index e07b607..90ff141 100755
--- a/t/t6111-rev-list-treesame.sh
+++ b/t/t6111-rev-list-treesame.sh
@@ -23,7 +23,8 @@ note () {
}
unnote () {
- git name-rev --tags --stdin | sed -e "s|$OID_REGEX (tags/\([^)]*\))\([ ]\)|\1\2|g"
+ git name-rev --tags --annotate-stdin | \
+ sed -e "s|$OID_REGEX (tags/\([^)]*\))\([ ]\)|\1\2|g"
}
test_expect_success setup '
diff --git a/t/t6120-describe.sh b/t/t6120-describe.sh
index d8af2bb..9a35e78 100755
--- a/t/t6120-describe.sh
+++ b/t/t6120-describe.sh
@@ -270,7 +270,7 @@ test_expect_success 'name-rev --all' '
test_cmp expect actual
'
-test_expect_success 'name-rev --stdin' '
+test_expect_success 'name-rev --annotate-stdin' '
>expect.unsorted &&
for rev in $(git rev-list --all)
do
@@ -278,11 +278,16 @@ test_expect_success 'name-rev --stdin' '
echo "$rev ($name)" >>expect.unsorted || return 1
done &&
sort <expect.unsorted >expect &&
- git rev-list --all | git name-rev --stdin >actual.unsorted &&
+ git rev-list --all | git name-rev --annotate-stdin >actual.unsorted &&
sort <actual.unsorted >actual &&
test_cmp expect actual
'
+test_expect_success 'name-rev --stdin deprecated' "
+ git rev-list --all | git name-rev --stdin 2>actual &&
+ grep -E 'warning: --stdin is deprecated' actual
+"
+
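Many hunks in this patch switch callers from name-rev --stdin to the new --annotate-stdin spelling. A short sketch of both forms as these tests use them; the old option is kept as a deprecated alias that now warns:

    # annotate each object id read from stdin with a tag-relative name
    git rev-list --all | git name-rev --annotate-stdin --name-only --refs="*tags/*"
    # the historical spelling still works but prints
    # "warning: --stdin is deprecated" on stderr
    git rev-list --all | git name-rev --stdin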
test_expect_success 'describe --contains with the exact tags' '
echo "A^0" >expect &&
tag_object=$(git rev-parse refs/tags/A) &&
@@ -483,6 +488,124 @@ test_expect_success 'name-rev covers all conditions while looking at parents' '
)
'
+# A-B-C-D-E-main
+#
+# Where C has a non-monotonically increasing commit timestamp w.r.t. other
+# commits
+test_expect_success 'non-monotonic commit dates setup' '
+ UNIX_EPOCH_ZERO="@0 +0000" &&
+ git init non-monotonic &&
+ test_commit -C non-monotonic A &&
+ test_commit -C non-monotonic --no-tag B &&
+ test_commit -C non-monotonic --no-tag --date "$UNIX_EPOCH_ZERO" C &&
+ test_commit -C non-monotonic D &&
+ test_commit -C non-monotonic E
+'
+
+test_expect_success 'name-rev with commitGraph handles non-monotonic timestamps' '
+ test_config -C non-monotonic core.commitGraph true &&
+ (
+ cd non-monotonic &&
+
+ git commit-graph write --reachable &&
+
+ echo "main~3 tags/D~2" >expect &&
+ git name-rev --tags main~3 >actual &&
+
+ test_cmp expect actual
+ )
+'
+
+test_expect_success 'name-rev --all works with non-monotonic timestamps' '
+ test_config -C non-monotonic core.commitGraph false &&
+ (
+ cd non-monotonic &&
+
+ rm -rf .git/info/commit-graph* &&
+
+ cat >tags <<-\EOF &&
+ tags/E
+ tags/D
+ tags/D~1
+ tags/D~2
+ tags/A
+ EOF
+
+ git log --pretty=%H >revs &&
+
+ paste -d" " revs tags | sort >expect &&
+
+ git name-rev --tags --all | sort >actual &&
+ test_cmp expect actual
+ )
+'
+
+test_expect_success 'name-rev --annotate-stdin works with non-monotonic timestamps' '
+ test_config -C non-monotonic core.commitGraph false &&
+ (
+ cd non-monotonic &&
+
+ rm -rf .git/info/commit-graph* &&
+
+ cat >expect <<-\EOF &&
+ E
+ D
+ D~1
+ D~2
+ A
+ EOF
+
+ git log --pretty=%H >revs &&
+ git name-rev --tags --annotate-stdin --name-only <revs >actual &&
+ test_cmp expect actual
+ )
+'
+
+test_expect_success 'name-rev --all works with commitGraph' '
+ test_config -C non-monotonic core.commitGraph true &&
+ (
+ cd non-monotonic &&
+
+ git commit-graph write --reachable &&
+
+ cat >tags <<-\EOF &&
+ tags/E
+ tags/D
+ tags/D~1
+ tags/D~2
+ tags/A
+ EOF
+
+ git log --pretty=%H >revs &&
+
+ paste -d" " revs tags | sort >expect &&
+
+ git name-rev --tags --all | sort >actual &&
+ test_cmp expect actual
+ )
+'
+
+test_expect_success 'name-rev --annotate-stdin works with commitGraph' '
+ test_config -C non-monotonic core.commitGraph true &&
+ (
+ cd non-monotonic &&
+
+ git commit-graph write --reachable &&
+
+ cat >expect <<-\EOF &&
+ E
+ D
+ D~1
+ D~2
+ A
+ EOF
+
+ git log --pretty=%H >revs &&
+ git name-rev --tags --annotate-stdin --name-only <revs >actual &&
+ test_cmp expect actual
+ )
+'
+
# B
# o
# \
diff --git a/t/t6404-recursive-merge.sh b/t/t6404-recursive-merge.sh
index eaf48e9..b8735c6 100755
--- a/t/t6404-recursive-merge.sh
+++ b/t/t6404-recursive-merge.sh
@@ -108,8 +108,13 @@ test_expect_success 'refuse to merge binary files' '
printf "\0\0" >binary-file &&
git add binary-file &&
git commit -m binary2 &&
- test_must_fail git merge F >merge.out 2>merge.err &&
- grep "Cannot merge binary files: binary-file (HEAD vs. F)" merge.err
+ if test "$GIT_TEST_MERGE_ALGORITHM" = ort
+ then
+ test_must_fail git merge F >merge_output
+ else
+ test_must_fail git merge F 2>merge_output
+ fi &&
+ grep "Cannot merge binary files: binary-file (HEAD vs. F)" merge_output
'
test_expect_success 'mark rename/delete as unmerged' '
diff --git a/t/t6406-merge-attr.sh b/t/t6406-merge-attr.sh
index 57e6af5..99abefd 100755
--- a/t/t6406-merge-attr.sh
+++ b/t/t6406-merge-attr.sh
@@ -221,8 +221,13 @@ test_expect_success 'binary files with union attribute' '
printf "two\0" >bin.txt &&
git commit -am two &&
- test_must_fail git merge bin-main 2>stderr &&
- grep -i "warning.*cannot merge.*HEAD vs. bin-main" stderr
+ if test "$GIT_TEST_MERGE_ALGORITHM" = ort
+ then
+ test_must_fail git merge bin-main >output
+ else
+ test_must_fail git merge bin-main 2>output
+ fi &&
+ grep -i "warning.*cannot merge.*HEAD vs. bin-main" output
'
test_done
diff --git a/t/t6407-merge-binary.sh b/t/t6407-merge-binary.sh
index 8e6241f..0753fc9 100755
--- a/t/t6407-merge-binary.sh
+++ b/t/t6407-merge-binary.sh
@@ -43,14 +43,9 @@ test_expect_success resolve '
rm -f a* m* &&
git reset --hard anchor &&
- if git merge -s resolve main
- then
- echo Oops, should not have succeeded
- false
- else
- git ls-files -s >current &&
- test_cmp expect current
- fi
+ test_must_fail git merge -s resolve main &&
+ git ls-files -s >current &&
+ test_cmp expect current
'
test_expect_success recursive '
@@ -58,14 +53,9 @@ test_expect_success recursive '
rm -f a* m* &&
git reset --hard anchor &&
- if git merge -s recursive main
- then
- echo Oops, should not have succeeded
- false
- else
- git ls-files -s >current &&
- test_cmp expect current
- fi
+ test_must_fail git merge -s recursive main &&
+ git ls-files -s >current &&
+ test_cmp expect current
'
test_done
diff --git a/t/t6423-merge-rename-directories.sh b/t/t6423-merge-rename-directories.sh
index 5b81a13..479db32 100755
--- a/t/t6423-merge-rename-directories.sh
+++ b/t/t6423-merge-rename-directories.sh
@@ -4421,14 +4421,14 @@ test_setup_12c1 () {
git checkout A &&
git mv node2/ node1/ &&
- for i in `git ls-files`; do echo side A >>$i; done &&
+ for i in $(git ls-files); do echo side A >>$i; done &&
git add -u &&
test_tick &&
git commit -m "A" &&
git checkout B &&
git mv node1/ node2/ &&
- for i in `git ls-files`; do echo side B >>$i; done &&
+ for i in $(git ls-files); do echo side B >>$i; done &&
git add -u &&
test_tick &&
git commit -m "B"
@@ -4511,7 +4511,7 @@ test_setup_12c2 () {
git checkout A &&
git mv node2/ node1/ &&
- for i in `git ls-files`; do echo side A >>$i; done &&
+ for i in $(git ls-files); do echo side A >>$i; done &&
git add -u &&
echo leaf5 >node1/leaf5 &&
git add node1/leaf5 &&
@@ -4520,7 +4520,7 @@ test_setup_12c2 () {
git checkout B &&
git mv node1/ node2/ &&
- for i in `git ls-files`; do echo side B >>$i; done &&
+ for i in $(git ls-files); do echo side B >>$i; done &&
git add -u &&
echo leaf6 >node2/leaf6 &&
git add node2/leaf6 &&
@@ -4759,7 +4759,7 @@ test_setup_12f () {
echo g >dir/subdir/tweaked/g &&
echo h >dir/subdir/tweaked/h &&
test_seq 20 30 >dir/subdir/tweaked/Makefile &&
- for i in `test_seq 1 88`; do
+ for i in $(test_seq 1 88); do
echo content $i >dir/unchanged/file_$i
done &&
git add . &&
diff --git a/t/t6428-merge-conflicts-sparse.sh b/t/t6428-merge-conflicts-sparse.sh
index 7e8bf49..142c9aa 100755
--- a/t/t6428-merge-conflicts-sparse.sh
+++ b/t/t6428-merge-conflicts-sparse.sh
@@ -112,7 +112,7 @@ test_expect_success 'conflicting entries written to worktree even if sparse' '
)
'
-test_expect_merge_algorithm failure success 'present-despite-SKIP_WORKTREE handled reasonably' '
+test_expect_success 'present-despite-SKIP_WORKTREE handled reasonably' '
test_setup_numerals in_the_way &&
(
cd numerals_in_the_way &&
@@ -132,26 +132,13 @@ test_expect_merge_algorithm failure success 'present-despite-SKIP_WORKTREE handl
test_must_fail git merge -s recursive B^0 &&
- git ls-files -t >index_files &&
- test_cmp expected-index index_files &&
+ test_path_is_missing .git/MERGE_HEAD &&
- test_path_is_file README &&
test_path_is_file numerals &&
- test_cmp expected-merge numerals &&
-
- # There should still be a file with "foobar" in it
- grep foobar * &&
-
- # 5 other files:
- # * expected-merge
- # * expected-index
- # * index_files
- # * others
- # * whatever name was given to the numerals file that had
- # "foobar" in it
- git ls-files -o >others &&
- test_line_count = 5 others
+ # numerals should still have "foobar" in it
+ echo foobar >expect &&
+ test_cmp expect numerals
)
'
diff --git a/t/t6429-merge-sequence-rename-caching.sh b/t/t6429-merge-sequence-rename-caching.sh
index 035edc4..f2bc8a7 100755
--- a/t/t6429-merge-sequence-rename-caching.sh
+++ b/t/t6429-merge-sequence-rename-caching.sh
@@ -697,4 +697,71 @@ test_expect_success 'caching renames only on upstream side, part 2' '
)
'
+#
+# The following testcase just creates two simple renames (slightly modified
+# on both sides but without conflicting changes), and a directory full of
+# files that are otherwise uninteresting. The setup is as follows:
+#
+# base: unrelated/<BUNCH OF FILES>
+# numbers
+# values
+# upstream: modify: numbers
+# modify: values
+# topic: add: unrelated/foo
+# modify: numbers
+# modify: values
+# rename: numbers -> sequence
+# rename: values -> progression
+#
+# This is a trivial rename case, but we're curious what happens with a very
+# low renameLimit interacting with the restart optimization trying to notice
+# that unrelated/ looks like a trivial merge candidate.
+#
+test_expect_success 'avoid assuming we detected renames' '
+ git init redo-weirdness &&
+ (
+ cd redo-weirdness &&
+
+ mkdir unrelated &&
+ for i in $(test_seq 1 10)
+ do
+ >unrelated/$i
+ done &&
+ test_seq 2 10 >numbers &&
+ test_seq 12 20 >values &&
+ git add numbers values unrelated/ &&
+ git commit -m orig &&
+
+ git branch upstream &&
+ git branch topic &&
+
+ git switch upstream &&
+ test_seq 1 10 >numbers &&
+ test_seq 11 20 >values &&
+ git add numbers &&
+ git commit -m "Some tweaks" &&
+
+ git switch topic &&
+
+ >unrelated/foo &&
+ test_seq 2 12 >numbers &&
+ test_seq 12 22 >values &&
+ git add numbers values unrelated/ &&
+ git mv numbers sequence &&
+ git mv values progression &&
+ git commit -m A &&
+
+ #
+ # Actual testing
+ #
+
+ git switch --detach topic^0 &&
+
+ test_must_fail git -c merge.renameLimit=1 rebase upstream &&
+
+ git ls-files -u >actual &&
+ ! test_file_is_empty actual
+ )
+'
+
test_done
diff --git a/t/t6500-gc.sh b/t/t6500-gc.sh
index c202126..cd6c533 100755
--- a/t/t6500-gc.sh
+++ b/t/t6500-gc.sh
@@ -101,12 +101,12 @@ test_expect_success 'pre-auto-gc hook can stop auto gc' '
EOF
git init pre-auto-gc-hook &&
+ test_hook -C pre-auto-gc-hook pre-auto-gc <<-\EOF &&
+ echo >&2 no gc for you &&
+ exit 1
+ EOF
(
cd pre-auto-gc-hook &&
- write_script ".git/hooks/pre-auto-gc" <<-\EOF &&
- echo >&2 no gc for you &&
- exit 1
- EOF
git config gc.auto 3 &&
git config gc.autoDetach false &&
@@ -128,14 +128,12 @@ test_expect_success 'pre-auto-gc hook can stop auto gc' '
See "git help gc" for manual housekeeping.
EOF
- (
- cd pre-auto-gc-hook &&
- write_script ".git/hooks/pre-auto-gc" <<-\EOF &&
- echo >&2 will gc for you &&
- exit 0
- EOF
- git gc --auto >../out.actual 2>../err.actual
- ) &&
+ test_hook -C pre-auto-gc-hook --clobber pre-auto-gc <<-\EOF &&
+ echo >&2 will gc for you &&
+ exit 0
+ EOF
+
+ git -C pre-auto-gc-hook gc --auto >out.actual 2>err.actual &&
test_must_be_empty out.actual &&
test_cmp err.expect err.actual
diff --git a/t/t7001-mv.sh b/t/t7001-mv.sh
index 963356b..a402908 100755
--- a/t/t7001-mv.sh
+++ b/t/t7001-mv.sh
@@ -4,6 +4,25 @@ test_description='git mv in subdirs'
. ./test-lib.sh
. "$TEST_DIRECTORY"/lib-diff-data.sh
+test_expect_success 'mv -f refreshes updated index entry' '
+ echo test >bar &&
+ git add bar &&
+ git commit -m test &&
+
+ echo foo >foo &&
+ git add foo &&
+
+ # Wait one second to ensure ctime of rename will differ from original
+ # file creation ctime.
+ sleep 1 &&
+ git mv -f foo bar &&
+ git reset --merge HEAD &&
+
+ # Verify the index has been reset
+ git diff-files >out &&
+ test_must_be_empty out
+'
+
test_expect_success 'prepare reference tree' '
mkdir path0 path1 &&
COPYING_test_data >path0/COPYING &&
diff --git a/t/t7012-skip-worktree-writing.sh b/t/t7012-skip-worktree-writing.sh
index a1080b9..cb9f1a6 100755
--- a/t/t7012-skip-worktree-writing.sh
+++ b/t/t7012-skip-worktree-writing.sh
@@ -171,50 +171,20 @@ test_expect_success 'stash restore in sparse checkout' '
# Put a file in the working directory in the way
echo in the way >modified &&
- git stash apply &&
+ test_must_fail git stash apply 2>error &&
- # Ensure stash vivifies modifies paths...
- cat >expect <<-EOF &&
- H addme
- H modified
- H removeme
- H subdir/A
- S untouched
- EOF
- git ls-files -t >actual &&
- test_cmp expect actual &&
+ grep "changes.*would be overwritten by merge" error &&
- # ...and that the paths show up in status as changed...
- cat >expect <<-EOF &&
- A addme
- M modified
- D removeme
- M subdir/A
- ?? actual
- ?? expect
- ?? modified.stash.XXXXXX
- EOF
- git status --porcelain | \
- sed -e s/stash......./stash.XXXXXX/ >actual &&
- test_cmp expect actual &&
+ echo in the way >expect &&
+ test_cmp expect modified &&
+ git diff --quiet HEAD ":!modified" &&
# ...and that working directory reflects the files correctly
- test_path_is_file addme &&
+ test_path_is_missing addme &&
test_path_is_file modified &&
test_path_is_missing removeme &&
test_path_is_file subdir/A &&
- test_path_is_missing untouched &&
-
- # ...including that we have the expected "modified" file...
- cat >expect <<-EOF &&
- modified
- tweaked
- EOF
- test_cmp expect modified &&
-
- # ...and that the other "modified" file is still present...
- echo in the way >expect &&
- test_cmp expect modified.stash.*
+ test_path_is_missing untouched
)
'
diff --git a/t/t7063-status-untracked-cache.sh b/t/t7063-status-untracked-cache.sh
index a0c123b..ca90ee8 100755
--- a/t/t7063-status-untracked-cache.sh
+++ b/t/t7063-status-untracked-cache.sh
@@ -90,6 +90,9 @@ test_expect_success 'setup' '
cd worktree &&
mkdir done dtwo dthree &&
touch one two three done/one dtwo/two dthree/three &&
+ test-tool chmtime =-300 one two three done/one dtwo/two dthree/three &&
+ test-tool chmtime =-300 done dtwo dthree &&
+ test-tool chmtime =-300 . &&
git add one two done/one &&
: >.git/info/exclude &&
git update-index --untracked-cache &&
@@ -142,7 +145,6 @@ two
EOF
test_expect_success 'status first time (empty cache)' '
- avoid_racy &&
: >../trace.output &&
GIT_TRACE2_PERF="$TRASH_DIRECTORY/trace.output" \
git status --porcelain >../actual &&
@@ -166,7 +168,6 @@ test_expect_success 'untracked cache after first status' '
'
test_expect_success 'status second time (fully populated cache)' '
- avoid_racy &&
: >../trace.output &&
GIT_TRACE2_PERF="$TRASH_DIRECTORY/trace.output" \
git status --porcelain >../actual &&
@@ -190,8 +191,8 @@ test_expect_success 'untracked cache after second status' '
'
test_expect_success 'modify in root directory, one dir invalidation' '
- avoid_racy &&
: >four &&
+ test-tool chmtime =-240 four &&
: >../trace.output &&
GIT_TRACE2_PERF="$TRASH_DIRECTORY/trace.output" \
git status --porcelain >../actual &&
@@ -241,7 +242,6 @@ EOF
'
test_expect_success 'new .gitignore invalidates recursively' '
- avoid_racy &&
echo four >.gitignore &&
: >../trace.output &&
GIT_TRACE2_PERF="$TRASH_DIRECTORY/trace.output" \
@@ -292,7 +292,6 @@ EOF
'
test_expect_success 'new info/exclude invalidates everything' '
- avoid_racy &&
echo three >>.git/info/exclude &&
: >../trace.output &&
GIT_TRACE2_PERF="$TRASH_DIRECTORY/trace.output" \
@@ -520,14 +519,14 @@ test_expect_success 'create/modify files, some of which are gitignored' '
echo three >done/three && # three is gitignored
echo four >done/four && # four is gitignored at a higher level
echo five >done/five && # five is not gitignored
- echo test >base && #we need to ensure that the root dir is touched
- rm base &&
+ test-tool chmtime =-180 done/two done/three done/four done/five done &&
+ # we need to ensure that the root dir is touched (in the past);
+ test-tool chmtime =-180 . &&
sync_mtime
'
test_expect_success 'test sparse status with untracked cache' '
: >../trace.output &&
- avoid_racy &&
GIT_TRACE2_PERF="$TRASH_DIRECTORY/trace.output" \
git status --porcelain >../status.actual &&
iuc status --porcelain >../status.iuc &&
@@ -570,7 +569,6 @@ EOF
'
test_expect_success 'test sparse status again with untracked cache' '
- avoid_racy &&
: >../trace.output &&
GIT_TRACE2_PERF="$TRASH_DIRECTORY/trace.output" \
git status --porcelain >../status.actual &&
@@ -597,11 +595,11 @@ EOF
test_expect_success 'set up for test of subdir and sparse checkouts' '
mkdir done/sub &&
mkdir done/sub/sub &&
- echo "sub" > done/sub/sub/file
+ echo "sub" > done/sub/sub/file &&
+ test-tool chmtime =-120 done/sub/sub/file done/sub/sub done/sub done
'
test_expect_success 'test sparse status with untracked cache and subdir' '
- avoid_racy &&
: >../trace.output &&
GIT_TRACE2_PERF="$TRASH_DIRECTORY/trace.output" \
git status --porcelain >../status.actual &&
@@ -651,7 +649,6 @@ EOF
'
test_expect_success 'test sparse status again with untracked cache and subdir' '
- avoid_racy &&
: >../trace.output &&
GIT_TRACE2_PERF="$TRASH_DIRECTORY/trace.output" \
git status --porcelain >../status.actual &&
diff --git a/t/t7102-reset.sh b/t/t7102-reset.sh
index d054260..22477f3 100755
--- a/t/t7102-reset.sh
+++ b/t/t7102-reset.sh
@@ -462,14 +462,40 @@ test_expect_success 'resetting an unmodified path is a no-op' '
git diff-index --cached --exit-code HEAD
'
+test_reset_refreshes_index () {
+
+ # To test whether the index is refreshed in `git reset --mixed` with
+ # the given options, create a scenario where we clearly see different
+ # results depending on whether the refresh occurred or not.
+
+ # Step 0: start with a clean index
+ git reset --hard HEAD &&
+
+ # Step 1: remove file2, but only in the index (no change to worktree)
+ git rm --cached file2 &&
+
+ # Step 2: reset index & leave worktree unchanged from HEAD
+ git $1 reset $2 --mixed HEAD &&
+
+ # Step 3: verify whether the index is refreshed by checking whether
+ # file2 still has staged changes in the index differing from HEAD (if
+ # the refresh occurred, there should be no such changes)
+ git diff-files >output.log &&
+ test_must_be_empty output.log
+}
+
test_expect_success '--mixed refreshes the index' '
- cat >expect <<-\EOF &&
- Unstaged changes after reset:
- M file2
- EOF
- echo 123 >>file2 &&
- git reset --mixed HEAD >output &&
- test_cmp expect output
+ # Verify default behavior (without --[no-]refresh or reset.refresh)
+ test_reset_refreshes_index &&
+
+ # With --quiet
+ test_reset_refreshes_index "" --quiet
+'
+
+test_expect_success '--mixed --[no-]refresh sets refresh behavior' '
+ # Verify that --[no-]refresh controls index refresh
+ test_reset_refreshes_index "" --refresh &&
+ ! test_reset_refreshes_index "" --no-refresh
'
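The helper above stages an index-only removal and then checks whether the mixed reset refreshed stat information. A hedged sketch of the new user-facing switches it exercises:

    # refresh the index after the reset (the default behaviour)
    git reset --mixed --refresh HEAD
    # skip the refresh, e.g. to avoid the lstat() cost in a very large worktree
    git reset --mixed --no-refresh HEAD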
test_expect_success '--mixed preserves skip-worktree' '
diff --git a/t/t7103-reset-bare.sh b/t/t7103-reset-bare.sh
index 0de83e3..a60153f 100755
--- a/t/t7103-reset-bare.sh
+++ b/t/t7103-reset-bare.sh
@@ -63,9 +63,12 @@ test_expect_success '"mixed" reset is not allowed in bare' '
test_must_fail git reset --mixed HEAD^
'
-test_expect_success '"soft" reset is allowed in bare' '
+test_expect_success !SANITIZE_LEAK '"soft" reset is allowed in bare' '
git reset --soft HEAD^ &&
- test "$(git show --pretty=format:%s | head -n 1)" = "one"
+ git show --pretty=format:%s >out &&
+ echo one >expect &&
+ head -n 1 out >actual &&
+ test_cmp expect actual
'
test_done
diff --git a/t/t7113-post-index-change-hook.sh b/t/t7113-post-index-change-hook.sh
index a21781d..58e55a7 100755
--- a/t/t7113-post-index-change-hook.sh
+++ b/t/t7113-post-index-change-hook.sh
@@ -17,8 +17,7 @@ test_expect_success 'setup' '
'
test_expect_success 'test status, add, commit, others trigger hook without flags set' '
- mkdir -p .git/hooks &&
- write_script .git/hooks/post-index-change <<-\EOF &&
+ test_hook post-index-change <<-\EOF &&
if test "$1" -eq 1; then
echo "Invalid combination of flags passed to hook; updated_workdir is set." >testfailure
exit 1
@@ -63,7 +62,7 @@ test_expect_success 'test status, add, commit, others trigger hook without flags
'
test_expect_success 'test checkout and reset trigger the hook' '
- write_script .git/hooks/post-index-change <<-\EOF &&
+ test_hook post-index-change <<-\EOF &&
if test "$1" -eq 1 && test "$2" -eq 1; then
echo "Invalid combination of flags passed to hook; updated_workdir and updated_skipworktree are both set." >testfailure
exit 1
@@ -106,7 +105,7 @@ test_expect_success 'test checkout and reset trigger the hook' '
'
test_expect_success 'test reset --mixed and update-index triggers the hook' '
- write_script .git/hooks/post-index-change <<-\EOF &&
+ test_hook post-index-change <<-\EOF &&
if test "$1" -eq 1 && test "$2" -eq 1; then
echo "Invalid combination of flags passed to hook; updated_workdir and updated_skipworktree are both set." >testfailure
exit 1
diff --git a/t/t7406-submodule-update.sh b/t/t7406-submodule-update.sh
index 11cccbb..000e055 100755
--- a/t/t7406-submodule-update.sh
+++ b/t/t7406-submodule-update.sh
@@ -205,8 +205,18 @@ test_expect_success 'submodule update should fail due to local changes' '
(cd submodule &&
compare_head
) &&
- test_must_fail git submodule update submodule
- )
+ test_must_fail git submodule update submodule 2>../actual.raw
+ ) &&
+ sed "s/^> //" >expect <<-\EOF &&
+ > error: Your local changes to the following files would be overwritten by checkout:
+ > file
+ > Please commit your changes or stash them before you switch branches.
+ > Aborting
+ > fatal: Unable to checkout OID in submodule path '\''submodule'\''
+ EOF
+ sed -e "s/checkout $SQ[^$SQ]*$SQ/checkout OID/" <actual.raw >actual &&
+ test_cmp expect actual
+
'
test_expect_success 'submodule update should throw away changes with --force ' '
(cd super &&
@@ -1061,4 +1071,16 @@ test_expect_success 'submodule update --quiet passes quietness to fetch with a s
)
'
+test_expect_success 'submodule update --filter requires --init' '
+ test_expect_code 129 git -C super submodule update --filter blob:none
+'
+
+test_expect_success 'submodule update --filter sets partial clone settings' '
+ test_when_finished "rm -rf super-filter" &&
+ git clone cloned super-filter &&
+ git -C super-filter submodule update --init --filter blob:none &&
+ test_cmp_config -C super-filter/submodule true remote.origin.promisor &&
+ test_cmp_config -C super-filter/submodule blob:none remote.origin.partialclonefilter
+'
+
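A short sketch of the submodule update --filter usage the two tests above check; the submodule path is illustrative:

    # --filter is only accepted together with --init
    git submodule update --init --filter blob:none
    # the submodule then records its origin as a promisor remote
    git -C path/to/sub config remote.origin.partialclonefilter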
test_done
diff --git a/t/t7408-submodule-reference.sh b/t/t7408-submodule-reference.sh
index a3892f4..c3a4545 100755
--- a/t/t7408-submodule-reference.sh
+++ b/t/t7408-submodule-reference.sh
@@ -193,7 +193,19 @@ test_expect_success 'missing nested submodule alternate fails clone and submodul
cd supersuper-clone &&
check_that_two_of_three_alternates_are_used &&
# update of the submodule fails
- test_must_fail git submodule update --init --recursive
+ cat >expect <<-\EOF &&
+ fatal: submodule '\''sub'\'' cannot add alternate: path ... does not exist
+ Failed to clone '\''sub'\''. Retry scheduled
+ fatal: submodule '\''sub-dissociate'\'' cannot add alternate: path ... does not exist
+ Failed to clone '\''sub-dissociate'\''. Retry scheduled
+ fatal: submodule '\''sub'\'' cannot add alternate: path ... does not exist
+ Failed to clone '\''sub'\'' a second time, aborting
+ fatal: Failed to recurse into submodule path ...
+ EOF
+ test_must_fail git submodule update --init --recursive 2>err &&
+ grep -e fatal: -e ^Failed err >actual.raw &&
+ sed -e "s/path $SQ[^$SQ]*$SQ/path .../" <actual.raw >actual &&
+ test_cmp expect actual
)
'
diff --git a/t/t7500-commit-template-squash-signoff.sh b/t/t7500-commit-template-squash-signoff.sh
index 9196465..5fcaa0b 100755
--- a/t/t7500-commit-template-squash-signoff.sh
+++ b/t/t7500-commit-template-squash-signoff.sh
@@ -442,7 +442,7 @@ test_expect_success '--fixup=reword: give error with pathsec' '
'
test_expect_success '--fixup=reword: -F give error message' '
- echo "fatal: Only one of -c/-C/-F/--fixup can be used." >expect &&
+ echo "fatal: options '\''-F'\'' and '\''--fixup'\'' cannot be used together" >expect &&
test_must_fail git commit --fixup=reword:HEAD~ -F msg 2>actual &&
test_cmp expect actual
'
diff --git a/t/t7503-pre-commit-and-pre-merge-commit-hooks.sh b/t/t7503-pre-commit-and-pre-merge-commit-hooks.sh
index 606d8d0..ad1eb64 100755
--- a/t/t7503-pre-commit-and-pre-merge-commit-hooks.sh
+++ b/t/t7503-pre-commit-and-pre-merge-commit-hooks.sh
@@ -7,37 +7,6 @@ export GIT_TEST_DEFAULT_INITIAL_BRANCH_NAME
. ./test-lib.sh
-HOOKDIR="$(git rev-parse --git-dir)/hooks"
-PRECOMMIT="$HOOKDIR/pre-commit"
-PREMERGE="$HOOKDIR/pre-merge-commit"
-
-# Prepare sample scripts that write their $0 to actual_hooks
-test_expect_success 'sample script setup' '
- mkdir -p "$HOOKDIR" &&
- write_script "$HOOKDIR/success.sample" <<-\EOF &&
- echo $0 >>actual_hooks
- exit 0
- EOF
- write_script "$HOOKDIR/fail.sample" <<-\EOF &&
- echo $0 >>actual_hooks
- exit 1
- EOF
- write_script "$HOOKDIR/non-exec.sample" <<-\EOF &&
- echo $0 >>actual_hooks
- exit 1
- EOF
- chmod -x "$HOOKDIR/non-exec.sample" &&
- write_script "$HOOKDIR/require-prefix.sample" <<-\EOF &&
- echo $0 >>actual_hooks
- test $GIT_PREFIX = "success/"
- EOF
- write_script "$HOOKDIR/check-author.sample" <<-\EOF
- echo $0 >>actual_hooks
- test "$GIT_AUTHOR_NAME" = "New Author" &&
- test "$GIT_AUTHOR_EMAIL" = "newauthor@example.com"
- EOF
-'
-
test_expect_success 'root commit' '
echo "root" >file &&
git add file &&
@@ -96,10 +65,16 @@ test_expect_success '--no-verify with no hook (merge)' '
test_path_is_missing actual_hooks
'
+setup_success_hook () {
+ test_when_finished "rm -f actual_hooks expected_hooks" &&
+ echo "$1" >expected_hooks &&
+ test_hook "$1" <<-EOF
+ echo $1 >>actual_hooks
+ EOF
+}
+
test_expect_success 'with succeeding hook' '
- test_when_finished "rm -f \"$PRECOMMIT\" expected_hooks actual_hooks" &&
- cp "$HOOKDIR/success.sample" "$PRECOMMIT" &&
- echo "$PRECOMMIT" >expected_hooks &&
+ setup_success_hook "pre-commit" &&
echo "more" >>file &&
git add file &&
git commit -m "more" &&
@@ -107,9 +82,7 @@ test_expect_success 'with succeeding hook' '
'
test_expect_success 'with succeeding hook (merge)' '
- test_when_finished "rm -f \"$PREMERGE\" expected_hooks actual_hooks" &&
- cp "$HOOKDIR/success.sample" "$PREMERGE" &&
- echo "$PREMERGE" >expected_hooks &&
+ setup_success_hook "pre-merge-commit" &&
git checkout side &&
git merge -m "merge main" main &&
git checkout main &&
@@ -117,17 +90,14 @@ test_expect_success 'with succeeding hook (merge)' '
'
test_expect_success 'automatic merge fails; both hooks are available' '
- test_when_finished "rm -f \"$PREMERGE\" \"$PRECOMMIT\"" &&
- test_when_finished "rm -f expected_hooks actual_hooks" &&
- test_when_finished "git checkout main" &&
- cp "$HOOKDIR/success.sample" "$PREMERGE" &&
- cp "$HOOKDIR/success.sample" "$PRECOMMIT" &&
+ setup_success_hook "pre-commit" &&
+ setup_success_hook "pre-merge-commit" &&
git checkout conflicting-a &&
test_must_fail git merge -m "merge conflicting-b" conflicting-b &&
test_path_is_missing actual_hooks &&
- echo "$PRECOMMIT" >expected_hooks &&
+ echo "pre-commit" >expected_hooks &&
echo a+b >conflicting &&
git add conflicting &&
git commit -m "resolve conflict" &&
@@ -135,8 +105,7 @@ test_expect_success 'automatic merge fails; both hooks are available' '
'
test_expect_success '--no-verify with succeeding hook' '
- test_when_finished "rm -f \"$PRECOMMIT\" actual_hooks" &&
- cp "$HOOKDIR/success.sample" "$PRECOMMIT" &&
+ setup_success_hook "pre-commit" &&
echo "even more" >>file &&
git add file &&
git commit --no-verify -m "even more" &&
@@ -144,8 +113,7 @@ test_expect_success '--no-verify with succeeding hook' '
'
test_expect_success '--no-verify with succeeding hook (merge)' '
- test_when_finished "rm -f \"$PREMERGE\" actual_hooks" &&
- cp "$HOOKDIR/success.sample" "$PREMERGE" &&
+ setup_success_hook "pre-merge-commit" &&
git branch -f side side-orig &&
git checkout side &&
git merge --no-verify -m "merge main" main &&
@@ -153,10 +121,19 @@ test_expect_success '--no-verify with succeeding hook (merge)' '
test_path_is_missing actual_hooks
'
+setup_failing_hook () {
+ test_when_finished "rm -f actual_hooks" &&
+ test_hook "$1" <<-EOF
+ echo $1-failing-hook >>actual_hooks
+ exit 1
+ EOF
+}
+
test_expect_success 'with failing hook' '
- test_when_finished "rm -f \"$PRECOMMIT\" expected_hooks actual_hooks" &&
- cp "$HOOKDIR/fail.sample" "$PRECOMMIT" &&
- echo "$PRECOMMIT" >expected_hooks &&
+ setup_failing_hook "pre-commit" &&
+ test_when_finished "rm -f expected_hooks" &&
+ echo "pre-commit-failing-hook" >expected_hooks &&
+
echo "another" >>file &&
git add file &&
test_must_fail git commit -m "another" &&
@@ -164,8 +141,7 @@ test_expect_success 'with failing hook' '
'
test_expect_success '--no-verify with failing hook' '
- test_when_finished "rm -f \"$PRECOMMIT\" actual_hooks" &&
- cp "$HOOKDIR/fail.sample" "$PRECOMMIT" &&
+ setup_failing_hook "pre-commit" &&
echo "stuff" >>file &&
git add file &&
git commit --no-verify -m "stuff" &&
@@ -173,9 +149,8 @@ test_expect_success '--no-verify with failing hook' '
'
test_expect_success 'with failing hook (merge)' '
- test_when_finished "rm -f \"$PREMERGE\" expected_hooks actual_hooks" &&
- cp "$HOOKDIR/fail.sample" "$PREMERGE" &&
- echo "$PREMERGE" >expected_hooks &&
+ setup_failing_hook "pre-merge-commit" &&
+ echo "pre-merge-commit-failing-hook" >expected_hooks &&
git checkout side &&
test_must_fail git merge -m "merge main" main &&
git checkout main &&
@@ -183,8 +158,8 @@ test_expect_success 'with failing hook (merge)' '
'
test_expect_success '--no-verify with failing hook (merge)' '
- test_when_finished "rm -f \"$PREMERGE\" actual_hooks" &&
- cp "$HOOKDIR/fail.sample" "$PREMERGE" &&
+ setup_failing_hook "pre-merge-commit" &&
+
git branch -f side side-orig &&
git checkout side &&
git merge --no-verify -m "merge main" main &&
@@ -192,9 +167,18 @@ test_expect_success '--no-verify with failing hook (merge)' '
test_path_is_missing actual_hooks
'
+setup_non_exec_hook () {
+ test_when_finished "rm -f actual_hooks" &&
+ test_hook "$1" <<-\EOF &&
+ echo non-exec >>actual_hooks
+ exit 1
+ EOF
+ test_hook --disable "$1"
+}
+
+
test_expect_success POSIXPERM 'with non-executable hook' '
- test_when_finished "rm -f \"$PRECOMMIT\" actual_hooks" &&
- cp "$HOOKDIR/non-exec.sample" "$PRECOMMIT" &&
+ setup_non_exec_hook "pre-commit" &&
echo "content" >>file &&
git add file &&
git commit -m "content" &&
@@ -202,8 +186,7 @@ test_expect_success POSIXPERM 'with non-executable hook' '
'
test_expect_success POSIXPERM '--no-verify with non-executable hook' '
- test_when_finished "rm -f \"$PRECOMMIT\" actual_hooks" &&
- cp "$HOOKDIR/non-exec.sample" "$PRECOMMIT" &&
+ setup_non_exec_hook "pre-commit" &&
echo "more content" >>file &&
git add file &&
git commit --no-verify -m "more content" &&
@@ -211,8 +194,7 @@ test_expect_success POSIXPERM '--no-verify with non-executable hook' '
'
test_expect_success POSIXPERM 'with non-executable hook (merge)' '
- test_when_finished "rm -f \"$PREMERGE\" actual_hooks" &&
- cp "$HOOKDIR/non-exec.sample" "$PREMERGE" &&
+	setup_non_exec_hook "pre-merge-commit" &&
git branch -f side side-orig &&
git checkout side &&
git merge -m "merge main" main &&
@@ -221,8 +203,7 @@ test_expect_success POSIXPERM 'with non-executable hook (merge)' '
'
test_expect_success POSIXPERM '--no-verify with non-executable hook (merge)' '
- test_when_finished "rm -f \"$PREMERGE\" actual_hooks" &&
- cp "$HOOKDIR/non-exec.sample" "$PREMERGE" &&
+	setup_non_exec_hook "pre-merge-commit" &&
git branch -f side side-orig &&
git checkout side &&
git merge --no-verify -m "merge main" main &&
@@ -230,10 +211,18 @@ test_expect_success POSIXPERM '--no-verify with non-executable hook (merge)' '
test_path_is_missing actual_hooks
'
+setup_require_prefix_hook () {
+ test_when_finished "rm -f expected_hooks" &&
+ echo require-prefix >expected_hooks &&
+ test_hook pre-commit <<-\EOF
+ echo require-prefix >>actual_hooks
+ test $GIT_PREFIX = "success/"
+ EOF
+}
+
test_expect_success 'with hook requiring GIT_PREFIX' '
- test_when_finished "rm -rf \"$PRECOMMIT\" expected_hooks actual_hooks success" &&
- cp "$HOOKDIR/require-prefix.sample" "$PRECOMMIT" &&
- echo "$PRECOMMIT" >expected_hooks &&
+ test_when_finished "rm -rf actual_hooks success" &&
+ setup_require_prefix_hook &&
echo "more content" >>file &&
git add file &&
mkdir success &&
@@ -245,9 +234,8 @@ test_expect_success 'with hook requiring GIT_PREFIX' '
'
test_expect_success 'with failing hook requiring GIT_PREFIX' '
- test_when_finished "rm -rf \"$PRECOMMIT\" expected_hooks actual_hooks fail" &&
- cp "$HOOKDIR/require-prefix.sample" "$PRECOMMIT" &&
- echo "$PRECOMMIT" >expected_hooks &&
+ test_when_finished "rm -rf actual_hooks fail" &&
+ setup_require_prefix_hook &&
echo "more content" >>file &&
git add file &&
mkdir fail &&
@@ -259,13 +247,23 @@ test_expect_success 'with failing hook requiring GIT_PREFIX' '
test_cmp expected_hooks actual_hooks
'
+setup_require_author_hook () {
+ test_when_finished "rm -f expected_hooks actual_hooks" &&
+ echo check-author >expected_hooks &&
+ test_hook pre-commit <<-\EOF
+ echo check-author >>actual_hooks
+ test "$GIT_AUTHOR_NAME" = "New Author" &&
+ test "$GIT_AUTHOR_EMAIL" = "newauthor@example.com"
+ EOF
+}
+
+
test_expect_success 'check the author in hook' '
- test_when_finished "rm -f \"$PRECOMMIT\" expected_hooks actual_hooks" &&
- cp "$HOOKDIR/check-author.sample" "$PRECOMMIT" &&
+ setup_require_author_hook &&
cat >expected_hooks <<-EOF &&
- $PRECOMMIT
- $PRECOMMIT
- $PRECOMMIT
+ check-author
+ check-author
+ check-author
EOF
test_must_fail git commit --allow-empty -m "by a.u.thor" &&
(
diff --git a/t/t7504-commit-msg-hook.sh b/t/t7504-commit-msg-hook.sh
index bba58f0..a39de8c 100755
--- a/t/t7504-commit-msg-hook.sh
+++ b/t/t7504-commit-msg-hook.sh
@@ -54,15 +54,11 @@ test_expect_success '--no-verify with no hook (editor)' '
'
-# now install hook that always succeeds
-HOOKDIR="$(git rev-parse --git-dir)/hooks"
-HOOK="$HOOKDIR/commit-msg"
-mkdir -p "$HOOKDIR"
-cat > "$HOOK" <<EOF
-#!/bin/sh
-exit 0
-EOF
-chmod +x "$HOOK"
+test_expect_success 'setup: commit-msg hook that always succeeds' '
+ test_hook --setup commit-msg <<-\EOF
+ exit 0
+ EOF
+'
test_expect_success 'with succeeding hook' '
@@ -98,11 +94,11 @@ test_expect_success '--no-verify with succeeding hook (editor)' '
'
-# now a hook that fails
-cat > "$HOOK" <<EOF
-#!/bin/sh
-exit 1
-EOF
+test_expect_success 'setup: commit-msg hook that always fails' '
+ test_hook --clobber commit-msg <<-\EOF
+ exit 1
+ EOF
+'
commit_msg_is () {
test "$(git log --pretty=format:%s%b -1)" = "$1"
@@ -176,8 +172,12 @@ test_expect_success 'merge bypasses failing hook with --no-verify' '
commit_msg_is "Merge branch '\''main'\'' into newbranch"
'
+test_expect_success 'setup: commit-msg hook made non-executable' '
+ git_dir="$(git rev-parse --git-dir)" &&
+ chmod -x "$git_dir/hooks/commit-msg"
+'
+
-chmod -x "$HOOK"
test_expect_success POSIXPERM 'with non-executable hook' '
echo "content" >file &&
@@ -212,13 +212,12 @@ test_expect_success POSIXPERM '--no-verify with non-executable hook (editor)' '
'
-# now a hook that edits the commit message
-cat > "$HOOK" <<'EOF'
-#!/bin/sh
-echo "new message" > "$1"
-exit 0
-EOF
-chmod +x "$HOOK"
+test_expect_success 'setup: commit-msg hook that edits the commit message' '
+ test_hook --clobber commit-msg <<-\EOF
+ echo "new message" >"$1"
+ exit 0
+ EOF
+'
test_expect_success 'hook edits commit message' '
diff --git a/t/t7505-prepare-commit-msg-hook.sh b/t/t7505-prepare-commit-msg-hook.sh
index e39c809..2128142 100755
--- a/t/t7505-prepare-commit-msg-hook.sh
+++ b/t/t7505-prepare-commit-msg-hook.sh
@@ -47,25 +47,19 @@ test_expect_success 'with no hook' '
'
-# set up fake editor for interactive editing
-cat > fake-editor <<'EOF'
-#!/bin/sh
-exit 0
-EOF
-chmod +x fake-editor
-
-## Not using test_set_editor here so we can easily ensure the editor variable
-## is only set for the editor tests
-FAKE_EDITOR="$(pwd)/fake-editor"
-export FAKE_EDITOR
+test_expect_success 'setup fake editor for interactive editing' '
+ write_script fake-editor <<-\EOF &&
+ exit 0
+ EOF
-# now install hook that always succeeds and adds a message
-HOOKDIR="$(git rev-parse --git-dir)/hooks"
-HOOK="$HOOKDIR/prepare-commit-msg"
-mkdir -p "$HOOKDIR"
-echo "#!$SHELL_PATH" > "$HOOK"
-cat >> "$HOOK" <<'EOF'
+ ## Not using test_set_editor here so we can easily ensure the editor variable
+ ## is only set for the editor tests
+ FAKE_EDITOR="$(pwd)/fake-editor" &&
+ export FAKE_EDITOR
+'
+test_expect_success 'setup prepare-commit-msg hook' '
+ test_hook --setup prepare-commit-msg <<\EOF
GIT_DIR=$(git rev-parse --git-dir)
if test -d "$GIT_DIR/rebase-merge"
then
@@ -103,7 +97,7 @@ else
fi
exit 0
EOF
-chmod +x "$HOOK"
+'
echo dummy template > "$(git rev-parse --git-dir)/template"
@@ -265,10 +259,11 @@ test_expect_success 'with hook and editor (cherry-pick)' '
test "$(git log -1 --pretty=format:%s)" = merge
'
-cat > "$HOOK" <<'EOF'
-#!/bin/sh
-exit 1
-EOF
+test_expect_success 'setup: prepare-commit-msg hook that always fails' '
+ test_hook --setup --clobber prepare-commit-msg <<-\EOF
+ exit 1
+ EOF
+'
test_expect_success 'with failing hook' '
@@ -296,9 +291,9 @@ test_expect_success 'with failing hook (merge)' '
git checkout -B other HEAD@{1} &&
echo "more" >> file &&
git add file &&
- rm -f "$HOOK" &&
+ test_hook --remove prepare-commit-msg &&
git commit -m other &&
- write_script "$HOOK" <<-EOF &&
+ test_hook --setup prepare-commit-msg <<-\EOF &&
exit 1
EOF
git checkout - &&
diff --git a/t/t7508-status.sh b/t/t7508-status.sh
index 05c6c02..2b7ef6c 100755
--- a/t/t7508-status.sh
+++ b/t/t7508-status.sh
@@ -1647,13 +1647,33 @@ test_expect_success '"Initial commit" should not be noted in commit template' '
'
test_expect_success '--no-optional-locks prevents index update' '
- test-tool chmtime =1234567890 .git/index &&
+ test_set_magic_mtime .git/index &&
git --no-optional-locks status &&
- test-tool chmtime --get .git/index >out &&
- grep ^1234567890 out &&
+ test_is_magic_mtime .git/index &&
git status &&
- test-tool chmtime --get .git/index >out &&
- ! grep ^1234567890 out
+ ! test_is_magic_mtime .git/index
+'
+
+test_expect_success 'racy timestamps will be fixed for clean worktree' '
+ echo content >racy-dirty &&
+ echo content >racy-racy &&
+ git add racy* &&
+ git commit -m "racy test files" &&
+ # let status rewrite the index, if necessary; after that we expect
+ # no more index writes unless caused by racy timestamps; note that
+ # timestamps may already be racy now (depending on previous tests)
+ git status &&
+ test_set_magic_mtime .git/index &&
+ git status &&
+ ! test_is_magic_mtime .git/index
+'
+
+test_expect_success 'racy timestamps will be fixed for dirty worktree' '
+ echo content2 >racy-dirty &&
+ git status &&
+ test_set_magic_mtime .git/index &&
+ git status &&
+ ! test_is_magic_mtime .git/index
'
test_done
diff --git a/t/t7519-status-fsmonitor.sh b/t/t7519-status-fsmonitor.sh
index a6308ac..d4f9c6a 100755
--- a/t/t7519-status-fsmonitor.sh
+++ b/t/t7519-status-fsmonitor.sh
@@ -26,7 +26,7 @@ dirty_repo () {
}
write_integration_script () {
- write_script .git/hooks/fsmonitor-test<<-\EOF
+ test_hook --setup --clobber fsmonitor-test<<-\EOF
if test "$#" -ne 2
then
echo "$0: exactly 2 arguments expected"
@@ -56,7 +56,6 @@ test_lazy_prereq UNTRACKED_CACHE '
'
test_expect_success 'setup' '
- mkdir -p .git/hooks &&
: >tracked &&
: >modified &&
mkdir dir1 &&
@@ -108,7 +107,7 @@ EOF
# test that "update-index --fsmonitor-valid" sets the fsmonitor valid bit
test_expect_success 'update-index --fsmonitor-valid" sets the fsmonitor valid bit' '
- write_script .git/hooks/fsmonitor-test<<-\EOF &&
+ test_hook fsmonitor-test<<-\EOF &&
printf "last_update_token\0"
EOF
git update-index --fsmonitor &&
@@ -169,7 +168,7 @@ EOF
# test that newly added files are marked valid
test_expect_success 'newly added files are marked valid' '
- write_script .git/hooks/fsmonitor-test<<-\EOF &&
+ test_hook --setup --clobber fsmonitor-test<<-\EOF &&
printf "last_update_token\0"
EOF
git add new &&
@@ -210,7 +209,7 @@ EOF
# test that *only* files returned by the integration script get flagged as invalid
test_expect_success '*only* files returned by the integration script get flagged as invalid' '
- write_script .git/hooks/fsmonitor-test<<-\EOF &&
+ test_hook --clobber fsmonitor-test<<-\EOF &&
printf "last_update_token\0"
printf "dir1/modified\0"
EOF
@@ -231,7 +230,7 @@ test_expect_success 'refresh_index() invalidates fsmonitor cache' '
dirty_repo &&
write_integration_script &&
git add . &&
- write_script .git/hooks/fsmonitor-test<<-\EOF &&
+ test_hook --clobber fsmonitor-test<<-\EOF &&
EOF
git commit -m "to reset" &&
git reset HEAD~1 &&
@@ -280,7 +279,7 @@ do
# Make sure it's actually skipping the check for modified and untracked
# (if enabled) files unless it is told about them.
test_expect_success "status doesn't detect unreported modifications" '
- write_script .git/hooks/fsmonitor-test<<-\EOF &&
+ test_hook --clobber fsmonitor-test<<-\EOF &&
printf "last_update_token\0"
:>marker
EOF
@@ -322,19 +321,25 @@ test_expect_success UNTRACKED_CACHE 'ignore .git changes when invalidating UNTR'
test_create_repo dot-git &&
(
cd dot-git &&
- mkdir -p .git/hooks &&
: >tracked &&
+ test-tool chmtime =-60 tracked &&
: >modified &&
+ test-tool chmtime =-60 modified &&
mkdir dir1 &&
: >dir1/tracked &&
+ test-tool chmtime =-60 dir1/tracked &&
: >dir1/modified &&
+ test-tool chmtime =-60 dir1/modified &&
mkdir dir2 &&
: >dir2/tracked &&
+ test-tool chmtime =-60 dir2/tracked &&
: >dir2/modified &&
+ test-tool chmtime =-60 dir2/modified &&
write_integration_script &&
git config core.fsmonitor .git/hooks/fsmonitor-test &&
git update-index --untracked-cache &&
git update-index --fsmonitor &&
+ git status &&
GIT_TRACE2_PERF="$TRASH_DIRECTORY/trace-before" \
git status &&
test-tool dump-untracked-cache >../before
@@ -407,14 +412,14 @@ test_expect_success 'status succeeds with sparse index' '
git -C sparse sparse-checkout init --cone --sparse-index &&
git -C sparse sparse-checkout set dir1 dir2 &&
- write_script .git/hooks/fsmonitor-test <<-\EOF &&
+ test_hook --clobber fsmonitor-test <<-\EOF &&
printf "last_update_token\0"
EOF
git -C full config core.fsmonitor ../.git/hooks/fsmonitor-test &&
git -C sparse config core.fsmonitor ../.git/hooks/fsmonitor-test &&
check_sparse_index_behavior ! &&
- write_script .git/hooks/fsmonitor-test <<-\EOF &&
+ test_hook --clobber fsmonitor-test <<-\EOF &&
printf "last_update_token\0"
printf "dir1/modified\0"
EOF
@@ -432,7 +437,7 @@ test_expect_success 'status succeeds with sparse index' '
# This one modifies outside the sparse-checkout definition
# and hence we expect to expand the sparse-index.
- write_script .git/hooks/fsmonitor-test <<-\EOF &&
+ test_hook --clobber fsmonitor-test <<-\EOF &&
printf "last_update_token\0"
printf "dir1a/modified\0"
EOF
diff --git a/t/t7520-ignored-hook-warning.sh b/t/t7520-ignored-hook-warning.sh
index 634fb7f..dc57526 100755
--- a/t/t7520-ignored-hook-warning.sh
+++ b/t/t7520-ignored-hook-warning.sh
@@ -5,10 +5,7 @@ test_description='ignored hook warning'
. ./test-lib.sh
test_expect_success setup '
- hookdir="$(git rev-parse --git-dir)/hooks" &&
- hook="$hookdir/pre-commit" &&
- mkdir -p "$hookdir" &&
- write_script "$hook" <<-\EOF
+ test_hook --setup pre-commit <<-\EOF
exit 0
EOF
'
@@ -19,20 +16,20 @@ test_expect_success 'no warning if hook is not ignored' '
'
test_expect_success POSIXPERM 'warning if hook is ignored' '
- chmod -x "$hook" &&
+ test_hook --disable pre-commit &&
git commit --allow-empty -m "even more" 2>message &&
test_i18ngrep -e "hook was ignored" message
'
test_expect_success POSIXPERM 'no warning if advice.ignoredHook set to false' '
test_config advice.ignoredHook false &&
- chmod -x "$hook" &&
+ test_hook --disable pre-commit &&
git commit --allow-empty -m "even more" 2>message &&
test_i18ngrep ! -e "hook was ignored" message
'
test_expect_success 'no warning if unset advice.ignoredHook and hook removed' '
- rm -f "$hook" &&
+ test_hook --remove pre-commit &&
test_unconfig advice.ignoredHook &&
git commit --allow-empty -m "even more" 2>message &&
test_i18ngrep ! -e "hook was ignored" message
diff --git a/t/t7527-builtin-fsmonitor.sh b/t/t7527-builtin-fsmonitor.sh
new file mode 100755
index 0000000..bd0c952
--- /dev/null
+++ b/t/t7527-builtin-fsmonitor.sh
@@ -0,0 +1,609 @@
+#!/bin/sh
+
+test_description='built-in file system watcher'
+
+. ./test-lib.sh
+
+if ! test_have_prereq FSMONITOR_DAEMON
+then
+ skip_all="fsmonitor--daemon is not supported on this platform"
+ test_done
+fi
+
+stop_daemon_delete_repo () {
+ r=$1 &&
+ test_might_fail git -C $r fsmonitor--daemon stop &&
+ rm -rf $1
+}
+
+start_daemon () {
+ r= tf= t2= tk= &&
+
+ while test "$#" -ne 0
+ do
+ case "$1" in
+ -C)
+ r="-C ${2?}"
+ shift
+ ;;
+ --tf)
+ tf="${2?}"
+ shift
+ ;;
+ --t2)
+ t2="${2?}"
+ shift
+ ;;
+ --tk)
+ tk="${2?}"
+ shift
+ ;;
+ -*)
+ BUG "error: unknown option: '$1'"
+ ;;
+ *)
+ BUG "error: unbound argument: '$1'"
+ ;;
+ esac
+ shift
+ done &&
+
+ (
+ if test -n "$tf"
+ then
+ GIT_TRACE_FSMONITOR="$tf"
+ export GIT_TRACE_FSMONITOR
+ fi &&
+
+ if test -n "$t2"
+ then
+ GIT_TRACE2_PERF="$t2"
+ export GIT_TRACE2_PERF
+ fi &&
+
+ if test -n "$tk"
+ then
+ GIT_TEST_FSMONITOR_TOKEN="$tk"
+ export GIT_TEST_FSMONITOR_TOKEN
+ fi &&
+
+ git $r fsmonitor--daemon start &&
+ git $r fsmonitor--daemon status
+ )
+}
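+
+# Example invocations (the same forms used in the tests below):
+#
+#   start_daemon -C test_explicit
+#   start_daemon --tf "$PWD/.git/trace"
+#   start_daemon -C test_flush --tf "$PWD/.git/trace_daemon" --tk true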
+
+# Is a Trace2 data event present with the given category and key?
+# We do not care what the value is.
+#
+have_t2_data_event () {
+ c=$1 &&
+ k=$2 &&
+
+ grep -e '"event":"data".*"category":"'"$c"'".*"key":"'"$k"'"'
+}
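+
+# A rough sketch of the kind of Trace2 event line this is meant to match
+# (hypothetical example; the exact fields and their order may vary):
+#
+#   {"event":"data", ... ,"category":"fsm_client","key":"query/response-length","value":"42"}
+#
+# so `have_t2_data_event fsm_client query/response-length <trace` succeeds
+# whenever such a record was logged.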
+
+test_expect_success 'explicit daemon start and stop' '
+ test_when_finished "stop_daemon_delete_repo test_explicit" &&
+
+ git init test_explicit &&
+ start_daemon -C test_explicit &&
+
+ git -C test_explicit fsmonitor--daemon stop &&
+ test_must_fail git -C test_explicit fsmonitor--daemon status
+'
+
+test_expect_success 'implicit daemon start' '
+ test_when_finished "stop_daemon_delete_repo test_implicit" &&
+
+ git init test_implicit &&
+ test_must_fail git -C test_implicit fsmonitor--daemon status &&
+
+ # query will implicitly start the daemon.
+ #
+ # for test-script simplicity, we send a V1 timestamp rather than
+ # a V2 token. either way, the daemon response to any query contains
+ # a new V2 token. (the daemon may complain that we sent a V1 request,
+ # but this test case is only concerned with whether the daemon was
+ # implicitly started.)
+
+ GIT_TRACE2_EVENT="$PWD/.git/trace" \
+ test-tool -C test_implicit fsmonitor-client query --token 0 >actual &&
+ nul_to_q <actual >actual.filtered &&
+ grep "builtin:" actual.filtered &&
+
+ # confirm that a daemon was started in the background.
+ #
+ # since the mechanism for starting the background daemon is platform
+ # dependent, just confirm that the foreground command received a
+ # response from the daemon.
+
+ have_t2_data_event fsm_client query/response-length <.git/trace &&
+
+ git -C test_implicit fsmonitor--daemon status &&
+ git -C test_implicit fsmonitor--daemon stop &&
+ test_must_fail git -C test_implicit fsmonitor--daemon status
+'
+
+test_expect_success 'implicit daemon stop (delete .git)' '
+ test_when_finished "stop_daemon_delete_repo test_implicit_1" &&
+
+ git init test_implicit_1 &&
+
+ start_daemon -C test_implicit_1 &&
+
+ # deleting the .git directory will implicitly stop the daemon.
+ rm -rf test_implicit_1/.git &&
+
+ # [1] Create an empty .git directory so that the following Git
+ # command will stay relative to the `-C` directory.
+ #
+ # Without this, the Git command will override the requested
+ # -C argument and crawl out to the containing Git source tree.
+ # This would make the test result dependent upon whether we
+ # were using fsmonitor on our development worktree.
+ #
+ sleep 1 &&
+ mkdir test_implicit_1/.git &&
+
+ test_must_fail git -C test_implicit_1 fsmonitor--daemon status
+'
+
+test_expect_success 'implicit daemon stop (rename .git)' '
+ test_when_finished "stop_daemon_delete_repo test_implicit_2" &&
+
+ git init test_implicit_2 &&
+
+ start_daemon -C test_implicit_2 &&
+
+ # renaming the .git directory will implicitly stop the daemon.
+ mv test_implicit_2/.git test_implicit_2/.xxx &&
+
+ # See [1] above.
+ #
+ sleep 1 &&
+ mkdir test_implicit_2/.git &&
+
+ test_must_fail git -C test_implicit_2 fsmonitor--daemon status
+'
+
+test_expect_success 'cannot start multiple daemons' '
+ test_when_finished "stop_daemon_delete_repo test_multiple" &&
+
+ git init test_multiple &&
+
+ start_daemon -C test_multiple &&
+
+ test_must_fail git -C test_multiple fsmonitor--daemon start 2>actual &&
+ grep "fsmonitor--daemon is already running" actual &&
+
+ git -C test_multiple fsmonitor--daemon stop &&
+ test_must_fail git -C test_multiple fsmonitor--daemon status
+'
+
+# These tests use the main repo in the trash directory
+
+test_expect_success 'setup' '
+ >tracked &&
+ >modified &&
+ >delete &&
+ >rename &&
+ mkdir dir1 &&
+ >dir1/tracked &&
+ >dir1/modified &&
+ >dir1/delete &&
+ >dir1/rename &&
+ mkdir dir2 &&
+ >dir2/tracked &&
+ >dir2/modified &&
+ >dir2/delete &&
+ >dir2/rename &&
+ mkdir dirtorename &&
+ >dirtorename/a &&
+ >dirtorename/b &&
+
+ cat >.gitignore <<-\EOF &&
+ .gitignore
+ expect*
+ actual*
+ flush*
+ trace*
+ EOF
+
+ git -c core.fsmonitor=false add . &&
+ test_tick &&
+ git -c core.fsmonitor=false commit -m initial &&
+
+ git config core.fsmonitor true
+'
+
+# The test already explicitly stopped (or tried to stop) the daemon.
+# This is here in case something else fails first.
+#
+redundant_stop_daemon () {
+ test_might_fail git fsmonitor--daemon stop
+}
+
+test_expect_success 'update-index implicitly starts daemon' '
+ test_when_finished redundant_stop_daemon &&
+
+ test_must_fail git fsmonitor--daemon status &&
+
+ GIT_TRACE2_EVENT="$PWD/.git/trace_implicit_1" \
+ git update-index --fsmonitor &&
+
+ git fsmonitor--daemon status &&
+ test_might_fail git fsmonitor--daemon stop &&
+
+ # Confirm that the trace2 log contains a record of the
+ # daemon starting.
+ test_subcommand git fsmonitor--daemon start <.git/trace_implicit_1
+'
+
+test_expect_success 'status implicitly starts daemon' '
+ test_when_finished redundant_stop_daemon &&
+
+ test_must_fail git fsmonitor--daemon status &&
+
+ GIT_TRACE2_EVENT="$PWD/.git/trace_implicit_2" \
+ git status >actual &&
+
+ git fsmonitor--daemon status &&
+ test_might_fail git fsmonitor--daemon stop &&
+
+ # Confirm that the trace2 log contains a record of the
+ # daemon starting.
+ test_subcommand git fsmonitor--daemon start <.git/trace_implicit_2
+'
+
+edit_files () {
+ echo 1 >modified &&
+ echo 2 >dir1/modified &&
+ echo 3 >dir2/modified &&
+ >dir1/untracked
+}
+
+delete_files () {
+ rm -f delete &&
+ rm -f dir1/delete &&
+ rm -f dir2/delete
+}
+
+create_files () {
+ echo 1 >new &&
+ echo 2 >dir1/new &&
+ echo 3 >dir2/new
+}
+
+rename_files () {
+ mv rename renamed &&
+ mv dir1/rename dir1/renamed &&
+ mv dir2/rename dir2/renamed
+}
+
+file_to_directory () {
+ rm -f delete &&
+ mkdir delete &&
+ echo 1 >delete/new
+}
+
+directory_to_file () {
+ rm -rf dir1 &&
+ echo 1 >dir1
+}
+
+# The next few test cases confirm that our fsmonitor daemon sees each type
+# of OS filesystem notification that we care about. At this layer we just
+# ensure we are getting the OS notifications and do not try to confirm what
+# is reported by `git status`.
+#
+# We run a simple query after modifying the filesystem just to introduce
+# a bit of a delay so that the trace logging from the daemon has time to
+# get flushed to disk.
+#
+# We `reset` and `clean` at the bottom of each test (and before stopping the
+# daemon) because these commands might implicitly restart the daemon.
+
+clean_up_repo_and_stop_daemon () {
+ git reset --hard HEAD &&
+ git clean -fd &&
+ test_might_fail git fsmonitor--daemon stop &&
+ rm -f .git/trace
+}
+
+test_expect_success 'edit some files' '
+ test_when_finished clean_up_repo_and_stop_daemon &&
+
+ start_daemon --tf "$PWD/.git/trace" &&
+
+ edit_files &&
+
+ test-tool fsmonitor-client query --token 0 &&
+
+ grep "^event: dir1/modified$" .git/trace &&
+ grep "^event: dir2/modified$" .git/trace &&
+ grep "^event: modified$" .git/trace &&
+ grep "^event: dir1/untracked$" .git/trace
+'
+
+test_expect_success 'create some files' '
+ test_when_finished clean_up_repo_and_stop_daemon &&
+
+ start_daemon --tf "$PWD/.git/trace" &&
+
+ create_files &&
+
+ test-tool fsmonitor-client query --token 0 &&
+
+ grep "^event: dir1/new$" .git/trace &&
+ grep "^event: dir2/new$" .git/trace &&
+ grep "^event: new$" .git/trace
+'
+
+test_expect_success 'delete some files' '
+ test_when_finished clean_up_repo_and_stop_daemon &&
+
+ start_daemon --tf "$PWD/.git/trace" &&
+
+ delete_files &&
+
+ test-tool fsmonitor-client query --token 0 &&
+
+ grep "^event: dir1/delete$" .git/trace &&
+ grep "^event: dir2/delete$" .git/trace &&
+ grep "^event: delete$" .git/trace
+'
+
+test_expect_success 'rename some files' '
+ test_when_finished clean_up_repo_and_stop_daemon &&
+
+ start_daemon --tf "$PWD/.git/trace" &&
+
+ rename_files &&
+
+ test-tool fsmonitor-client query --token 0 &&
+
+ grep "^event: dir1/rename$" .git/trace &&
+ grep "^event: dir2/rename$" .git/trace &&
+ grep "^event: rename$" .git/trace &&
+ grep "^event: dir1/renamed$" .git/trace &&
+ grep "^event: dir2/renamed$" .git/trace &&
+ grep "^event: renamed$" .git/trace
+'
+
+test_expect_success 'rename directory' '
+ test_when_finished clean_up_repo_and_stop_daemon &&
+
+ start_daemon --tf "$PWD/.git/trace" &&
+
+ mv dirtorename dirrenamed &&
+
+ test-tool fsmonitor-client query --token 0 &&
+
+ grep "^event: dirtorename/*$" .git/trace &&
+ grep "^event: dirrenamed/*$" .git/trace
+'
+
+test_expect_success 'file changes to directory' '
+ test_when_finished clean_up_repo_and_stop_daemon &&
+
+ start_daemon --tf "$PWD/.git/trace" &&
+
+ file_to_directory &&
+
+ test-tool fsmonitor-client query --token 0 &&
+
+ grep "^event: delete$" .git/trace &&
+ grep "^event: delete/new$" .git/trace
+'
+
+test_expect_success 'directory changes to a file' '
+ test_when_finished clean_up_repo_and_stop_daemon &&
+
+ start_daemon --tf "$PWD/.git/trace" &&
+
+ directory_to_file &&
+
+ test-tool fsmonitor-client query --token 0 &&
+
+ grep "^event: dir1$" .git/trace
+'
+
+# The next few test cases exercise the token-resync code. When the filesystem
+# drops events (because of filesystem velocity or because the daemon isn't
+# polling fast enough), we need to discard the cached data (relative to the
+# current token) and start collecting events under a new token.
+#
+# The 'test-tool fsmonitor-client flush' command can be used to send a
+# "flush" message to a running daemon and ask it to do a flush/resync.
+
+test_expect_success 'flush cached data' '
+ test_when_finished "stop_daemon_delete_repo test_flush" &&
+
+ git init test_flush &&
+
+ start_daemon -C test_flush --tf "$PWD/.git/trace_daemon" --tk true &&
+
+ # The daemon should have an initial token with no events in _0 and
+	# then a few (probably a platform-specific number of) events in _1.
+ # These should both have the same <token_id>.
+
+ test-tool -C test_flush fsmonitor-client query --token "builtin:test_00000001:0" >actual_0 &&
+ nul_to_q <actual_0 >actual_q0 &&
+
+ >test_flush/file_1 &&
+ >test_flush/file_2 &&
+
+ test-tool -C test_flush fsmonitor-client query --token "builtin:test_00000001:0" >actual_1 &&
+ nul_to_q <actual_1 >actual_q1 &&
+
+ grep "file_1" actual_q1 &&
+
+ # Force a flush. This will change the <token_id>, reset the <seq_nr>, and
+ # flush the file data. Then create some events and ensure that the file
+ # again appears in the cache. It should have the new <token_id>.
+
+ test-tool -C test_flush fsmonitor-client flush >flush_0 &&
+ nul_to_q <flush_0 >flush_q0 &&
+ grep "^builtin:test_00000002:0Q/Q$" flush_q0 &&
+
+ test-tool -C test_flush fsmonitor-client query --token "builtin:test_00000002:0" >actual_2 &&
+ nul_to_q <actual_2 >actual_q2 &&
+
+ grep "^builtin:test_00000002:0Q$" actual_q2 &&
+
+ >test_flush/file_3 &&
+
+ test-tool -C test_flush fsmonitor-client query --token "builtin:test_00000002:0" >actual_3 &&
+ nul_to_q <actual_3 >actual_q3 &&
+
+ grep "file_3" actual_q3
+'
+
+# The next few test cases create repos where the .git directory is NOT
+# inside the working directory. That is, where .git is a file
+# that points to a directory elsewhere. This happens for submodules and
+# non-primary worktrees.
+
+test_expect_success 'setup worktree base' '
+ git init wt-base &&
+ echo 1 >wt-base/file1 &&
+ git -C wt-base add file1 &&
+ git -C wt-base commit -m "c1"
+'
+
+test_expect_success 'worktree with .git file' '
+ git -C wt-base worktree add ../wt-secondary &&
+
+ start_daemon -C wt-secondary \
+ --tf "$PWD/trace_wt_secondary" \
+ --t2 "$PWD/trace2_wt_secondary" &&
+
+ git -C wt-secondary fsmonitor--daemon stop &&
+ test_must_fail git -C wt-secondary fsmonitor--daemon status
+'
+
+# NEEDSWORK: Repeat one of the "edit" tests on wt-secondary and
+# confirm that we get the same events and behavior -- that is, that
+# fsmonitor--daemon correctly watches BOTH the working directory and
+# the external GITDIR directory and behaves the same as when ".git"
+# is a directory inside the working directory.
+
+test_expect_success 'cleanup worktrees' '
+ stop_daemon_delete_repo wt-secondary &&
+ stop_daemon_delete_repo wt-base
+'
+
+# The next few tests perform arbitrary/contrived file operations and
+# confirm that status is correct. That is, that the data (or lack of
+# data) from fsmonitor doesn't cause incorrect results, whether or not
+# the untracked-cache is enabled.
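+#
+# Roughly: the matrix below runs each of the file-operation helpers defined
+# earlier (edit_files, delete_files, create_files, rename_files,
+# file_to_directory, directory_to_file) for every combination of
+# untracked-cache on/off and fsmonitor on/off, using the [uc:false][fsm:false]
+# run to capture the expected "git status --porcelain=v1" output that the
+# other combinations are compared against.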
+
+test_lazy_prereq UNTRACKED_CACHE '
+ git update-index --test-untracked-cache
+'
+
+test_expect_success 'Matrix: setup for untracked-cache,fsmonitor matrix' '
+ test_unconfig core.fsmonitor &&
+ git update-index --no-fsmonitor &&
+ test_might_fail git fsmonitor--daemon stop
+'
+
+matrix_clean_up_repo () {
+ git reset --hard HEAD &&
+ git clean -fd
+}
+
+matrix_try () {
+ uc=$1 &&
+ fsm=$2 &&
+ fn=$3 &&
+
+ if test $uc = true && test $fsm = false
+ then
+ # The untracked-cache is buggy when FSMonitor is
+ # DISABLED, so skip the tests for this matrix
+ # combination.
+ #
+ # We've observed random, occasional test failures on
+ # Windows and MacOS when the UC is turned on and FSM
+ # is turned off. These are rare, but they do happen
+ # indicating that it is probably a race condition within
+ # the untracked cache itself.
+ #
+ # It usually happens when a test does F/D trickery and
+ # then the NEXT test fails because of extra status
+ # output from stale UC data from the previous test.
+ #
+ # Since FSMonitor is not involved in the error, skip
+ # the tests for this matrix combination.
+ #
+ return 0
+ fi &&
+
+ test_expect_success "Matrix[uc:$uc][fsm:$fsm] $fn" '
+ matrix_clean_up_repo &&
+ $fn &&
+ if test $uc = false && test $fsm = false
+ then
+ git status --porcelain=v1 >.git/expect.$fn
+ else
+ git status --porcelain=v1 >.git/actual.$fn &&
+ test_cmp .git/expect.$fn .git/actual.$fn
+ fi
+ '
+}
+
+uc_values="false"
+test_have_prereq UNTRACKED_CACHE && uc_values="false true"
+for uc_val in $uc_values
+do
+ if test $uc_val = false
+ then
+ test_expect_success "Matrix[uc:$uc_val] disable untracked cache" '
+ git config core.untrackedcache false &&
+ git update-index --no-untracked-cache
+ '
+ else
+ test_expect_success "Matrix[uc:$uc_val] enable untracked cache" '
+ git config core.untrackedcache true &&
+ git update-index --untracked-cache
+ '
+ fi
+
+ fsm_values="false true"
+ for fsm_val in $fsm_values
+ do
+ if test $fsm_val = false
+ then
+ test_expect_success "Matrix[uc:$uc_val][fsm:$fsm_val] disable fsmonitor" '
+ test_unconfig core.fsmonitor &&
+ git update-index --no-fsmonitor &&
+ test_might_fail git fsmonitor--daemon stop
+ '
+ else
+ test_expect_success "Matrix[uc:$uc_val][fsm:$fsm_val] enable fsmonitor" '
+ git config core.fsmonitor true &&
+ git fsmonitor--daemon start &&
+ git update-index --fsmonitor
+ '
+ fi
+
+ matrix_try $uc_val $fsm_val edit_files
+ matrix_try $uc_val $fsm_val delete_files
+ matrix_try $uc_val $fsm_val create_files
+ matrix_try $uc_val $fsm_val rename_files
+ matrix_try $uc_val $fsm_val file_to_directory
+ matrix_try $uc_val $fsm_val directory_to_file
+
+ if test $fsm_val = true
+ then
+ test_expect_success "Matrix[uc:$uc_val][fsm:$fsm_val] disable fsmonitor at end" '
+ test_unconfig core.fsmonitor &&
+ git update-index --no-fsmonitor &&
+ test_might_fail git fsmonitor--daemon stop
+ '
+ fi
+ done
+done
+
+test_done
diff --git a/t/t7700-repack.sh b/t/t7700-repack.sh
index e489869..ca45c4c 100755
--- a/t/t7700-repack.sh
+++ b/t/t7700-repack.sh
@@ -312,16 +312,13 @@ test_expect_success 'cleans up MIDX when appropriate' '
checksum=$(midx_checksum $objdir) &&
test_path_is_file $midx &&
test_path_is_file $midx-$checksum.bitmap &&
- test_path_is_file $midx-$checksum.rev &&
test_commit repack-3 &&
GIT_TEST_MULTI_PACK_INDEX=0 git repack -Adb --write-midx &&
test_path_is_file $midx &&
test_path_is_missing $midx-$checksum.bitmap &&
- test_path_is_missing $midx-$checksum.rev &&
test_path_is_file $midx-$(midx_checksum $objdir).bitmap &&
- test_path_is_file $midx-$(midx_checksum $objdir).rev &&
test_commit repack-4 &&
GIT_TEST_MULTI_PACK_INDEX=0 git repack -Adb &&
@@ -354,7 +351,6 @@ test_expect_success '--write-midx with preferred bitmap tips' '
test_line_count = 1 before &&
rm -fr $midx-$(midx_checksum $objdir).bitmap &&
- rm -fr $midx-$(midx_checksum $objdir).rev &&
rm -fr $midx &&
# instead of constructing the snapshot ourselves (c.f., the test
@@ -373,10 +369,61 @@ test_expect_success '--write-midx with preferred bitmap tips' '
)
'
+# The first argument is the name of a file that
+# contains the name of a .idx file. Print the
+# list of objects in that .idx file to stdout.
+get_sorted_objects_from_pack () {
+ git show-index <$(cat "$1") >raw &&
+ cut -d" " -f2 raw
+}
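+
+# Note: no explicit sort is needed here; the object table in a .idx file is
+# ordered by object name, so "git show-index" should already emit the entries
+# in sorted order (which the "comm" invocations below rely on).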
+
test_expect_success '--write-midx -b packs non-kept objects' '
- GIT_TRACE2_EVENT="$(pwd)/trace.txt" \
- git repack --write-midx -a -b &&
- test_subcommand_inexact git pack-objects --honor-pack-keep <trace.txt
+ git init repo &&
+ test_when_finished "rm -fr repo" &&
+ (
+ cd repo &&
+
+ # Create a kept pack-file
+ test_commit base &&
+ git repack -ad &&
+ find $objdir/pack -name "*.idx" >before &&
+ test_line_count = 1 before &&
+ before_name=$(cat before) &&
+ >${before_name%.idx}.keep &&
+
+ # Create a non-kept pack-file
+ test_commit other &&
+ git repack &&
+
+ # Create loose objects
+ test_commit loose &&
+
+ # Repack everything
+ git repack --write-midx -a -b -d &&
+
+ # There should be two pack-files now, the
+ # old, kept pack and the new, non-kept pack.
+ find $objdir/pack -name "*.idx" | sort >after &&
+ test_line_count = 2 after &&
+ find $objdir/pack -name "*.keep" >kept &&
+ kept_name=$(cat kept) &&
+ echo ${kept_name%.keep}.idx >kept-idx &&
+ test_cmp before kept-idx &&
+
+ # Get object list from the kept pack.
+ get_sorted_objects_from_pack before >old.objects &&
+
+ # Get object list from the one non-kept pack-file
+ comm -13 before after >new-pack &&
+ test_line_count = 1 new-pack &&
+ get_sorted_objects_from_pack new-pack >new.objects &&
+
+ # None of the objects in the new pack should
+ # exist within the kept pack.
+ comm -12 old.objects new.objects >shared.objects &&
+ test_must_be_empty shared.objects
+ )
'
test_expect_success TTY '--quiet disables progress' '
@@ -385,4 +432,54 @@ test_expect_success TTY '--quiet disables progress' '
test_must_be_empty stderr
'
+test_expect_success 'setup for update-server-info' '
+ git init update-server-info &&
+ test_commit -C update-server-info message
+'
+
+test_server_info_present () {
+ test_path_is_file update-server-info/.git/objects/info/packs &&
+ test_path_is_file update-server-info/.git/info/refs
+}
+
+test_server_info_missing () {
+ test_path_is_missing update-server-info/.git/objects/info/packs &&
+ test_path_is_missing update-server-info/.git/info/refs
+}
+
+test_server_info_cleanup () {
+ rm -f update-server-info/.git/objects/info/packs update-server-info/.git/info/refs &&
+ test_server_info_missing
+}
+
+test_expect_success 'updates server info by default' '
+ test_server_info_cleanup &&
+ git -C update-server-info repack &&
+ test_server_info_present
+'
+
+test_expect_success '-n skips updating server info' '
+ test_server_info_cleanup &&
+ git -C update-server-info repack -n &&
+ test_server_info_missing
+'
+
+test_expect_success 'repack.updateServerInfo=true updates server info' '
+ test_server_info_cleanup &&
+ git -C update-server-info -c repack.updateServerInfo=true repack &&
+ test_server_info_present
+'
+
+test_expect_success 'repack.updateServerInfo=false skips updating server info' '
+ test_server_info_cleanup &&
+ git -C update-server-info -c repack.updateServerInfo=false repack &&
+ test_server_info_missing
+'
+
+test_expect_success '-n overrides repack.updateServerInfo=true' '
+ test_server_info_cleanup &&
+ git -C update-server-info -c repack.updateServerInfo=true repack -n &&
+ test_server_info_missing
+'
+
test_done
diff --git a/t/t7810-grep.sh b/t/t7810-grep.sh
index 424c31c..6935601 100755
--- a/t/t7810-grep.sh
+++ b/t/t7810-grep.sh
@@ -98,6 +98,37 @@ test_expect_success 'grep should not segfault with a bad input' '
test_invalid_grep_expression --and -e A
+test_pattern_type () {
+ H=$1 &&
+ HC=$2 &&
+ L=$3 &&
+ type=$4 &&
+ shift 4 &&
+
+ expected_str= &&
+ case "$type" in
+ BRE)
+ expected_str="${HC}ab:a+bc"
+ ;;
+ ERE)
+ expected_str="${HC}ab:abc"
+ ;;
+ FIX)
+ expected_str="${HC}ab:a+b*c"
+ ;;
+ *)
+ BUG "unknown pattern type '$type'"
+ ;;
+ esac &&
+ config_str="$@" &&
+
+ test_expect_success "grep $L with '$config_str' interpreted as $type" '
+ echo $expected_str >expected &&
+ git $config_str grep "a+b*c" $H ab >actual &&
+ test_cmp expected actual
+ '
+}
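+
+# Example (one of the calls below): assert that with "grep.extendedRegexp=true"
+# the pattern "a+b*c" is treated as an ERE and matches "abc":
+#
+#   test_pattern_type "$H" "$HC" "$L" ERE -c grep.extendedRegexp=true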
+
for H in HEAD ''
do
case "$H" in
@@ -393,35 +424,13 @@ do
git grep --no-recursive -n -e vvv $H -- t . >actual &&
test_cmp expected actual
'
- test_expect_success "grep $L with grep.extendedRegexp=false" '
- echo "${HC}ab:a+bc" >expected &&
- git -c grep.extendedRegexp=false grep "a+b*c" $H ab >actual &&
- test_cmp expected actual
- '
- test_expect_success "grep $L with grep.extendedRegexp=true" '
- echo "${HC}ab:abc" >expected &&
- git -c grep.extendedRegexp=true grep "a+b*c" $H ab >actual &&
- test_cmp expected actual
- '
- test_expect_success "grep $L with grep.patterntype=basic" '
- echo "${HC}ab:a+bc" >expected &&
- git -c grep.patterntype=basic grep "a+b*c" $H ab >actual &&
- test_cmp expected actual
- '
-
- test_expect_success "grep $L with grep.patterntype=extended" '
- echo "${HC}ab:abc" >expected &&
- git -c grep.patterntype=extended grep "a+b*c" $H ab >actual &&
- test_cmp expected actual
- '
-
- test_expect_success "grep $L with grep.patterntype=fixed" '
- echo "${HC}ab:a+b*c" >expected &&
- git -c grep.patterntype=fixed grep "a+b*c" $H ab >actual &&
- test_cmp expected actual
- '
+ test_pattern_type "$H" "$HC" "$L" BRE -c grep.extendedRegexp=false
+ test_pattern_type "$H" "$HC" "$L" ERE -c grep.extendedRegexp=true
+ test_pattern_type "$H" "$HC" "$L" BRE -c grep.patternType=basic
+ test_pattern_type "$H" "$HC" "$L" ERE -c grep.patternType=extended
+ test_pattern_type "$H" "$HC" "$L" FIX -c grep.patternType=fixed
test_expect_success PCRE "grep $L with grep.patterntype=perl" '
echo "${HC}ab:a+b*c" >expected &&
@@ -433,59 +442,76 @@ do
test_must_fail git -c grep.patterntype=perl grep "foo.*bar"
'
- test_expect_success "grep $L with grep.patternType=default and grep.extendedRegexp=true" '
- echo "${HC}ab:abc" >expected &&
- git \
- -c grep.patternType=default \
- -c grep.extendedRegexp=true \
- grep "a+b*c" $H ab >actual &&
- test_cmp expected actual
- '
-
- test_expect_success "grep $L with grep.extendedRegexp=true and grep.patternType=default" '
- echo "${HC}ab:abc" >expected &&
- git \
- -c grep.extendedRegexp=true \
- -c grep.patternType=default \
- grep "a+b*c" $H ab >actual &&
- test_cmp expected actual
- '
-
- test_expect_success "grep $L with grep.patternType=extended and grep.extendedRegexp=false" '
- echo "${HC}ab:abc" >expected &&
- git \
- -c grep.patternType=extended \
- -c grep.extendedRegexp=false \
- grep "a+b*c" $H ab >actual &&
- test_cmp expected actual
- '
-
- test_expect_success "grep $L with grep.patternType=basic and grep.extendedRegexp=true" '
- echo "${HC}ab:a+bc" >expected &&
- git \
- -c grep.patternType=basic \
- -c grep.extendedRegexp=true \
- grep "a+b*c" $H ab >actual &&
- test_cmp expected actual
- '
-
- test_expect_success "grep $L with grep.extendedRegexp=false and grep.patternType=extended" '
- echo "${HC}ab:abc" >expected &&
- git \
- -c grep.extendedRegexp=false \
- -c grep.patternType=extended \
- grep "a+b*c" $H ab >actual &&
- test_cmp expected actual
- '
-
- test_expect_success "grep $L with grep.extendedRegexp=true and grep.patternType=basic" '
- echo "${HC}ab:a+bc" >expected &&
- git \
- -c grep.extendedRegexp=true \
- -c grep.patternType=basic \
- grep "a+b*c" $H ab >actual &&
- test_cmp expected actual
- '
+ test_pattern_type "$H" "$HC" "$L" ERE \
+ -c grep.patternType=default \
+ -c grep.extendedRegexp=true
+ test_pattern_type "$H" "$HC" "$L" ERE \
+ -c grep.extendedRegexp=true \
+ -c grep.patternType=default
+ test_pattern_type "$H" "$HC" "$L" ERE \
+ -c grep.patternType=extended \
+ -c grep.extendedRegexp=false
+ test_pattern_type "$H" "$HC" "$L" BRE \
+ -c grep.patternType=basic \
+ -c grep.extendedRegexp=true
+ test_pattern_type "$H" "$HC" "$L" ERE \
+ -c grep.extendedRegexp=false \
+ -c grep.patternType=extended
+ test_pattern_type "$H" "$HC" "$L" BRE \
+ -c grep.extendedRegexp=true \
+ -c grep.patternType=basic
+
+ # grep.extendedRegexp is last-one-wins
+ test_pattern_type "$H" "$HC" "$L" BRE \
+ -c grep.extendedRegexp=true \
+ -c grep.extendedRegexp=false
+
+ # grep.patternType=basic pays no attention to grep.extendedRegexp
+ test_pattern_type "$H" "$HC" "$L" BRE \
+ -c grep.extendedRegexp=true \
+ -c grep.patternType=basic \
+ -c grep.extendedRegexp=false
+
+ # grep.patternType=extended pays no attention to grep.extendedRegexp
+ test_pattern_type "$H" "$HC" "$L" ERE \
+ -c grep.extendedRegexp=true \
+ -c grep.patternType=extended \
+ -c grep.extendedRegexp=false
+
+ # grep.extendedRegexp is used with a last-one-wins grep.patternType=default
+ test_pattern_type "$H" "$HC" "$L" ERE \
+ -c grep.patternType=fixed \
+ -c grep.extendedRegexp=true \
+ -c grep.patternType=default
+
+ # grep.extendedRegexp is used with earlier grep.patternType=default
+ test_pattern_type "$H" "$HC" "$L" ERE \
+ -c grep.extendedRegexp=false \
+ -c grep.patternType=default \
+ -c grep.extendedRegexp=true
+
+ # grep.extendedRegexp is used with a last-one-loses grep.patternType=default
+ test_pattern_type "$H" "$HC" "$L" ERE \
+ -c grep.extendedRegexp=false \
+ -c grep.extendedRegexp=true \
+ -c grep.patternType=default
+
+ # grep.extendedRegexp and grep.patternType are both last-one-wins independently
+ test_pattern_type "$H" "$HC" "$L" BRE \
+ -c grep.patternType=default \
+ -c grep.extendedRegexp=true \
+ -c grep.patternType=basic
+
+ # grep.patternType=extended and grep.patternType=default
+ test_pattern_type "$H" "$HC" "$L" BRE \
+ -c grep.patternType=extended \
+ -c grep.patternType=default
+
+	# grep.patternType=[extended -> default -> fixed] (FIX)
+ test_pattern_type "$H" "$HC" "$L" FIX \
+ -c grep.patternType=extended \
+ -c grep.patternType=default \
+ -c grep.patternType=fixed
test_expect_success "grep --count $L" '
echo ${HC}ab:3 >expected &&
diff --git a/t/t7812-grep-icase-non-ascii.sh b/t/t7812-grep-icase-non-ascii.sh
index ca3f24f..9047d66 100755
--- a/t/t7812-grep-icase-non-ascii.sh
+++ b/t/t7812-grep-icase-non-ascii.sh
@@ -11,9 +11,19 @@ test_expect_success GETTEXT_LOCALE 'setup' '
export LC_ALL
'
-test_have_prereq GETTEXT_LOCALE &&
-test-tool regex "HALLÓ" "Halló" ICASE &&
-test_set_prereq REGEX_LOCALE
+test_expect_success GETTEXT_LOCALE 'setup REGEX_LOCALE prerequisite' '
+ # This "test-tool" invocation is identical...
+ if test-tool regex "HALLÓ" "Halló" ICASE
+ then
+ test_set_prereq REGEX_LOCALE
+ else
+
+ # ... to this one, but this way "test_must_fail" will
+ # tell a segfault or abort() from the regexec() test
+ # itself
+ test_must_fail test-tool regex "HALLÓ" "Halló" ICASE
+ fi
+'
test_expect_success REGEX_LOCALE 'grep literal string, no -F' '
git grep -i "TILRAUN: Halló Heimur!" &&
diff --git a/t/t7814-grep-recurse-submodules.sh b/t/t7814-grep-recurse-submodules.sh
index 058e5d0..a4476dc 100755
--- a/t/t7814-grep-recurse-submodules.sh
+++ b/t/t7814-grep-recurse-submodules.sh
@@ -544,4 +544,45 @@ test_expect_failure 'grep saves textconv cache in the appropriate repository' '
test_path_is_file "$sub_textconv_cache"
'
+test_expect_success 'grep partially-cloned submodule' '
+ # Set up clean superproject and submodule for partial cloning.
+ git init super &&
+ git init super/sub &&
+ (
+ cd super &&
+ test_commit --no-tag "Add file in superproject" \
+ super-file "Some content for super-file" &&
+ test_commit -C sub --no-tag "Add file in submodule" \
+ sub-file "Some content for sub-file" &&
+ git submodule add ./sub &&
+ git commit -m "Add other as submodule sub" &&
+ test_tick &&
+ test_commit -C sub --no-tag --append "Update file in submodule" \
+ sub-file "Some more content for sub-file" &&
+ git add sub &&
+ git commit -m "Update submodule" &&
+ test_tick &&
+ git config --local uploadpack.allowfilter 1 &&
+ git config --local uploadpack.allowanysha1inwant 1 &&
+ git -C sub config --local uploadpack.allowfilter 1 &&
+ git -C sub config --local uploadpack.allowanysha1inwant 1
+ ) &&
+ # Clone the superproject & submodule, then make sure we can lazy-fetch submodule objects.
+ git clone --filter=blob:none --also-filter-submodules \
+ --recurse-submodules "file://$(pwd)/super" partial &&
+ (
+ cd partial &&
+ cat >expect <<-\EOF &&
+ HEAD^:sub/sub-file:Some content for sub-file
+ HEAD^:super-file:Some content for super-file
+ EOF
+
+ GIT_TRACE2_EVENT="$(pwd)/trace2.log" git grep -e content \
+ --recurse-submodules HEAD^ >actual &&
+ test_cmp expect actual &&
+ # Verify that we actually fetched data from the promisor remote:
+ grep \"category\":\"promisor\",\"key\":\"fetch_count\",\"value\":\"1\" trace2.log
+ )
+'
+
test_done
diff --git a/t/t7817-grep-sparse-checkout.sh b/t/t7817-grep-sparse-checkout.sh
index 590b99b..eb59564 100755
--- a/t/t7817-grep-sparse-checkout.sh
+++ b/t/t7817-grep-sparse-checkout.sh
@@ -83,10 +83,13 @@ test_expect_success 'setup' '
# The test below covers a special case: the sparsity patterns exclude '/b' and
# sparse checkout is enabled, but the path exists in the working tree (e.g.
-# manually created after `git sparse-checkout init`). git grep should skip it.
+# manually created after `git sparse-checkout init`). Although b is marked
+# as SKIP_WORKTREE, git grep should notice it IS present in the worktree and
+# report it.
test_expect_success 'working tree grep honors sparse checkout' '
cat >expect <<-EOF &&
a:text
+ b:new-text
EOF
test_when_finished "rm -f b" &&
echo "new-text" >b &&
@@ -126,12 +129,16 @@ test_expect_success 'grep --cached searches entries with the SKIP_WORKTREE bit'
'
# Note that sub2/ is present in the worktree but it is excluded by the sparsity
-# patterns, so grep should not recurse into it.
+# patterns. We also explicitly mark it as SKIP_WORKTREE in case it got cleared
+# by previous git commands. Thus sub2 starts as SKIP_WORKTREE but since it is
+# present in the working tree, grep should recurse into it.
test_expect_success 'grep --recurse-submodules honors sparse checkout in submodule' '
cat >expect <<-EOF &&
a:text
sub/B/b:text
+ sub2/a:text
EOF
+ git update-index --skip-worktree sub2 &&
git grep --recurse-submodules "text" >actual &&
test_cmp expect actual
'
diff --git a/t/t8007-cat-file-textconv.sh b/t/t8007-cat-file-textconv.sh
index eacd49a..b067983 100755
--- a/t/t8007-cat-file-textconv.sh
+++ b/t/t8007-cat-file-textconv.sh
@@ -19,6 +19,48 @@ test_expect_success 'setup ' '
GIT_AUTHOR_NAME=Number2 git commit -a -m Second --date="2010-01-01 20:00:00"
'
+test_expect_success 'usage: <bad rev>' '
+ cat >expect <<-\EOF &&
+ fatal: Not a valid object name HEAD2
+ EOF
+ test_must_fail git cat-file --textconv HEAD2 2>actual &&
+ test_cmp expect actual
+'
+
+test_expect_success 'usage: <bad rev>:<bad path>' '
+ cat >expect <<-\EOF &&
+ fatal: invalid object name '\''HEAD2'\''.
+ EOF
+ test_must_fail git cat-file --textconv HEAD2:two.bin 2>actual &&
+ test_cmp expect actual
+'
+
+test_expect_success 'usage: <rev>:<bad path>' '
+ cat >expect <<-\EOF &&
+ fatal: path '\''two.bin'\'' does not exist in '\''HEAD'\''
+ EOF
+ test_must_fail git cat-file --textconv HEAD:two.bin 2>actual &&
+ test_cmp expect actual
+'
+
+
+test_expect_success 'usage: <rev> with no <path>' '
+ cat >expect <<-\EOF &&
+ fatal: <object>:<path> required, only <object> '\''HEAD'\'' given
+ EOF
+ test_must_fail git cat-file --textconv HEAD 2>actual &&
+ test_cmp expect actual
+'
+
+
+test_expect_success 'usage: <bad rev>:<good (in HEAD) path>' '
+ cat >expect <<-\EOF &&
+ fatal: invalid object name '\''HEAD2'\''.
+ EOF
+ test_must_fail git cat-file --textconv HEAD2:one.bin 2>actual &&
+ test_cmp expect actual
+'
+
cat >expected <<EOF
bin: test version 2
EOF
diff --git a/t/t9001-send-email.sh b/t/t9001-send-email.sh
index aa0c204..42694fe 100755
--- a/t/t9001-send-email.sh
+++ b/t/t9001-send-email.sh
@@ -539,7 +539,7 @@ test_expect_success $PREREQ "--validate respects relative core.hooksPath path" '
test_path_is_file my-hooks.ran &&
cat >expect <<-EOF &&
fatal: longline.patch: rejected by sendemail-validate hook
- fatal: command '"'"'my-hooks/sendemail-validate'"'"' died with exit code 1
+ fatal: command '"'"'git hook run --ignore-missing sendemail-validate -- <patch>'"'"' died with exit code 1
warning: no patches were sent
EOF
test_cmp expect actual
@@ -558,7 +558,7 @@ test_expect_success $PREREQ "--validate respects absolute core.hooksPath path" '
test_path_is_file my-hooks.ran &&
cat >expect <<-EOF &&
fatal: longline.patch: rejected by sendemail-validate hook
- fatal: command '"'"'$hooks_path/sendemail-validate'"'"' died with exit code 1
+ fatal: command '"'"'git hook run --ignore-missing sendemail-validate -- <patch>'"'"' died with exit code 1
warning: no patches were sent
EOF
test_cmp expect actual
@@ -2288,9 +2288,7 @@ test_expect_success $PREREQ 'cmdline in-reply-to used with --no-thread' '
'
test_expect_success $PREREQ 'invoke hook' '
- mkdir -p .git/hooks &&
-
- write_script .git/hooks/sendemail-validate <<-\EOF &&
+ test_hook sendemail-validate <<-\EOF &&
# test that we have the correct environment variable, pwd, and
# argument
case "$GIT_DIR" in
diff --git a/t/t9102-git-svn-deep-rmdir.sh b/t/t9102-git-svn-deep-rmdir.sh
index 7b2049c..946ef85 100755
--- a/t/t9102-git-svn-deep-rmdir.sh
+++ b/t/t9102-git-svn-deep-rmdir.sh
@@ -1,7 +1,6 @@
#!/bin/sh
test_description='git svn rmdir'
-TEST_PASSES_SANITIZE_LEAK=true
. ./lib-git-svn.sh
test_expect_success 'initialize repo' '
diff --git a/t/t9123-git-svn-rebuild-with-rewriteroot.sh b/t/t9123-git-svn-rebuild-with-rewriteroot.sh
index 3320b1f..ead4045 100755
--- a/t/t9123-git-svn-rebuild-with-rewriteroot.sh
+++ b/t/t9123-git-svn-rebuild-with-rewriteroot.sh
@@ -5,7 +5,6 @@
test_description='git svn respects rewriteRoot during rebuild'
-TEST_PASSES_SANITIZE_LEAK=true
. ./lib-git-svn.sh
mkdir import
diff --git a/t/t9128-git-svn-cmd-branch.sh b/t/t9128-git-svn-cmd-branch.sh
index 9871f5a..783e3ba 100755
--- a/t/t9128-git-svn-cmd-branch.sh
+++ b/t/t9128-git-svn-cmd-branch.sh
@@ -5,7 +5,6 @@
test_description='git svn partial-rebuild tests'
-TEST_PASSES_SANITIZE_LEAK=true
. ./lib-git-svn.sh
test_expect_success 'initialize svnrepo' '
diff --git a/t/t9167-git-svn-cmd-branch-subproject.sh b/t/t9167-git-svn-cmd-branch-subproject.sh
index d9fd111..d812843 100755
--- a/t/t9167-git-svn-cmd-branch-subproject.sh
+++ b/t/t9167-git-svn-cmd-branch-subproject.sh
@@ -5,7 +5,6 @@
test_description='git svn branch for subproject clones'
-TEST_PASSES_SANITIZE_LEAK=true
. ./lib-git-svn.sh
test_expect_success 'initialize svnrepo' '
diff --git a/t/t9502-gitweb-standalone-parse-output.sh b/t/t9502-gitweb-standalone-parse-output.sh
index 3167473..8cb582f 100755
--- a/t/t9502-gitweb-standalone-parse-output.sh
+++ b/t/t9502-gitweb-standalone-parse-output.sh
@@ -34,7 +34,7 @@ EOF
#
# This will check that gitweb HTTP header contains proposed filename
# as <basename> with '.tar' suffix added, and that generated tarfile
-# (gitweb message body) has <prefix> as prefix for al files in tarfile
+# (gitweb message body) has <prefix> as prefix for all files in tarfile
#
# <prefix> default to <basename>
check_snapshot () {
@@ -207,4 +207,17 @@ test_expect_success 'xss checks' '
xss "" "$TAG+"
'
+no_http_equiv_content_type() {
+ gitweb_run "$@" &&
+ ! grep -E "http-equiv=['\"]?content-type" gitweb.body
+}
+
+# See: <https://html.spec.whatwg.org/dev/semantics.html#attr-meta-http-equiv-content-type>
+test_expect_success 'no http-equiv="content-type" in XHTML' '
+ no_http_equiv_content_type &&
+ no_http_equiv_content_type "p=.git" &&
+ no_http_equiv_content_type "p=.git;a=log" &&
+ no_http_equiv_content_type "p=.git;a=tree"
+'
+
test_done
diff --git a/t/t9800-git-p4-basic.sh b/t/t9800-git-p4-basic.sh
index 806005a..8b30062 100755
--- a/t/t9800-git-p4-basic.sh
+++ b/t/t9800-git-p4-basic.sh
@@ -277,16 +277,21 @@ test_expect_success 'run hook p4-pre-submit before submit' '
git commit -m "add hello.txt" &&
git config git-p4.skipSubmitEdit true &&
git p4 submit --dry-run >out &&
- grep "Would apply" out &&
- mkdir -p .git/hooks &&
- write_script .git/hooks/p4-pre-submit <<-\EOF &&
- exit 0
- EOF
+ grep "Would apply" out
+ ) &&
+ test_hook -C "$git" p4-pre-submit <<-\EOF &&
+ exit 0
+ EOF
+ (
+ cd "$git" &&
git p4 submit --dry-run >out &&
- grep "Would apply" out &&
- write_script .git/hooks/p4-pre-submit <<-\EOF &&
- exit 1
- EOF
+ grep "Would apply" out
+ ) &&
+ test_hook -C "$git" --clobber p4-pre-submit <<-\EOF &&
+ exit 1
+ EOF
+ (
+ cd "$git" &&
test_must_fail git p4 submit --dry-run >errs 2>&1 &&
! grep "Would apply" errs
)
diff --git a/t/t9902-completion.sh b/t/t9902-completion.sh
index 98c6280..31526e6 100755
--- a/t/t9902-completion.sh
+++ b/t/t9902-completion.sh
@@ -1444,6 +1444,161 @@ test_expect_success 'git checkout - with --detach, complete only references' '
EOF
'
+test_expect_success 'setup sparse-checkout tests' '
+ # set up sparse-checkout repo
+ git init sparse-checkout &&
+ (
+ cd sparse-checkout &&
+ mkdir -p folder1/0/1 folder2/0 folder3 &&
+ touch folder1/0/1/t.txt &&
+ touch folder2/0/t.txt &&
+ touch folder3/t.txt &&
+ git add . &&
+ git commit -am "Initial commit"
+ )
+'
+
+test_expect_success 'sparse-checkout completes subcommands' '
+ test_completion "git sparse-checkout " <<-\EOF
+ list Z
+ init Z
+ set Z
+ add Z
+ reapply Z
+ disable Z
+ EOF
+'
+
+test_expect_success 'cone mode sparse-checkout completes directory names' '
+ # initialize sparse-checkout definitions
+ git -C sparse-checkout sparse-checkout set --cone folder1/0 folder3 &&
+
+ # test tab completion
+ (
+ cd sparse-checkout &&
+ test_completion "git sparse-checkout set f" <<-\EOF
+ folder1/
+ folder2/
+ folder3/
+ EOF
+ ) &&
+
+ (
+ cd sparse-checkout &&
+ test_completion "git sparse-checkout set folder1/" <<-\EOF
+ folder1/0/
+ EOF
+ ) &&
+
+ (
+ cd sparse-checkout &&
+ test_completion "git sparse-checkout set folder1/0/" <<-\EOF
+ folder1/0/1/
+ EOF
+ ) &&
+
+ (
+ cd sparse-checkout/folder1 &&
+ test_completion "git sparse-checkout add 0" <<-\EOF
+ 0/
+ EOF
+ )
+'
+
+test_expect_success 'cone mode sparse-checkout completes directory names with spaces and accents' '
+ # reset sparse-checkout
+ git -C sparse-checkout sparse-checkout disable &&
+ (
+ cd sparse-checkout &&
+ mkdir "directory with spaces" &&
+ mkdir "directory-with-áccent" &&
+ >"directory with spaces/randomfile" &&
+ >"directory-with-áccent/randomfile" &&
+ git add . &&
+ git commit -m "Add directory with spaces and directory with accent" &&
+ git sparse-checkout set --cone "directory with spaces" \
+ "directory-with-áccent" &&
+ test_completion "git sparse-checkout add dir" <<-\EOF &&
+ directory with spaces/
+ directory-with-áccent/
+ EOF
+ rm -rf "directory with spaces" &&
+ rm -rf "directory-with-áccent" &&
+ git add . &&
+ git commit -m "Remove directory with spaces and directory with accent"
+ )
+'
+
+# use FUNNYNAMES to avoid running on Windows, which doesn't permit tabs in paths
+test_expect_success FUNNYNAMES 'cone mode sparse-checkout completes directory names with tabs' '
+ # reset sparse-checkout
+ git -C sparse-checkout sparse-checkout disable &&
+ (
+ cd sparse-checkout &&
+ mkdir "$(printf "directory\twith\ttabs")" &&
+ >"$(printf "directory\twith\ttabs")/randomfile" &&
+ git add . &&
+ git commit -m "Add directory with tabs" &&
+ git sparse-checkout set --cone \
+ "$(printf "directory\twith\ttabs")" &&
+ test_completion "git sparse-checkout add dir" <<-\EOF &&
+ directory with tabs/
+ EOF
+ rm -rf "$(printf "directory\twith\ttabs")" &&
+ git add . &&
+ git commit -m "Remove directory with tabs"
+ )
+'
+
+# use FUNNYNAMES to avoid running on Windows, and !CYGWIN for Cygwin, as neither permits backslashes in paths
+test_expect_success FUNNYNAMES,!CYGWIN 'cone mode sparse-checkout completes directory names with backslashes' '
+ # reset sparse-checkout
+ git -C sparse-checkout sparse-checkout disable &&
+ (
+ cd sparse-checkout &&
+ mkdir "directory\with\backslashes" &&
+ >"directory\with\backslashes/randomfile" &&
+ git add . &&
+ git commit -m "Add directory with backslashes" &&
+ git sparse-checkout set --cone \
+ "directory\with\backslashes" &&
+ test_completion "git sparse-checkout add dir" <<-\EOF &&
+ directory\with\backslashes/
+ EOF
+ rm -rf "directory\with\backslashes" &&
+ git add . &&
+ git commit -m "Remove directory with backslashes"
+ )
+'
+
+test_expect_success 'non-cone mode sparse-checkout uses bash completion' '
+ # reset sparse-checkout repo to non-cone mode
+ git -C sparse-checkout sparse-checkout disable &&
+ git -C sparse-checkout sparse-checkout set --no-cone &&
+
+ (
+ cd sparse-checkout &&
+ # expected to be empty since we have not configured
+ # custom completion for non-cone mode
+ test_completion "git sparse-checkout set f" <<-\EOF
+
+ EOF
+ )
+'
+
+test_expect_success 'git sparse-checkout set --cone completes directory names' '
+ git -C sparse-checkout sparse-checkout disable &&
+
+ (
+ cd sparse-checkout &&
+ test_completion "git sparse-checkout set --cone f" <<-\EOF
+ folder1/
+ folder2/
+ folder3/
+ EOF
+ )
+'
+
test_expect_success 'git switch - with -d, complete all references' '
test_completion "git switch -d " <<-\EOF
HEAD Z
@@ -2396,27 +2551,33 @@ test_expect_success 'options with value' '
'
test_expect_success 'sourcing the completion script clears cached commands' '
- __git_compute_all_commands &&
- verbose test -n "$__git_all_commands" &&
- . "$GIT_BUILD_DIR/contrib/completion/git-completion.bash" &&
- verbose test -z "$__git_all_commands"
+ (
+ __git_compute_all_commands &&
+ verbose test -n "$__git_all_commands" &&
+ . "$GIT_BUILD_DIR/contrib/completion/git-completion.bash" &&
+ verbose test -z "$__git_all_commands"
+ )
'
test_expect_success 'sourcing the completion script clears cached merge strategies' '
- __git_compute_merge_strategies &&
- verbose test -n "$__git_merge_strategies" &&
- . "$GIT_BUILD_DIR/contrib/completion/git-completion.bash" &&
- verbose test -z "$__git_merge_strategies"
+ (
+ __git_compute_merge_strategies &&
+ verbose test -n "$__git_merge_strategies" &&
+ . "$GIT_BUILD_DIR/contrib/completion/git-completion.bash" &&
+ verbose test -z "$__git_merge_strategies"
+ )
'
test_expect_success 'sourcing the completion script clears cached --options' '
- __gitcomp_builtin checkout &&
- verbose test -n "$__gitcomp_builtin_checkout" &&
- __gitcomp_builtin notes_edit &&
- verbose test -n "$__gitcomp_builtin_notes_edit" &&
- . "$GIT_BUILD_DIR/contrib/completion/git-completion.bash" &&
- verbose test -z "$__gitcomp_builtin_checkout" &&
- verbose test -z "$__gitcomp_builtin_notes_edit"
+ (
+ __gitcomp_builtin checkout &&
+ verbose test -n "$__gitcomp_builtin_checkout" &&
+ __gitcomp_builtin notes_edit &&
+ verbose test -n "$__gitcomp_builtin_notes_edit" &&
+ . "$GIT_BUILD_DIR/contrib/completion/git-completion.bash" &&
+ verbose test -z "$__gitcomp_builtin_checkout" &&
+ verbose test -z "$__gitcomp_builtin_notes_edit"
+ )
'
test_expect_success 'option aliases are not shown by default' '
@@ -2424,12 +2585,45 @@ test_expect_success 'option aliases are not shown by default' '
'
test_expect_success 'option aliases are shown with GIT_COMPLETION_SHOW_ALL' '
- . "$GIT_BUILD_DIR/contrib/completion/git-completion.bash" &&
- GIT_COMPLETION_SHOW_ALL=1 && export GIT_COMPLETION_SHOW_ALL &&
- test_completion "git clone --recurs" <<-\EOF
- --recurse-submodules Z
- --recursive Z
- EOF
+ (
+ . "$GIT_BUILD_DIR/contrib/completion/git-completion.bash" &&
+ GIT_COMPLETION_SHOW_ALL=1 && export GIT_COMPLETION_SHOW_ALL &&
+ test_completion "git clone --recurs" <<-\EOF
+ --recurse-submodules Z
+ --recursive Z
+ EOF
+ )
+'
+
+test_expect_success 'plumbing commands are excluded without GIT_COMPLETION_SHOW_ALL_COMMANDS' '
+ (
+ . "$GIT_BUILD_DIR/contrib/completion/git-completion.bash" &&
+ sane_unset GIT_TESTING_PORCELAIN_COMMAND_LIST &&
+
+ # Just mainporcelain, not plumbing commands
+ run_completion "git c" &&
+ grep checkout out &&
+ ! grep cat-file out
+ )
+'
+
+test_expect_success 'all commands are shown with GIT_COMPLETION_SHOW_ALL_COMMANDS (also main non-builtin)' '
+ (
+ . "$GIT_BUILD_DIR/contrib/completion/git-completion.bash" &&
+ GIT_COMPLETION_SHOW_ALL_COMMANDS=1 &&
+ export GIT_COMPLETION_SHOW_ALL_COMMANDS &&
+ sane_unset GIT_TESTING_PORCELAIN_COMMAND_LIST &&
+
+ # Both mainporcelain and plumbing commands
+ run_completion "git c" &&
+ grep checkout out &&
+ grep cat-file out &&
+
+ # Check "gitk", a "main" command, but not a built-in + more plumbing
+ run_completion "git g" &&
+ grep gitk out &&
+ grep get-tar-commit-id out
+ )
'
test_expect_success '__git_complete' '
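The two completion tests above are driven by the new GIT_COMPLETION_SHOW_ALL_COMMANDS knob. Outside the test suite the variable is simply set and exported in the interactive shell that sources the completion script; a minimal sketch (the install path of the contrib script is an assumption):

	# also offer plumbing commands such as cat-file, not just porcelain
	export GIT_COMPLETION_SHOW_ALL_COMMANDS=1
	. ~/src/git/contrib/completion/git-completion.bash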
diff --git a/t/test-lib-functions.sh b/t/test-lib-functions.sh
index c3d38aa..93c0338 100644
--- a/t/test-lib-functions.sh
+++ b/t/test-lib-functions.sh
@@ -329,7 +329,7 @@ test_commit () {
else
$echo "${3-$1}" >"$indir$file"
fi &&
- git ${indir:+ -C "$indir"} add "$file" &&
+ git ${indir:+ -C "$indir"} add -- "$file" &&
if test -z "$notick"
then
test_tick
@@ -551,6 +551,82 @@ write_script () {
chmod +x "$1"
}
+# Usage: test_hook [options] <hook-name> <<-\EOF
+#
+# -C <dir>:
+# Run all git commands in directory <dir>
+# --setup
+# Set up a hook for subsequent tests, i.e. don't remove it in a
+# "test_when_finished"
+# --clobber
+# Overwrite an existing <hook-name>, if it exists. Implies
+# --setup (i.e. the "test_when_finished" is assumed to have been
+# set up already).
+# --disable
+# Disable (chmod -x) an existing <hook-name>, which must exist.
+# --remove
+# Remove (rm -f) an existing <hook-name>, which must exist.
+test_hook () {
+ setup= &&
+ clobber= &&
+ disable= &&
+ remove= &&
+ indir= &&
+ while test $# != 0
+ do
+ case "$1" in
+ -C)
+ indir="$2" &&
+ shift
+ ;;
+ --setup)
+ setup=t
+ ;;
+ --clobber)
+ clobber=t
+ ;;
+ --disable)
+ disable=t
+ ;;
+ --remove)
+ remove=t
+ ;;
+ -*)
+ BUG "invalid argument: $1"
+ ;;
+ *)
+ break
+ ;;
+ esac &&
+ shift
+ done &&
+
+ git_dir=$(git -C "$indir" rev-parse --absolute-git-dir) &&
+ hook_dir="$git_dir/hooks" &&
+ hook_file="$hook_dir/$1" &&
+ if test -n "$disable$remove"
+ then
+ test_path_is_file "$hook_file" &&
+ if test -n "$disable"
+ then
+ chmod -x "$hook_file"
+ elif test -n "$remove"
+ then
+ rm -f "$hook_file"
+ fi &&
+ return 0
+ fi &&
+ if test -z "$clobber"
+ then
+ test_path_is_missing "$hook_file"
+ fi &&
+ if test -z "$setup$clobber"
+ then
+ test_when_finished "rm \"$hook_file\""
+ fi &&
+ write_script "$hook_file"
+}
+
# Use test_set_prereq to tell that a particular prerequisite is available.
# The prerequisite can later be checked for in two ways:
#
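The test_hook helper added above centralizes the open-coded "mkdir -p .git/hooks && write_script .git/hooks/<name>" dance seen in the t9800 hunk earlier in this patch, and by default arranges a test_when_finished cleanup so the hook does not leak into later tests. A minimal usage sketch (hook name and commit message are illustrative only):

	test_expect_success 'pre-commit hook can block a commit' '
		test_hook pre-commit <<-\EOF &&
		exit 1
		EOF
		test_must_fail git commit --allow-empty -m blocked
	'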
@@ -856,6 +932,16 @@ test_path_is_file () {
fi
}
+test_path_is_file_not_symlink () {
+ test "$#" -ne 1 && BUG "1 param"
+ test_path_is_file "$1" &&
+ if test -h "$1"
+ then
+ echo "$1 shouldn't be a symbolic link"
+ false
+ fi
+}
+
test_path_is_dir () {
test "$#" -ne 1 && BUG "1 param"
if ! test -d "$1"
@@ -865,6 +951,16 @@ test_path_is_dir () {
fi
}
+test_path_is_dir_not_symlink () {
+ test "$#" -ne 1 && BUG "1 param"
+ test_path_is_dir "$1" &&
+ if test -h "$1"
+ then
+ echo "$1 shouldn't be a symbolic link"
+ false
+ fi
+}
+
test_path_exists () {
test "$#" -ne 1 && BUG "1 param"
if ! test -e "$1"
@@ -874,6 +970,15 @@ test_path_exists () {
fi
}
+test_path_is_symlink () {
+ test "$#" -ne 1 && BUG "1 param"
+ if ! test -h "$1"
+ then
+ echo "Symbolic link $1 doesn't exist"
+ false
+ fi
+}
+
# Check if the directory exists and is empty as expected, barf otherwise.
test_dir_is_empty () {
test "$#" -ne 1 && BUG "1 param"
@@ -1760,40 +1865,6 @@ test_subcommand () {
}
# Check that the given command was invoked as part of the
-# trace2-format trace on stdin, but without an exact set of
-# arguments.
-#
-# test_subcommand [!] <command> <args>... < <trace>
-#
-# For example, to look for an invocation of "git pack-objects"
-# with the "--honor-pack-keep" argument, use
-#
-# GIT_TRACE2_EVENT=event.log git repack ... &&
-# test_subcommand git pack-objects --honor-pack-keep <event.log
-#
-# If the first parameter passed is !, this instead checks that
-# the given command was not called.
-#
-test_subcommand_inexact () {
- local negate=
- if test "$1" = "!"
- then
- negate=t
- shift
- fi
-
- local expr=$(printf '"%s".*' "$@")
- expr="${expr%,}"
-
- if test -n "$negate"
- then
- ! grep "\"event\":\"child_start\".*\[$expr\]"
- else
- grep "\"event\":\"child_start\".*\[$expr\]"
- fi
-}
-
-# Check that the given command was invoked as part of the
# trace2-format trace on stdin.
#
# test_region [!] <category> <label> git <command> <args>...
@@ -1840,3 +1911,36 @@ test_region () {
test_readlink () {
perl -le 'print readlink($_) for @ARGV' "$@"
}
+
+# Set mtime to a fixed "magic" timestamp in mid February 2009, before we
+# run an operation that may or may not touch the file. If the file was
+# touched, its timestamp will not accidentally have such an old timestamp,
+# as long as your filesystem clock is reasonably correct. To verify the
+# timestamp, follow up with test_is_magic_mtime.
+#
+# An optional increment to the magic timestamp may be specified as second
+# argument.
+test_set_magic_mtime () {
+ local inc=${2:-0} &&
+ local mtime=$((1234567890 + $inc)) &&
+ test-tool chmtime =$mtime "$1" &&
+ test_is_magic_mtime "$1" $inc
+}
+
+# Test whether the given file has the "magic" mtime set. This is meant to
+# be used in combination with test_set_magic_mtime.
+#
+# An optional increment to the magic timestamp may be specified as second
+# argument. Usually, this should be the same increment which was used for
+# the associated test_set_magic_mtime.
+test_is_magic_mtime () {
+ local inc=${2:-0} &&
+ local mtime=$((1234567890 + $inc)) &&
+ echo $mtime >.git/test-mtime-expect &&
+ test-tool chmtime --get "$1" >.git/test-mtime-actual &&
+ test_cmp .git/test-mtime-expect .git/test-mtime-actual
+ local ret=$?
+ rm -f .git/test-mtime-expect
+ rm -f .git/test-mtime-actual
+ return $ret
+}
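Together these helpers let a test assert that a command did, or did not, rewrite a file on disk. A sketch of the intended pattern (it assumes the trash repository already has an index file and that rev-parse leaves it untouched):

	test_expect_success 'rev-parse does not rewrite the index' '
		test_set_magic_mtime .git/index &&
		git rev-parse HEAD &&
		test_is_magic_mtime .git/index
	'

The optional increment argument allows several files to be stamped with distinguishable magic timestamps within a single test.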
diff --git a/t/test-lib.sh b/t/test-lib.sh
index 0f7a137..531cef0 100644
--- a/t/test-lib.sh
+++ b/t/test-lib.sh
@@ -19,13 +19,20 @@
# t/ subdirectory and are run in 'trash directory' subdirectory.
if test -z "$TEST_DIRECTORY"
then
- # We allow tests to override this, in case they want to run tests
- # outside of t/, e.g. for running tests on the test library
- # itself.
- TEST_DIRECTORY=$(pwd)
-else
# ensure that TEST_DIRECTORY is an absolute path so that it
# is valid even if the current working directory is changed
+ TEST_DIRECTORY=$(pwd)
+else
+ # The TEST_DIRECTORY will always be the path to the "t"
+ # directory in the git.git checkout. This is overridden by
+ # e.g. t/lib-subtest.sh, but only because its $(pwd) is
+ # different. Those tests still set "$TEST_DIRECTORY" to the
+ # same path.
+ #
+ # See use of "$GIT_BUILD_DIR" and "$TEST_DIRECTORY" below for
+ # hard assumptions about "$GIT_BUILD_DIR/t" existing and being
+ # the "$TEST_DIRECTORY", and e.g. "$TEST_DIRECTORY/helper"
+ # needing to exist.
TEST_DIRECTORY=$(cd "$TEST_DIRECTORY" && pwd) || exit 1
fi
if test -z "$TEST_OUTPUT_DIRECTORY"
@@ -34,19 +41,42 @@ then
# elsewhere
TEST_OUTPUT_DIRECTORY=$TEST_DIRECTORY
fi
-GIT_BUILD_DIR="$TEST_DIRECTORY"/..
+GIT_BUILD_DIR="${TEST_DIRECTORY%/t}"
+if test "$TEST_DIRECTORY" = "$GIT_BUILD_DIR"
+then
+ echo "PANIC: Running in a $TEST_DIRECTORY that doesn't end in '/t'?" >&2
+ exit 1
+fi
+
+# Prepend a string to a VAR using an arbitrary ":" delimiter, not
+# adding the delimiter if VAR or VALUE is empty. I.e. a generalized:
+#
+# VAR=$1${VAR:+${1:+$2}$VAR}
+#
+# Usage (using ":" as the $2 delimiter):
+#
+# prepend_var VAR : VALUE
+prepend_var () {
+ eval "$1=$3\${$1:+${3:+$2}\$$1}"
+}
+
+# If [AL]SAN is in effect we want to abort so that we notice
+# problems. The GIT_SAN_OPTIONS variable can be used to set common
+# defaults shared between [AL]SAN_OPTIONS.
+prepend_var GIT_SAN_OPTIONS : abort_on_error=1
+prepend_var GIT_SAN_OPTIONS : strip_path_prefix=\"$GIT_BUILD_DIR/\"
# If we were built with ASAN, it may complain about leaks
# of program-lifetime variables. Disable it by default to lower
# the noise level. This needs to happen at the start of the script,
# before we even do our "did we build git yet" check (since we don't
# want that one to complain to stderr).
-: ${ASAN_OPTIONS=detect_leaks=0:abort_on_error=1}
+prepend_var ASAN_OPTIONS : $GIT_SAN_OPTIONS
+prepend_var ASAN_OPTIONS : detect_leaks=0
export ASAN_OPTIONS
-# If LSAN is in effect we _do_ want leak checking, but we still
-# want to abort so that we notice the problems.
-: ${LSAN_OPTIONS=abort_on_error=1}
+prepend_var LSAN_OPTIONS : $GIT_SAN_OPTIONS
+prepend_var LSAN_OPTIONS : fast_unwind_on_malloc=0
export LSAN_OPTIONS
if test ! -f "$GIT_BUILD_DIR"/GIT-BUILD-OPTIONS
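The effect of prepend_var is easiest to see with concrete values. A standalone sketch using the definition above (variable name and values are illustrative):

	opts=
	prepend_var opts : abort_on_error=1
	# opts=abort_on_error=1                 (no delimiter added to an empty VAR)
	prepend_var opts : detect_leaks=0
	# opts=detect_leaks=0:abort_on_error=1
	prepend_var opts : ""
	# opts unchanged: an empty VALUE adds neither text nor delimiter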
@@ -449,6 +479,8 @@ unset VISUAL EMAIL LANGUAGE $("$PERL_PATH" -e '
unset XDG_CACHE_HOME
unset XDG_CONFIG_HOME
unset GITPERLLIB
+unset GIT_TRACE2_PARENT_NAME
+unset GIT_TRACE2_PARENT_SID
TEST_AUTHOR_LOCALNAME=author
TEST_AUTHOR_DOMAIN=example.com
GIT_AUTHOR_EMAIL=${TEST_AUTHOR_LOCALNAME}@${TEST_AUTHOR_DOMAIN}
@@ -516,11 +548,29 @@ then
}
else
setup_malloc_check () {
+ local g
+ local t
MALLOC_CHECK_=3 MALLOC_PERTURB_=165
export MALLOC_CHECK_ MALLOC_PERTURB_
+ if _GLIBC_VERSION=$(getconf GNU_LIBC_VERSION 2>/dev/null) &&
+ _GLIBC_VERSION=${_GLIBC_VERSION#"glibc "} &&
+ expr 2.34 \<= "$_GLIBC_VERSION" >/dev/null
+ then
+ g=
+ LD_PRELOAD="libc_malloc_debug.so.0"
+ for t in \
+ glibc.malloc.check=1 \
+ glibc.malloc.perturb=165
+ do
+ g="${g#:}:$t"
+ done
+ GLIBC_TUNABLES=$g
+ export LD_PRELOAD GLIBC_TUNABLES
+ fi
}
teardown_malloc_check () {
unset MALLOC_CHECK_ MALLOC_PERTURB_
+ unset LD_PRELOAD GLIBC_TUNABLES
}
fi
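glibc 2.34 moved the MALLOC_CHECK_/MALLOC_PERTURB_ machinery out of libc proper and into a separate preload library, so on new-enough systems the helper additionally exports the preload and the equivalent tunables. The environment it sets up there amounts to:

	MALLOC_CHECK_=3
	MALLOC_PERTURB_=165
	LD_PRELOAD=libc_malloc_debug.so.0
	GLIBC_TUNABLES=glibc.malloc.check=1:glibc.malloc.perturb=165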
@@ -756,7 +806,11 @@ test_failure_ () {
say_color error "not ok $test_count - $1"
shift
printf '%s\n' "$*" | sed -e 's/^/# /'
- test "$immediate" = "" || _error_exit
+ if test -n "$immediate"
+ then
+ say_color error "1..$test_count"
+ _error_exit
+ fi
}
test_known_broken_ok_ () {
@@ -1797,3 +1851,10 @@ test_lazy_prereq SHA1 '
# Tests that verify the scheduler integration must set this locally
# to avoid errors.
GIT_TEST_MAINT_SCHEDULER="none:exit 1"
+
+# Does this platform support `git fsmonitor--daemon`
+#
+test_lazy_prereq FSMONITOR_DAEMON '
+ git version --build-options >output &&
+ grep "feature: fsmonitor--daemon" output
+'
diff --git a/templates/Makefile b/templates/Makefile
index d22a71a..367ad00 100644
--- a/templates/Makefile
+++ b/templates/Makefile
@@ -1,9 +1,7 @@
-# make and install sample templates
-
-ifndef V
- QUIET = @
-endif
+# Import tree-wide shared Makefile behavior and libraries
+include ../shared.mak
+# make and install sample templates
INSTALL ?= install
TAR ?= tar
RM ?= rm -f
diff --git a/tmp-objdir.c b/tmp-objdir.c
index 3d38eea..adf6033 100644
--- a/tmp-objdir.c
+++ b/tmp-objdir.c
@@ -79,6 +79,11 @@ static void remove_tmp_objdir_on_signal(int signo)
raise(signo);
}
+void tmp_objdir_discard_objects(struct tmp_objdir *t)
+{
+ remove_dir_recursively(&t->path, REMOVE_DIR_KEEP_TOPLEVEL);
+}
+
/*
* These env_* functions are for setting up the child environment; the
* "replace" variant overrides the value of any existing variable with that
diff --git a/tmp-objdir.h b/tmp-objdir.h
index cda5ec7..76efc7e 100644
--- a/tmp-objdir.h
+++ b/tmp-objdir.h
@@ -47,6 +47,12 @@ int tmp_objdir_migrate(struct tmp_objdir *);
int tmp_objdir_destroy(struct tmp_objdir *);
/*
+ * Remove all objects from the temporary object directory, while leaving it
+ * around so more objects can be added.
+ */
+void tmp_objdir_discard_objects(struct tmp_objdir *);
+
+/*
* Add the temporary object directory as an alternate object store in the
* current process.
*/
diff --git a/trace.c b/trace.c
index f726686..794a087 100644
--- a/trace.c
+++ b/trace.c
@@ -108,16 +108,11 @@ static int prepare_trace_line(const char *file, int line,
gettimeofday(&tv, NULL);
secs = tv.tv_sec;
localtime_r(&secs, &tm);
- strbuf_addf(buf, "%02d:%02d:%02d.%06ld ", tm.tm_hour, tm.tm_min,
- tm.tm_sec, (long) tv.tv_usec);
-
-#ifdef HAVE_VARIADIC_MACROS
- /* print file:line */
- strbuf_addf(buf, "%s:%d ", file, line);
+ strbuf_addf(buf, "%02d:%02d:%02d.%06ld %s:%d", tm.tm_hour, tm.tm_min,
+ tm.tm_sec, (long) tv.tv_usec, file, line);
/* align trace output (column 40 catches most files names in git) */
while (buf->len < 40)
strbuf_addch(buf, ' ');
-#endif
return 1;
}
@@ -229,74 +224,6 @@ static void trace_performance_vprintf_fl(const char *file, int line,
strbuf_release(&buf);
}
-#ifndef HAVE_VARIADIC_MACROS
-
-void trace_printf(const char *format, ...)
-{
- va_list ap;
- va_start(ap, format);
- trace_vprintf_fl(NULL, 0, &trace_default_key, format, ap);
- va_end(ap);
-}
-
-void trace_printf_key(struct trace_key *key, const char *format, ...)
-{
- va_list ap;
- va_start(ap, format);
- trace_vprintf_fl(NULL, 0, key, format, ap);
- va_end(ap);
-}
-
-void trace_argv_printf(const char **argv, const char *format, ...)
-{
- va_list ap;
- va_start(ap, format);
- trace_argv_vprintf_fl(NULL, 0, argv, format, ap);
- va_end(ap);
-}
-
-void trace_strbuf(struct trace_key *key, const struct strbuf *data)
-{
- trace_strbuf_fl(NULL, 0, key, data);
-}
-
-void trace_performance(uint64_t nanos, const char *format, ...)
-{
- va_list ap;
- va_start(ap, format);
- trace_performance_vprintf_fl(NULL, 0, nanos, format, ap);
- va_end(ap);
-}
-
-void trace_performance_since(uint64_t start, const char *format, ...)
-{
- va_list ap;
- va_start(ap, format);
- trace_performance_vprintf_fl(NULL, 0, getnanotime() - start,
- format, ap);
- va_end(ap);
-}
-
-void trace_performance_leave(const char *format, ...)
-{
- va_list ap;
- uint64_t since;
-
- if (perf_indent)
- perf_indent--;
-
- if (!format) /* Allow callers to leave without tracing anything */
- return;
-
- since = perf_start_times[perf_indent];
- va_start(ap, format);
- trace_performance_vprintf_fl(NULL, 0, getnanotime() - since,
- format, ap);
- va_end(ap);
-}
-
-#else
-
void trace_printf_key_fl(const char *file, int line, struct trace_key *key,
const char *format, ...)
{
@@ -342,9 +269,6 @@ void trace_performance_leave_fl(const char *file, int line,
va_end(ap);
}
-#endif /* HAVE_VARIADIC_MACROS */
-
-
static const char *quote_crnl(const char *path)
{
static struct strbuf new_path = STRBUF_INIT;
diff --git a/trace.h b/trace.h
index e259840..4e771f8 100644
--- a/trace.h
+++ b/trace.h
@@ -126,71 +126,6 @@ void trace_command_performance(const char **argv);
void trace_verbatim(struct trace_key *key, const void *buf, unsigned len);
uint64_t trace_performance_enter(void);
-#ifndef HAVE_VARIADIC_MACROS
-
-/**
- * Prints a formatted message, similar to printf.
- */
-__attribute__((format (printf, 1, 2)))
-void trace_printf(const char *format, ...);
-
-__attribute__((format (printf, 2, 3)))
-void trace_printf_key(struct trace_key *key, const char *format, ...);
-
-/**
- * Prints a formatted message, followed by a quoted list of arguments.
- */
-__attribute__((format (printf, 2, 3)))
-void trace_argv_printf(const char **argv, const char *format, ...);
-
-/**
- * Prints the strbuf, without additional formatting (i.e. doesn't
- * choke on `%` or even `\0`).
- */
-void trace_strbuf(struct trace_key *key, const struct strbuf *data);
-
-/**
- * Prints elapsed time (in nanoseconds) if GIT_TRACE_PERFORMANCE is enabled.
- *
- * Example:
- * ------------
- * uint64_t t = 0;
- * for (;;) {
- * // ignore
- * t -= getnanotime();
- * // code section to measure
- * t += getnanotime();
- * // ignore
- * }
- * trace_performance(t, "frotz");
- * ------------
- */
-__attribute__((format (printf, 2, 3)))
-void trace_performance(uint64_t nanos, const char *format, ...);
-
-/**
- * Prints elapsed time since 'start' if GIT_TRACE_PERFORMANCE is enabled.
- *
- * Example:
- * ------------
- * uint64_t start = getnanotime();
- * // code section to measure
- * trace_performance_since(start, "foobar");
- * ------------
- */
-__attribute__((format (printf, 2, 3)))
-void trace_performance_since(uint64_t start, const char *format, ...);
-
-__attribute__((format (printf, 1, 2)))
-void trace_performance_leave(const char *format, ...);
-
-#else
-
-/*
- * Macros to add file:line - see above for C-style declarations of how these
- * should be used.
- */
-
/*
* TRACE_CONTEXT may be set to __FUNCTION__ if the compiler supports it. The
* default is __FILE__, as it is consistent with assert(), and static function
@@ -204,7 +139,10 @@ void trace_performance_leave(const char *format, ...);
# define TRACE_CONTEXT __FILE__
#endif
-/*
+/**
+ * Macros to add the file:line of the calling code, instead of that of
+ * the trace function itself.
+ *
* Note: with C99 variadic macros, __VA_ARGS__ must include the last fixed
* parameter ('format' in this case). Otherwise, a call without variable
* arguments will have a surplus ','. E.g.:
@@ -220,6 +158,16 @@ void trace_performance_leave(const char *format, ...);
* comma, but this is non-standard.
*/
+/**
+ * trace_printf(), accepts "const char *format, ...".
+ *
+ * Prints a formatted message, similar to printf.
+ */
+#define trace_printf(...) trace_printf_key(&trace_default_key, __VA_ARGS__)
+
+/**
+ * trace_printf_key(), accepts "struct trace_key *key, const char *format, ...".
+ */
#define trace_printf_key(key, ...) \
do { \
if (trace_pass_fl(key)) \
@@ -227,8 +175,11 @@ void trace_performance_leave(const char *format, ...);
__VA_ARGS__); \
} while (0)
-#define trace_printf(...) trace_printf_key(&trace_default_key, __VA_ARGS__)
-
+/**
+ * trace_argv_printf(), accepts "struct trace_key *key, const char *format, ...)".
+ *
+ * Prints a formatted message, followed by a quoted list of arguments.
+ */
#define trace_argv_printf(argv, ...) \
do { \
if (trace_pass_fl(&trace_default_key)) \
@@ -236,12 +187,36 @@ void trace_performance_leave(const char *format, ...);
argv, __VA_ARGS__); \
} while (0)
+/**
+ * trace_strbuf(), accepts "struct trace_key *key, const struct strbuf *data".
+ *
+ * Prints the strbuf, without additional formatting (i.e. doesn't
+ * choke on `%` or even `\0`).
+ */
#define trace_strbuf(key, data) \
do { \
if (trace_pass_fl(key)) \
trace_strbuf_fl(TRACE_CONTEXT, __LINE__, key, data);\
} while (0)
+/**
+ * trace_performance(), accepts "uint64_t nanos, const char *format, ...".
+ *
+ * Prints elapsed time (in nanoseconds) if GIT_TRACE_PERFORMANCE is enabled.
+ *
+ * Example:
+ * ------------
+ * uint64_t t = 0;
+ * for (;;) {
+ * // ignore
+ * t -= getnanotime();
+ * // code section to measure
+ * t += getnanotime();
+ * // ignore
+ * }
+ * trace_performance(t, "frotz");
+ * ------------
+ */
#define trace_performance(nanos, ...) \
do { \
if (trace_pass_fl(&trace_perf_key)) \
@@ -249,6 +224,18 @@ void trace_performance_leave(const char *format, ...);
__VA_ARGS__); \
} while (0)
+/**
+ * trace_performance_since(), accepts "uint64_t start, const char *format, ...".
+ *
+ * Prints elapsed time since 'start' if GIT_TRACE_PERFORMANCE is enabled.
+ *
+ * Example:
+ * ------------
+ * uint64_t start = getnanotime();
+ * // code section to measure
+ * trace_performance_since(start, "foobar");
+ * ------------
+ */
#define trace_performance_since(start, ...) \
do { \
if (trace_pass_fl(&trace_perf_key)) \
@@ -257,6 +244,9 @@ void trace_performance_leave(const char *format, ...);
__VA_ARGS__); \
} while (0)
+/**
+ * trace_performance_leave(), accepts "const char *format, ...".
+ */
#define trace_performance_leave(...) \
do { \
if (trace_pass_fl(&trace_perf_key)) \
@@ -285,6 +275,4 @@ static inline int trace_pass_fl(struct trace_key *key)
return key->fd || !key->initialized;
}
-#endif /* HAVE_VARIADIC_MACROS */
-
#endif /* TRACE_H */
diff --git a/trace2.c b/trace2.c
index b2d4715..e01cf77 100644
--- a/trace2.c
+++ b/trace2.c
@@ -214,6 +214,7 @@ int trace2_cmd_exit_fl(const char *file, int line, int code)
if (!trace2_enabled)
return code;
+ trace_git_fsync_stats();
trace2_collect_process_info(TRACE2_PROCESS_INFO_EXIT);
tr2main_exit_code = code;
@@ -641,20 +642,6 @@ void trace2_region_enter_printf_fl(const char *file, int line,
va_end(ap);
}
-#ifndef HAVE_VARIADIC_MACROS
-void trace2_region_enter_printf(const char *category, const char *label,
- const struct repository *repo, const char *fmt,
- ...)
-{
- va_list ap;
-
- va_start(ap, fmt);
- trace2_region_enter_printf_va_fl(NULL, 0, category, label, repo, fmt,
- ap);
- va_end(ap);
-}
-#endif
-
void trace2_region_leave_printf_va_fl(const char *file, int line,
const char *category, const char *label,
const struct repository *repo,
@@ -717,20 +704,6 @@ void trace2_region_leave_printf_fl(const char *file, int line,
va_end(ap);
}
-#ifndef HAVE_VARIADIC_MACROS
-void trace2_region_leave_printf(const char *category, const char *label,
- const struct repository *repo, const char *fmt,
- ...)
-{
- va_list ap;
-
- va_start(ap, fmt);
- trace2_region_leave_printf_va_fl(NULL, 0, category, label, repo, fmt,
- ap);
- va_end(ap);
-}
-#endif
-
void trace2_data_string_fl(const char *file, int line, const char *category,
const struct repository *repo, const char *key,
const char *value)
@@ -826,17 +799,6 @@ void trace2_printf_fl(const char *file, int line, const char *fmt, ...)
va_end(ap);
}
-#ifndef HAVE_VARIADIC_MACROS
-void trace2_printf(const char *fmt, ...)
-{
- va_list ap;
-
- va_start(ap, fmt);
- trace2_printf_va_fl(NULL, 0, fmt, ap);
- va_end(ap);
-}
-#endif
-
const char *trace2_session_id(void)
{
return tr2_sid_get();
diff --git a/trace2.h b/trace2.h
index 0cc7b5f..1b109f5 100644
--- a/trace2.h
+++ b/trace2.h
@@ -397,18 +397,9 @@ void trace2_region_enter_printf_fl(const char *file, int line,
const struct repository *repo,
const char *fmt, ...);
-#ifdef HAVE_VARIADIC_MACROS
#define trace2_region_enter_printf(category, label, repo, ...) \
trace2_region_enter_printf_fl(__FILE__, __LINE__, (category), (label), \
(repo), __VA_ARGS__)
-#else
-/* clang-format off */
-__attribute__((format (region_enter_printf, 4, 5)))
-void trace2_region_enter_printf(const char *category, const char *label,
- const struct repository *repo, const char *fmt,
- ...);
-/* clang-format on */
-#endif
/**
* Emit a 'region_leave' event for <category>.<label> with optional
@@ -442,18 +433,9 @@ void trace2_region_leave_printf_fl(const char *file, int line,
const struct repository *repo,
const char *fmt, ...);
-#ifdef HAVE_VARIADIC_MACROS
#define trace2_region_leave_printf(category, label, repo, ...) \
trace2_region_leave_printf_fl(__FILE__, __LINE__, (category), (label), \
(repo), __VA_ARGS__)
-#else
-/* clang-format off */
-__attribute__((format (region_leave_printf, 4, 5)))
-void trace2_region_leave_printf(const char *category, const char *label,
- const struct repository *repo, const char *fmt,
- ...);
-/* clang-format on */
-#endif
/**
* Emit a key-value pair 'data' event of the form <category>.<key> = <value>.
@@ -506,14 +488,7 @@ void trace2_printf_va_fl(const char *file, int line, const char *fmt,
void trace2_printf_fl(const char *file, int line, const char *fmt, ...);
-#ifdef HAVE_VARIADIC_MACROS
#define trace2_printf(...) trace2_printf_fl(__FILE__, __LINE__, __VA_ARGS__)
-#else
-/* clang-format off */
-__attribute__((format (printf, 1, 2)))
-void trace2_printf(const char *fmt, ...);
-/* clang-format on */
-#endif
/*
* Optional platform-specific code to dump information about the
diff --git a/trace2/tr2_tgt_event.c b/trace2/tr2_tgt_event.c
index bd17ecd..c5c8cfb 100644
--- a/trace2/tr2_tgt_event.c
+++ b/trace2/tr2_tgt_event.c
@@ -10,7 +10,9 @@
#include "trace2/tr2_tgt.h"
#include "trace2/tr2_tls.h"
-static struct tr2_dst tr2dst_event = { TR2_SYSENV_EVENT, 0, 0, 0, 0 };
+static struct tr2_dst tr2dst_event = {
+ .sysenv_var = TR2_SYSENV_EVENT,
+};
/*
* The version number of the JSON data generated by the EVENT target in this
@@ -613,34 +615,34 @@ static void fn_data_json_fl(const char *file, int line,
}
struct tr2_tgt tr2_tgt_event = {
- &tr2dst_event,
-
- fn_init,
- fn_term,
-
- fn_version_fl,
- fn_start_fl,
- fn_exit_fl,
- fn_signal,
- fn_atexit,
- fn_error_va_fl,
- fn_command_path_fl,
- fn_command_ancestry_fl,
- fn_command_name_fl,
- fn_command_mode_fl,
- fn_alias_fl,
- fn_child_start_fl,
- fn_child_exit_fl,
- fn_child_ready_fl,
- fn_thread_start_fl,
- fn_thread_exit_fl,
- fn_exec_fl,
- fn_exec_result_fl,
- fn_param_fl,
- fn_repo_fl,
- fn_region_enter_printf_va_fl,
- fn_region_leave_printf_va_fl,
- fn_data_fl,
- fn_data_json_fl,
- NULL, /* printf */
+ .pdst = &tr2dst_event,
+
+ .pfn_init = fn_init,
+ .pfn_term = fn_term,
+
+ .pfn_version_fl = fn_version_fl,
+ .pfn_start_fl = fn_start_fl,
+ .pfn_exit_fl = fn_exit_fl,
+ .pfn_signal = fn_signal,
+ .pfn_atexit = fn_atexit,
+ .pfn_error_va_fl = fn_error_va_fl,
+ .pfn_command_path_fl = fn_command_path_fl,
+ .pfn_command_ancestry_fl = fn_command_ancestry_fl,
+ .pfn_command_name_fl = fn_command_name_fl,
+ .pfn_command_mode_fl = fn_command_mode_fl,
+ .pfn_alias_fl = fn_alias_fl,
+ .pfn_child_start_fl = fn_child_start_fl,
+ .pfn_child_exit_fl = fn_child_exit_fl,
+ .pfn_child_ready_fl = fn_child_ready_fl,
+ .pfn_thread_start_fl = fn_thread_start_fl,
+ .pfn_thread_exit_fl = fn_thread_exit_fl,
+ .pfn_exec_fl = fn_exec_fl,
+ .pfn_exec_result_fl = fn_exec_result_fl,
+ .pfn_param_fl = fn_param_fl,
+ .pfn_repo_fl = fn_repo_fl,
+ .pfn_region_enter_printf_va_fl = fn_region_enter_printf_va_fl,
+ .pfn_region_leave_printf_va_fl = fn_region_leave_printf_va_fl,
+ .pfn_data_fl = fn_data_fl,
+ .pfn_data_json_fl = fn_data_json_fl,
+ .pfn_printf_va_fl = NULL,
};
diff --git a/trace2/tr2_tgt_normal.c b/trace2/tr2_tgt_normal.c
index 6e429a3..c42fbad 100644
--- a/trace2/tr2_tgt_normal.c
+++ b/trace2/tr2_tgt_normal.c
@@ -9,7 +9,9 @@
#include "trace2/tr2_tgt.h"
#include "trace2/tr2_tls.h"
-static struct tr2_dst tr2dst_normal = { TR2_SYSENV_NORMAL, 0, 0, 0, 0 };
+static struct tr2_dst tr2dst_normal = {
+ .sysenv_var = TR2_SYSENV_NORMAL,
+};
/*
* Use the TR2_SYSENV_NORMAL_BRIEF setting to omit the "<time> <file>:<line>"
@@ -325,34 +327,34 @@ static void fn_printf_va_fl(const char *file, int line,
}
struct tr2_tgt tr2_tgt_normal = {
- &tr2dst_normal,
-
- fn_init,
- fn_term,
-
- fn_version_fl,
- fn_start_fl,
- fn_exit_fl,
- fn_signal,
- fn_atexit,
- fn_error_va_fl,
- fn_command_path_fl,
- fn_command_ancestry_fl,
- fn_command_name_fl,
- fn_command_mode_fl,
- fn_alias_fl,
- fn_child_start_fl,
- fn_child_exit_fl,
- fn_child_ready_fl,
- NULL, /* thread_start */
- NULL, /* thread_exit */
- fn_exec_fl,
- fn_exec_result_fl,
- fn_param_fl,
- fn_repo_fl,
- NULL, /* region_enter */
- NULL, /* region_leave */
- NULL, /* data */
- NULL, /* data_json */
- fn_printf_va_fl,
+ .pdst = &tr2dst_normal,
+
+ .pfn_init = fn_init,
+ .pfn_term = fn_term,
+
+ .pfn_version_fl = fn_version_fl,
+ .pfn_start_fl = fn_start_fl,
+ .pfn_exit_fl = fn_exit_fl,
+ .pfn_signal = fn_signal,
+ .pfn_atexit = fn_atexit,
+ .pfn_error_va_fl = fn_error_va_fl,
+ .pfn_command_path_fl = fn_command_path_fl,
+ .pfn_command_ancestry_fl = fn_command_ancestry_fl,
+ .pfn_command_name_fl = fn_command_name_fl,
+ .pfn_command_mode_fl = fn_command_mode_fl,
+ .pfn_alias_fl = fn_alias_fl,
+ .pfn_child_start_fl = fn_child_start_fl,
+ .pfn_child_exit_fl = fn_child_exit_fl,
+ .pfn_child_ready_fl = fn_child_ready_fl,
+ .pfn_thread_start_fl = NULL,
+ .pfn_thread_exit_fl = NULL,
+ .pfn_exec_fl = fn_exec_fl,
+ .pfn_exec_result_fl = fn_exec_result_fl,
+ .pfn_param_fl = fn_param_fl,
+ .pfn_repo_fl = fn_repo_fl,
+ .pfn_region_enter_printf_va_fl = NULL,
+ .pfn_region_leave_printf_va_fl = NULL,
+ .pfn_data_fl = NULL,
+ .pfn_data_json_fl = NULL,
+ .pfn_printf_va_fl = fn_printf_va_fl,
};
diff --git a/trace2/tr2_tgt_perf.c b/trace2/tr2_tgt_perf.c
index 2ff9cf7..a1eff8b 100644
--- a/trace2/tr2_tgt_perf.c
+++ b/trace2/tr2_tgt_perf.c
@@ -11,7 +11,9 @@
#include "trace2/tr2_tgt.h"
#include "trace2/tr2_tls.h"
-static struct tr2_dst tr2dst_perf = { TR2_SYSENV_PERF, 0, 0, 0, 0 };
+static struct tr2_dst tr2dst_perf = {
+ .sysenv_var = TR2_SYSENV_PERF,
+};
/*
* Use TR2_SYSENV_PERF_BRIEF to omit the "<time> <file>:<line>"
@@ -549,34 +551,34 @@ static void fn_printf_va_fl(const char *file, int line,
}
struct tr2_tgt tr2_tgt_perf = {
- &tr2dst_perf,
-
- fn_init,
- fn_term,
-
- fn_version_fl,
- fn_start_fl,
- fn_exit_fl,
- fn_signal,
- fn_atexit,
- fn_error_va_fl,
- fn_command_path_fl,
- fn_command_ancestry_fl,
- fn_command_name_fl,
- fn_command_mode_fl,
- fn_alias_fl,
- fn_child_start_fl,
- fn_child_exit_fl,
- fn_child_ready_fl,
- fn_thread_start_fl,
- fn_thread_exit_fl,
- fn_exec_fl,
- fn_exec_result_fl,
- fn_param_fl,
- fn_repo_fl,
- fn_region_enter_printf_va_fl,
- fn_region_leave_printf_va_fl,
- fn_data_fl,
- fn_data_json_fl,
- fn_printf_va_fl,
+ .pdst = &tr2dst_perf,
+
+ .pfn_init = fn_init,
+ .pfn_term = fn_term,
+
+ .pfn_version_fl = fn_version_fl,
+ .pfn_start_fl = fn_start_fl,
+ .pfn_exit_fl = fn_exit_fl,
+ .pfn_signal = fn_signal,
+ .pfn_atexit = fn_atexit,
+ .pfn_error_va_fl = fn_error_va_fl,
+ .pfn_command_path_fl = fn_command_path_fl,
+ .pfn_command_ancestry_fl = fn_command_ancestry_fl,
+ .pfn_command_name_fl = fn_command_name_fl,
+ .pfn_command_mode_fl = fn_command_mode_fl,
+ .pfn_alias_fl = fn_alias_fl,
+ .pfn_child_start_fl = fn_child_start_fl,
+ .pfn_child_exit_fl = fn_child_exit_fl,
+ .pfn_child_ready_fl = fn_child_ready_fl,
+ .pfn_thread_start_fl = fn_thread_start_fl,
+ .pfn_thread_exit_fl = fn_thread_exit_fl,
+ .pfn_exec_fl = fn_exec_fl,
+ .pfn_exec_result_fl = fn_exec_result_fl,
+ .pfn_param_fl = fn_param_fl,
+ .pfn_repo_fl = fn_repo_fl,
+ .pfn_region_enter_printf_va_fl = fn_region_enter_printf_va_fl,
+ .pfn_region_leave_printf_va_fl = fn_region_leave_printf_va_fl,
+ .pfn_data_fl = fn_data_fl,
+ .pfn_data_json_fl = fn_data_json_fl,
+ .pfn_printf_va_fl = fn_printf_va_fl,
};
diff --git a/transport-helper.c b/transport-helper.c
index a0297b0..b4dbbab 100644
--- a/transport-helper.c
+++ b/transport-helper.c
@@ -715,6 +715,9 @@ static int fetch_refs(struct transport *transport,
if (data->transport_options.update_shallow)
set_helper_option(transport, "update-shallow", "true");
+ if (data->transport_options.refetch)
+ set_helper_option(transport, "refetch", "true");
+
if (data->transport_options.filter_options.choice) {
const char *spec = expand_list_objects_filter_spec(
&data->transport_options.filter_options);
diff --git a/transport.c b/transport.c
index 2a3e324..3d64a43 100644
--- a/transport.c
+++ b/transport.c
@@ -125,16 +125,9 @@ struct bundle_transport_data {
unsigned get_refs_from_bundle_called : 1;
};
-static struct ref *get_refs_from_bundle(struct transport *transport,
- int for_push,
- struct transport_ls_refs_options *transport_options)
+static void get_refs_from_bundle_inner(struct transport *transport)
{
struct bundle_transport_data *data = transport->data;
- struct ref *result = NULL;
- int i;
-
- if (for_push)
- return NULL;
data->get_refs_from_bundle_called = 1;
@@ -145,6 +138,20 @@ static struct ref *get_refs_from_bundle(struct transport *transport,
die(_("could not read bundle '%s'"), transport->url);
transport->hash_algo = data->header.hash_algo;
+}
+
+static struct ref *get_refs_from_bundle(struct transport *transport,
+ int for_push,
+ struct transport_ls_refs_options *transport_options)
+{
+ struct bundle_transport_data *data = transport->data;
+ struct ref *result = NULL;
+ int i;
+
+ if (for_push)
+ return NULL;
+
+ get_refs_from_bundle_inner(transport);
for (i = 0; i < data->header.references.nr; i++) {
struct string_list_item *e = data->header.references.items + i;
@@ -169,7 +176,7 @@ static int fetch_refs_from_bundle(struct transport *transport,
strvec_push(&extra_index_pack_args, "-v");
if (!data->get_refs_from_bundle_called)
- get_refs_from_bundle(transport, 0, NULL);
+ get_refs_from_bundle_inner(transport);
ret = unbundle(the_repository, &data->header, data->fd,
&extra_index_pack_args);
transport->hash_algo = data->header.hash_algo;
@@ -243,6 +250,9 @@ static int set_git_option(struct git_transport_options *opts,
list_objects_filter_die_if_populated(&opts->filter_options);
parse_list_objects_filter(&opts->filter_options, value);
return 0;
+ } else if (!strcmp(name, TRANS_OPT_REFETCH)) {
+ opts->refetch = !!value;
+ return 0;
} else if (!strcmp(name, TRANS_OPT_REJECT_SHALLOW)) {
opts->reject_shallow = !!value;
return 0;
@@ -377,6 +387,7 @@ static int fetch_refs_via_pack(struct transport *transport,
args.update_shallow = data->options.update_shallow;
args.from_promisor = data->options.from_promisor;
args.filter_options = data->options.filter_options;
+ args.refetch = data->options.refetch;
args.stateless_rpc = transport->stateless_rpc;
args.server_options = transport->server_options;
args.negotiation_tips = data->options.negotiation_tips;
@@ -1292,7 +1303,7 @@ int transport_push(struct repository *r,
&transport_options);
trace2_region_leave("transport_push", "get_refs_list", r);
- strvec_clear(&transport_options.ref_prefixes);
+ transport_ls_refs_options_release(&transport_options);
if (flags & TRANSPORT_PUSH_ALL)
match_flags |= MATCH_REFS_ALL;
@@ -1420,6 +1431,12 @@ const struct ref *transport_get_remote_refs(struct transport *transport,
return transport->remote_refs;
}
+void transport_ls_refs_options_release(struct transport_ls_refs_options *opts)
+{
+ strvec_clear(&opts->ref_prefixes);
+ free((char *)opts->unborn_head_target);
+}
+
int transport_fetch_refs(struct transport *transport, struct ref *refs)
{
int rc;
diff --git a/transport.h b/transport.h
index 3f16e50..12bc08f 100644
--- a/transport.h
+++ b/transport.h
@@ -16,6 +16,7 @@ struct git_transport_options {
unsigned update_shallow : 1;
unsigned reject_shallow : 1;
unsigned deepen_relative : 1;
+ unsigned refetch : 1;
/* see documentation of corresponding flag in fetch-pack.h */
unsigned from_promisor : 1;
@@ -216,6 +217,9 @@ void transport_check_allowed(const char *type);
/* Filter objects for partial clone and fetch */
#define TRANS_OPT_LIST_OBJECTS_FILTER "filter"
+/* Refetch all objects without negotiating */
+#define TRANS_OPT_REFETCH "refetch"
+
/* Request atomic (all-or-nothing) updates when pushing */
#define TRANS_OPT_ATOMIC "atomic"
@@ -257,15 +261,19 @@ struct transport_ls_refs_options {
/*
* If unborn_head_target is not NULL, and the remote reports HEAD as
* pointing to an unborn branch, transport_get_remote_refs() stores the
- * unborn branch in unborn_head_target. It should be freed by the
- * caller.
+ * unborn branch in unborn_head_target.
*/
- char *unborn_head_target;
+ const char *unborn_head_target;
};
#define TRANSPORT_LS_REFS_OPTIONS_INIT { \
.ref_prefixes = STRVEC_INIT, \
}
+/**
+ * Release the "struct transport_ls_refs_options".
+ */
+void transport_ls_refs_options_release(struct transport_ls_refs_options *opts);
+
/*
* Retrieve refs from a remote.
*/
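The new refetch option asks the transport to fetch all matching objects as if the local object store were empty, skipping the usual common-commit negotiation. In user terms this is the plumbing behind a full re-download, e.g. after changing a partial-clone filter (assuming the corresponding --refetch option that git fetch grows in this series):

	# re-fetch everything, ignoring objects that are already present locally
	git fetch --refetch origin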
diff --git a/tree-walk.c b/tree-walk.c
index 3a94959..506234b 100644
--- a/tree-walk.c
+++ b/tree-walk.c
@@ -89,7 +89,7 @@ void *fill_tree_descriptor(struct repository *r,
void *buf = NULL;
if (oid) {
- buf = read_object_with_reference(r, oid, tree_type, &size, NULL);
+ buf = read_object_with_reference(r, oid, OBJ_TREE, &size, NULL);
if (!buf)
die("unable to read tree %s", oid_to_hex(oid));
}
@@ -605,7 +605,7 @@ int get_tree_entry(struct repository *r,
unsigned long size;
struct object_id root;
- tree = read_object_with_reference(r, tree_oid, tree_type, &size, &root);
+ tree = read_object_with_reference(r, tree_oid, OBJ_TREE, &size, &root);
if (!tree)
return -1;
@@ -677,7 +677,7 @@ enum get_oid_result get_tree_entry_follow_symlinks(struct repository *r,
unsigned long size;
tree = read_object_with_reference(r,
&current_tree_oid,
- tree_type, &size,
+ OBJ_TREE, &size,
&root);
if (!tree)
goto done;
diff --git a/unpack-trees.c b/unpack-trees.c
index 360844b..7f528d3 100644
--- a/unpack-trees.c
+++ b/unpack-trees.c
@@ -595,13 +595,6 @@ static void mark_ce_used(struct cache_entry *ce, struct unpack_trees_options *o)
{
ce->ce_flags |= CE_UNPACKED;
- /*
- * If this is a sparse directory, don't advance cache_bottom.
- * That will be advanced later using the cache-tree data.
- */
- if (S_ISSPARSEDIR(ce->ce_mode))
- return;
-
if (o->cache_bottom < o->src_index->cache_nr &&
o->src_index->cache[o->cache_bottom] == ce) {
int bottom = o->cache_bottom;
@@ -651,24 +644,17 @@ static void mark_ce_used_same_name(struct cache_entry *ce,
}
}
-static struct cache_entry *next_cache_entry(struct unpack_trees_options *o, int *hint)
+static struct cache_entry *next_cache_entry(struct unpack_trees_options *o)
{
const struct index_state *index = o->src_index;
int pos = o->cache_bottom;
- if (*hint > pos)
- pos = *hint;
-
while (pos < index->cache_nr) {
struct cache_entry *ce = index->cache[pos];
- if (!(ce->ce_flags & CE_UNPACKED)) {
- *hint = pos + 1;
+ if (!(ce->ce_flags & CE_UNPACKED))
return ce;
- }
pos++;
}
-
- *hint = pos;
return NULL;
}
@@ -1360,6 +1346,42 @@ static int is_sparse_directory_entry(struct cache_entry *ce,
return sparse_dir_matches_path(ce, info, name);
}
+static int unpack_sparse_callback(int n, unsigned long mask, unsigned long dirmask, struct name_entry *names, struct traverse_info *info)
+{
+ struct cache_entry *src[MAX_UNPACK_TREES + 1] = { NULL, };
+ struct unpack_trees_options *o = info->data;
+ int ret;
+
+ assert(o->merge);
+
+ /*
+ * Unlike in 'unpack_callback', where src[0] is derived from the index when
+ * merging, src[0] is a transient cache entry derived from the first tree
+ * provided. Create the temporary entry as if it came from a non-sparse index.
+ */
+ if (!is_null_oid(&names[0].oid)) {
+ src[0] = create_ce_entry(info, &names[0], 0,
+ &o->result, 1,
+ dirmask & (1ul << 0));
+ src[0]->ce_flags |= (CE_SKIP_WORKTREE | CE_NEW_SKIP_WORKTREE);
+ }
+
+ /*
+ * 'unpack_single_entry' assumes that src[0] is derived directly from
+ * the index, rather than from an entry in 'names'. This is *not* true when
+ * merging a sparse directory, in which case names[0] is the "index" source
+ * entry. To match the expectations of 'unpack_single_entry', shift past the
+ * "index" tree (i.e., names[0]) and adjust 'names', 'n', 'mask', and
+ * 'dirmask' accordingly.
+ */
+ ret = unpack_single_entry(n - 1, mask >> 1, dirmask >> 1, src, names + 1, info);
+
+ if (src[0])
+ discard_cache_entry(src[0]);
+
+ return ret >= 0 ? mask : -1;
+}
+
/*
* Note that traverse_by_cache_tree() duplicates some logic in this function
* without actually calling it. If you change the logic here you may need to
@@ -1380,13 +1402,12 @@ static int unpack_callback(int n, unsigned long mask, unsigned long dirmask, str
/* Are we supposed to look at the index too? */
if (o->merge) {
- int hint = -1;
while (1) {
int cmp;
struct cache_entry *ce;
if (o->diff_index_cached)
- ce = next_cache_entry(o, &hint);
+ ce = next_cache_entry(o);
else
ce = find_cache_entry(info, p);
@@ -1442,7 +1463,14 @@ static int unpack_callback(int n, unsigned long mask, unsigned long dirmask, str
* it does not do any look-ahead, so this is safe.
*/
if (matches) {
- o->cache_bottom += matches;
+ /*
+ * Only increment the cache_bottom if the
+ * directory isn't a sparse directory index
+ * entry (if it is, it was already incremented)
+ * in 'mark_ce_used()'
+ */
+ if (!src[0] || !S_ISSPARSEDIR(src[0]->ce_mode))
+ o->cache_bottom += matches;
return mask;
}
}
@@ -1693,6 +1721,41 @@ static void populate_from_existing_patterns(struct unpack_trees_options *o,
o->pl = pl;
}
+static void update_sparsity_for_prefix(const char *prefix,
+ struct index_state *istate)
+{
+ int prefix_len = strlen(prefix);
+ struct strbuf ce_prefix = STRBUF_INIT;
+
+ if (!istate->sparse_index)
+ return;
+
+ while (prefix_len > 0 && prefix[prefix_len - 1] == '/')
+ prefix_len--;
+
+ if (prefix_len <= 0)
+ BUG("Invalid prefix passed to update_sparsity_for_prefix");
+
+ strbuf_grow(&ce_prefix, prefix_len + 1);
+ strbuf_add(&ce_prefix, prefix, prefix_len);
+ strbuf_addch(&ce_prefix, '/');
+
+ /*
+ * If the prefix points to a sparse directory or a path inside a sparse
+ * directory, the index should be expanded. This is accomplished in one
+ * of two ways:
+ * - if the prefix is inside a sparse directory, it will be expanded by
+ * the 'ensure_full_index(...)' call in 'index_name_pos(...)'.
+ * - if the prefix matches an existing sparse directory entry,
+ * 'index_name_pos(...)' will return its index position, triggering
+ * the 'ensure_full_index(...)' below.
+ */
+ if (!path_in_cone_mode_sparse_checkout(ce_prefix.buf, istate) &&
+ index_name_pos(istate, ce_prefix.buf, ce_prefix.len) >= 0)
+ ensure_full_index(istate);
+
+ strbuf_release(&ce_prefix);
+}
static int verify_absent(const struct cache_entry *,
enum unpack_trees_error_types,
@@ -1706,7 +1769,7 @@ static int verify_absent(const struct cache_entry *,
int unpack_trees(unsigned len, struct tree_desc *t, struct unpack_trees_options *o)
{
struct repository *repo = the_repository;
- int i, hint, ret;
+ int i, ret;
static struct cache_entry *dfc;
struct pattern_list pl;
int free_pattern_list = 0;
@@ -1739,6 +1802,9 @@ int unpack_trees(unsigned len, struct tree_desc *t, struct unpack_trees_options
setup_standard_excludes(o->dir);
}
+ if (o->prefix)
+ update_sparsity_for_prefix(o->prefix, o->src_index);
+
if (!core_apply_sparse_checkout || !o->update)
o->skip_sparse_checkout = 1;
if (!o->skip_sparse_checkout && !o->pl) {
@@ -1795,15 +1861,13 @@ int unpack_trees(unsigned len, struct tree_desc *t, struct unpack_trees_options
info.pathspec = o->pathspec;
if (o->prefix) {
- hint = -1;
-
/*
* Unpack existing index entries that sort before the
* prefix the tree is spliced into. Note that o->merge
* is always true in this case.
*/
while (1) {
- struct cache_entry *ce = next_cache_entry(o, &hint);
+ struct cache_entry *ce = next_cache_entry(o);
if (!ce)
break;
if (ce_in_traverse_path(ce, &info))
@@ -1824,9 +1888,8 @@ int unpack_trees(unsigned len, struct tree_desc *t, struct unpack_trees_options
/* Any left-over entries in the index? */
if (o->merge) {
- hint = -1;
while (1) {
- struct cache_entry *ce = next_cache_entry(o, &hint);
+ struct cache_entry *ce = next_cache_entry(o);
if (!ce)
break;
if (unpack_index_entry(ce, o) < 0)
@@ -2065,7 +2128,9 @@ static int verify_uptodate_1(const struct cache_entry *ce,
int verify_uptodate(const struct cache_entry *ce,
struct unpack_trees_options *o)
{
- if (!o->skip_sparse_checkout && (ce->ce_flags & CE_NEW_SKIP_WORKTREE))
+ if (!o->skip_sparse_checkout &&
+ (ce->ce_flags & CE_SKIP_WORKTREE) &&
+ (ce->ce_flags & CE_NEW_SKIP_WORKTREE))
return 0;
return verify_uptodate_1(ce, o, ERROR_NOT_UPTODATE_FILE);
}
@@ -2434,6 +2499,37 @@ static int merged_entry(const struct cache_entry *ce,
return 1;
}
+static int merged_sparse_dir(const struct cache_entry * const *src, int n,
+ struct unpack_trees_options *o)
+{
+ struct tree_desc t[MAX_UNPACK_TREES + 1];
+ void * tree_bufs[MAX_UNPACK_TREES + 1];
+ struct traverse_info info;
+ int i, ret;
+
+ /*
+ * Create the tree traversal information for traversing into *only* the
+ * sparse directory.
+ */
+ setup_traverse_info(&info, src[0]->name);
+ info.fn = unpack_sparse_callback;
+ info.data = o;
+ info.show_all_errors = o->show_all_errors;
+ info.pathspec = o->pathspec;
+
+ /* Get the tree descriptors of the sparse directory in each of the merging trees */
+ for (i = 0; i < n; i++)
+ tree_bufs[i] = fill_tree_descriptor(o->src_index->repo, &t[i],
+ src[i] && !is_null_oid(&src[i]->oid) ? &src[i]->oid : NULL);
+
+ ret = traverse_trees(o->src_index, n, t, &info);
+
+ for (i = 0; i < n; i++)
+ free(tree_bufs[i]);
+
+ return ret;
+}
+
static int deleted_entry(const struct cache_entry *ce,
const struct cache_entry *old,
struct unpack_trees_options *o)
@@ -2538,16 +2634,24 @@ int threeway_merge(const struct cache_entry * const *stages,
*/
/* #14, #14ALT, #2ALT */
if (remote && !df_conflict_head && head_match && !remote_match) {
- if (index && !same(index, remote) && !same(index, head))
- return reject_merge(index, o);
+ if (index && !same(index, remote) && !same(index, head)) {
+ if (S_ISSPARSEDIR(index->ce_mode))
+ return merged_sparse_dir(stages, 4, o);
+ else
+ return reject_merge(index, o);
+ }
return merged_entry(remote, index, o);
}
/*
* If we have an entry in the index cache, then we want to
* make sure that it matches head.
*/
- if (index && !same(index, head))
- return reject_merge(index, o);
+ if (index && !same(index, head)) {
+ if (S_ISSPARSEDIR(index->ce_mode))
+ return merged_sparse_dir(stages, 4, o);
+ else
+ return reject_merge(index, o);
+ }
if (head) {
/* #5ALT, #15 */
@@ -2609,11 +2713,21 @@ int threeway_merge(const struct cache_entry * const *stages,
}
- /* Below are "no merge" cases, which require that the index be
- * up-to-date to avoid the files getting overwritten with
- * conflict resolution files.
- */
+ /* Handle "no merge" cases (see t/t1000-read-tree-m-3way.sh) */
if (index) {
+ /*
+ * If we've reached the "no merge" cases and we're merging
+ * a sparse directory, we may have an "edit/edit" conflict that
+ * can be resolved by individually merging directory contents.
+ */
+ if (S_ISSPARSEDIR(index->ce_mode))
+ return merged_sparse_dir(stages, 4, o);
+
+ /*
+ * If we're not merging a sparse directory, ensure the index is
+ * up-to-date to avoid files getting overwritten with conflict
+ * resolution files
+ */
if (verify_uptodate(index, o))
return -1;
}
@@ -2704,6 +2818,14 @@ int twoway_merge(const struct cache_entry * const *src,
* reject the merge instead.
*/
return merged_entry(newtree, current, o);
+ } else if (S_ISSPARSEDIR(current->ce_mode)) {
+ /*
+ * The sparse directories differ, but we don't know whether that's
+ * because of two different files in the directory being modified
+ * (can be trivially merged) or if there is a real file conflict.
+ * Merge the sparse directory by OID to compare file-by-file.
+ */
+ return merged_sparse_dir(src, 3, o);
} else
return reject_merge(current, o);
}
diff --git a/upload-pack.c b/upload-pack.c
index 8acc987..3a851b3 100644
--- a/upload-pack.c
+++ b/upload-pack.c
@@ -1400,13 +1400,19 @@ static int parse_want(struct packet_writer *writer, const char *line,
const char *arg;
if (skip_prefix(line, "want ", &arg)) {
struct object_id oid;
+ struct commit *commit;
struct object *o;
if (get_oid_hex(arg, &oid))
die("git upload-pack: protocol error, "
"expected to get oid, not '%s'", line);
- o = parse_object(the_repository, &oid);
+ commit = lookup_commit_in_graph(the_repository, &oid);
+ if (commit)
+ o = &commit->object;
+ else
+ o = parse_object(the_repository, &oid);
+
if (!o) {
packet_writer_error(writer,
"upload-pack: not our ref %s",
@@ -1434,7 +1440,7 @@ static int parse_want_ref(struct packet_writer *writer, const char *line,
if (skip_prefix(line, "want-ref ", &refname_nons)) {
struct object_id oid;
struct string_list_item *item;
- struct object *o;
+ struct object *o = NULL;
struct strbuf refname = STRBUF_INIT;
strbuf_addf(&refname, "%s%s", get_git_namespace(), refname_nons);
@@ -1448,7 +1454,15 @@ static int parse_want_ref(struct packet_writer *writer, const char *line,
item = string_list_append(wanted_refs, refname_nons);
item->util = oiddup(&oid);
- o = parse_object_or_die(&oid, refname_nons);
+ if (!starts_with(refname_nons, "refs/tags/")) {
+ struct commit *commit = lookup_commit_in_graph(the_repository, &oid);
+ if (commit)
+ o = &commit->object;
+ }
+
+ if (!o)
+ o = parse_object_or_die(&oid, refname_nons);
+
if (!(o->flags & WANTED)) {
o->flags |= WANTED;
add_object_array(o, NULL, want_obj);
diff --git a/urlmatch.c b/urlmatch.c
index 03ad3f3..b615adc 100644
--- a/urlmatch.c
+++ b/urlmatch.c
@@ -611,3 +611,8 @@ int urlmatch_config_entry(const char *var, const char *value, void *cb)
strbuf_release(&synthkey);
return retval;
}
+
+void urlmatch_config_release(struct urlmatch_config *config)
+{
+ string_list_clear(&config->vars, 1);
+}
diff --git a/urlmatch.h b/urlmatch.h
index 34a3ba6..9f40b00 100644
--- a/urlmatch.h
+++ b/urlmatch.h
@@ -71,5 +71,6 @@ struct urlmatch_config {
}
int urlmatch_config_entry(const char *var, const char *value, void *cb);
+void urlmatch_config_release(struct urlmatch_config *config);
#endif /* URL_MATCH_H */
diff --git a/usage.c b/usage.c
index 9943dd8..b738dd1 100644
--- a/usage.c
+++ b/usage.c
@@ -299,10 +299,7 @@ static NORETURN void BUG_vfl(const char *file, int line, const char *fmt, va_lis
va_copy(params_copy, params);
/* truncation via snprintf is OK here */
- if (file)
- snprintf(prefix, sizeof(prefix), "BUG: %s:%d: ", file, line);
- else
- snprintf(prefix, sizeof(prefix), "BUG: ");
+ snprintf(prefix, sizeof(prefix), "BUG: %s:%d: ", file, line);
vreportf(prefix, fmt, params);
@@ -317,7 +314,6 @@ static NORETURN void BUG_vfl(const char *file, int line, const char *fmt, va_lis
abort();
}
-#ifdef HAVE_VARIADIC_MACROS
NORETURN void BUG_fl(const char *file, int line, const char *fmt, ...)
{
va_list ap;
@@ -325,15 +321,6 @@ NORETURN void BUG_fl(const char *file, int line, const char *fmt, ...)
BUG_vfl(file, line, fmt, ap);
va_end(ap);
}
-#else
-NORETURN void BUG(const char *fmt, ...)
-{
- va_list ap;
- va_start(ap, fmt);
- BUG_vfl(NULL, 0, fmt, ap);
- va_end(ap);
-}
-#endif
#ifdef SUPPRESS_ANNOTATED_LEAKS
void unleak_memory(const void *ptr, size_t len)
diff --git a/userdiff.c b/userdiff.c
index 8578cb0..151d9a5 100644
--- a/userdiff.c
+++ b/userdiff.c
@@ -7,12 +7,24 @@ static struct userdiff_driver *drivers;
static int ndrivers;
static int drivers_alloc;
-#define PATTERNS(name, pattern, word_regex) \
- { name, NULL, -1, { pattern, REG_EXTENDED }, \
- word_regex "|[^[:space:]]|[\xc0-\xff][\x80-\xbf]+" }
-#define IPATTERN(name, pattern, word_regex) \
- { name, NULL, -1, { pattern, REG_EXTENDED | REG_ICASE }, \
- word_regex "|[^[:space:]]|[\xc0-\xff][\x80-\xbf]+" }
+#define PATTERNS(lang, rx, wrx) { \
+ .name = lang, \
+ .binary = -1, \
+ .funcname = { \
+ .pattern = rx, \
+ .cflags = REG_EXTENDED, \
+ }, \
+ .word_regex = wrx "|[^[:space:]]|[\xc0-\xff][\x80-\xbf]+", \
+}
+#define IPATTERN(lang, rx, wrx) { \
+ .name = lang, \
+ .binary = -1, \
+ .funcname = { \
+ .pattern = rx, \
+ .cflags = REG_EXTENDED | REG_ICASE, \
+ }, \
+ .word_regex = wrx "|[^[:space:]]|[\xc0-\xff][\x80-\xbf]+", \
+}
/*
* Built-in drivers for various languages, sorted by their names
@@ -168,6 +180,18 @@ PATTERNS("java",
"|[-+0-9.e]+[fFlL]?|0[xXbB]?[0-9a-fA-F]+[lL]?"
"|[-+*/<>%&^|=!]="
"|--|\\+\\+|<<=?|>>>?=?|&&|\\|\\|"),
+PATTERNS("kotlin",
+ "^[ \t]*(([a-z]+[ \t]+)*(fun|class|interface)[ \t]+.*)$",
+ /* -- */
+ "[a-zA-Z_][a-zA-Z0-9_]*"
+ /* hexadecimal and binary numbers */
+ "|0[xXbB][0-9a-fA-F_]+[lLuU]*"
+ /* integers and floats */
+ "|[0-9][0-9_]*([.][0-9_]*)?([Ee][-+]?[0-9]+)?[fFlLuU]*"
+ /* floating point numbers beginning with decimal point */
+ "|[.][0-9][0-9_]*([Ee][-+]?[0-9]+)?[fFlLuU]?"
+ /* unary and binary operators */
+ "|[-+*/<>%&^|=!]==?|--|\\+\\+|<<=|>>=|&&|\\|\\||->|\\.\\*|!!|[?:.][.:]"),
PATTERNS("markdown",
"^ {0,3}#{1,6}[ \t].*",
/* -- */
@@ -275,17 +299,13 @@ PATTERNS("tex", "^(\\\\((sub)*section|chapter|part)\\*{0,1}\\{.*)$",
#undef IPATTERN
static struct userdiff_driver driver_true = {
- "diff=true",
- NULL,
- 0,
- { NULL, 0 }
+ .name = "diff=true",
+ .binary = 0,
};
static struct userdiff_driver driver_false = {
- "!diff",
- NULL,
- 1,
- { NULL, 0 }
+ .name = "!diff",
+ .binary = 1,
};
struct find_by_namelen_data {
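
Rewriting PATTERNS()/IPATTERN() with designated initializers keeps the driver table valid even if fields of struct userdiff_driver are later added or reordered, and makes each entry self-describing. For reference, the new kotlin entry above expands to roughly:

	{
		.name = "kotlin",
		.binary = -1,
		.funcname = {
			.pattern = "^[ \t]*(([a-z]+[ \t]+)*(fun|class|interface)[ \t]+.*)$",
			.cflags = REG_EXTENDED,
		},
		/* the macro appends the generic "any non-space / UTF-8 byte" tail */
		.word_regex = "[a-zA-Z_][a-zA-Z0-9_]*" /* ... */ "|[^[:space:]]|[\xc0-\xff][\x80-\xbf]+",
	},
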
diff --git a/worktree.c b/worktree.c
index 6f598dc..90fc085 100644
--- a/worktree.c
+++ b/worktree.c
@@ -5,6 +5,7 @@
#include "worktree.h"
#include "dir.h"
#include "wt-status.h"
+#include "config.h"
void free_worktrees(struct worktree **worktrees)
{
@@ -28,13 +29,11 @@ static void add_head_info(struct worktree *wt)
{
int flags;
const char *target;
- int ignore_errno;
target = refs_resolve_ref_unsafe(get_worktree_ref_store(wt),
"HEAD",
0,
- &wt->head_oid, &flags,
- &ignore_errno);
+ &wt->head_oid, &flags);
if (!target)
return;
@@ -416,7 +415,6 @@ const struct worktree *find_shared_symref(struct worktree **worktrees,
const char *symref_target;
struct ref_store *refs;
int flags;
- int ignore_errno;
if (wt->is_bare)
continue;
@@ -434,8 +432,7 @@ const struct worktree *find_shared_symref(struct worktree **worktrees,
refs = get_worktree_ref_store(wt);
symref_target = refs_resolve_ref_unsafe(refs, symref, 0,
- NULL, &flags,
- &ignore_errno);
+ NULL, &flags);
if ((flags & REF_ISSYMREF) &&
symref_target && !strcmp(symref_target, target)) {
existing = wt;
@@ -563,7 +560,6 @@ int other_head_refs(each_ref_fn fn, void *cb_data)
struct worktree *wt = *p;
struct object_id oid;
int flag;
- int ignore_errno;
if (wt->is_current)
continue;
@@ -573,7 +569,7 @@ int other_head_refs(each_ref_fn fn, void *cb_data)
if (refs_resolve_ref_unsafe(get_main_ref_store(the_repository),
refname.buf,
RESOLVE_REF_READING,
- &oid, &flag, &ignore_errno))
+ &oid, &flag))
ret = fn(refname.buf, &oid, flag, cb_data);
if (ret)
break;
@@ -826,3 +822,75 @@ int should_prune_worktree(const char *id, struct strbuf *reason, char **wtpath,
*wtpath = path;
return 0;
}
+
+static int move_config_setting(const char *key, const char *value,
+ const char *from_file, const char *to_file)
+{
+ if (git_config_set_in_file_gently(to_file, key, value))
+ return error(_("unable to set %s in '%s'"), key, to_file);
+ if (git_config_set_in_file_gently(from_file, key, NULL))
+ return error(_("unable to unset %s in '%s'"), key, from_file);
+ return 0;
+}
+
+int init_worktree_config(struct repository *r)
+{
+ int res = 0;
+ int bare = 0;
+ struct config_set cs = { { 0 } };
+ const char *core_worktree;
+ char *common_config_file;
+ char *main_worktree_file;
+
+ /*
+ * If the extension is already enabled, then we can skip the
+ * upgrade process.
+ */
+ if (repository_format_worktree_config)
+ return 0;
+ if ((res = git_config_set_gently("extensions.worktreeConfig", "true")))
+ return error(_("failed to set extensions.worktreeConfig setting"));
+
+ common_config_file = xstrfmt("%s/config", r->commondir);
+ main_worktree_file = xstrfmt("%s/config.worktree", r->commondir);
+
+ git_configset_init(&cs);
+ git_configset_add_file(&cs, common_config_file);
+
+ /*
+ * If core.bare is true in the common config file, then we need to
+ * move it to the main worktree's config file or it will break all
+ * worktrees. If it is false, then leave it in place because it
+ * _could_ be negating a global core.bare=true.
+ */
+ if (!git_configset_get_bool(&cs, "core.bare", &bare) && bare) {
+ if ((res = move_config_setting("core.bare", "true",
+ common_config_file,
+ main_worktree_file)))
+ goto cleanup;
+ }
+ /*
+ * If core.worktree is set, then the main worktree is located
+ * somewhere different than the parent of the common Git dir.
+ * Relocate that value to avoid breaking all worktrees with this
+ * upgrade to worktree config.
+ */
+ if (!git_configset_get_value(&cs, "core.worktree", &core_worktree)) {
+ if ((res = move_config_setting("core.worktree", core_worktree,
+ common_config_file,
+ main_worktree_file)))
+ goto cleanup;
+ }
+
+ /*
+ * Ensure that we use worktree config for the remaining lifetime
+ * of the current process.
+ */
+ repository_format_worktree_config = 1;
+
+cleanup:
+ git_configset_clear(&cs);
+ free(common_config_file);
+ free(main_worktree_file);
+ return res;
+}
diff --git a/worktree.h b/worktree.h
index 9e06fcb..e9e8399 100644
--- a/worktree.h
+++ b/worktree.h
@@ -183,4 +183,25 @@ void strbuf_worktree_ref(const struct worktree *wt,
struct strbuf *sb,
const char *refname);
+/**
+ * Enable worktree config for the first time. This will make the following
+ * adjustments:
+ *
+ * 1. Add extensions.worktreeConfig=true in the common config file.
+ *
+ * 2. If the common config file has a core.worktree value, then that value
+ * is moved to the main worktree's config.worktree file.
+ *
+ * 3. If the common config file has a core.bare enabled, then that value
+ * is moved to the main worktree's config.worktree file.
+ *
+ * If extensions.worktreeConfig is already true, then this method
+ * terminates early without any of the above steps. The existing config
+ * arrangement is assumed to be intentional.
+ *
+ * Returns 0 on success. Reports an error message and returns non-zero
+ * if any of these steps fail.
+ */
+int init_worktree_config(struct repository *r);
+
#endif
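
init_worktree_config() is the one-shot upgrade described in the header comment above; a command that needs to write a setting scoped to a single worktree would call it before touching config.worktree. A hedged caller sketch (the option name and helper are illustrative, not from this patch):

	#include "cache.h"
	#include "config.h"
	#include "worktree.h"

	static int set_worktree_only_setting(struct repository *r, const char *value)
	{
		char *file;
		int res = 0;

		if (init_worktree_config(r))
			return error(_("failed to initialize worktree config"));

		file = repo_git_path(r, "config.worktree");
		if (git_config_set_in_file_gently(file, "example.worktreeOnly", value))
			res = error(_("unable to write '%s'"), file);
		free(file);
		return res;
	}
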
diff --git a/wrapper.c b/wrapper.c
index 36e1211..f512994 100644
--- a/wrapper.c
+++ b/wrapper.c
@@ -4,6 +4,16 @@
#include "cache.h"
#include "config.h"
+static intmax_t count_fsync_writeout_only;
+static intmax_t count_fsync_hardware_flush;
+
+#ifdef HAVE_RTLGENRANDOM
+/* This is required to get access to RtlGenRandom. */
+#define SystemFunction036 NTAPI SystemFunction036
+#include <NTSecAPI.h>
+#undef SystemFunction036
+#endif
+
static int memory_limit_check(size_t size, int gentle)
{
static size_t limit = 0;
@@ -463,8 +473,6 @@ int git_mkstemps_mode(char *pattern, int suffix_len, int mode)
static const int num_letters = ARRAY_SIZE(letters) - 1;
static const char x_pattern[] = "XXXXXX";
static const int num_x = ARRAY_SIZE(x_pattern) - 1;
- uint64_t value;
- struct timeval tv;
char *filename_template;
size_t len;
int fd, count;
@@ -485,12 +493,13 @@ int git_mkstemps_mode(char *pattern, int suffix_len, int mode)
* Replace pattern's XXXXXX characters with randomness.
* Try TMP_MAX different filenames.
*/
- gettimeofday(&tv, NULL);
- value = ((uint64_t)tv.tv_usec << 16) ^ tv.tv_sec ^ getpid();
filename_template = &pattern[len - num_x - suffix_len];
for (count = 0; count < TMP_MAX; ++count) {
- uint64_t v = value;
int i;
+ uint64_t v;
+ if (csprng_bytes(&v, sizeof(v)) < 0)
+ return error_errno("unable to get random bytes for temporary file");
+
/* Fill in the random bits. */
for (i = 0; i < num_x; i++) {
filename_template[i] = letters[v % num_letters];
@@ -506,12 +515,6 @@ int git_mkstemps_mode(char *pattern, int suffix_len, int mode)
*/
if (errno != EEXIST)
break;
- /*
- * This is a random value. It is only necessary that
- * the next TMP_MAX values generated by adding 7777 to
- * VALUE are different with (module 2^32).
- */
- value += 7777;
}
/* We return the null string if we can't find a unique file name. */
pattern[0] = '\0';
@@ -546,6 +549,79 @@ int xmkstemp_mode(char *filename_template, int mode)
return fd;
}
+/*
+ * Some platforms return EINTR from fsync. Since fsync is invoked in some
+ * cases by a wrapper that dies on failure, do not expose EINTR to callers.
+ */
+static int fsync_loop(int fd)
+{
+ int err;
+
+ do {
+ err = fsync(fd);
+ } while (err < 0 && errno == EINTR);
+ return err;
+}
+
+int git_fsync(int fd, enum fsync_action action)
+{
+ switch (action) {
+ case FSYNC_WRITEOUT_ONLY:
+ count_fsync_writeout_only += 1;
+
+#ifdef __APPLE__
+ /*
+ * On macOS, fsync just causes filesystem cache writeback but
+ * does not flush hardware caches.
+ */
+ return fsync_loop(fd);
+#endif
+
+#ifdef HAVE_SYNC_FILE_RANGE
+ /*
+ * On linux 2.6.17 and above, sync_file_range is the way to
+ * issue a writeback without a hardware flush. An offset of
+ * 0 and size of 0 indicates writeout of the entire file and the
+ * wait flags ensure that all dirty data is written to the disk
+ * (potentially in a disk-side cache) before we continue.
+ */
+
+ return sync_file_range(fd, 0, 0, SYNC_FILE_RANGE_WAIT_BEFORE |
+ SYNC_FILE_RANGE_WRITE |
+ SYNC_FILE_RANGE_WAIT_AFTER);
+#endif
+
+#ifdef fsync_no_flush
+ return fsync_no_flush(fd);
+#endif
+
+ errno = ENOSYS;
+ return -1;
+
+ case FSYNC_HARDWARE_FLUSH:
+ count_fsync_hardware_flush += 1;
+
+ /*
+ * On macOS, a special fcntl is required to really flush the
+ * caches within the storage controller. As of this writing,
+ * this is a very expensive operation on Apple SSDs.
+ */
+#ifdef __APPLE__
+ return fcntl(fd, F_FULLFSYNC);
+#else
+ return fsync_loop(fd);
+#endif
+ default:
+ BUG("unexpected git_fsync(%d) call", action);
+ }
+}
+
+void trace_git_fsync_stats(void)
+{
+ trace2_data_intmax("fsync", the_repository, "fsync/writeout-only", count_fsync_writeout_only);
+ trace2_data_intmax("fsync", the_repository, "fsync/hardware-flush", count_fsync_hardware_flush);
+}
+
static int warn_if_unremovable(const char *op, const char *file, int rc)
{
int err;
@@ -702,3 +778,69 @@ int open_nofollow(const char *path, int flags)
return open(path, flags);
#endif
}
+
+int csprng_bytes(void *buf, size_t len)
+{
+#if defined(HAVE_ARC4RANDOM) || defined(HAVE_ARC4RANDOM_LIBBSD)
+ /* This function never returns an error. */
+ arc4random_buf(buf, len);
+ return 0;
+#elif defined(HAVE_GETRANDOM)
+ ssize_t res;
+ char *p = buf;
+ while (len) {
+ res = getrandom(p, len, 0);
+ if (res < 0)
+ return -1;
+ len -= res;
+ p += res;
+ }
+ return 0;
+#elif defined(HAVE_GETENTROPY)
+ int res;
+ char *p = buf;
+ while (len) {
+ /* getentropy has a maximum size of 256 bytes. */
+ size_t chunk = len < 256 ? len : 256;
+ res = getentropy(p, chunk);
+ if (res < 0)
+ return -1;
+ len -= chunk;
+ p += chunk;
+ }
+ return 0;
+#elif defined(HAVE_RTLGENRANDOM)
+ if (!RtlGenRandom(buf, len))
+ return -1;
+ return 0;
+#elif defined(HAVE_OPENSSL_CSPRNG)
+ int res = RAND_bytes(buf, len);
+ if (res == 1)
+ return 0;
+ if (res == -1)
+ errno = ENOTSUP;
+ else
+ errno = EIO;
+ return -1;
+#else
+ ssize_t res;
+ char *p = buf;
+ int fd, err;
+ fd = open("/dev/urandom", O_RDONLY);
+ if (fd < 0)
+ return -1;
+ while (len) {
+ res = xread(fd, p, len);
+ if (res < 0) {
+ err = errno;
+ close(fd);
+ errno = err;
+ return -1;
+ }
+ len -= res;
+ p += res;
+ }
+ close(fd);
+ return 0;
+#endif
+}
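
csprng_bytes() wraps whichever platform CSPRNG the build selected (arc4random, getrandom, getentropy, RtlGenRandom, OpenSSL, or /dev/urandom), and git_mkstemps_mode() above now draws its temp-file randomness from it instead of a time/pid mix. A hedged usage sketch turning random bytes into a hex token (the helper name and strbuf handling are illustrative):

	#include "cache.h"
	#include "strbuf.h"

	static int random_hex_token(struct strbuf *out, size_t nbytes)
	{
		unsigned char *buf = xmalloc(nbytes);
		size_t i;

		if (csprng_bytes(buf, nbytes) < 0) {
			free(buf);
			return error_errno(_("unable to get random bytes"));
		}
		for (i = 0; i < nbytes; i++)
			strbuf_addf(out, "%02x", buf[i]);
		free(buf);
		return 0;
	}
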
diff --git a/write-or-die.c b/write-or-die.c
index a3d5784..c4fd91b 100644
--- a/write-or-die.c
+++ b/write-or-die.c
@@ -56,16 +56,37 @@ void fprintf_or_die(FILE *f, const char *fmt, ...)
}
}
-void fsync_or_die(int fd, const char *msg)
+static int maybe_fsync(int fd)
{
if (use_fsync < 0)
use_fsync = git_env_bool("GIT_TEST_FSYNC", 1);
if (!use_fsync)
- return;
- while (fsync(fd) < 0) {
- if (errno != EINTR)
- die_errno("fsync error on '%s'", msg);
- }
+ return 0;
+
+ if (fsync_method == FSYNC_METHOD_WRITEOUT_ONLY &&
+ git_fsync(fd, FSYNC_WRITEOUT_ONLY) >= 0)
+ return 0;
+
+ return git_fsync(fd, FSYNC_HARDWARE_FLUSH);
+}
+
+void fsync_or_die(int fd, const char *msg)
+{
+ if (maybe_fsync(fd) < 0)
+ die_errno("fsync error on '%s'", msg);
+}
+
+int fsync_component(enum fsync_component component, int fd)
+{
+ if (fsync_components & component)
+ return maybe_fsync(fd);
+ return 0;
+}
+
+void fsync_component_or_die(enum fsync_component component, int fd, const char *msg)
+{
+ if (fsync_components & component)
+ fsync_or_die(fd, msg);
}
void write_or_die(int fd, const void *buf, size_t count)
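
fsync_component() and fsync_component_or_die() let a call site request a flush only when its component is enabled in fsync_components, while maybe_fsync() honors a writeout-only fsync_method by trying git_fsync(FSYNC_WRITEOUT_ONLY) before falling back to a hardware flush. A hedged call-site sketch; the component constant is assumed to be one of the enum fsync_component flags and the surrounding helper is illustrative:

	#include "cache.h"

	static void close_loose_object_file(int fd, const char *path)
	{
		/* No-op unless loose-object syncing is enabled; dies on failure. */
		fsync_component_or_die(FSYNC_COMPONENT_LOOSE_OBJECT, fd, path);
		if (close(fd))
			die_errno(_("unable to close '%s'"), path);
	}
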
diff --git a/wt-status.c b/wt-status.c
index 335e723..d33f927 100644
--- a/wt-status.c
+++ b/wt-status.c
@@ -651,6 +651,15 @@ static void wt_status_collect_changes_index(struct wt_status *s)
rev.diffopt.detect_rename = s->detect_rename >= 0 ? s->detect_rename : rev.diffopt.detect_rename;
rev.diffopt.rename_limit = s->rename_limit >= 0 ? s->rename_limit : rev.diffopt.rename_limit;
rev.diffopt.rename_score = s->rename_score >= 0 ? s->rename_score : rev.diffopt.rename_score;
+
+ /*
+ * The `recursive` option must be enabled to allow the diff to recurse
+ * into subdirectories of sparse directory index entries. If it is not
+ * enabled, a subdirectory containing file(s) with changes is reported
+ * as "modified", rather than the modified files themselves.
+ */
+ rev.diffopt.flags.recursive = 1;
+
copy_pathspec(&rev.prune_data, &s->pathspec);
run_diff_index(&rev, 1);
object_array_clear(&rev.pending);
@@ -1374,10 +1383,10 @@ static void show_rebase_information(struct wt_status *s,
status_printf_ln(s, color, _("No commands done."));
else {
status_printf_ln(s, color,
- Q_("Last command done (%d command done):",
- "Last commands done (%d commands done):",
+ Q_("Last command done (%"PRIuMAX" command done):",
+ "Last commands done (%"PRIuMAX" commands done):",
have_done.nr),
- have_done.nr);
+ (uintmax_t)have_done.nr);
for (i = (have_done.nr > nr_lines_to_show)
? have_done.nr - nr_lines_to_show : 0;
i < have_done.nr;
@@ -1393,10 +1402,10 @@ static void show_rebase_information(struct wt_status *s,
_("No commands remaining."));
else {
status_printf_ln(s, color,
- Q_("Next command to do (%d remaining command):",
- "Next commands to do (%d remaining commands):",
+ Q_("Next command to do (%"PRIuMAX" remaining command):",
+ "Next commands to do (%"PRIuMAX" remaining commands):",
yet_to_do.nr),
- yet_to_do.nr);
+ (uintmax_t)yet_to_do.nr);
for (i = 0; i < nr_lines_to_show && i < yet_to_do.nr; i++)
status_printf_ln(s, color, " %s", yet_to_do.items[i].string);
if (s->hints)
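
The Q_() hunks above print have_done.nr and yet_to_do.nr, which are size_t-style counters, so %d was the wrong conversion on 64-bit platforms; the fix is the usual PRIuMAX format plus an explicit uintmax_t cast. A self-contained illustration of that idiom (the function and message are not from this patch):

	#include <inttypes.h>
	#include <stdio.h>

	static void report_count(size_t n)
	{
		/* The cast keeps the argument in sync with the %PRIuMAX conversion. */
		fprintf(stderr, "processed %" PRIuMAX " entries\n", (uintmax_t)n);
	}
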
diff --git a/xdiff/xdiffi.c b/xdiff/xdiffi.c
index 69689fa..758410c 100644
--- a/xdiff/xdiffi.c
+++ b/xdiff/xdiffi.c
@@ -315,16 +315,19 @@ int xdl_do_diff(mmfile_t *mf1, mmfile_t *mf2, xpparam_t const *xpp,
long *kvd, *kvdf, *kvdb;
xdalgoenv_t xenv;
diffdata_t dd1, dd2;
+ int res;
- if (XDF_DIFF_ALG(xpp->flags) == XDF_PATIENCE_DIFF)
- return xdl_do_patience_diff(mf1, mf2, xpp, xe);
-
- if (XDF_DIFF_ALG(xpp->flags) == XDF_HISTOGRAM_DIFF)
- return xdl_do_histogram_diff(mf1, mf2, xpp, xe);
+ if (xdl_prepare_env(mf1, mf2, xpp, xe) < 0)
+ return -1;
- if (xdl_prepare_env(mf1, mf2, xpp, xe) < 0) {
+ if (XDF_DIFF_ALG(xpp->flags) == XDF_PATIENCE_DIFF) {
+ res = xdl_do_patience_diff(mf1, mf2, xpp, xe);
+ goto out;
+ }
- return -1;
+ if (XDF_DIFF_ALG(xpp->flags) == XDF_HISTOGRAM_DIFF) {
+ res = xdl_do_histogram_diff(mf1, mf2, xpp, xe);
+ goto out;
}
/*
@@ -359,17 +362,15 @@ int xdl_do_diff(mmfile_t *mf1, mmfile_t *mf2, xpparam_t const *xpp,
dd2.rchg = xe->xdf2.rchg;
dd2.rindex = xe->xdf2.rindex;
- if (xdl_recs_cmp(&dd1, 0, dd1.nrec, &dd2, 0, dd2.nrec,
- kvdf, kvdb, (xpp->flags & XDF_NEED_MINIMAL) != 0, &xenv) < 0) {
-
- xdl_free(kvd);
- xdl_free_env(xe);
- return -1;
- }
-
+ res = xdl_recs_cmp(&dd1, 0, dd1.nrec, &dd2, 0, dd2.nrec,
+ kvdf, kvdb, (xpp->flags & XDF_NEED_MINIMAL) != 0,
+ &xenv);
xdl_free(kvd);
+ out:
+ if (res < 0)
+ xdl_free_env(xe);
- return 0;
+ return res;
}
diff --git a/xdiff/xhistogram.c b/xdiff/xhistogram.c
index 8079474..01decff 100644
--- a/xdiff/xhistogram.c
+++ b/xdiff/xhistogram.c
@@ -372,9 +372,6 @@ out:
int xdl_do_histogram_diff(mmfile_t *file1, mmfile_t *file2,
xpparam_t const *xpp, xdfenv_t *env)
{
- if (xdl_prepare_env(file1, file2, xpp, env) < 0)
- return -1;
-
return histogram_diff(xpp, env,
env->xdf1.dstart + 1, env->xdf1.dend - env->xdf1.dstart + 1,
env->xdf2.dstart + 1, env->xdf2.dend - env->xdf2.dstart + 1);
diff --git a/xdiff/xmerge.c b/xdiff/xmerge.c
index fff0b59..af40c88 100644
--- a/xdiff/xmerge.c
+++ b/xdiff/xmerge.c
@@ -684,42 +684,42 @@ static int xdl_do_merge(xdfenv_t *xe1, xdchange_t *xscr1,
int xdl_merge(mmfile_t *orig, mmfile_t *mf1, mmfile_t *mf2,
xmparam_t const *xmp, mmbuffer_t *result)
{
- xdchange_t *xscr1, *xscr2;
+ xdchange_t *xscr1 = NULL, *xscr2 = NULL;
xdfenv_t xe1, xe2;
- int status;
+ int status = -1;
xpparam_t const *xpp = &xmp->xpp;
result->ptr = NULL;
result->size = 0;
- if (xdl_do_diff(orig, mf1, xpp, &xe1) < 0) {
+ if (xdl_do_diff(orig, mf1, xpp, &xe1) < 0)
return -1;
- }
- if (xdl_do_diff(orig, mf2, xpp, &xe2) < 0) {
- xdl_free_env(&xe1);
- return -1;
- }
+
+ if (xdl_do_diff(orig, mf2, xpp, &xe2) < 0)
+ goto free_xe1; /* avoid double free of xe2 */
+
if (xdl_change_compact(&xe1.xdf1, &xe1.xdf2, xpp->flags) < 0 ||
xdl_change_compact(&xe1.xdf2, &xe1.xdf1, xpp->flags) < 0 ||
- xdl_build_script(&xe1, &xscr1) < 0) {
- xdl_free_env(&xe1);
- return -1;
- }
+ xdl_build_script(&xe1, &xscr1) < 0)
+ goto out;
+
if (xdl_change_compact(&xe2.xdf1, &xe2.xdf2, xpp->flags) < 0 ||
xdl_change_compact(&xe2.xdf2, &xe2.xdf1, xpp->flags) < 0 ||
- xdl_build_script(&xe2, &xscr2) < 0) {
- xdl_free_script(xscr1);
- xdl_free_env(&xe1);
- xdl_free_env(&xe2);
- return -1;
- }
- status = 0;
+ xdl_build_script(&xe2, &xscr2) < 0)
+ goto out;
+
if (!xscr1) {
result->ptr = xdl_malloc(mf2->size);
+ if (!result->ptr)
+ goto out;
+ status = 0;
memcpy(result->ptr, mf2->ptr, mf2->size);
result->size = mf2->size;
} else if (!xscr2) {
result->ptr = xdl_malloc(mf1->size);
+ if (!result->ptr)
+ goto out;
+ status = 0;
memcpy(result->ptr, mf1->ptr, mf1->size);
result->size = mf1->size;
} else {
@@ -727,11 +727,13 @@ int xdl_merge(mmfile_t *orig, mmfile_t *mf1, mmfile_t *mf2,
&xe2, xscr2,
xmp, result);
}
+ out:
xdl_free_script(xscr1);
xdl_free_script(xscr2);
- xdl_free_env(&xe1);
xdl_free_env(&xe2);
+ free_xe1:
+ xdl_free_env(&xe1);
return status;
}
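
xdl_merge() now NULL-initializes both change scripts, funnels every failure through one exit path, and keeps a separate free_xe1 label because a failure in the second xdl_do_diff() must not free an xe2 that was never set up; the unconditional frees at out: are safe because xdl_free_script(NULL) is a no-op. A self-contained illustration of that cleanup shape using plain malloc/free (not git code):

	#include <stdlib.h>

	static int merge_like_flow(size_t n)
	{
		char *env1, *env2 = NULL, *script = NULL;
		int status = -1;

		if (!(env1 = malloc(n)))
			return -1;		/* nothing acquired yet */
		if (!(env2 = malloc(n)))
			goto free_env1;		/* env2 was never acquired */
		if (!(script = malloc(n)))
			goto out;

		status = 0;			/* work with env1, env2, script here */
	out:
		free(script);			/* free(NULL) is fine, like xdl_free_script(NULL) */
		free(env2);
	free_env1:
		free(env1);
		return status;
	}
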
diff --git a/xdiff/xpatience.c b/xdiff/xpatience.c
index c5d48e8..1a21c6a 100644
--- a/xdiff/xpatience.c
+++ b/xdiff/xpatience.c
@@ -198,7 +198,7 @@ static int binary_search(struct entry **sequence, int longest,
* item per sequence length: the sequence with the smallest last
* element (in terms of line2).
*/
-static struct entry *find_longest_common_sequence(struct hashmap *map)
+static int find_longest_common_sequence(struct hashmap *map, struct entry **res)
{
struct entry **sequence = xdl_malloc(map->nr * sizeof(struct entry *));
int longest = 0, i;
@@ -211,6 +211,9 @@ static struct entry *find_longest_common_sequence(struct hashmap *map)
*/
int anchor_i = -1;
+ if (!sequence)
+ return -1;
+
for (entry = map->first; entry; entry = entry->next) {
if (!entry->line2 || entry->line2 == NON_UNIQUE)
continue;
@@ -230,8 +233,9 @@ static struct entry *find_longest_common_sequence(struct hashmap *map)
/* No common unique lines were found */
if (!longest) {
+ *res = NULL;
xdl_free(sequence);
- return NULL;
+ return 0;
}
/* Iterate starting at the last element, adjusting the "next" members */
@@ -241,8 +245,9 @@ static struct entry *find_longest_common_sequence(struct hashmap *map)
entry->previous->next = entry;
entry = entry->previous;
}
+ *res = entry;
xdl_free(sequence);
- return entry;
+ return 0;
}
static int match(struct hashmap *map, int line1, int line2)
@@ -358,14 +363,16 @@ static int patience_diff(mmfile_t *file1, mmfile_t *file2,
return 0;
}
- first = find_longest_common_sequence(&map);
+ result = find_longest_common_sequence(&map, &first);
+ if (result)
+ goto out;
if (first)
result = walk_common_sequence(&map, first,
line1, count1, line2, count2);
else
result = fall_back_to_classic_diff(&map,
line1, count1, line2, count2);
-
+ out:
xdl_free(map.entries);
return result;
}
@@ -373,10 +380,6 @@ static int patience_diff(mmfile_t *file1, mmfile_t *file2,
int xdl_do_patience_diff(mmfile_t *file1, mmfile_t *file2,
xpparam_t const *xpp, xdfenv_t *env)
{
- if (xdl_prepare_env(file1, file2, xpp, env) < 0)
- return -1;
-
- /* environment is cleaned up in xdl_diff() */
return patience_diff(file1, file2, xpp, env,
1, env->xdf1.nrec, 1, env->xdf2.nrec);
}
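
find_longest_common_sequence() previously conflated "no common unique lines" with failure by returning NULL in both cases; it now reports allocation failure through its int return value and hands the (possibly NULL) sequence back via an out-parameter. Putting the hunks above together, the tail of patience_diff() now reads, in effect:

	/* Distilled from the patched patience_diff(); types are local to xpatience.c. */
	result = find_longest_common_sequence(&map, &first);
	if (result)
		goto out;		/* xdl_malloc() failed inside the helper */
	if (first)
		result = walk_common_sequence(&map, first,
					      line1, count1, line2, count2);
	else
		result = fall_back_to_classic_diff(&map,
						   line1, count1, line2, count2);
 out:
	xdl_free(map.entries);
	return result;
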