-rw-r--r--.cirrus.yml2
-rw-r--r--.github/workflows/main.yml20
-rw-r--r--.gitignore1
-rw-r--r--Documentation/CodingGuidelines16
-rw-r--r--Documentation/Makefile10
-rw-r--r--Documentation/MyFirstContribution.txt155
-rw-r--r--Documentation/RelNotes/2.36.1.txt33
-rw-r--r--Documentation/RelNotes/2.36.2.txt50
-rw-r--r--Documentation/RelNotes/2.37.0.txt337
-rw-r--r--Documentation/SubmittingPatches5
-rw-r--r--Documentation/ToolsForGit.txt51
-rw-r--r--Documentation/config/add.txt6
-rw-r--r--Documentation/config/branch.txt9
-rw-r--r--Documentation/config/core.txt14
-rw-r--r--Documentation/config/gc.txt21
-rw-r--r--Documentation/config/gpg.txt9
-rw-r--r--Documentation/config/http.txt18
-rw-r--r--Documentation/config/mergetool.txt9
-rw-r--r--Documentation/config/push.txt11
-rw-r--r--Documentation/config/repack.txt9
-rw-r--r--Documentation/config/safe.txt18
-rw-r--r--Documentation/config/transfer.txt38
-rw-r--r--Documentation/git-archive.txt31
-rw-r--r--Documentation/git-branch.txt18
-rw-r--r--Documentation/git-config.txt2
-rw-r--r--Documentation/git-gc.txt4
-rw-r--r--Documentation/git-mergetool.txt8
-rw-r--r--Documentation/git-p4.txt37
-rw-r--r--Documentation/git-pack-objects.txt30
-rw-r--r--Documentation/git-read-tree.txt11
-rw-r--r--Documentation/git-rebase.txt5
-rw-r--r--Documentation/git-remote.txt2
-rw-r--r--Documentation/git-repack.txt11
-rw-r--r--Documentation/git-sparse-checkout.txt317
-rw-r--r--Documentation/git.txt4
-rw-r--r--Documentation/mergetools/vimdiff.txt194
-rw-r--r--Documentation/rev-list-options.txt5
-rw-r--r--Documentation/technical/api-error-handling.txt24
-rw-r--r--Documentation/technical/api-trace2.txt48
-rw-r--r--Documentation/technical/cruft-packs.txt123
-rw-r--r--Documentation/technical/pack-format.txt19
-rwxr-xr-xGIT-VERSION-GEN2
-rw-r--r--Makefile187
l---------RelNotes2
-rw-r--r--add-interactive.c3
-rw-r--r--add-patch.c2
-rw-r--r--alloc.c21
-rw-r--r--alloc.h1
-rw-r--r--apply.c6
-rw-r--r--archive.c91
-rw-r--r--bisect.c20
-rw-r--r--bisect.h9
-rw-r--r--blame.c6
-rw-r--r--branch.c33
-rw-r--r--branch.h1
-rw-r--r--builtin/add.c31
-rw-r--r--builtin/am.c3
-rw-r--r--builtin/apply.c1
-rw-r--r--builtin/bisect--helper.c73
-rw-r--r--builtin/blame.c10
-rw-r--r--builtin/checkout.c5
-rw-r--r--builtin/clone.c6
-rw-r--r--builtin/commit.c17
-rw-r--r--builtin/describe.c2
-rw-r--r--builtin/diff-files.c8
-rw-r--r--builtin/diff-index.c5
-rw-r--r--builtin/diff-tree.c3
-rw-r--r--builtin/diff.c4
-rw-r--r--builtin/difftool.c6
-rw-r--r--builtin/fast-export.c2
-rw-r--r--builtin/fast-import.c2
-rw-r--r--builtin/fetch.c17
-rw-r--r--builtin/fsmonitor--daemon.c116
-rw-r--r--builtin/gc.c18
-rw-r--r--builtin/index-pack.c8
-rw-r--r--builtin/log.c75
-rw-r--r--builtin/ls-remote.c2
-rw-r--r--builtin/mailsplit.c2
-rw-r--r--builtin/merge.c2
-rw-r--r--builtin/multi-pack-index.c45
-rw-r--r--builtin/name-rev.c23
-rw-r--r--builtin/pack-objects.c351
-rw-r--r--builtin/pack-redundant.c10
-rw-r--r--builtin/prune.c1
-rw-r--r--builtin/pull.c10
-rw-r--r--builtin/push.c64
-rw-r--r--builtin/rebase.c75
-rw-r--r--builtin/receive-pack.c53
-rw-r--r--builtin/reflog.c1
-rw-r--r--builtin/remote.c18
-rw-r--r--builtin/repack.c234
-rw-r--r--builtin/replace.c2
-rw-r--r--builtin/rev-list.c25
-rw-r--r--builtin/rev-parse.c5
-rw-r--r--builtin/shortlog.c8
-rw-r--r--builtin/show-branch.c4
-rw-r--r--builtin/show-ref.c17
-rw-r--r--builtin/sparse-checkout.c10
-rw-r--r--builtin/stash.c164
-rw-r--r--builtin/submodule--helper.c203
-rw-r--r--builtin/tag.c4
-rw-r--r--builtin/unpack-objects.c3
-rw-r--r--builtin/update-index.c36
-rw-r--r--builtin/worktree.c8
-rw-r--r--bulk-checkin.c119
-rw-r--r--bulk-checkin.h27
-rw-r--r--bundle.c20
-rw-r--r--bundle.h2
-rw-r--r--cache-tree.c11
-rw-r--r--cache.h47
-rw-r--r--chunk-format.c12
-rw-r--r--chunk-format.h3
-rwxr-xr-xci/install-dependencies.sh26
-rwxr-xr-xci/lib.sh92
-rwxr-xr-xci/make-test-artifacts.sh2
-rwxr-xr-xci/run-build-and-tests.sh16
-rwxr-xr-xci/run-static-analysis.sh2
-rwxr-xr-xci/run-test-slice.sh5
-rw-r--r--combine-diff.c4
-rw-r--r--commit-graph.c37
-rw-r--r--commit.c37
-rw-r--r--common-main.c28
-rw-r--r--compat/fsmonitor/fsm-health-darwin.c24
-rw-r--r--compat/fsmonitor/fsm-health-win32.c278
-rw-r--r--compat/fsmonitor/fsm-health.h47
-rw-r--r--compat/fsmonitor/fsm-listen-darwin.c124
-rw-r--r--compat/fsmonitor/fsm-listen-win32.c436
-rw-r--r--compat/fsmonitor/fsm-listen.h2
-rw-r--r--compat/fsmonitor/fsm-settings-darwin.c89
-rw-r--r--compat/fsmonitor/fsm-settings-win32.c137
-rw-r--r--compat/mingw.c8
-rw-r--r--compat/mingw.h3
-rw-r--r--compat/mkdir.c2
-rw-r--r--compat/mmap.c2
-rw-r--r--compat/nedmalloc/nedmalloc.c1
-rw-r--r--compat/win32/path-utils.h6
-rw-r--r--compat/win32/syslog.c2
-rw-r--r--config.c15
-rw-r--r--config.mak.dev1
-rw-r--r--config.mak.uname5
-rw-r--r--configure.ac3
-rw-r--r--connect.c42
-rw-r--r--connected.c2
-rw-r--r--contrib/buildsystems/CMakeLists.txt30
-rw-r--r--contrib/coccinelle/equals-null.cocci30
-rw-r--r--contrib/coccinelle/free.cocci27
-rw-r--r--contrib/coccinelle/object_id.cocci12
-rw-r--r--contrib/coccinelle/the_repository.pending.cocci15
-rw-r--r--contrib/coccinelle/xstrdup_or_null.cocci8
-rw-r--r--contrib/completion/git-prompt.sh32
-rw-r--r--contrib/scalar/scalar.c292
-rw-r--r--contrib/scalar/scalar.txt12
-rwxr-xr-xcontrib/scalar/t/t9099-scalar.sh27
-rw-r--r--contrib/vscode/README.md6
-rwxr-xr-xcontrib/vscode/init.sh1
-rw-r--r--convert.c12
-rw-r--r--daemon.c16
-rwxr-xr-xdetect-compiler2
-rw-r--r--diff-lib.c8
-rw-r--r--diff.c8
-rw-r--r--dir.c128
-rw-r--r--dir.h63
-rw-r--r--editor.c2
-rw-r--r--environment.c2
-rw-r--r--ewah/bitmap.c7
-rw-r--r--ewah/ewah_bitmap.c2
-rw-r--r--ewah/ewok.h1
-rw-r--r--fetch-pack.c64
-rw-r--r--fmt-merge-msg.c1
-rw-r--r--fsck.c23
-rw-r--r--fsmonitor--daemon.h11
-rw-r--r--fsmonitor-settings.c171
-rw-r--r--fsmonitor-settings.h33
-rw-r--r--fsmonitor.c73
-rw-r--r--fsmonitor.h11
-rw-r--r--git-compat-util.h82
-rw-r--r--git-mergetool--lib.sh10
-rwxr-xr-xgit-p4.py1074
-rw-r--r--git.c13
-rwxr-xr-xgitk-git/gitk17
-rwxr-xr-xgitweb/gitweb.perl5
-rw-r--r--gpg-interface.c2
-rw-r--r--hook.c3
-rw-r--r--http-backend.c4
-rw-r--r--http-fetch.c2
-rw-r--r--http-push.c15
-rw-r--r--http-walker.c14
-rw-r--r--http.c96
-rw-r--r--http.h9
-rw-r--r--kwset.c4
-rw-r--r--ll-merge.c2
-rw-r--r--log-tree.c3
-rw-r--r--mailinfo.c4
-rw-r--r--mailmap.c14
-rw-r--r--merge-ort.c5
-rw-r--r--merge-recursive.c17
-rw-r--r--mergetools/araxis8
-rw-r--r--mergetools/bc8
-rw-r--r--mergetools/codecompare8
-rw-r--r--mergetools/deltawalker8
-rw-r--r--mergetools/diffmerge8
-rw-r--r--mergetools/diffuse8
-rw-r--r--mergetools/ecmerge8
-rw-r--r--mergetools/emerge8
-rw-r--r--mergetools/examdiff8
-rw-r--r--mergetools/guiffy8
-rw-r--r--mergetools/kdiff38
-rw-r--r--mergetools/kompare8
-rw-r--r--mergetools/meld8
-rw-r--r--mergetools/opendiff8
-rw-r--r--mergetools/p4merge8
-rw-r--r--mergetools/smerge8
-rw-r--r--mergetools/tkdiff8
-rw-r--r--mergetools/tortoisemerge8
-rw-r--r--mergetools/vimdiff607
-rw-r--r--mergetools/winmerge8
-rw-r--r--mergetools/xxdiff8
-rw-r--r--midx.c36
-rw-r--r--object-file.c17
-rw-r--r--object-name.c25
-rw-r--r--object-store.h16
-rw-r--r--pack-bitmap-write.c1
-rw-r--r--pack-bitmap.c32
-rw-r--r--pack-mtimes.c130
-rw-r--r--pack-mtimes.h26
-rw-r--r--pack-objects.c6
-rw-r--r--pack-objects.h25
-rw-r--r--pack-write.c88
-rw-r--r--pack.h4
-rw-r--r--packfile.c21
-rw-r--r--pager.c4
-rw-r--r--parse-options.c53
-rw-r--r--path.c4
-rw-r--r--pathspec.c2
-rw-r--r--po/.gitignore2
-rw-r--r--po/README.md232
-rw-r--r--po/TEAMS6
-rw-r--r--po/bg.po22080
-rw-r--r--po/ca.po21698
-rw-r--r--po/de.po21069
-rw-r--r--po/es.po22660
-rw-r--r--po/fr.po21655
-rw-r--r--po/git.pot25151
-rw-r--r--po/id.po25663
-rw-r--r--po/ru.po23312
-rw-r--r--po/sv.po23435
-rw-r--r--po/tr.po20641
-rw-r--r--po/vi.po20792
-rw-r--r--po/zh_CN.po25750
-rw-r--r--po/zh_TW.po26951
-rw-r--r--prio-queue.c2
-rw-r--r--promisor-remote.c4
-rw-r--r--range-diff.c4
-rw-r--r--reachable.c58
-rw-r--r--reachable.h9
-rw-r--r--read-cache.c32
-rw-r--r--ref-filter.c7
-rw-r--r--reflog-walk.c24
-rw-r--r--reflog-walk.h1
-rw-r--r--refs.c6
-rw-r--r--refs/ref-cache.c2
-rw-r--r--reftable/blocksource.c9
-rw-r--r--reftable/stack_test.c2
-rw-r--r--reftable/tree.c4
-rw-r--r--reftable/writer.c12
-rw-r--r--remote.c171
-rw-r--r--remote.h32
-rw-r--r--rerere.c2
-rw-r--r--revision.c84
-rw-r--r--revision.h74
-rw-r--r--run-command.c92
-rw-r--r--run-command.h45
-rw-r--r--sequencer.c45
-rw-r--r--serve.c1
-rw-r--r--setup.c13
-rw-r--r--sh-i18n--envsubst.c2
-rw-r--r--shallow.c10
-rw-r--r--shared.mak2
-rw-r--r--sparse-index.c134
-rw-r--r--sparse-index.h14
-rw-r--r--submodule-config.c16
-rw-r--r--submodule-config.h2
-rw-r--r--submodule.c49
-rw-r--r--submodule.h6
-rw-r--r--t/README2
-rw-r--r--t/annotate-tests.sh40
-rw-r--r--t/helper/test-fast-rebase.c23
-rw-r--r--t/helper/test-fsmonitor-client.c106
-rw-r--r--t/helper/test-hexdump.c30
-rw-r--r--t/helper/test-pack-mtimes.c56
-rw-r--r--t/helper/test-revision-walking.c1
-rw-r--r--t/helper/test-run-command.c24
-rw-r--r--t/helper/test-tool.c2
-rw-r--r--t/helper/test-tool.h2
-rw-r--r--t/helper/test-trace2.c29
-rw-r--r--t/lib-git-p4.sh3
-rw-r--r--t/lib-git-svn.sh4
-rw-r--r--t/lib-httpd.sh2
-rw-r--r--t/lib-sudo.sh15
-rw-r--r--t/lib-t3100.sh10
-rwxr-xr-xt/lib-unicode-nfc-nfd.sh162
-rw-r--r--t/lib-unique-files.sh34
-rwxr-xr-xt/perf/p0008-odb-fsync.sh82
-rwxr-xr-xt/perf/p2000-sparse-operations.sh3
-rwxr-xr-xt/perf/p4220-log-grep-engines.sh3
-rwxr-xr-xt/perf/p4221-log-grep-engines-fixed.sh3
-rwxr-xr-xt/perf/p5302-pack-index.sh15
-rwxr-xr-xt/perf/p7519-fsmonitor.sh18
-rwxr-xr-xt/perf/p7527-builtin-fsmonitor.sh257
-rwxr-xr-xt/perf/p7820-grep-engines.sh6
-rw-r--r--t/perf/perf-lib.sh65
-rwxr-xr-xt/t0012-help.sh2
-rwxr-xr-xt/t0027-auto-crlf.sh8
-rwxr-xr-xt/t0033-safe-directory.sh30
-rwxr-xr-xt/t0034-root-safe-directory.sh93
-rwxr-xr-xt/t0056-git-C.sh1
-rwxr-xr-xt/t0060-path-utils.sh4
-rwxr-xr-xt/t0061-run-command.sh30
-rwxr-xr-xt/t0062-revision-walking.sh1
-rwxr-xr-xt/t0100-previous.sh1
-rwxr-xr-xt/t0101-at-syntax.sh2
-rwxr-xr-xt/t0210-trace2-normal.sh76
-rwxr-xr-xt/t1001-read-tree-m-2way.sh1
-rwxr-xr-xt/t1002-read-tree-m-u-2way.sh1
-rwxr-xr-xt/t1006-cat-file.sh10
-rwxr-xr-xt/t1011-read-tree-sparse-checkout.sh46
-rwxr-xr-xt/t1060-object-corruption.sh2
-rwxr-xr-xt/t1091-sparse-checkout-builtin.sh15
-rwxr-xr-xt/t1092-sparse-checkout-compatibility.sh209
-rwxr-xr-xt/t1300-config.sh9
-rwxr-xr-xt/t1401-symbolic-ref.sh2
-rwxr-xr-xt/t1411-reflog-show.sh1
-rwxr-xr-xt/t1412-reflog-loop.sh2
-rwxr-xr-xt/t1415-worktree-refs.sh1
-rwxr-xr-xt/t1450-fsck.sh13
-rwxr-xr-xt/t1800-hook.sh31
-rwxr-xr-xt/t2015-checkout-unborn.sh1
-rwxr-xr-xt/t2016-checkout-patch.sh42
-rwxr-xr-xt/t2107-update-index-basic.sh31
-rwxr-xr-xt/t2200-add-update.sh1
-rwxr-xr-xt/t2203-add-intent.sh1
-rwxr-xr-xt/t3104-ls-tree-format.sh5
-rwxr-xr-xt/t3105-ls-tree-output.sh192
-rwxr-xr-xt/t3200-branch.sh35
-rwxr-xr-xt/t3202-show-branch.sh14
-rwxr-xr-xt/t3206-range-diff.sh51
-rwxr-xr-xt/t3302-notes-index-expensive.sh1
-rwxr-xr-xt/t3303-notes-subtrees.sh1
-rwxr-xr-xt/t3305-notes-fanout.sh1
-rwxr-xr-xt/t3408-rebase-multi-line.sh1
-rwxr-xr-xt/t3416-rebase-onto-threedots.sh29
-rwxr-xr-xt/t3501-revert-cherry-pick.sh9
-rwxr-xr-xt/t3602-rm-sparse-checkout.sh6
-rwxr-xr-xt/t3700-add.sh28
-rwxr-xr-xt/t3701-add-interactive.sh12
-rwxr-xr-xt/t3705-add-sparse-checkout.sh4
-rwxr-xr-xt/t3903-stash.sh20
-rwxr-xr-xt/t4013-diff-various.sh33
-rwxr-xr-xt/t4014-format-patch.sh33
-rwxr-xr-xt/t4020-diff-external.sh8
-rwxr-xr-xt/t4021-format-patch-numbered.sh1
-rwxr-xr-xt/t4027-diff-submodule.sh1
-rwxr-xr-xt/t4028-format-patch-mime-headers.sh2
-rwxr-xr-xt/t4036-format-patch-signer-mime.sh1
-rwxr-xr-xt/t4039-diff-assume-unchanged.sh1
-rwxr-xr-xt/t4055-diff-context.sh1
-rwxr-xr-xt/t4066-diff-emit-delay.sh1
-rwxr-xr-xt/t4122-apply-symlink-inside.sh1
-rwxr-xr-xt/t4126-apply-empty.sh1
-rwxr-xr-xt/t4128-apply-root.sh1
-rwxr-xr-xt/t4202-log.sh11
-rwxr-xr-xt/t4206-log-follow-harder-copies.sh2
-rwxr-xr-xt/t4207-log-decoration-colors.sh1
-rwxr-xr-xt/t4212-log-corrupt.sh1
-rwxr-xr-xt/t4217-log-limit.sh41
-rwxr-xr-xt/t5003-archive-zip.sh20
-rwxr-xr-xt/t5300-pack-object.sh41
-rwxr-xr-xt/t5301-sliding-window.sh2
-rwxr-xr-xt/t5313-pack-bounds-checks.sh2
-rwxr-xr-xt/t5316-pack-delta-depth.sh2
-rwxr-xr-xt/t5317-pack-objects-filter-objects.sh91
-rwxr-xr-xt/t5320-delta-islands.sh2
-rwxr-xr-xt/t5322-pack-objects-sparse.sh1
-rwxr-xr-xt/t5329-pack-objects-cruft.sh741
-rwxr-xr-xt/t5505-remote.sh34
-rwxr-xr-xt/t5506-remote-groups.sh1
-rwxr-xr-xt/t5512-ls-remote.sh17
-rwxr-xr-xt/t5513-fetch-track.sh1
-rwxr-xr-xt/t5515-fetch-merge-logic.sh1
-rwxr-xr-xt/t5516-fetch-push.sh52
-rwxr-xr-xt/t5518-fetch-exit-status.sh1
-rwxr-xr-xt/t5526-fetch-submodules.sh27
-rwxr-xr-xt/t5528-push-default.sh77
-rwxr-xr-xt/t5532-fetch-proxy.sh2
-rwxr-xr-xt/t5537-fetch-shallow.sh12
-rwxr-xr-xt/t5551-http-fetch-smart.sh7
-rwxr-xr-xt/t5572-pull-submodule.sh26
-rwxr-xr-xt/t5600-clone-fail-cleanup.sh1
-rwxr-xr-xt/t5601-clone.sh23
-rwxr-xr-xt/t5605-clone-local.sh9
-rwxr-xr-xt/t5900-repo-selection.sh2
-rwxr-xr-xt/t6002-rev-list-bisect.sh1
-rwxr-xr-xt/t6003-rev-list-topo-order.sh1
-rwxr-xr-xt/t6005-rev-list-count.sh1
-rwxr-xr-xt/t6018-rev-list-glob.sh1
-rwxr-xr-xt/t6030-bisect-porcelain.sh28
-rwxr-xr-xt/t6100-rev-list-in-order.sh1
-rwxr-xr-xt/t6101-rev-parse-parents.sh1
-rwxr-xr-xt/t6110-rev-list-sparse.sh1
-rwxr-xr-xt/t6114-keep-packs.sh2
-rwxr-xr-xt/t6131-pathspec-icase.sh2
-rwxr-xr-xt/t6132-pathspec-exclude.sh181
-rwxr-xr-xt/t6424-merge-unrelated-index-changes.sh32
-rwxr-xr-xt/t6428-merge-conflicts-sparse.sh4
-rwxr-xr-xt/t7002-mv-sparse-checkout.sh2
-rwxr-xr-xt/t7008-filter-branch-null-sha1.sh1
-rwxr-xr-xt/t7011-skip-worktree-reading.sh1
-rwxr-xr-xt/t7012-skip-worktree-writing.sh2
-rwxr-xr-xt/t7063-status-untracked-cache.sh113
-rwxr-xr-xt/t7406-submodule-update.sh33
-rwxr-xr-xt/t7519-status-fsmonitor.sh32
-rwxr-xr-xt/t7524-commit-summary.sh31
-rwxr-xr-xt/t7527-builtin-fsmonitor.sh401
-rwxr-xr-xt/t7609-mergetool--lib.sh14
-rwxr-xr-xt/t7702-repack-cyclic-alternate.sh2
-rwxr-xr-xt/t7703-repack-geometric.sh75
-rwxr-xr-xt/t7812-grep-icase-non-ascii.sh10
-rwxr-xr-xt/t9001-send-email.sh1
-rwxr-xr-xt/t9100-git-svn-basic.sh1
-rwxr-xr-xt/t9101-git-svn-props.sh2
-rwxr-xr-xt/t9104-git-svn-follow-parent.sh2
-rwxr-xr-xt/t9106-git-svn-commit-diff-clobber.sh2
-rwxr-xr-xt/t9115-git-svn-dcommit-funky-renames.sh1
-rwxr-xr-xt/t9116-git-svn-log.sh1
-rwxr-xr-xt/t9122-git-svn-author.sh2
-rwxr-xr-xt/t9127-git-svn-partial-rebuild.sh1
-rwxr-xr-xt/t9129-git-svn-i18n-commitencoding.sh1
-rwxr-xr-xt/t9132-git-svn-broken-symlink.sh1
-rwxr-xr-xt/t9139-git-svn-non-utf8-commitencoding.sh1
-rwxr-xr-xt/t9146-git-svn-empty-dirs.sh2
-rwxr-xr-xt/t9148-git-svn-propset.sh1
-rwxr-xr-xt/t9160-git-svn-preserve-empty-dirs.sh1
-rwxr-xr-xt/t9162-git-svn-dcommit-interactive.sh2
-rwxr-xr-xt/t9164-git-svn-dcommit-concurrent.sh2
-rwxr-xr-xt/t9350-fast-export.sh7
-rwxr-xr-xt/t9501-gitweb-standalone-http-status.sh1
-rwxr-xr-xt/t9502-gitweb-standalone-parse-output.sh14
-rwxr-xr-xt/t9800-git-p4-basic.sh85
-rwxr-xr-xt/t9801-git-p4-branch.sh10
-rwxr-xr-xt/t9802-git-p4-filetype.sh34
-rwxr-xr-xt/t9835-git-p4-metadata-encoding-python2.sh213
-rwxr-xr-xt/t9836-git-p4-metadata-encoding-python3.sh214
-rwxr-xr-xt/t9903-bash-prompt.sh2
-rw-r--r--t/test-lib-functions.sh16
-rw-r--r--t/test-lib-github-workflow-markup.sh56
-rw-r--r--t/test-lib-junit.sh132
-rw-r--r--t/test-lib.sh139
-rw-r--r--tempfile.c63
-rw-r--r--tempfile.h13
-rwxr-xr-xtemplates/hooks--fsmonitor-watchman.sample5
-rw-r--r--trace2.c8
-rw-r--r--trace2.h8
-rw-r--r--trailer.c4
-rw-r--r--transport-helper.c2
-rw-r--r--transport.c262
-rw-r--r--transport.h1
-rw-r--r--unpack-trees.c11
-rw-r--r--usage.c35
-rw-r--r--wildmatch.c2
-rw-r--r--worktree.c2
-rw-r--r--wrapper.c2
-rw-r--r--wt-status.c17
-rw-r--r--xdiff-interface.c2
-rw-r--r--xdiff/xemit.c2
-rw-r--r--xdiff/xmacros.h1
-rw-r--r--xdiff/xprepare.c2
-rw-r--r--xdiff/xutils.c2
477 files changed, 128008 insertions, 189244 deletions
diff --git a/.cirrus.yml b/.cirrus.yml
index e114ffe..4860beb 100644
--- a/.cirrus.yml
+++ b/.cirrus.yml
@@ -9,7 +9,7 @@ freebsd_12_task:
DEFAULT_TEST_TARGET: prove
DEVELOPER: 1
freebsd_instance:
- image_family: freebsd-12-2
+ image_family: freebsd-12-3
memory: 2G
install_script:
pkg install -y gettext gmake perl5
diff --git a/.github/workflows/main.yml b/.github/workflows/main.yml
index c35200d..cd1f526 100644
--- a/.github/workflows/main.yml
+++ b/.github/workflows/main.yml
@@ -119,8 +119,8 @@ jobs:
- name: test
shell: bash
run: . /etc/profile && ci/run-test-slice.sh ${{matrix.nr}} 10
- - name: ci/print-test-failures.sh
- if: failure()
+ - name: print test failures
+ if: failure() && env.FAILED_TEST_ARTIFACTS != ''
shell: bash
run: ci/print-test-failures.sh
- name: Upload failed tests' directories
@@ -204,8 +204,8 @@ jobs:
env:
NO_SVN_TESTS: 1
run: . /etc/profile && ci/run-test-slice.sh ${{matrix.nr}} 10
- - name: ci/print-test-failures.sh
- if: failure()
+ - name: print test failures
+ if: failure() && env.FAILED_TEST_ARTIFACTS != ''
shell: bash
run: ci/print-test-failures.sh
- name: Upload failed tests' directories
@@ -261,8 +261,10 @@ jobs:
- uses: actions/checkout@v2
- run: ci/install-dependencies.sh
- run: ci/run-build-and-tests.sh
- - run: ci/print-test-failures.sh
- if: failure()
+ - name: print test failures
+ if: failure() && env.FAILED_TEST_ARTIFACTS != ''
+ shell: bash
+ run: ci/print-test-failures.sh
- name: Upload failed tests' directories
if: failure() && env.FAILED_TEST_ARTIFACTS != ''
uses: actions/upload-artifact@v2
@@ -292,8 +294,10 @@ jobs:
- uses: actions/checkout@v1
- run: ci/install-docker-dependencies.sh
- run: ci/run-build-and-tests.sh
- - run: ci/print-test-failures.sh
- if: failure()
+ - name: print test failures
+ if: failure() && env.FAILED_TEST_ARTIFACTS != ''
+ shell: bash
+ run: ci/print-test-failures.sh
- name: Upload failed tests' directories
if: failure() && env.FAILED_TEST_ARTIFACTS != ''
uses: actions/upload-artifact@v1
diff --git a/.gitignore b/.gitignore
index e81de10..a452215 100644
--- a/.gitignore
+++ b/.gitignore
@@ -200,6 +200,7 @@
*.[aos]
*.o.json
*.py[co]
+.build/
.depend/
*.gcda
*.gcno
diff --git a/Documentation/CodingGuidelines b/Documentation/CodingGuidelines
index b20b2f9..4c756be 100644
--- a/Documentation/CodingGuidelines
+++ b/Documentation/CodingGuidelines
@@ -43,7 +43,10 @@ the overall style of existing code. Modifications to existing
code is expected to match the style the surrounding code already
uses (even if it doesn't match the overall style of existing code).
-But if you must have a list of rules, here they are.
+But if you must have a list of rules, here are some
+language-specific ones. Note that the Documentation/ToolsForGit.txt
+document has a collection of tips to help you use some external
+tools to conform to these guidelines.
For shell scripts specifically (not exhaustive):
@@ -492,17 +495,6 @@ For Perl programs:
- Learn and use Git.pm if you need that functionality.
- - For Emacs, it's useful to put the following in
- GIT_CHECKOUT/.dir-locals.el, assuming you use cperl-mode:
-
- ;; note the first part is useful for C editing, too
- ((nil . ((indent-tabs-mode . t)
- (tab-width . 8)
- (fill-column . 80)))
- (cperl-mode . ((cperl-indent-level . 8)
- (cperl-extra-newline-before-brace . nil)
- (cperl-merge-trailing-else . t))))
-
For Python scripts:
- We follow PEP-8 (http://www.python.org/dev/peps/pep-0008/).
diff --git a/Documentation/Makefile b/Documentation/Makefile
index 44c080e..f2e7fc1 100644
--- a/Documentation/Makefile
+++ b/Documentation/Makefile
@@ -93,7 +93,9 @@ SP_ARTICLES += $(API_DOCS)
TECH_DOCS += MyFirstContribution
TECH_DOCS += MyFirstObjectWalk
TECH_DOCS += SubmittingPatches
+TECH_DOCS += ToolsForGit
TECH_DOCS += technical/bundle-format
+TECH_DOCS += technical/cruft-packs
TECH_DOCS += technical/hash-function-transition
TECH_DOCS += technical/http-protocol
TECH_DOCS += technical/index-format
@@ -302,12 +304,12 @@ $(mergetools_txt): mergetools-list.made
mergetools-list.made: ../git-mergetool--lib.sh $(wildcard ../mergetools/*)
$(QUIET_GEN) \
- $(SHELL_PATH) -c 'MERGE_TOOLS_DIR=../mergetools && \
+ $(SHELL_PATH) -c 'MERGE_TOOLS_DIR=../mergetools && TOOL_MODE=diff && \
. ../git-mergetool--lib.sh && \
- show_tool_names can_diff "* " || :' >mergetools-diff.txt && \
- $(SHELL_PATH) -c 'MERGE_TOOLS_DIR=../mergetools && \
+ show_tool_names can_diff' | sed -e "s/\([a-z0-9]*\)/\`\1\`;;/" >mergetools-diff.txt && \
+ $(SHELL_PATH) -c 'MERGE_TOOLS_DIR=../mergetools && TOOL_MODE=merge && \
. ../git-mergetool--lib.sh && \
- show_tool_names can_merge "* " || :' >mergetools-merge.txt && \
+ show_tool_names can_merge' | sed -e "s/\([a-z0-9]*\)/\`\1\`;;/" >mergetools-merge.txt && \
date >$@
TRACK_ASCIIDOCFLAGS = $(subst ','\'',$(ASCIIDOC_COMMON):$(ASCIIDOC_HTML):$(ASCIIDOC_DOCBOOK))
diff --git a/Documentation/MyFirstContribution.txt b/Documentation/MyFirstContribution.txt
index 63a2ef5..1da15d9 100644
--- a/Documentation/MyFirstContribution.txt
+++ b/Documentation/MyFirstContribution.txt
@@ -710,13 +710,104 @@ dependencies. `prove` also makes the output nicer.
Go ahead and commit this change, as well.
[[ready-to-share]]
-== Getting Ready to Share
+== Getting Ready to Share: Anatomy of a Patch Series
You may have noticed already that the Git project performs its code reviews via
emailed patches, which are then applied by the maintainer when they are ready
-and approved by the community. The Git project does not accept patches from
+and approved by the community. The Git project does not accept contributions from
pull requests, and the patches emailed for review need to be formatted a
-specific way. At this point the tutorial diverges, in order to demonstrate two
+specific way.
+
+:patch-series: https://lore.kernel.org/git/pull.1218.git.git.1645209647.gitgitgadget@gmail.com/
+:lore: https://lore.kernel.org/git/
+
+Before taking a look at how to convert your commits into emailed patches,
+let's analyze what the end result, a "patch series", looks like. Here is an
+{patch-series}[example] of the summary view for a patch series on the web interface of
+the {lore}[Git mailing list archive]:
+
+----
+2022-02-18 18:40 [PATCH 0/3] libify reflog John Cai via GitGitGadget
+2022-02-18 18:40 ` [PATCH 1/3] reflog: libify delete reflog function and helpers John Cai via GitGitGadget
+2022-02-18 19:10 ` Ævar Arnfjörð Bjarmason [this message]
+2022-02-18 19:39 ` Taylor Blau
+2022-02-18 19:48 ` Ævar Arnfjörð Bjarmason
+2022-02-18 19:35 ` Taylor Blau
+2022-02-21 1:43 ` John Cai
+2022-02-21 1:50 ` Taylor Blau
+2022-02-23 19:50 ` John Cai
+2022-02-18 20:00 ` // other replies elided
+2022-02-18 18:40 ` [PATCH 2/3] reflog: call reflog_delete from reflog.c John Cai via GitGitGadget
+2022-02-18 19:15 ` Ævar Arnfjörð Bjarmason
+2022-02-18 20:26 ` Junio C Hamano
+2022-02-18 18:40 ` [PATCH 3/3] stash: call reflog_delete from reflog.c John Cai via GitGitGadget
+2022-02-18 19:20 ` Ævar Arnfjörð Bjarmason
+2022-02-19 0:21 ` Taylor Blau
+2022-02-22 2:36 ` John Cai
+2022-02-22 10:51 ` Ævar Arnfjörð Bjarmason
+2022-02-18 19:29 ` [PATCH 0/3] libify reflog Ævar Arnfjörð Bjarmason
+2022-02-22 18:30 ` [PATCH v2 0/3] libify reflog John Cai via GitGitGadget
+2022-02-22 18:30 ` [PATCH v2 1/3] stash: add test to ensure reflog --rewrite --updatref behavior John Cai via GitGitGadget
+2022-02-23 8:54 ` Ævar Arnfjörð Bjarmason
+2022-02-23 21:27 ` Junio C Hamano
+// continued
+----
+
+We can note a few things:
+
+- Each commit is sent as a separate email, with the commit message title as
+ subject, prefixed with "[PATCH _i_/_n_]" for the _i_-th commit of an
+ _n_-commit series.
+- Each patch is sent as a reply to an introductory email called the _cover
+ letter_ of the series, prefixed "[PATCH 0/_n_]".
+- Subsequent iterations of the patch series are labelled "PATCH v2", "PATCH
+ v3", etc. in place of "PATCH". For example, "[PATCH v2 1/3]" would be the first of
+ three patches in the second iteration. Each iteration is sent with a new cover
+ letter (like "[PATCH v2 0/3]" above), itself a reply to the cover letter of the
+ previous iteration (more on that below).
+
+NOTE: A single-patch topic is sent with "[PATCH]", "[PATCH v2]", etc. without
+_i_/_n_ numbering (in the above thread overview, no single-patch topic appears,
+though).
+
+[[cover-letter]]
+=== The cover letter
+
+In addition to an email per patch, the Git community also expects your patches
+to come with a cover letter. This is an important component of change
+submission as it explains to the community from a high level what you're trying
+to do, and why, in a way that's more apparent than just looking at your
+patches.
+
+The title of your cover letter should be something which succinctly covers the
+purpose of your entire topic branch. It's often in the imperative mood, just
+like our commit message titles. Here is how we'll title our series:
+
+---
+Add the 'psuh' command
+---
+
+The body of the cover letter is used to give additional context to reviewers.
+Be sure to explain anything your patches don't make clear on their own, but
+remember that since the cover letter is not recorded in the commit history,
+anything that might be useful to future readers of the repository's history
+should also be in your commit messages.
+
+Here's an example body for `psuh`:
+
+----
+Our internal metrics indicate widespread interest in the command
+git-psuh - that is, many users are trying to use it, but finding it is
+unavailable, using some unknown workaround instead.
+
+The following handful of patches add the psuh command and implement some
+handy features on top of it.
+
+This patchset is part of the MyFirstContribution tutorial and should not
+be merged.
+----
+
+At this point the tutorial diverges, in order to demonstrate two
different methods of formatting your patchset and getting it reviewed.
The first method to be covered is GitGitGadget, which is useful for those
@@ -808,8 +899,22 @@ https://github.com/gitgitgadget/git and open a PR either with the "New pull
request" button or the convenient "Compare & pull request" button that may
appear with the name of your newly pushed branch.
-Review the PR's title and description, as it's used by GitGitGadget as the cover
-letter for your change. When you're happy, submit your pull request.
+Review the PR's title and description, as they're used by GitGitGadget
+respectively as the subject and body of the cover letter for your change. Refer
+to <<cover-letter,"The cover letter">> above for advice on how to title your
+submission and what content to include in the description.
+
+NOTE: For single-patch contributions, your commit message should already be
+meaningful and explain at a high level the purpose (what is happening and why)
+of your patch, so you usually do not need any additional context. In that case,
+remove the PR description that GitHub automatically generates from your commit
+message (your PR description should be empty). If you do need to supply even
+more context, you can do so in that space and it will be appended to the email
+that GitGitGadget will send, between the three-dash line and the diffstat
+(see <<single-patch,Bonus Chapter: One-Patch Changes>> for how this looks once
+submitted).
+
+When you're happy, submit your pull request.
[[run-ci-ggg]]
=== Running CI and Getting Ready to Send
@@ -952,49 +1057,29 @@ but want reviewers to look at what they have so far. You can add this flag with
Check and make sure that your patches and cover letter template exist in the
directory you specified - you're nearly ready to send out your review!
-[[cover-letter]]
+[[preparing-cover-letter]]
=== Preparing Email
-In addition to an email per patch, the Git community also expects your patches
-to come with a cover letter, typically with a subject line [PATCH 0/x] (where
-x is the number of patches you're sending). Since you invoked `format-patch`
-with `--cover-letter`, you've already got a template ready. Open it up in your
-favorite editor.
+Since you invoked `format-patch` with `--cover-letter`, you've already got a
+cover letter template ready. Open it up in your favorite editor.
You should see a number of headers present already. Check that your `From:`
-header is correct. Then modify your `Subject:` to something which succinctly
-covers the purpose of your entire topic branch, for example:
+header is correct. Then modify your `Subject:` (see <<cover-letter,above>> for
+how to choose a good title for your patch series):
----
-Subject: [PATCH 0/7] adding the 'psuh' command
+Subject: [PATCH 0/7] Add the 'psuh' command
----
Make sure you retain the ``[PATCH 0/X]'' part; that's what indicates to the Git
-community that this email is the beginning of a review, and many reviewers
-filter their email for this type of flag.
+community that this email is the beginning of a patch series, and many
+reviewers filter their email for this type of flag.
You'll need to add some extra parameters when you invoke `git send-email` to add
the cover letter.
-Next you'll have to fill out the body of your cover letter. This is an important
-component of change submission as it explains to the community from a high level
-what you're trying to do, and why, in a way that's more apparent than just
-looking at your diff. Be sure to explain anything your diff doesn't make clear
-on its own.
-
-Here's an example body for `psuh`:
-
-----
-Our internal metrics indicate widespread interest in the command
-git-psuh - that is, many users are trying to use it, but finding it is
-unavailable, using some unknown workaround instead.
-
-The following handful of patches add the psuh command and implement some
-handy features on top of it.
-
-This patchset is part of the MyFirstContribution tutorial and should not
-be merged.
-----
+Next you'll have to fill out the body of your cover letter. Again, see
+<<cover-letter,above>> for what content to include.
The template created by `git format-patch --cover-letter` includes a diffstat.
This gives reviewers a summary of what they're in for when reviewing your topic.
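
As an illustration of the flow this section describes (an editorial sketch, not
part of the patch itself), here is how a series with a cover letter might be
generated and sent; the branch name, output directory and recipient address
are placeholders:

----
# Sketch only: generate patches plus a cover letter for the 'psuh' topic
git format-patch --cover-letter -o psuh/ master..psuh

# After editing psuh/0000-cover-letter.patch, send the whole series
git send-email --to=target@example.com psuh/*.patch
----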
diff --git a/Documentation/RelNotes/2.36.1.txt b/Documentation/RelNotes/2.36.1.txt
new file mode 100644
index 0000000..a961709
--- /dev/null
+++ b/Documentation/RelNotes/2.36.1.txt
@@ -0,0 +1,33 @@
+Git v2.36.1 Release Notes
+=========================
+
+Fixes since v2.36
+-----------------
+
+ * "git submodule update" without pathspec should silently skip an
+ uninitialized submodule, but it started to become noisy by mistake.
+
+ * "diff-tree --stdin" has been broken for about a year, but 2.36
+ release broke it even worse by breaking running the command with
+ <pathspec>, which in turn broke "gitk" and got noticed. This has
+ been corrected by aligning its behaviour to that of "log".
+
+ * Regression fix for 2.36 where "git name-rev" started to sometimes
+ reference strings after they are freed.
+
+ * "git show <commit1> <commit2>... -- <pathspec>" lost the pathspec
+ when showing the second and subsequent commits, which has been
+ corrected.
+
+ * "git fast-export -- <pathspec>" lost the pathspec when showing the
+ second and subsequent commits, which has been corrected.
+
+ * "git format-patch <args> -- <pathspec>" lost the pathspec when
+ showing the second and subsequent commits, which has been
+ corrected.
+
+ * Get rid of a bogus and over-eager coccinelle rule.
+
+ * Correct choices of C compilers used in various CI jobs.
+
+Also contains minor documentation updates and code clean-ups.
diff --git a/Documentation/RelNotes/2.36.2.txt b/Documentation/RelNotes/2.36.2.txt
new file mode 100644
index 0000000..ba5d5ac
--- /dev/null
+++ b/Documentation/RelNotes/2.36.2.txt
@@ -0,0 +1,50 @@
+Git v2.36.2 Release Notes
+=========================
+
+This maintenance release is primarily to merge down updates to the
+build and CI procedures from the 'master' front, in order to ensure
+that we can cut healthy maintenance releases in the future. It also
+contains a handful of small and trivially-correct bugfixes.
+
+Fixes since v2.36.1
+-------------------
+
+ * Fixes real problems noticed by gcc 12 and works around false
+ positives.
+
+ * Update URL to the gitk repository.
+
+ * The "--current" option of "git show-branch" should have been made
+ incompatible with the "--reflog" mode, but this was not enforced,
+ which has been corrected.
+
+ * "git archive --add-file=<path>" picked up the raw permission bits
+ from the path and propagated to zip output in some cases, without
+ normalization, which has been corrected (tar output did not have
+ this issue).
+
+ * A bit of test framework fixes with a few fixes to issues found by
+ valgrind.
+
+ * macOS CI jobs have been occasionally flaky due to tentative version
+ skew between perforce and the homebrew packager. Instead of
+ failing the whole CI job, just let it skip the p4 tests when this
+ happens.
+
+ * The commit summary shown after making a commit is matched to what
+ is given in "git status" not to use the break-rewrite heuristics.
+
+ * Avoid problems from interaction between malloc_check and address
+ sanitizer.
+
+ * "git rebase --keep-base <upstream> <branch-to-rebase>" computed the
+ commit to rebase onto incorrectly, which has been corrected.
+
+ * The path taken by "git multi-pack-index" command from the end user
+ was compared with the path internally prepared by the tool without first
+ normalizing, which led to duplicated paths not being noticed,
+ which has been corrected.
+
+ * "git clone --origin X" leaked piece of memory that held value read
+ from the clone.defaultRemoteName configuration variable, which has
+ been plugged.
diff --git a/Documentation/RelNotes/2.37.0.txt b/Documentation/RelNotes/2.37.0.txt
new file mode 100644
index 0000000..99dc7e3
--- /dev/null
+++ b/Documentation/RelNotes/2.37.0.txt
@@ -0,0 +1,337 @@
+Git v2.37 Release Notes
+=======================
+
+UI, Workflows & Features
+
+ * "vimdiff[123]" mergetool drivers have been reimplemented with a
+ more generic layout mechanism.
+
+ * "git -v" and "git -h" are now understood as "git --version" and
+ "git --help".
+
+ * The temporary files fed to external diff command are now generated
+ inside a new temporary directory under the same basename.
+
+ * "git log --since=X" will stop traversal upon seeing a commit that
+ is older than X, but there may be commits behind it that are younger
+ than X when the commit was created with a faulty clock. A new
+ option is added to keep digging without stopping, and instead
+ filter out commits with timestamp older than X.
+
+ * "git -c branch.autosetupmerge=simple branch $A $B" will set the $B
+ as $A's upstream only when $A and $B share the same name, and "git
+ -c push.default=simple" on branch $A would push to update the
+ branch $A at the remote $B came from. Also, more places use the
+ sole remote, if one exists, before defaulting to 'origin'.
+
+ * A new doc has been added that lists tips for tools to work with
+ Git's codebase.
+
+ * "git remote -v" now shows the list-objects-filter used during
+ fetching from the remote, if available.
+
+ * With the new http.curloptResolve configuration, the CURLOPT_RESOLVE
+ mechanism that allows cURL based applications to use pre-resolved
+ IP addresses for the requests is exposed to the scripts.
+
+ * "git add -i" was rewritten in C some time ago and has been in
+ testing; the reimplementation is now exposed to the general public by
+ default.
+
+ * Deprecate non-cone mode of the sparse-checkout feature.
+
+ * Introduce a filesystem-dependent mechanism to optimize the way the
+ bits for many loose object files are ensured to hit the disk
+ platter.
+
+ * The "do not remove the directory the user started Git in" logic,
+ when Git cannot tell where that directory is, is disabled. Earlier
+ we refused to run in such a case.
+
+ * A mechanism to pack unreachable objects into a "cruft pack",
+ instead of ejecting them into loose form to be reclaimed later, has
+ been introduced.
+
+ * Update the doctype written in gitweb output to xhtml5.
+
+ * The "transfer.credentialsInURL" configuration variable controls what
+ happens when a URL with embedded login credential is used on either
+ "fetch" or "push". Credentials are currently only detected in
+ `remote.<name>.url` config, not `remote.<name>.pushurl`.
+
+ * "git revert" learns "--reference" option to use more human-readable
+ reference to the commit it reverts in the message template it
+ prepares for the user.
+
+ * Various error messages that talk about the removal of
+ "--preserve-merges" in "rebase" have been strengthened, and "rebase
+ --abort" learned to get out of a state that was left by an earlier
+ use of the option.
+
+
+Performance, Internal Implementation, Development Support etc.
+
+ * The performance of the "untracked cache" feature has been improved
+ when "--untracked-files=<mode>" and "status.showUntrackedFiles"
+ are combined.
+
+ * "git stash" works better with sparse index entries.
+
+ * "git show :<path>" learned to work better with the sparse-index
+ feature.
+
+ * Introduce and apply coccinelle rule to discourage an explicit
+ comparison between a pointer and NULL, and applies the clean-up to
+ the maintenance track.
+
+ * Preliminary code refactoring around transport and bundle code.
+
+ * "sparse-checkout" learns to work better with the sparse-index
+ feature.
+
+ * A workflow change for translators is being proposed. git.pot is
+ no longer version controlled and it is the local responsibility of
+ translators to generate it.
+
+ * Plug the memory leaks from the trickiest API of all, the revision
+ walker.
+
+ * Rename .env_array member to .env in the child_process structure.
+
+ * The fsmonitor--daemon handles even more corner cases when
+ watching filesystem events.
+
+ * A new bug() and BUG_if_bug() API is introduced to make it easier to
+ uniformly log "detect multiple bugs and abort in the end" pattern.
+
+
+Fixes since v2.36
+-----------------
+
+ * "git submodule update" without pathspec should silently skip an
+ uninitialized submodule, but it started to become noisy by mistake.
+ (merge 4f1ccef87c gc/submodule-update-part2 later to maint).
+
+ * "diff-tree --stdin" has been broken for about a year, but 2.36
+ release broke it even worse by breaking running the command with
+ <pathspec>, which in turn broke "gitk" and got noticed. This has
+ been corrected by aligning its behaviour to that of "log".
+ (merge f8781bfda3 jc/diff-tree-stdin-fix later to maint).
+
+ * Regression fix for 2.36 where "git name-rev" started to sometimes
+ reference strings after they are freed.
+ (merge 45a14f578e rs/name-rev-fix-free-after-use later to maint).
+
+ * "git show <commit1> <commit2>... -- <pathspec>" lost the pathspec
+ when showing the second and subsequent commits, which has been
+ corrected.
+ (merge 5cdb38458e jc/show-pathspec-fix later to maint).
+
+ * "git fast-export -- <pathspec>" lost the pathspec when showing the
+ second and subsequent commits, which has been corrected.
+ (merge d1c25272f5 rs/fast-export-pathspec-fix later to maint).
+
+ * "git format-patch <args> -- <pathspec>" lost the pathspec when
+ showing the second and subsequent commits, which has been
+ corrected.
+ (merge 91f8f7e46f rs/format-patch-pathspec-fix later to maint).
+
+ * "git clone --origin X" leaked piece of memory that held value read
+ from the clone.defaultRemoteName configuration variable, which has
+ been plugged.
+ (merge 6dfadc8981 jc/clone-remote-name-leak-fix later to maint).
+
+ * Get rid of a bogus and over-eager coccinelle rule.
+ (merge 08bdd3a185 jc/cocci-xstrdup-or-null-fix later to maint).
+
+ * The path taken by "git multi-pack-index" command from the end user
+ was compared with the path internally prepared by the tool without first
+ normalizing, which led to duplicated paths not being noticed,
+ which has been corrected.
+ (merge 11f9e8de3d ds/midx-normalize-pathname-before-comparison later to maint).
+
+ * Correct choices of C compilers used in various CI jobs.
+ (merge 3506cae04f ab/cc-package-fixes later to maint).
+
+ * Various cleanups to "git p4".
+ (merge 4ff0108d9e jh/p4-various-fixups later to maint).
+
+ * The progress meter of "git blame" was showing incorrect numbers
+ when processing only parts of the file.
+ (merge e5f5d7d42e ea/progress-partial-blame later to maint).
+
+ * "git rebase --keep-base <upstream> <branch-to-rebase>" computed the
+ commit to rebase onto incorrectly, which has been corrected.
+ (merge 9e5ebe9668 ah/rebase-keep-base-fix later to maint).
+
+ * Fix a leak of FILE * in an error codepath.
+ (merge c0befa0c03 kt/commit-graph-plug-fp-leak-on-error later to maint).
+
+ * Avoid problems from interaction between malloc_check and address
+ sanitizer.
+ (merge 067109a5e7 pw/test-malloc-with-sanitize-address later to maint).
+
+ * The commit summary shown after making a commit is matched to what
+ is given in "git status" not to use the break-rewrite heuristics.
+ (merge 84792322ed rs/commit-summary-wo-break-rewrite later to maint).
+
+ * Update a few end-user facing messages around EOL conversion.
+ (merge c970d30c2c ah/convert-warning-message later to maint).
+
+ * Trace2 documentation updates.
+ (merge a6c80c313c js/trace2-doc-fixes later to maint).
+
+ * Build procedure fixup.
+ (merge 1fbfd96f50 mg/detect-compiler-in-c-locale later to maint).
+
+ * "git pull" without "--recurse-submodules=<arg>" made
+ submodule.recurse take precedence over fetch.recurseSubmodules by
+ mistake, which has been corrected.
+ (merge 5819417365 gc/pull-recurse-submodules later to maint).
+
+ * "git bisect" was too silent before it is ready to start computing
+ the actual bisection, which has been corrected.
+ (merge f11046e6de cd/bisect-messages-from-pre-flight-states later to maint).
+
+ * macOS CI jobs have been occasionally flaky due to tentative version
+ skew between perforce and the homebrew packager. Instead of
+ failing the whole CI job, just let it skip the p4 tests when this
+ happens.
+ (merge f15e00b463 cb/ci-make-p4-optional later to maint).
+
+ * A bit of test framework fixes with a few fixes to issues found by
+ valgrind.
+ (merge 7c898554d7 ab/valgrind-fixes later to maint).
+
+ * "git archive --add-file=<path>" picked up the raw permission bits
+ from the path and propagated to zip output in some cases, without
+ normalization, which has been corrected (tar output did not have
+ this issue).
+ (merge 6a61661967 jc/archive-add-file-normalize-mode later to maint).
+
+ * "make coverage-report" without first running "make coverage" did
+ not produce any meaningful result, which has been corrected.
+ (merge 96ddfecc5b ep/coverage-report-wants-test-to-have-run later to maint).
+
+ * The "--current" option of "git show-branch" should have been made
+ incompatible with the "--reflog" mode, but this was not enforced,
+ which has been corrected.
+ (merge 41c64ae0e7 jc/show-branch-g-current later to maint).
+
+ * "git fetch" unnecessarily failed when an unexpected optional
+ section appeared in the output, which has been corrected.
+ (merge 7709acf7be jt/fetch-peek-optional-section later to maint).
+
+ * The way "git fetch" without "--update-head-ok" ensures that HEAD in
+ no worktree points at any ref being updated was too wasteful, which
+ has been optimized a bit.
+ (merge f7400da800 os/fetch-check-not-current-branch later to maint).
+
+ * "git fetch --recurse-submodules" from multiple remotes (either from
+ a remote group, or "--all") used to make one extra "git fetch" in
+ the submodules, which has been corrected.
+ (merge 0353c68818 jc/avoid-redundant-submodule-fetch later to maint).
+
+ * With a recent update to refuse access to repositories of other
+ people by default, "sudo make install" and "sudo git describe"
+ stopped working, which has been corrected.
+ (merge 6b11e3d52e cb/path-owner-check-with-sudo-plus later to maint).
+
+ * The tests that ensured merges stop when interfering local changes
+ are present did not make sure that local changes are preserved; now
+ they do.
+ (merge 4b317450ce jc/t6424-failing-merge-preserve-local-changes later to maint).
+
+ * Some real problems noticed by gcc 12 have been fixed, while false
+ positives have been worked around.
+
+ * Update the version of FreeBSD image used in Cirrus CI.
+ (merge c58bebd4c6 pb/use-freebsd-12.3-in-cirrus-ci later to maint).
+
+ * The multi-pack-index code did not protect the packfile it is going
+ to depend on from getting removed while in use, which has been
+ corrected.
+ (merge 4090511e40 tb/midx-race-in-pack-objects later to maint).
+
+ * Teach "git repack --geometric" work better with "--keep-pack" and
+ avoid corrupting the repository when packsize limit is used.
+ (merge 66731ff921 tb/geom-repack-with-keep-and-max later to maint).
+
+ * The documentation on the interaction between "--add-file" and
+ "--prefix" options of "git archive" has been improved.
+ (merge a75910602a rs/document-archive-prefix later to maint).
+
+ * A git subcommand like "git add -p" spawns a separate git process
+ while relaying its command line arguments. A pathspec with only
+ negative elements was mistakenly passed with an empty string, which
+ has been corrected.
+ (merge b02fdbc80a jc/all-negative-pathspec later to maint).
+
+ * With a more targeted workaround in http.c in another topic, we may
+ be able to lift this blanket "GCC12 dangling-pointer warning is
+ broken and unsalvageable" workaround.
+ (merge 419141e495 cb/buggy-gcc-12-workaround later to maint).
+
+ * A misconfigured 'branch..remote' led to a bug in configuration
+ parsing.
+ (merge f1dfbd9ee0 gc/zero-length-branch-config-fix later to maint).
+
+ * "git -c diff.submodule=log range-diff" did not show anything for
+ submodules that changed in the ranges being compared, and
+ "git -c diff.submodule=diff range-diff" did not work correctly.
+ Fix this by including the "--submodule=short" output
+ unconditionally to be compared.
+
+ * In Git 2.36 we revamped the way how hooks are invoked. One change
+ that is end-user visible is that the output of a hook is no longer
+ directly connected to the standard output of "git" that spawns the
+ hook, which was noticed post release. This is getting corrected.
+ (merge a082345372 ab/hooks-regression-fix later to maint).
+
+ * Updating the graft information invalidates the list of parents of
+ in-core commit objects that used to be in the graft file.
+
+ * "git show-ref --heads" (and "--tags") still iterated over all the
+ refs only to discard refs outside the specified area, which has
+ been corrected.
+ (merge c0c9d35e27 tb/show-ref-optim later to maint).
+
+ * Redundant copying (with index v3 and older) and possible
+ over-reading beyond the end of mmapped memory (with index v4) have
+ been corrected.
+ (merge 6d858341d2 zh/read-cache-copy-name-entry-fix later to maint).
+
+ * Sample watchman interface hook sometimes failed to produce
+ correctly formatted JSON message, which has been corrected.
+ (merge 134047b500 sn/fsmonitor-missing-clock later to maint).
+
+ * Use-after-free (with another forget-to-free) fix.
+ (merge 323822c72b ab/remote-free-fix later to maint).
+
+ * Remove a coccinelle rule that is no longer relevant.
+ (merge b1299de4a1 jc/cocci-cleanup later to maint).
+
+ * Other code cleanup, docfix, build fix, etc.
+ (merge e6b2582da3 cm/reftable-0-length-memset later to maint).
+ (merge 0b75e5bf22 ab/misc-cleanup later to maint).
+ (merge 52e1ab8a76 ea/rebase-code-simplify later to maint).
+ (merge 756d15923b sg/safe-directory-tests-and-docs later to maint).
+ (merge d097a23bfa ds/do-not-call-bug-on-bad-refs later to maint).
+ (merge c36c27e75c rs/t7812-pcre2-ws-bug-test later to maint).
+ (merge 1da312742d gf/unused-includes later to maint).
+ (merge 465b30a92d pb/submodule-recurse-mode-enum later to maint).
+ (merge 82b28c4ed8 km/t3501-use-test-helpers later to maint).
+ (merge 72315e431b sa/t1011-use-helpers later to maint).
+ (merge 95b3002201 cg/vscode-with-gdb later to maint).
+ (merge fbe5f6b804 tk/p4-utf8-bom later to maint).
+ (merge 17f273ffba tk/p4-with-explicity-sync later to maint).
+ (merge 944db25c60 kf/p4-multiple-remotes later to maint).
+ (merge b014cee8de jc/update-ozlabs-url later to maint).
+ (merge 4ec5008062 pb/ggg-in-mfc-doc later to maint).
+ (merge af845a604d tb/receive-pack-code-cleanup later to maint).
+ (merge 2acf4cf001 js/ci-gcc-12-fixes later to maint).
+ (merge 05e280c0a6 jc/http-clear-finished-pointer later to maint).
+ (merge 8c49d704ef fh/transport-push-leakfix later to maint).
+ (merge 1d232d38bd tl/ls-tree-oid-only later to maint).
+ (merge db7961e6a6 gc/document-config-worktree-scope later to maint).
+ (merge ce18a30bb7 fs/ssh-default-key-command-doc later to maint).
diff --git a/Documentation/SubmittingPatches b/Documentation/SubmittingPatches
index a6121d1..5bd795e 100644
--- a/Documentation/SubmittingPatches
+++ b/Documentation/SubmittingPatches
@@ -452,7 +452,10 @@ repositories.
- `gitk-git/` comes from Paul Mackerras's gitk project:
- git://ozlabs.org/~paulus/gitk
+ git://git.ozlabs.org/~paulus/gitk
+
+ Those who are interested in improving gitk can volunteer to help Paul
+ maintain it, cf. <YntxL/fTplFm8lr6@cleo>.
- `po/` comes from the localization coordinator, Jiang Xin:
diff --git a/Documentation/ToolsForGit.txt b/Documentation/ToolsForGit.txt
new file mode 100644
index 0000000..5060d0d
--- /dev/null
+++ b/Documentation/ToolsForGit.txt
@@ -0,0 +1,51 @@
+Tools for developing Git
+========================
+:sectanchors:
+
+[[summary]]
+== Summary
+
+This document gathers tips, scripts and configuration files to help people
+working on Git's codebase use their favorite tools while following Git's
+coding style.
+
+[[author]]
+=== Author
+
+The Git community.
+
+[[table_of_contents]]
+== Table of contents
+
+- <<vscode>>
+- <<emacs>>
+
+[[vscode]]
+=== Visual Studio Code (VS Code)
+
+The contrib/vscode/init.sh script creates configuration files that enable
+several valuable VS Code features. See contrib/vscode/README.md for more
+information on using the script.
+
+[[emacs]]
+=== Emacs
+
+This is adapted from Linux's suggestion in its CodingStyle document:
+
+- To follow the rules of CodingGuidelines, it's useful to put the following in
+GIT_CHECKOUT/.dir-locals.el, assuming you use cperl-mode:
+----
+;; note the first part is useful for C editing, too
+((nil . ((indent-tabs-mode . t)
+ (tab-width . 8)
+ (fill-column . 80)))
+ (cperl-mode . ((cperl-indent-level . 8)
+ (cperl-extra-newline-before-brace . nil)
+ (cperl-merge-trailing-else . t))))
+----
+
+For a more complete setup, since Git's codebase uses a coding style
+similar to the Linux kernel's style, tips given in Linux's CodingStyle
+document can be applied here too.
+
+==== https://www.kernel.org/doc/html/v4.10/process/coding-style.html#you-ve-made-a-mess-of-it
diff --git a/Documentation/config/add.txt b/Documentation/config/add.txt
index c9f748f..3e859f3 100644
--- a/Documentation/config/add.txt
+++ b/Documentation/config/add.txt
@@ -7,6 +7,6 @@ add.ignore-errors (deprecated)::
variables.
add.interactive.useBuiltin::
- [EXPERIMENTAL] Set to `true` to use the experimental built-in
- implementation of the interactive version of linkgit:git-add[1]
- instead of the Perl script version. Is `false` by default.
+ Set to `false` to fall back to the original Perl implementation of
+ the interactive version of linkgit:git-add[1] instead of the built-in
+ version. Is `true` by default.
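
As a quick illustration of the new default described above (not part of the
patch), a user who still wants the Perl implementation of interactive add can
opt out explicitly; this is a sketch, assuming a build that still ships the
Perl script:

----
# Fall back to the legacy Perl implementation of "git add -i" / "git add -p"
git config --global add.interactive.useBuiltin false
----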
diff --git a/Documentation/config/branch.txt b/Documentation/config/branch.txt
index 1e0c7af..445341a 100644
--- a/Documentation/config/branch.txt
+++ b/Documentation/config/branch.txt
@@ -9,7 +9,9 @@ branch.autoSetupMerge::
automatic setup is done when the starting point is either a
local branch or remote-tracking branch; `inherit` -- if the starting point
has a tracking configuration, it is copied to the new
- branch. This option defaults to true.
+ branch; `simple` -- automatic setup is done only when the starting point
+ is a remote-tracking branch and the new branch has the same name as the
+ remote branch. This option defaults to true.
branch.autoSetupRebase::
When a new branch is created with 'git branch', 'git switch' or 'git checkout'
@@ -38,8 +40,9 @@ branch.<name>.remote::
may be overridden with `remote.pushDefault` (for all branches).
The remote to push to, for the current branch, may be further
overridden by `branch.<name>.pushRemote`. If no remote is
- configured, or if you are not on any branch, it defaults to
- `origin` for fetching and `remote.pushDefault` for pushing.
+ configured, or if you are not on any branch and there is more than
+ one remote defined in the repository, it defaults to `origin` for
+ fetching and `remote.pushDefault` for pushing.
Additionally, `.` (a period) is the current local repository
(a dot-repository), see `branch.<name>.merge`'s final note below.
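
To illustrate the new `simple` mode documented above (an editorial sketch,
not part of the patch): tracking is configured only when the new branch name
matches the remote-tracking branch it starts from.

----
git config --global branch.autoSetupMerge simple

git branch topic origin/topic   # names match: 'topic' tracks origin/topic
git branch fixup origin/topic   # names differ: no upstream is configured
----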
diff --git a/Documentation/config/core.txt b/Documentation/config/core.txt
index e67392c..41e330f 100644
--- a/Documentation/config/core.txt
+++ b/Documentation/config/core.txt
@@ -628,6 +628,14 @@ core.fsyncMethod::
* `writeout-only` issues pagecache writeback requests, but depending on the
filesystem and storage hardware, data added to the repository may not be
durable in the event of a system crash. This is the default mode on macOS.
+* `batch` enables a mode that uses writeout-only flushes to stage multiple
+ updates in the disk writeback cache and then does a single full fsync of
+ a dummy file to trigger the disk cache flush at the end of the operation.
++
+Currently `batch` mode only applies to loose-object files. Other repository
+data is made durable as if `fsync` was specified. This mode is expected to
+be as safe as `fsync` on macOS for repos stored on HFS+ or APFS filesystems
+and on Windows for repos stored on NTFS or ReFS filesystems.
core.fsyncObjectFiles::
This boolean will enable 'fsync()' when writing object files.
@@ -698,8 +706,10 @@ core.sparseCheckout::
core.sparseCheckoutCone::
Enables the "cone mode" of the sparse checkout feature. When the
- sparse-checkout file contains a limited set of patterns, then this
- mode provides significant performance advantages. See
+ sparse-checkout file contains a limited set of patterns, this
+ mode provides significant performance advantages. The "non-cone
+ mode" can be requested to allow specifying more flexible
+ patterns by setting this variable to 'false'. See
linkgit:git-sparse-checkout[1] for more information.
core.abbrev::
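
A minimal sketch of opting into the `batch` mode described above (illustration
only; per the text it currently affects loose-object files and other data is
made durable as with `fsync`):

----
# Batch writeback of loose objects, finished by a single full fsync
git config core.fsyncMethod batch
----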
diff --git a/Documentation/config/gc.txt b/Documentation/config/gc.txt
index c834e07..38fea07 100644
--- a/Documentation/config/gc.txt
+++ b/Documentation/config/gc.txt
@@ -81,14 +81,21 @@ gc.packRefs::
to enable it within all non-bare repos or it can be set to a
boolean value. The default is `true`.
+gc.cruftPacks::
+ Store unreachable objects in a cruft pack (see
+ linkgit:git-repack[1]) instead of as loose objects. The default
+ is `false`.
+
gc.pruneExpire::
- When 'git gc' is run, it will call 'prune --expire 2.weeks.ago'.
- Override the grace period with this config variable. The value
- "now" may be used to disable this grace period and always prune
- unreachable objects immediately, or "never" may be used to
- suppress pruning. This feature helps prevent corruption when
- 'git gc' runs concurrently with another process writing to the
- repository; see the "NOTES" section of linkgit:git-gc[1].
+ When 'git gc' is run, it will call 'prune --expire 2.weeks.ago'
+ (and 'repack --cruft --cruft-expiration 2.weeks.ago' if using
+ cruft packs via `gc.cruftPacks` or `--cruft`). Override the
+ grace period with this config variable. The value "now" may be
+ used to disable this grace period and always prune unreachable
+ objects immediately, or "never" may be used to suppress pruning.
+ This feature helps prevent corruption when 'git gc' runs
+ concurrently with another process writing to the repository; see
+ the "NOTES" section of linkgit:git-gc[1].
gc.worktreePruneExpire::
When 'git gc' is run, it calls
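
For illustration (not part of the patch), enabling the cruft-pack behaviour
described above so that unreachable objects are kept packed rather than
exploded into loose objects:

----
git config gc.cruftPacks true
git gc          # unreachable objects go into a cruft pack,
                # still subject to the gc.pruneExpire grace period
----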
diff --git a/Documentation/config/gpg.txt b/Documentation/config/gpg.txt
index 86892ad..86f6308 100644
--- a/Documentation/config/gpg.txt
+++ b/Documentation/config/gpg.txt
@@ -36,9 +36,12 @@ gpg.minTrustLevel::
gpg.ssh.defaultKeyCommand::
This command will be run when user.signingkey is not set and an ssh
- signature is requested. On successful exit a valid ssh public key is
- expected in the first line of its output. To automatically use the first
- available key from your ssh-agent set this to "ssh-add -L".
+ signature is requested. On successful exit a valid ssh public key
+ prefixed with `key::` is expected in the first line of its output.
+ This allows a script to do a dynamic lookup of the correct public
+ key when it is impractical to statically configure `user.signingKey`,
+ for example when keys or SSH certificates are rotated frequently or when
+ selection of the right key depends on external factors unknown to git.
gpg.ssh.allowedSignersFile::
A file containing ssh public keys which you are willing to trust.
diff --git a/Documentation/config/http.txt b/Documentation/config/http.txt
index 7003661..afeeccf 100644
--- a/Documentation/config/http.txt
+++ b/Documentation/config/http.txt
@@ -98,6 +98,22 @@ http.version::
- HTTP/2
- HTTP/1.1
+http.curloptResolve::
+ Hostname resolution information that will be used first by
+ libcurl when sending HTTP requests. This information should
+ be in one of the following formats:
+
+ - [+]HOST:PORT:ADDRESS[,ADDRESS]
+ - -HOST:PORT
+
++
+The first format redirects all requests to the given `HOST:PORT`
+to the provided `ADDRESS`(es). The second format clears all
+previous config values for that `HOST:PORT` combination. To
+allow easy overriding of all the settings inherited from the
+system config, an empty value will reset all resolution
+information to the empty list.
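++
+For example, to pin a host to a specific address (hostname and address are
+hypothetical, shown only as a sketch):
+
+	git config http.curloptResolve "example.com:443:203.0.113.10"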
+
http.sslVersion::
The SSL version to use when negotiating an SSL connection, if you
want to force the default. The available and default version
@@ -187,7 +203,7 @@ http.schannelUseSSLCAInfo::
when the `schannel` backend was configured via `http.sslBackend`,
unless `http.schannelUseSSLCAInfo` overrides this behavior.
-http.pinnedpubkey::
+http.pinnedPubkey::
Public key of the https service. It may either be the filename of
a PEM or DER encoded public key file or a string starting with
'sha256//' followed by the base64 encoded sha256 hash of the
diff --git a/Documentation/config/mergetool.txt b/Documentation/config/mergetool.txt
index cafbbef..90b3809 100644
--- a/Documentation/config/mergetool.txt
+++ b/Documentation/config/mergetool.txt
@@ -45,6 +45,15 @@ mergetool.meld.useAutoMerge::
value of `false` avoids using `--auto-merge` altogether, and is the
default value.
+mergetool.vimdiff.layout::
+	The vimdiff backend uses this variable to control how its split
+	windows look. This applies even if you are using Neovim (`nvim`) or
+ gVim (`gvim`) as the merge tool. See BACKEND SPECIFIC HINTS section
+ifndef::git-mergetool[]
+ in linkgit:git-mergetool[1].
+endif::[]
+ for details.
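++
+For example, to show only `LOCAL`, `MERGED` and `REMOTE` side by side (a
+minimal illustration; the layout syntax is described in the linked section):
+
+	git config mergetool.vimdiff.layout "LOCAL,MERGED,REMOTE"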
+
mergetool.hideResolved::
During a merge Git will automatically resolve as many conflicts as
possible and write the 'MERGED' file containing conflict markers around
diff --git a/Documentation/config/push.txt b/Documentation/config/push.txt
index 6320336..e32801e 100644
--- a/Documentation/config/push.txt
+++ b/Documentation/config/push.txt
@@ -1,3 +1,14 @@
+push.autoSetupRemote::
+	If set to "true", assume `--set-upstream` on default push when no
+ upstream tracking exists for the current branch; this option
+ takes effect with push.default options 'simple', 'upstream',
+ and 'current'. It is useful if by default you want new branches
+ to be pushed to the default remote (like the behavior of
+ 'push.default=current') and you also want the upstream tracking
+ to be set. Workflows most likely to benefit from this option are
+ 'simple' central workflows where all branches are expected to
+ have the same name on the remote.
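++
+For example, to have the first push of a new branch set up tracking
+automatically (a minimal illustration):
+
+	git config --global push.autoSetupRemote true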
+
push.default::
Defines the action `git push` should take if no refspec is
given (whether from the command-line, config, or elsewhere).
diff --git a/Documentation/config/repack.txt b/Documentation/config/repack.txt
index 41ac695..c79af6d 100644
--- a/Documentation/config/repack.txt
+++ b/Documentation/config/repack.txt
@@ -30,3 +30,12 @@ repack.updateServerInfo::
If set to false, linkgit:git-repack[1] will not run
linkgit:git-update-server-info[1]. Defaults to true. Can be overridden
when true by the `-n` option of linkgit:git-repack[1].
+
+repack.cruftWindow::
+repack.cruftWindowMemory::
+repack.cruftDepth::
+repack.cruftThreads::
+ Parameters used by linkgit:git-pack-objects[1] when generating
+ a cruft pack and the respective parameters are not given over
+ the command line. See similarly named `pack.*` configuration
+ variables for defaults and meaning.
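++
+For example, to tune delta search for cruft packs specifically (a minimal
+illustration; the values are arbitrary):
+
+	git config repack.cruftWindow 20
+	git config repack.cruftDepth 10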
diff --git a/Documentation/config/safe.txt b/Documentation/config/safe.txt
index 6d764fe..fa02f3c 100644
--- a/Documentation/config/safe.txt
+++ b/Documentation/config/safe.txt
@@ -13,8 +13,8 @@ override any such directories specified in the system config), add a
`safe.directory` entry with an empty value.
+
This config setting is only respected when specified in a system or global
-config, not when it is specified in a repository config or via the command
-line option `-c safe.directory=<path>`.
+config, not when it is specified in a repository config, via the command
+line option `-c safe.directory=<path>`, or in environment variables.
+
The value of this setting is interpolated, i.e. `~/<path>` expands to a
path relative to the home directory and `%(prefix)/<path>` expands to a
@@ -26,3 +26,17 @@ directory was listed in the `safe.directory` list. If `safe.directory=*`
is set in system config and you want to re-enable this protection, then
initialize your list with an empty value before listing the repositories
that you deem safe.
++
+As explained, Git only allows you to access repositories owned by
+yourself, i.e. the user who is running Git, by default. When Git
+is running as 'root' on a non-Windows platform that provides sudo,
+however, git checks the SUDO_UID environment variable that sudo creates
+and will allow access to the uid recorded as its value in addition to
+the id from 'root'.
+This is to make it easy to perform the common installation sequence
+"make && sudo make install". A git process running under 'sudo' runs as
+'root', but the 'sudo' command exports the environment variable to record
+the id of the original user.
+If that is not what you prefer and you want git to trust only
+repositories that are owned by root instead, then you can remove
+the `SUDO_UID` variable from root's environment before invoking git.
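++
+For example, to run a one-off command as 'root' without honoring `SUDO_UID`
+(a minimal sketch; the repository path is hypothetical):
+
+	sudo env -u SUDO_UID git -C /srv/project.git fsck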
diff --git a/Documentation/config/transfer.txt b/Documentation/config/transfer.txt
index b49429e..7ed917f 100644
--- a/Documentation/config/transfer.txt
+++ b/Documentation/config/transfer.txt
@@ -1,3 +1,41 @@
+transfer.credentialsInUrl::
+ A configured URL can contain plaintext credentials in the form
+ `<protocol>://<user>:<password>@<domain>/<path>`. You may want
+ to warn or forbid the use of such configuration (in favor of
+ using linkgit:git-credential[1]). This will be used on
+ linkgit:git-clone[1], linkgit:git-fetch[1], linkgit:git-push[1],
+ and any other direct use of the configured URL.
++
+Note that this is currently limited to detecting credentials in
+`remote.<name>.url` configuration, it won't detect credentials in
+`remote.<name>.pushurl` configuration.
++
+You might want to enable this to prevent inadvertent credentials
+exposure, e.g. because:
++
+* The OS or system where you're running git may not provide a way or
+ otherwise allow you to configure the permissions of the
+ configuration file where the username and/or password are stored.
+* Even if it does, having such data stored "at rest" might expose you
+ in other ways, e.g. a backup process might copy the data to another
+ system.
+* The git programs will pass the full URL to one another as arguments
+ on the command-line, meaning the credentials will be exposed to other
+ users on OS's or systems that allow other users to see the full
+ process list of other users. On linux the "hidepid" setting
+  process list of other users. On Linux the "hidepid" setting
++
+If such concerns don't apply to you then you probably don't need to be
+concerned about credentials exposure due to storing that sensitive
+data in git's configuration files. If you do want to use this, set
+`transfer.credentialsInUrl` to one of these values:
++
+* `allow` (default): Git will proceed with its activity without warning.
+* `warn`: Git will write a warning message to `stderr` when parsing a URL
+ with a plaintext credential.
+* `die`: Git will write a failure message to `stderr` when parsing a URL
+ with a plaintext credential.
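++
+For example, to be warned whenever a configured URL embeds a credential (a
+minimal illustration):
+
+	git config --global transfer.credentialsInUrl warn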
+
transfer.fsckObjects::
When `fetch.fsckObjects` or `receive.fsckObjects` are
not set, the value of this variable is used instead.
diff --git a/Documentation/git-archive.txt b/Documentation/git-archive.txt
index bc4e76a..56989a2 100644
--- a/Documentation/git-archive.txt
+++ b/Documentation/git-archive.txt
@@ -49,7 +49,9 @@ OPTIONS
Report progress to stderr.
--prefix=<prefix>/::
- Prepend <prefix>/ to each filename in the archive.
+ Prepend <prefix>/ to paths in the archive. Can be repeated; its
+	rightmost value is used for all tracked files. See below for which
+	value gets used by `--add-file` and `--add-virtual-file`.
-o <file>::
--output=<file>::
@@ -57,9 +59,26 @@ OPTIONS
--add-file=<file>::
Add a non-tracked file to the archive. Can be repeated to add
+ multiple files. The path of the file in the archive is built by
+ concatenating the value of the last `--prefix` option (if any)
+ before this `--add-file` and the basename of <file>.
+
+--add-virtual-file=<path>:<content>::
+ Add the specified contents to the archive. Can be repeated to add
multiple files. The path of the file in the archive is built
- by concatenating the value for `--prefix` (if any) and the
- basename of <file>.
+ by concatenating the value of the last `--prefix` option (if any)
+ before this `--add-virtual-file` and `<path>`.
++
+The `<path>` argument can start and end with a literal double-quote
+character; the contained file name is interpreted as a C-style string,
+i.e. the backslash is interpreted as escape character. The path must
+be quoted if it contains a colon, to avoid the colon from being
+misinterpreted as the separator between the path and the contents, or
+if the path begins or ends with a double-quote character.
++
+The file mode is limited to a regular file, and the option may be
+subject to platform-dependent command-line limits. For non-trivial
+cases, write an untracked file and use `--add-file` instead.
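++
+For example, to embed a small version file without creating it on disk (a
+minimal sketch; the file name and contents are hypothetical):
+
+	git archive -o release.tar --prefix=release/ \
+		--add-virtual-file=VERSION:2.37.0 HEAD
+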
--worktree-attributes::
Look for attributes in .gitattributes files in the working tree
@@ -194,6 +213,12 @@ EXAMPLES
commit on the current branch. Note that the output format is
inferred by the extension of the output file.
+`git archive -o latest.tar --prefix=build/ --add-file=configure --prefix= HEAD`::
+
+ Creates a tar archive that contains the contents of the latest
+ commit on the current branch with no prefix and the untracked
+ file 'configure' with the prefix 'build/'.
+
`git config tar.tar.xz.command "xz -c"`::
Configure a "tar.xz" format for making LZMA-compressed tarfiles.
diff --git a/Documentation/git-branch.txt b/Documentation/git-branch.txt
index c8b4f9c..ae82378 100644
--- a/Documentation/git-branch.txt
+++ b/Documentation/git-branch.txt
@@ -221,13 +221,17 @@ The exact upstream branch is chosen depending on the optional argument:
itself as the upstream; `--track=inherit` means to copy the upstream
configuration of the start-point branch.
+
-`--track=direct` is the default when the start point is a remote-tracking branch.
-Set the branch.autoSetupMerge configuration variable to `false` if you
-want `git switch`, `git checkout` and `git branch` to always behave as if `--no-track`
-were given. Set it to `always` if you want this behavior when the
-start-point is either a local or remote-tracking branch. Set it to
-`inherit` if you want to copy the tracking configuration from the
-branch point.
+The branch.autoSetupMerge configuration variable specifies how `git switch`,
+`git checkout` and `git branch` should behave when neither `--track` nor
+`--no-track` are specified:
++
+The default option, `true`, behaves as though `--track=direct`
+were given whenever the start-point is a remote-tracking branch.
+`false` behaves as if `--no-track` were given. `always` behaves as though
+`--track=direct` were given. `inherit` behaves as though `--track=inherit`
+were given. `simple` behaves as though `--track=direct` were given only when
+the start-point is a remote-tracking branch and the new branch has the same
+name as the remote branch.
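++
+For example, to only set up tracking when the new branch has the same name as
+the remote branch (a minimal illustration):
+
+	git config --global branch.autoSetupMerge simple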
+
See linkgit:git-pull[1] and linkgit:git-config[1] for additional discussion on
how the `branch.<name>.remote` and `branch.<name>.merge` options are used.
diff --git a/Documentation/git-config.txt b/Documentation/git-config.txt
index bdcfd94..9376e39 100644
--- a/Documentation/git-config.txt
+++ b/Documentation/git-config.txt
@@ -248,7 +248,7 @@ Valid `<type>`'s include:
--show-scope::
Similar to `--show-origin` in that it augments the output of
all queried config options with the scope of that value
- (local, global, system, command).
+ (worktree, local, global, system, command).
--get-colorbool <name> [<stdout-is-tty>]::
diff --git a/Documentation/git-gc.txt b/Documentation/git-gc.txt
index 853967d..0af7540 100644
--- a/Documentation/git-gc.txt
+++ b/Documentation/git-gc.txt
@@ -54,6 +54,10 @@ other housekeeping tasks (e.g. rerere, working trees, reflog...) will
be performed as well.
+--cruft::
+ When expiring unreachable objects, pack them separately into a
+ cruft pack instead of storing them as loose objects.
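++
+For example (a minimal illustration):
+
+	git gc --cruft --prune=2.weeks.ago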
+
--prune=<date>::
Prune loose objects older than date (default is 2 weeks ago,
overridable by the config variable `gc.pruneExpire`).
diff --git a/Documentation/git-mergetool.txt b/Documentation/git-mergetool.txt
index e587c77..f784027 100644
--- a/Documentation/git-mergetool.txt
+++ b/Documentation/git-mergetool.txt
@@ -101,6 +101,7 @@ success of the resolution after the custom tool has exited.
CONFIGURATION
-------------
+:git-mergetool: 1
include::config/mergetool.txt[]
TEMPORARY FILES
@@ -113,6 +114,13 @@ Setting the `mergetool.keepBackup` configuration variable to `false`
causes `git mergetool` to automatically remove the backup as files
are successfully merged.
+BACKEND SPECIFIC HINTS
+----------------------
+
+vimdiff
+~~~~~~~
+include::mergetools/vimdiff.txt[]
+
GIT
---
Part of the linkgit:git[1] suite
diff --git a/Documentation/git-p4.txt b/Documentation/git-p4.txt
index e21fcd8..de5ee67 100644
--- a/Documentation/git-p4.txt
+++ b/Documentation/git-p4.txt
@@ -636,7 +636,42 @@ git-p4.pathEncoding::
Git expects paths encoded as UTF-8. Use this config to tell git-p4
what encoding Perforce had used for the paths. This encoding is used
to transcode the paths to UTF-8. As an example, Perforce on Windows
- often uses "cp1252" to encode path names.
+ often uses "cp1252" to encode path names. If this option is passed
+ into a p4 clone request, it is persisted in the resulting new git
+ repo.
+
+git-p4.metadataDecodingStrategy::
+	Perforce keeps the encoding of changelist descriptions and user
+ full names as stored by the client on a given OS. The p4v client
+ uses the OS-local encoding, and so different users can end up storing
+ different changelist descriptions or user full names in different
+ encodings, in the same depot.
+ Git tolerates inconsistent/incorrect encodings in commit messages
+ and author names, but expects them to be specified in utf-8.
+ git-p4 can use three different decoding strategies in handling the
+ encoding uncertainty in Perforce: 'passthrough' simply passes the
+ original bytes through from Perforce to git, creating usable but
+ incorrectly-encoded data when the Perforce data is encoded as
+ anything other than utf-8. 'strict' expects the Perforce data to be
+ encoded as utf-8, and fails to import when this is not true.
+ 'fallback' attempts to interpret the data as utf-8, and otherwise
+ falls back to using a secondary encoding - by default the common
+	Windows encoding 'cp1252' - with upper-range bytes escaped if
+ decoding with the fallback encoding also fails.
+ Under python2 the default strategy is 'passthrough' for historical
+ reasons, and under python3 the default is 'fallback'.
+ When 'strict' is selected and decoding fails, the error message will
+ propose changing this config parameter as a workaround. If this
+ option is passed into a p4 clone request, it is persisted into the
+ resulting new git repo.
+
+git-p4.metadataFallbackEncoding::
+ Specify the fallback encoding to use when decoding Perforce author
+	names and changelist descriptions using the 'fallback' strategy
+ (see git-p4.metadataDecodingStrategy). The fallback encoding will
+ only be used when decoding as utf-8 fails. This option defaults to
+ cp1252, a common windows encoding. If this option is passed into a
+	cp1252, a common Windows encoding. If this option is passed into a
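++
+For example, to force the 'fallback' strategy with a different fallback
+encoding before importing (a minimal illustration; the encoding choice is
+arbitrary):
+
+	git config --global git-p4.metadataDecodingStrategy fallback
+	git config --global git-p4.metadataFallbackEncoding iso8859-1
+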
git-p4.largeFileSystem::
Specify the system that is used for large (binary) files. Please note
diff --git a/Documentation/git-pack-objects.txt b/Documentation/git-pack-objects.txt
index f8344e1..a9995a9 100644
--- a/Documentation/git-pack-objects.txt
+++ b/Documentation/git-pack-objects.txt
@@ -13,6 +13,7 @@ SYNOPSIS
[--no-reuse-delta] [--delta-base-offset] [--non-empty]
[--local] [--incremental] [--window=<n>] [--depth=<n>]
[--revs [--unpacked | --all]] [--keep-pack=<pack-name>]
+ [--cruft] [--cruft-expiration=<time>]
[--stdout [--filter=<filter-spec>] | <base-name>]
[--shallow] [--keep-true-parents] [--[no-]sparse] < <object-list>
@@ -95,6 +96,35 @@ base-name::
Incompatible with `--revs`, or options that imply `--revs` (such as
`--all`), with the exception of `--unpacked`, which is compatible.
+--cruft::
+ Packs unreachable objects into a separate "cruft" pack, denoted
+ by the existence of a `.mtimes` file. Typically used by `git
+ repack --cruft`. Callers provide a list of pack names and
+ indicate which packs will remain in the repository, along with
+ which packs will be deleted (indicated by the `-` prefix). The
+ contents of the cruft pack are all objects not contained in the
+ surviving packs which have not exceeded the grace period (see
+ `--cruft-expiration` below), or which have exceeded the grace
+	period, but are reachable from another object which hasn't.
++
+When the input lists a pack containing all reachable objects (and lists
+all other packs as pending deletion), the corresponding cruft pack will
+contain all unreachable objects (with mtime newer than the
+`--cruft-expiration`) along with any unreachable objects whose mtime is
+older than the `--cruft-expiration` but which are reachable from an
+unreachable object whose mtime is newer than the `--cruft-expiration`.
++
+Incompatible with `--unpack-unreachable`, `--keep-unreachable`,
+`--pack-loose-unreachable`, `--stdin-packs`, as well as any other
+options which imply `--revs`. Also incompatible with `--max-pack-size`;
+when this option is set, the maximum pack size is not inferred from
+`pack.packSizeLimit`.
+
+--cruft-expiration=<approxidate>::
+ If specified, objects are eliminated from the cruft pack if they
+ have an mtime older than `<approxidate>`. If unspecified (and
+ given `--cruft`), then no objects are eliminated.
+
--window=<n>::
--depth=<n>::
These two options affect how the objects contained in
diff --git a/Documentation/git-read-tree.txt b/Documentation/git-read-tree.txt
index a5356a2..b9bfdc0 100644
--- a/Documentation/git-read-tree.txt
+++ b/Documentation/git-read-tree.txt
@@ -375,10 +375,13 @@ have finished your work-in-progress), attempt the merge again.
SPARSE CHECKOUT
---------------
-Note: The `update-index` and `read-tree` primitives for supporting the
-skip-worktree bit predated the introduction of
-linkgit:git-sparse-checkout[1]. Users are encouraged to use
-`sparse-checkout` in preference to these low-level primitives.
+Note: The skip-worktree capabilities in linkgit:git-update-index[1]
+and `read-tree` predated the introduction of
+linkgit:git-sparse-checkout[1]. Users are encouraged to use the
+`sparse-checkout` command in preference to these plumbing commands for
+sparse-checkout/skip-worktree related needs. However, the information
+below might be useful to users trying to understand the pattern style
+used in non-cone mode of the `sparse-checkout` command.
"Sparse checkout" allows populating the working directory sparsely.
It uses the skip-worktree bit (see linkgit:git-update-index[1]) to
diff --git a/Documentation/git-rebase.txt b/Documentation/git-rebase.txt
index 9da4647..262fb01 100644
--- a/Documentation/git-rebase.txt
+++ b/Documentation/git-rebase.txt
@@ -215,9 +215,10 @@ leave out at most one of A and B, in which case it defaults to HEAD.
--keep-base::
Set the starting point at which to create the new commits to the
- merge base of <upstream> <branch>. Running
+ merge base of <upstream> and <branch>. Running
'git rebase --keep-base <upstream> <branch>' is equivalent to
- running 'git rebase --onto <upstream>... <upstream>'.
+ running
+ 'git rebase --onto <upstream>...<branch> <upstream> <branch>'.
+
This option is useful in the case where one is developing a feature on
top of an upstream branch. While the feature is being worked on, the
diff --git a/Documentation/git-remote.txt b/Documentation/git-remote.txt
index cde9614..1dec314 100644
--- a/Documentation/git-remote.txt
+++ b/Documentation/git-remote.txt
@@ -35,6 +35,8 @@ OPTIONS
-v::
--verbose::
Be a little more verbose and show remote url after name.
+ For promisor remotes, also show which filter (`blob:none` etc.)
+	is configured.
NOTE: This must be placed between `remote` and subcommand.
diff --git a/Documentation/git-repack.txt b/Documentation/git-repack.txt
index ee30edc..0bf1389 100644
--- a/Documentation/git-repack.txt
+++ b/Documentation/git-repack.txt
@@ -63,6 +63,17 @@ to the new separate pack will be written.
Also run 'git prune-packed' to remove redundant
loose object files.
+--cruft::
+ Same as `-a`, unless `-d` is used. Then any unreachable objects
+ are packed into a separate cruft pack. Unreachable objects can
+ be pruned using the normal expiry rules with the next `git gc`
+ invocation (see linkgit:git-gc[1]). Incompatible with `-k`.
+
+--cruft-expiration=<approxidate>::
+ Expire unreachable objects older than `<approxidate>`
+ immediately instead of waiting for the next `git gc` invocation.
+ Only useful with `--cruft -d`.
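++
+For example, to collect unreachable objects into a cruft pack and expire the
+ones older than a week in one pass (a minimal illustration):
+
+	git repack --cruft -d --cruft-expiration=1.week.ago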
+
-l::
Pass the `--local` option to 'git pack-objects'. See
linkgit:git-pack-objects[1].
diff --git a/Documentation/git-sparse-checkout.txt b/Documentation/git-sparse-checkout.txt
index 88e55f4..3776705 100644
--- a/Documentation/git-sparse-checkout.txt
+++ b/Documentation/git-sparse-checkout.txt
@@ -15,15 +15,15 @@ SYNOPSIS
DESCRIPTION
-----------
-This command is used to create sparse checkouts, which means that it
-changes the working tree from having all tracked files present, to only
-have a subset of them. It can also switch which subset of files are
-present, or undo and go back to having all tracked files present in the
-working copy.
+This command is used to create sparse checkouts, which change the
+working tree from having all tracked files present to only having a
+subset of those files. It can also switch which subset of files are
+present, or undo and go back to having all tracked files present in
+the working copy.
The subset of files is chosen by providing a list of directories in
-cone mode (which is recommended), or by providing a list of patterns
-in non-cone mode.
+cone mode (the default), or by providing a list of patterns in
+non-cone mode.
When in a sparse-checkout, other Git commands behave a bit differently.
For example, switching branches will not update paths outside the
@@ -44,9 +44,9 @@ COMMANDS
Enable the necessary sparse-checkout config settings
(`core.sparseCheckout`, `core.sparseCheckoutCone`, and
`index.sparse`) if they are not already set to the desired values,
- and write a set of patterns to the sparse-checkout file from the
- list of arguments following the 'set' subcommand. Update the
- working directory to match the new patterns.
+ populate the sparse-checkout file from the list of arguments
+ following the 'set' subcommand, and update the working directory to
+ match.
+
To ensure that adjusting the sparse-checkout settings within a worktree
does not alter the sparse-checkout settings in other worktrees, the 'set'
@@ -60,22 +60,20 @@ When the `--stdin` option is provided, the directories or patterns are
read from standard in as a newline-delimited list instead of from the
arguments.
+
-When `--cone` is passed or `core.sparseCheckoutCone` is enabled, the
-input list is considered a list of directories. This allows for
-better performance with a limited set of patterns (see 'CONE PATTERN
-SET' below). The input format matches the output of `git ls-tree
---name-only`. This includes interpreting pathnames that begin with a
-double quote (") as C-style quoted strings. Note that the set command
-will write patterns to the sparse-checkout file to include all files
-contained in those directories (recursively) as well as files that are
-siblings of ancestor directories. This may become the default in the
-future; --no-cone can be passed to request non-cone mode.
+By default, the input list is considered a list of directories, matching
+the output of `git ls-tree -d --name-only`. This includes interpreting
+pathnames that begin with a double quote (") as C-style quoted strings.
+Note that all files under the specified directories (at any depth) will
+be included in the sparse checkout, as well as files that are siblings
+of either the given directory or any of its ancestors (see 'CONE PATTERN
+SET' below for more details). In the past, this was not the default,
+and `--cone` needed to be specified or `core.sparseCheckoutCone` needed
+to be enabled.
+
-When `--no-cone` is passed or `core.sparseCheckoutCone` is not enabled,
-the input list is considered a list of patterns. This mode is harder
-to use and less performant, and is thus not recommended. See the
-"Sparse Checkout" section of linkgit:git-read-tree[1] and the "Pattern
-Set" sections below for more details.
+When `--no-cone` is passed, the input list is considered a list of
+patterns. This mode has a number of drawbacks, including not working
+with some options like `--sparse-index`. As explained in the
+"Non-cone Problems" section below, we do not recommend using it.
+
Use the `--[no-]sparse-index` option to use a sparse index (the
default is to not use it). A sparse index reduces the size of the
@@ -137,8 +135,45 @@ paths to pass to a subsequent 'set' or 'add' command. However,
the disable command, so the easy restore of calling a plain `init`
decreased in utility.
-SPARSE CHECKOUT
----------------
+EXAMPLES
+--------
+`git sparse-checkout set MY/DIR1 SUB/DIR2`::
+
+ Change to a sparse checkout with all files (at any depth) under
+ MY/DIR1/ and SUB/DIR2/ present in the working copy (plus all
+ files immediately under MY/ and SUB/ and the toplevel
+ directory). If already in a sparse checkout, change which files
+ are present in the working copy to this new selection. Note
+ that this command will also delete all ignored files in any
+ directory that no longer has either tracked or
+ non-ignored-untracked files present.
+
+`git sparse-checkout disable`::
+
+ Repopulate the working directory with all files, disabling sparse
+ checkouts.
+
+`git sparse-checkout add SOME/DIR/ECTORY`::
+
+ Add all files under SOME/DIR/ECTORY/ (at any depth) to the
+ sparse checkout, as well as all files immediately under
+ SOME/DIR/ and immediately under SOME/. Must already be in a
+ sparse checkout before using this command.
+
+`git sparse-checkout reapply`::
+
+ It is possible for commands to update the working tree in a
+ way that does not respect the selected sparsity directories.
+ This can come from tools external to Git writing files, or
+ even affect Git commands because of either special cases (such
+ as hitting conflicts when merging/rebasing), or because some
+ commands didn't fully support sparse checkouts (e.g. the old
+ `recursive` merge backend had only limited support). This
+ command reapplies the existing sparse directory specifications
+ to make the working directory match.
+
+INTERNALS -- SPARSE CHECKOUT
+----------------------------
"Sparse checkout" allows populating the working directory sparsely. It
uses the skip-worktree bit (see linkgit:git-update-index[1]) to tell Git
@@ -155,64 +190,196 @@ directory, it updates the skip-worktree bits in the index based
on this file. The files matching the patterns in the file will
appear in the working directory, and the rest will not.
-To enable the sparse-checkout feature, run `git sparse-checkout set` to
-set the patterns you want to use.
+INTERNALS -- NON-CONE PROBLEMS
+------------------------------
+
+The `$GIT_DIR/info/sparse-checkout` file populated by the `set` and
+`add` subcommands is defined to be a bunch of patterns (one per line)
+using the same syntax as `.gitignore` files. In cone mode, these
+patterns are restricted to matching directories (and users only ever
+need supply or see directory names), while in non-cone mode any
+gitignore-style pattern is permitted. Using the full gitignore-style
+patterns in non-cone mode has a number of shortcomings:
+
+ * Fundamentally, it makes various worktree-updating processes (pull,
+ merge, rebase, switch, reset, checkout, etc.) require O(N*M) pattern
+ matches, where N is the number of patterns and M is the number of
+ paths in the index. This scales poorly.
+
+  * Avoiding the scaling issue requires limiting the number of
+    patterns by specifying a leading directory name or glob.
+
+ * Passing globs on the command line is error-prone as users may
+ forget to quote the glob, causing the shell to expand it into all
+ matching files and pass them all individually along to
+ sparse-checkout set/add. While this could also be a problem with
+ e.g. "git grep -- *.c", mistakes with grep/log/status appear in
+ the immediate output. With sparse-checkout, the mistake gets
+ recorded at the time the sparse-checkout command is run and might
+ not be problematic until the user later switches branches or rebases
+ or merges, thus putting a delay between the user's error and when
+ they have a chance to catch/notice it.
+
+ * Related to the previous item, sparse-checkout has an 'add'
+ subcommand but no 'remove' subcommand. Even if a 'remove'
+ subcommand were added, undoing an accidental unquoted glob runs
+ the risk of "removing too much", as it may remove entries that had
+ been included before the accidental add.
+
+ * Non-cone mode uses gitignore-style patterns to select what to
+ *include* (with the exception of negated patterns), while
+ .gitignore files use gitignore-style patterns to select what to
+ *exclude* (with the exception of negated patterns). The
+ documentation on gitignore-style patterns usually does not talk in
+ terms of matching or non-matching, but on what the user wants to
+ "exclude". This can cause confusion for users trying to learn how
+ to specify sparse-checkout patterns to get their desired behavior.
+
+ * Every other git subcommand that wants to provide "special path
+ pattern matching" of some sort uses pathspecs, but non-cone mode
+ for sparse-checkout uses gitignore patterns, which feels
+ inconsistent.
+
+ * It has edge cases where the "right" behavior is unclear. Two examples:
+
+ First, two users are in a subdirectory, and the first runs
+ git sparse-checkout set '/toplevel-dir/*.c'
+ while the second runs
+ git sparse-checkout set relative-dir
+ Should those arguments be transliterated into
+ current/subdirectory/toplevel-dir/*.c
+ and
+ current/subdirectory/relative-dir
+ before inserting into the sparse-checkout file? The user who typed
+ the first command is probably aware that arguments to set/add are
+ supposed to be patterns in non-cone mode, and probably would not be
+ happy with such a transliteration. However, many gitignore-style
+ patterns are just paths, which might be what the user who typed the
+ second command was thinking, and they'd be upset if their argument
+ wasn't transliterated.
+
+ Second, what should bash-completion complete on for set/add commands
+ for non-cone users? If it suggests paths, is it exacerbating the
+ problem above? Also, if it suggests paths, what if the user has a
+ file or directory that begins with either a '!' or '#' or has a '*',
+ '\', '?', '[', or ']' in its name? And if it suggests paths, will
+    it complete "/pro" to "/proc" (in the root filesystem) rather than to
+ "/progress.txt" in the current directory? (Note that users are
+ likely to want to start paths with a leading '/' in non-cone mode,
+ for the same reason that .gitignore files often have one.)
+ Completing on files or directories might give nasty surprises in
+ all these cases.
+
+ * The excessive flexibility made other extensions essentially
+ impractical. `--sparse-index` is likely impossible in non-cone
+ mode; even if it is somehow feasible, it would have been far more
+ work to implement and may have been too slow in practice. Some
+ ideas for adding coupling between partial clones and sparse
+ checkouts are only practical with a more restricted set of paths
+ as well.
+
+For all these reasons, non-cone mode is deprecated. Please switch to
+using cone mode.
+
+
+INTERNALS -- CONE MODE HANDLING
+-------------------------------
+
+The "cone mode", which is the default, lets you specify only what
+directories to include. For any directory specified, all paths below
+that directory will be included, and any paths immediately under
+leading directories (including the toplevel directory) will also be
+included. Thus, if you specified the directory
+ Documentation/technical/
+then your sparse checkout would contain:
+
+ * all files in the toplevel-directory
+ * all files immediately under Documentation/
+ * all files at any depth under Documentation/technical/
+
+Also, in cone mode, even if no directories are specified, the
+files in the toplevel directory will be included.
-To repopulate the working directory with all files, use the
-`git sparse-checkout disable` command.
+When changing the sparse-checkout patterns in cone mode, Git will inspect each
+tracked directory that is not within the sparse-checkout cone to see if it
+contains any untracked files. If all of those files are ignored due to the
+`.gitignore` patterns, then the directory will be deleted. If any of the
+untracked files within that directory is not ignored, then no deletions will
+occur within that directory and a warning message will appear. If these files
+are important, then reset your sparse-checkout definition so they are included,
+use `git add` and `git commit` to store them, then remove any remaining files
+manually to ensure Git can behave optimally.
+See also the "Internals -- Cone Pattern Set" section to learn how the
+directories are transformed under the hood into a subset of the
+Full Pattern Set of sparse-checkout.
-FULL PATTERN SET
-----------------
-By default, the sparse-checkout file uses the same syntax as `.gitignore`
-files.
+INTERNALS -- FULL PATTERN SET
+-----------------------------
-While `$GIT_DIR/info/sparse-checkout` is usually used to specify what
-files are included, you can also specify what files are _not_ included,
-using negative patterns. For example, to remove the file `unwanted`:
+The full pattern set allows for arbitrary pattern matches and complicated
+inclusion/exclusion rules. These can result in O(N*M) pattern matches when
+updating the index, where N is the number of patterns and M is the number
+of paths in the index. To combat this performance issue, a more restricted
+pattern set is allowed when `core.sparseCheckoutCone` is enabled.
+
+The sparse-checkout file uses the same syntax as `.gitignore` files;
+see linkgit:gitignore[5] for details. Here, though, the patterns are
+usually being used to select which files to include rather than which
+files to exclude. (However, it can get a bit confusing since
+gitignore-style patterns have negations defined by patterns which
+begin with a '!', so you can also select files to _not_ include.)
+
+For example, to select everything, and then to remove the file
+`unwanted` (so that every file will appear in your working tree except
+the file named `unwanted`):
+
+ git sparse-checkout set --no-cone '/*' '!unwanted'
+
+These patterns are just placed into the
+`$GIT_DIR/info/sparse-checkout` file as-is, so the contents of that file
+at this point would be
----------------
/*
!unwanted
----------------
+See also the "Sparse Checkout" section of linkgit:git-read-tree[1] to
+learn more about the gitignore-style patterns used in sparse
+checkouts.
-CONE PATTERN SET
-----------------
-The full pattern set allows for arbitrary pattern matches and complicated
-inclusion/exclusion rules. These can result in O(N*M) pattern matches when
-updating the index, where N is the number of patterns and M is the number
-of paths in the index. To combat this performance issue, a more restricted
-pattern set is allowed when `core.sparseCheckoutCone` is enabled.
+INTERNALS -- CONE PATTERN SET
+-----------------------------
-The accepted patterns in the cone pattern set are:
+In cone mode, only directories are accepted, but they are translated into
+the same gitignore-style patterns used in the full pattern set. We refer
+to the particular patterns used in this mode as being of one of two types:
1. *Recursive:* All paths inside a directory are included.
2. *Parent:* All files immediately inside a directory are included.
-In addition to the above two patterns, we also expect that all files in the
-root directory are included. If a recursive pattern is added, then all
-leading directories are added as parent patterns.
-
-By default, when running `git sparse-checkout init`, the root directory is
-added as a parent pattern. At this point, the sparse-checkout file contains
-the following patterns:
+Since cone mode always includes files at the toplevel, when running
+`git sparse-checkout set` with no directories specified, the toplevel
+directory is added as a parent pattern. At this point, the
+sparse-checkout file contains the following patterns:
----------------
/*
!/*/
----------------
-This says "include everything in root, but nothing two levels below root."
+This says "include everything immediately under the toplevel
+directory, but nothing at any level below that."
-When in cone mode, the `git sparse-checkout set` subcommand takes a list of
-directories instead of a list of sparse-checkout patterns. In this mode,
-the command `git sparse-checkout set A/B/C` sets the directory `A/B/C` as
-a recursive pattern, the directories `A` and `A/B` are added as parent
-patterns. The resulting sparse-checkout file is now
+When in cone mode, the `git sparse-checkout set` subcommand takes a
+list of directories. The command `git sparse-checkout set A/B/C` sets
+the directory `A/B/C` as a recursive pattern, the directories `A` and
+`A/B` are added as parent patterns. The resulting sparse-checkout file
+is now
----------------
/*
@@ -227,14 +394,18 @@ patterns. The resulting sparse-checkout file is now
Here, order matters, so the negative patterns are overridden by the positive
patterns that appear lower in the file.
-If `core.sparseCheckoutCone=true`, then Git will parse the sparse-checkout file
-expecting patterns of these types. Git will warn if the patterns do not match.
-If the patterns do match the expected format, then Git will use faster hash-
-based algorithms to compute inclusion in the sparse-checkout.
+Unless `core.sparseCheckoutCone` is explicitly set to `false`, Git will
+parse the sparse-checkout file expecting patterns of these types. Git will
+warn if the patterns do not match. If the patterns do match the expected
+format, then Git will use faster hash-based algorithms to compute inclusion
+in the sparse-checkout. If they do not match, git will behave as though
+`core.sparseCheckoutCone` was false, regardless of its setting.
-In the cone mode case, the `git sparse-checkout list` subcommand will list the
-directories that define the recursive patterns. For the example sparse-checkout
-file above, the output is as follows:
+In the cone mode case, despite the fact that full patterns are written
+to the $GIT_DIR/info/sparse-checkout file, the `git sparse-checkout
+list` subcommand will list the directories that define the recursive
+patterns. For the example sparse-checkout file above, the output is as
+follows:
--------------------------
$ git sparse-checkout list
@@ -246,19 +417,9 @@ case-insensitive check. This corrects for case mismatched filenames in the
'git sparse-checkout set' command to reflect the expected cone in the working
directory.
-When changing the sparse-checkout patterns in cone mode, Git will inspect each
-tracked directory that is not within the sparse-checkout cone to see if it
-contains any untracked files. If all of those files are ignored due to the
-`.gitignore` patterns, then the directory will be deleted. If any of the
-untracked files within that directory is not ignored, then no deletions will
-occur within that directory and a warning message will appear. If these files
-are important, then reset your sparse-checkout definition so they are included,
-use `git add` and `git commit` to store them, then remove any remaining files
-manually to ensure Git can behave optimally.
-
-SUBMODULES
-----------
+INTERNALS -- SUBMODULES
+-----------------------
If your repository contains one or more submodules, then submodules
are populated based on interactions with the `git submodule` command.
diff --git a/Documentation/git.txt b/Documentation/git.txt
index 13f83a2..302607a 100644
--- a/Documentation/git.txt
+++ b/Documentation/git.txt
@@ -9,7 +9,7 @@ git - the stupid content tracker
SYNOPSIS
--------
[verse]
-'git' [--version] [--help] [-C <path>] [-c <name>=<value>]
+'git' [-v | --version] [-h | --help] [-C <path>] [-c <name>=<value>]
[--exec-path[=<path>]] [--html-path] [--man-path] [--info-path]
[-p|--paginate|-P|--no-pager] [--no-replace-objects] [--bare]
[--git-dir=<path>] [--work-tree=<path>] [--namespace=<name>]
@@ -39,6 +39,7 @@ or https://git-scm.com/docs.
OPTIONS
-------
+-v::
--version::
Prints the Git suite version that the 'git' program came from.
+
@@ -46,6 +47,7 @@ This option is internally converted to `git version ...` and accepts
the same options as the linkgit:git-version[1] command. If `--help` is
also given, it takes precedence over `--version`.
+-h::
--help::
Prints the synopsis and a list of the most commonly used
commands. If the option `--all` or `-a` is given then all
diff --git a/Documentation/mergetools/vimdiff.txt b/Documentation/mergetools/vimdiff.txt
new file mode 100644
index 0000000..2d631e9
--- /dev/null
+++ b/Documentation/mergetools/vimdiff.txt
@@ -0,0 +1,194 @@
+Description
+^^^^^^^^^^^
+
+When specifying `--tool=vimdiff` in `git mergetool`, Git will open Vim with a
+4-window layout distributed in the following way:
+....
+------------------------------------------
+| | | |
+| LOCAL | BASE | REMOTE |
+| | | |
+------------------------------------------
+| |
+| MERGED |
+| |
+------------------------------------------
+....
+`LOCAL`, `BASE` and `REMOTE` are read-only buffers showing the contents of the
+conflicting file in specific commits ("commit you are merging into", "common
+ancestor commit" and "commit you are merging from", respectively).
+
+`MERGED` is a writable buffer where you have to resolve the conflicts (using the
+other read-only buffers as a reference). Once you are done, save and exit Vim as
+usual (`:wq`) or, if you want to abort, exit using `:cq`.
+
+Layout configuration
+^^^^^^^^^^^^^^^^^^^^
+
+You can change the window layout used by Vim by setting the configuration
+variable `mergetool.vimdiff.layout`, which accepts a string where the following
+separators have special meaning:
+
+ - `+` is used to "open a new tab"
+ - `,` is used to "open a new vertical split"
+ - `/` is used to "open a new horizontal split"
+ - `@` is used to indicate which is the file containing the final version after
+ solving the conflicts. If not present, `MERGED` will be used by default.
+
+The precedence of the operators is as follows (you can use parentheses to change
+it):
+
+ `@` > `+` > `/` > `,`
+
+Let's see some examples to understand how it works:
+
+* `layout = "(LOCAL,BASE,REMOTE)/MERGED"`
++
+--
+This is exactly the same as the default layout we have already seen.
+
+Note that `/` has precedence over `,` and thus the parentheses are not
+needed in this case. The next layout definition is equivalent:
+
+ layout = "LOCAL,BASE,REMOTE / MERGED"
+--
+* `layout = "LOCAL,MERGED,REMOTE"`
++
+--
+If, for some reason, we are not interested in the `BASE` buffer.
+....
+------------------------------------------
+| | | |
+| | | |
+| LOCAL | MERGED | REMOTE |
+| | | |
+| | | |
+------------------------------------------
+....
+--
+* `layout = "MERGED"`
++
+--
+Only the `MERGED` buffer will be shown. Note, however, that all the other
+ones are still loaded in vim, and you can access them with the "buffers"
+command.
+....
+------------------------------------------
+| |
+| |
+| MERGED |
+| |
+| |
+------------------------------------------
+....
+--
+* `layout = "@LOCAL,REMOTE"`
++
+--
+When `MERGED` is not present in the layout, you must "mark" one of the
+buffers with `@`. That will become the buffer you need to edit and
+save after resolving the conflicts.
+....
+------------------------------------------
+| | |
+| | |
+| | |
+| LOCAL | REMOTE |
+| | |
+| | |
+| | |
+------------------------------------------
+....
+--
+* `layout = "LOCAL,BASE,REMOTE / MERGED + BASE,LOCAL + BASE,REMOTE"`
++
+--
+Three tabs will open: the first one is a copy of the default layout, while
+the other two only show the differences between (`BASE` and `LOCAL`) and
+(`BASE` and `REMOTE`) respectively.
+....
+------------------------------------------
+| <TAB #1> | TAB #2 | TAB #3 | |
+------------------------------------------
+| | | |
+| LOCAL | BASE | REMOTE |
+| | | |
+------------------------------------------
+| |
+| MERGED |
+| |
+------------------------------------------
+....
+....
+------------------------------------------
+| TAB #1 | <TAB #2> | TAB #3 | |
+------------------------------------------
+| | |
+| | |
+| | |
+| BASE | LOCAL |
+| | |
+| | |
+| | |
+------------------------------------------
+....
+....
+------------------------------------------
+| TAB #1 | TAB #2 | <TAB #3> | |
+------------------------------------------
+| | |
+| | |
+| | |
+| BASE | REMOTE |
+| | |
+| | |
+| | |
+------------------------------------------
+....
+--
+* `layout = "LOCAL,BASE,REMOTE / MERGED + BASE,LOCAL + BASE,REMOTE + (LOCAL/BASE/REMOTE),MERGED"`
++
+--
+Same as the previous example, but adds a fourth tab with the same
+information as the first tab, with a different layout.
+....
+---------------------------------------------
+| TAB #1 | TAB #2 | TAB #3 | <TAB #4> |
+---------------------------------------------
+| LOCAL | |
+|---------------------| |
+| BASE | MERGED |
+|---------------------| |
+| REMOTE | |
+---------------------------------------------
+....
+Note how in the fourth tab definition we need to use parentheses to make `,`
+have precedence over `/`.
+--
+
+Variants
+^^^^^^^^
+
+Instead of `--tool=vimdiff`, you can also use one of these other variants:
+
+ * `--tool=gvimdiff`, to open gVim instead of Vim.
+
+ * `--tool=nvimdiff`, to open Neovim instead of Vim.
+
+When using these variants, in order to specify a custom layout you will have to
+set configuration variables `mergetool.gvimdiff.layout` and
+`mergetool.nvimdiff.layout` instead of `mergetool.vimdiff.layout`.
+
+In addition, for backwards compatibility with previous Git versions, you can
+also append `1`, `2` or `3` to either `vimdiff` or any of the variants (e.g.
+`vimdiff3`, `nvimdiff1`, etc.) to use a predefined layout.
+In other words, using `--tool=[g,n,]vimdiffx` is the same as using
+`--tool=[g,n,]vimdiff` and setting configuration variable
+`mergetool.[g,n,]vimdiff.layout` to...
+
+ * `x=1`: `"@LOCAL, REMOTE"`
+ * `x=2`: `"LOCAL, MERGED, REMOTE"`
+ * `x=3`: `"MERGED"`
+
+Example: using `--tool=gvimdiff2` will open `gvim` with three columns (LOCAL,
+MERGED and REMOTE).
diff --git a/Documentation/rev-list-options.txt b/Documentation/rev-list-options.txt
index fd4f4e2..195e74e 100644
--- a/Documentation/rev-list-options.txt
+++ b/Documentation/rev-list-options.txt
@@ -25,6 +25,11 @@ ordering and formatting options, such as `--reverse`.
--after=<date>::
Show commits more recent than a specific date.
+--since-as-filter=<date>::
+ Show all commits more recent than a specific date. This visits
+ all commits in the range, rather than stopping at the first commit which
+ is older than a specific date.
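++
+For example (a minimal illustration):
+
+	git log --since-as-filter="2022-01-01"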
+
--until=<date>::
--before=<date>::
Show commits older than a specific date.
diff --git a/Documentation/technical/api-error-handling.txt b/Documentation/technical/api-error-handling.txt
index 8be4f4d..70bf1d3 100644
--- a/Documentation/technical/api-error-handling.txt
+++ b/Documentation/technical/api-error-handling.txt
@@ -1,12 +1,34 @@
Error reporting in git
======================
-`BUG`, `die`, `usage`, `error`, and `warning` report errors of
+`BUG`, `bug`, `die`, `usage`, `error`, and `warning` report errors of
various kinds.
- `BUG` is for failed internal assertions that should never happen,
i.e. a bug in git itself.
+- `bug` (lower-case, not `BUG`) is supposed to be used like `BUG` but
+ prints a "BUG" message instead of calling `abort()`.
++
+A call to `bug()` will then result in a "real" call to the `BUG()`
+function, either explicitly by invoking `BUG_if_bug()` after call(s)
+to `bug()`, or implicitly at `exit()` time where we'll check if we
+encountered any outstanding `bug()` invocations.
++
+If there were no prior calls to `bug()` before invoking `BUG_if_bug()`
+the latter is a NOOP. The `BUG_if_bug()` function takes the same
+arguments as `BUG()` itself. Calling `BUG_if_bug()` explicitly isn't
+necessary, but ensures that we die as soon as possible.
++
+If you know you had prior calls to `bug()` then calling `BUG()` itself
+is equivalent to calling `BUG_if_bug()`, the latter being a wrapper
+calling `BUG()` if we've set a flag indicating that we've called
+`bug()`.
++
+This is for the convenience of APIs who'd like to potentially report
+more than one "bug", such as the optbug() validation in
+parse-options.c.
+
- `die` is for fatal application errors. It prints a message to
the user and exits with status 128.
diff --git a/Documentation/technical/api-trace2.txt b/Documentation/technical/api-trace2.txt
index bb13ca3..77a150b 100644
--- a/Documentation/technical/api-trace2.txt
+++ b/Documentation/technical/api-trace2.txt
@@ -5,7 +5,7 @@ information to stderr or a file. The Trace2 feature is inactive unless
explicitly enabled by enabling one or more Trace2 Targets.
The Trace2 API is intended to replace the existing (Trace1)
-printf-style tracing provided by the existing `GIT_TRACE` and
+`printf()`-style tracing provided by the existing `GIT_TRACE` and
`GIT_TRACE_PERFORMANCE` facilities. During initial implementation,
Trace2 and Trace1 may operate in parallel.
@@ -24,8 +24,8 @@ for example.
Trace2 is controlled using `trace2.*` config values in the system and
global config files and `GIT_TRACE2*` environment variables. Trace2 does
-not read from repo local or worktree config files or respect `-c`
-command line config settings.
+not read from repo local or worktree config files, nor does it respect
+`-c` command line config settings.
== Trace2 Targets
@@ -34,8 +34,8 @@ Format details are given in a later section.
=== The Normal Format Target
-The normal format target is a tradition printf format and similar
-to GIT_TRACE format. This format is enabled with the `GIT_TRACE2`
+The normal format target is a traditional `printf()` format and similar
+to the `GIT_TRACE` format. This format is enabled with the `GIT_TRACE2`
environment variable or the `trace2.normalTarget` system or global
config setting.
@@ -69,8 +69,8 @@ $ cat ~/log.normal
=== The Performance Format Target
The performance format target (PERF) is a column-based format to
-replace GIT_TRACE_PERFORMANCE and is suitable for development and
-testing, possibly to complement tools like gprof. This format is
+replace `GIT_TRACE_PERFORMANCE` and is suitable for development and
+testing, possibly to complement tools like `gprof`. This format is
enabled with the `GIT_TRACE2_PERF` environment variable or the
`trace2.perfTarget` system or global config setting.
@@ -128,7 +128,7 @@ yields
------------
$ cat ~/log.event
-{"event":"version","sid":"sid":"20190408T191610.507018Z-H9b68c35f-P000059a8","thread":"main","time":"2019-01-16T17:28:42.620713Z","file":"common-main.c","line":38,"evt":"3","exe":"2.20.1.155.g426c96fcdb"}
+{"event":"version","sid":"20190408T191610.507018Z-H9b68c35f-P000059a8","thread":"main","time":"2019-01-16T17:28:42.620713Z","file":"common-main.c","line":38,"evt":"3","exe":"2.20.1.155.g426c96fcdb"}
{"event":"start","sid":"20190408T191610.507018Z-H9b68c35f-P000059a8","thread":"main","time":"2019-01-16T17:28:42.621027Z","file":"common-main.c","line":39,"t_abs":0.001173,"argv":["git","version"]}
{"event":"cmd_name","sid":"20190408T191610.507018Z-H9b68c35f-P000059a8","thread":"main","time":"2019-01-16T17:28:42.621122Z","file":"git.c","line":432,"name":"version","hierarchy":"version"}
{"event":"exit","sid":"20190408T191610.507018Z-H9b68c35f-P000059a8","thread":"main","time":"2019-01-16T17:28:42.621236Z","file":"git.c","line":662,"t_abs":0.001227,"code":0}
@@ -170,9 +170,9 @@ Some functions have a `_va_fl()` suffix to indicate that they also
take a `va_list` argument.
Some functions have a `_printf_fl()` suffix to indicate that they also
-take a varargs argument.
+take a `printf()` style format with a variable number of arguments.
-There are CPP wrapper macros and ifdefs to hide most of these details.
+There are CPP wrapper macros and `#ifdef`s to hide most of these details.
See `trace2.h` for more details. The following discussion will only
describe the simplified forms.
@@ -234,7 +234,7 @@ Events are written as lines of the form:
is the event name.
`<event-message>`::
- is a free-form printf message intended for human consumption.
+ is a free-form `printf()` message intended for human consumption.
+
Note that this may contain embedded LF or CRLF characters that are
not escaped, so the event may spill across multiple lines.
@@ -300,7 +300,7 @@ This field is in anticipation of in-proc submodules in the future.
indicate a broad category, such as "index" or "status".
`<perf-event-message>`::
- is a free-form printf message intended for human consumption.
+ is a free-form `printf()` message intended for human consumption.
------------
15:33:33.532712 wt-status.c:2310 | d0 | main | region_enter | r1 | 0.126064 | | status | label:print
@@ -465,8 +465,8 @@ completed.)
------------
`"error"`::
- This event is emitted when one of the `BUG()`, `error()`, `die()`,
- `warning()`, or `usage()` functions are called.
+ This event is emitted when one of the `BUG()`, `bug()`, `error()`,
+ `die()`, `warning()`, or `usage()` functions are called.
+
------------
{
@@ -533,7 +533,7 @@ these special values are used:
------------
`"cmd_mode"`::
- This event, when present, describes the command variant This
+ This event, when present, describes the command variant. This
event may be emitted more than once.
+
------------
@@ -588,7 +588,7 @@ with "?".
`"child_exit"`::
This event is generated after the current process has returned
- from the waitpid() and collected the exit information from the
+ from the `waitpid()` and collected the exit information from the
child.
+
------------
@@ -609,7 +609,7 @@ process may be a shell script which doesn't have a session-id.)
+
Note that the `t_rel` field contains the observed run time in seconds
for the child process (starting before the fork/exec/spawn and
-stopping after the waitpid() and includes OS process creation overhead).
+stopping after the `waitpid()` and includes OS process creation overhead).
So this time will be slightly larger than the atexit time reported by
the child process itself.
@@ -635,7 +635,7 @@ process may be a shell script which doesn't have a session-id.)
+
This event is generated after the child is started in the background
and given a little time to boot up and start working. If the child
-startups normally and while the parent is still waiting, the "ready"
+starts up normally while the parent is still waiting, the "ready"
field will have the value "ready".
If the child is too slow to start and the parent times out, the field
will have the value "timeout".
@@ -949,7 +949,7 @@ atexit elapsed:3.868970 code:0
Regions::
- Regions can be use to time an interesting section of code.
+ Regions can be used to time an interesting section of code.
+
----------------
void wt_status_collect(struct wt_status *s)
@@ -1103,9 +1103,9 @@ Thread Events::
Thread messages added to a thread-proc.
+
-For example, the multithreaded preload-index code can be
+For example, the multi-threaded preload-index code can be
instrumented with a region around the thread pool and then
-per-thread start and exit events within the threadproc.
+per-thread start and exit events within the thread-proc.
+
----------------
static void *preload_thread(void *_data)
@@ -1214,11 +1214,11 @@ as each thread starts and allocates TLS storage.
There are a few issues to resolve before we can completely
switch to Trace2.
-* Updating existing tests that assume GIT_TRACE format messages.
+* Updating existing tests that assume `GIT_TRACE` format messages.
-* How to best handle custom GIT_TRACE_<key> messages?
+* How to best handle custom `GIT_TRACE_<key>` messages?
-** The GIT_TRACE_<key> mechanism allows each <key> to write to a
+** The `GIT_TRACE_<key>` mechanism allows each <key> to write to a
different file (in addition to just stderr).
** Do we want to maintain that ability or simply write to the existing
diff --git a/Documentation/technical/cruft-packs.txt b/Documentation/technical/cruft-packs.txt
new file mode 100644
index 0000000..d81f3a8
--- /dev/null
+++ b/Documentation/technical/cruft-packs.txt
@@ -0,0 +1,123 @@
+= Cruft packs
+
+The cruft packs feature offers an alternative to Git's traditional mechanism of
+removing unreachable objects. This document provides an overview of Git's
+pruning mechanism, and of how a cruft pack can be used instead to accomplish the
+same goal.
+
+== Background
+
+To remove unreachable objects from your repository, Git offers `git repack -Ad`
+(see linkgit:git-repack[1]). Quoting from the documentation:
+
+[quote]
+[...] unreachable objects in a previous pack become loose, unpacked objects,
+instead of being left in the old pack. [...] loose unreachable objects will be
+pruned according to normal expiry rules with the next 'git gc' invocation.
+
+Unreachable objects aren't removed immediately, since doing so could race with
+an incoming push which may reference an object which is about to be deleted.
+Instead, those unreachable objects are stored as loose objects and stay that way
+until they are older than the expiration window, at which point they are removed
+by linkgit:git-prune[1].
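As a rough sketch of this traditional flow (illustrative commands; the expiry
window is up to the operator):

------------
# repack reachable objects; unreachable ones become loose
git repack -Ad

# later, remove loose unreachable objects older than the grace period
git prune --expire=2.weeks.ago
------------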
+
+Git must store these unreachable objects loose in order to keep track of their
+per-object mtimes. If these unreachable objects were written into one big pack,
+then either freshening that pack (because an object contained within it was
+re-written) or creating a new pack of unreachable objects would cause the pack's
+mtime to get updated, and the objects within it would never leave the expiration
+window. Instead, objects are stored loose in order to keep track of the
+individual object mtimes and avoid a situation where all cruft objects are
+freshened at once.
+
+This can lead to undesirable situations when a repository contains many
+unreachable objects which have not yet left the grace period. Large directories
+in the shards of `.git/objects` decrease performance in the repository, and
+with enough unreachable objects this can even lead to inode starvation and
+degrade the performance of the whole system. Because these objects can never be
+packed, such repositories often take up a large amount of disk space: the
+objects can only be zlib-compressed individually, not stored in delta chains.
+
+== Cruft packs
+
+A cruft pack eliminates the need for storing unreachable objects in a loose
+state by including the per-object mtimes in a separate file alongside a single
+pack containing all of the objects that would otherwise be stored loose.
+
+A cruft pack is written by `git repack --cruft` when generating a new pack,
+using linkgit:git-pack-objects[1]'s `--cruft` option under the hood. Note that
+`git repack --cruft` is a classic all-into-one repack, meaning that everything
+in the resulting pack is reachable, and everything else is unreachable. Once the
+reachable pack is written, the `--cruft` option instructs `git repack` to
+generate another pack containing only objects not packed in the previous step
+(which equates to packing all unreachable objects together). This progresses as
+follows:
+
+ 1. Enumerate every object, marking any object which is (a) not contained in a
+ kept-pack, and (b) whose mtime is within the grace period as a traversal
+ tip.
+
+ 2. Perform a reachability traversal based on the tips gathered in the previous
+ step, adding every object along the way to the pack.
+
+ 3. Write the pack out, along with a `.mtimes` file that records the per-object
+ timestamps.
+
+This mode is invoked internally by linkgit:git-repack[1] when instructed to
+write a cruft pack. Crucially, the set of in-core kept packs is exactly the set
+of packs which will not be deleted by the repack; in other words, they contain
+all of the repository's reachable objects.
+
+When a repository already has a cruft pack, `git repack --cruft` typically only
+adds objects to it. An exception to this is when `git repack` is given the
+`--cruft-expiration` option, which allows the generated cruft pack to omit
+expired objects instead of waiting for linkgit:git-gc[1] to expire those objects
+later on.
+
+It is linkgit:git-gc[1] that is typically responsible for removing expired
+unreachable objects.
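A minimal usage sketch of the above (assuming a Git version with cruft pack
support):

------------
# pack reachable objects, and write everything else into a cruft pack
# with its companion .mtimes file
git repack --cruft -d

# optionally expire cruft objects older than the grace period
git repack --cruft --cruft-expiration=2.weeks.ago -d

# the cruft pack sits alongside its .mtimes file
ls .git/objects/pack/*.mtimes
------------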
+
+== Caution for mixed-version environments
+
+Repositories that have cruft packs in them will continue to work with any older
+version of Git. Note, however, that previous versions of Git which do not
+understand the `.mtimes` file will use the cruft pack's mtime as the mtime for
+all of the objects in it. In other words, do not expect older (pre-cruft pack)
+versions of Git to interpret or even read the contents of the `.mtimes` file.
+
+Note that having mixed versions of Git GC-ing the same repository can lead to
+unreachable objects never being completely pruned. This can happen under the
+following circumstances:
+
+ - An older version of Git running GC explodes the contents of an existing
+ cruft pack loose, using the cruft pack's mtime.
+ - A newer version running GC collects those loose objects into a cruft pack,
+ where the `.mtimes` file reflects the loose objects' actual mtimes, but the
+ cruft pack's mtime is "now".
+
+Repeating this process will lead to unreachable objects not getting pruned as a
+result of repeatedly resetting the objects' mtimes to the present time.
+
+If you are GC-ing repositories in a mixed version environment, consider omitting
+the `--cruft` option when using linkgit:git-repack[1] and linkgit:git-gc[1], and
+leaving the `gc.cruftPacks` configuration unset until all writers understand
+cruft packs.
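For example, a conservative setup for a repository that is still GC-ed by older
Git versions might look like this (a sketch; adjust the expiry to taste):

------------
# make sure GC does not write cruft packs yet
git config gc.cruftPacks false

# repack and expire the traditional way
git repack -Ad
git gc --prune=2.weeks.ago
------------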
+
+== Alternatives
+
+Notable alternatives to this design include:
+
+ - The location of the per-object mtime data, and
+ - Storing unreachable objects in multiple cruft packs.
+
+On the location of mtime data, a new auxiliary file tied to the pack was chosen
+to avoid complicating the `.idx` format. If the `.idx` format were ever to gain
+support for optional chunks of data, it may make sense to consolidate the
+`.mtimes` format into the `.idx` itself.
+
+Storing unreachable objects among multiple cruft packs (e.g., creating a new
+cruft pack during each repacking operation containing only the unreachable
+objects which aren't already stored in an earlier cruft pack) is significantly
+more complicated to construct, and so is not pursued here. The obvious drawback
+of the current implementation is that the entire cruft pack must be rewritten
+from scratch on each repack.
diff --git a/Documentation/technical/pack-format.txt b/Documentation/technical/pack-format.txt
index 6d3efb7..b520aa9 100644
--- a/Documentation/technical/pack-format.txt
+++ b/Documentation/technical/pack-format.txt
@@ -294,6 +294,25 @@ Pack file entry: <+
All 4-byte numbers are in network order.
+== pack-*.mtimes files have the following format:
+
+All 4-byte numbers are in network byte order.
+
+ - A 4-byte magic number '0x4d544d45' ('MTME').
+
+ - A 4-byte version identifier (= 1).
+
+ - A 4-byte hash function identifier (= 1 for SHA-1, 2 for SHA-256).
+
+ - A table of 4-byte unsigned integers. The ith value is the
+ modification time (mtime) of the ith object in the corresponding
+ pack by lexicographic (index) order. The mtimes count standard
+ epoch seconds.
+
+ - A trailer, containing a checksum of the corresponding packfile,
+ and a checksum of all of the above (each having length according
+ to the specified hash function).
+
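A quick way to eyeball the header described above is to hexdump an existing
`.mtimes` file (a sketch; assumes a cruft pack has already been written):

------------
# first 16 bytes: magic "MTME", version (1), hash function id
# (1 = SHA-1), followed by the first mtime entry
od -A d -t x1 .git/objects/pack/pack-*.mtimes | head -n 1
------------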
== multi-pack-index (MIDX) files have the following format:
The multi-pack-index files refer to multiple pack-files and loose objects.
diff --git a/GIT-VERSION-GEN b/GIT-VERSION-GEN
index fdcebd2..120af37 100755
--- a/GIT-VERSION-GEN
+++ b/GIT-VERSION-GEN
@@ -1,7 +1,7 @@
#!/bin/sh
GVF=GIT-VERSION-FILE
-DEF_VER=v2.36.0
+DEF_VER=v2.37.0-rc2
LF='
'
diff --git a/Makefile b/Makefile
index f8bccfa..04d0fd1 100644
--- a/Makefile
+++ b/Makefile
@@ -477,8 +477,14 @@ include shared.mak
#
# If your platform supports a built-in fsmonitor backend, set
# FSMONITOR_DAEMON_BACKEND to the "<name>" of the corresponding
-# `compat/fsmonitor/fsm-listen-<name>.c` that implements the
-# `fsm_listen__*()` routines.
+# `compat/fsmonitor/fsm-listen-<name>.c` and
+# `compat/fsmonitor/fsm-health-<name>.c` files
+# that implement the `fsm_listen__*()` and `fsm_health__*()` routines.
+#
+# If your platform has OS-specific ways to tell if a repo is incompatible with
+# fsmonitor (whether the hook or IPC daemon version), set FSMONITOR_OS_SETTINGS
+# to the "<name>" of the corresponding `compat/fsmonitor/fsm-settings-<name>.c`
+# that implements the `fsm_os_settings__*()` routines.
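For illustration, a build that enables both knobs might look like the following
(a sketch; `darwin` is used here on the assumption that the matching
`compat/fsmonitor/fsm-*-darwin.c` files exist for the platform):

------------
make FSMONITOR_DAEMON_BACKEND=darwin FSMONITOR_OS_SETTINGS=darwin
------------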
#
# Define DEVELOPER to enable more compiler warnings. Compiler version
# and family are auto detected, but could be overridden by defining
@@ -569,7 +575,9 @@ INSTALL = install
TCL_PATH = tclsh
TCLTK_PATH = wish
XGETTEXT = xgettext
+MSGCAT = msgcat
MSGFMT = msgfmt
+MSGMERGE = msgmerge
CURL_CONFIG = curl-config
GCOV = gcov
STRIP = strip
@@ -728,6 +736,7 @@ TEST_BUILTINS_OBJS += test-getcwd.o
TEST_BUILTINS_OBJS += test-hash-speed.o
TEST_BUILTINS_OBJS += test-hash.o
TEST_BUILTINS_OBJS += test-hashmap.o
+TEST_BUILTINS_OBJS += test-hexdump.o
TEST_BUILTINS_OBJS += test-index-version.o
TEST_BUILTINS_OBJS += test-json-writer.o
TEST_BUILTINS_OBJS += test-lazy-init-name-hash.o
@@ -738,6 +747,7 @@ TEST_BUILTINS_OBJS += test-oid-array.o
TEST_BUILTINS_OBJS += test-oidmap.o
TEST_BUILTINS_OBJS += test-oidtree.o
TEST_BUILTINS_OBJS += test-online-cpus.o
+TEST_BUILTINS_OBJS += test-pack-mtimes.o
TEST_BUILTINS_OBJS += test-parse-options.o
TEST_BUILTINS_OBJS += test-parse-pathspec-file.o
TEST_BUILTINS_OBJS += test-partial-clone.o
@@ -844,7 +854,7 @@ generated-hdrs: $(GENERATED_H)
## Exhaustive lists of our source files, either dynamically generated,
## or hardcoded.
SOURCES_CMD = ( \
- git ls-files \
+ git ls-files --deduplicate \
'*.[hcS]' \
'*.sh' \
':!*[tp][0-9][0-9][0-9][0-9]*' \
@@ -855,12 +865,13 @@ SOURCES_CMD = ( \
-o \( -name '[tp][0-9][0-9][0-9][0-9]*' -prune \) \
-o \( -name contrib -type d -prune \) \
-o \( -name build -type d -prune \) \
+ -o \( -name .build -type d -prune \) \
-o \( -name 'trash*' -type d -prune \) \
-o \( -name '*.[hcS]' -type f -print \) \
-o \( -name '*.sh' -type f -print \) \
| sed -e 's|^\./||' \
)
-FOUND_SOURCE_FILES := $(shell $(SOURCES_CMD))
+FOUND_SOURCE_FILES := $(filter-out $(GENERATED_H),$(shell $(SOURCES_CMD)))
FOUND_C_SOURCES = $(filter %.c,$(FOUND_SOURCE_FILES))
FOUND_H_SOURCES = $(filter %.h,$(FOUND_SOURCE_FILES))
@@ -993,6 +1004,7 @@ LIB_OBJS += oidtree.o
LIB_OBJS += pack-bitmap-write.o
LIB_OBJS += pack-bitmap.o
LIB_OBJS += pack-check.o
+LIB_OBJS += pack-mtimes.o
LIB_OBJS += pack-objects.o
LIB_OBJS += pack-revindex.o
LIB_OBJS += pack-write.o
@@ -1267,8 +1279,9 @@ PTHREAD_CFLAGS =
SPARSE_FLAGS ?= -std=gnu99
SP_EXTRA_FLAGS = -Wno-universal-initializer
-# For informing GIT-BUILD-OPTIONS of the SANITIZE=leak target
+# For informing GIT-BUILD-OPTIONS of the SANITIZE=leak,address targets
SANITIZE_LEAK =
+SANITIZE_ADDRESS =
# For the 'coccicheck' target; setting SPATCH_BATCH_SIZE higher will
# usually result in less CPU usage at the cost of higher peak memory.
@@ -1314,6 +1327,7 @@ SANITIZE_LEAK = YesCompiledWithIt
endif
ifneq ($(filter address,$(SANITIZERS)),)
NO_REGEX = NeededForASAN
+SANITIZE_ADDRESS = YesCompiledWithIt
endif
endif
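These variables are driven by the usual `SANITIZE` knob; a sketch (requires a
compiler with AddressSanitizer support):

------------
# build and test with ASan; GIT-BUILD-OPTIONS records
# SANITIZE_ADDRESS=YesCompiledWithIt for the test suite
make SANITIZE=address
make SANITIZE=address test
------------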
@@ -2005,6 +2019,12 @@ endif
ifdef FSMONITOR_DAEMON_BACKEND
COMPAT_CFLAGS += -DHAVE_FSMONITOR_DAEMON_BACKEND
COMPAT_OBJS += compat/fsmonitor/fsm-listen-$(FSMONITOR_DAEMON_BACKEND).o
+ COMPAT_OBJS += compat/fsmonitor/fsm-health-$(FSMONITOR_DAEMON_BACKEND).o
+endif
+
+ifdef FSMONITOR_OS_SETTINGS
+ COMPAT_CFLAGS += -DHAVE_FSMONITOR_OS_SETTINGS
+ COMPAT_OBJS += compat/fsmonitor/fsm-settings-$(FSMONITOR_OS_SETTINGS).o
endif
ifeq ($(TCLTK_PATH),)
@@ -2706,17 +2726,18 @@ XGETTEXT_FLAGS = \
--force-po \
--add-comments=TRANSLATORS: \
--msgid-bugs-address="Git Mailing List <git@vger.kernel.org>" \
- --from-code=UTF-8
+ --package-name=Git
XGETTEXT_FLAGS_C = $(XGETTEXT_FLAGS) --language=C \
--keyword=_ --keyword=N_ --keyword="Q_:1,2"
XGETTEXT_FLAGS_SH = $(XGETTEXT_FLAGS) --language=Shell \
--keyword=gettextln --keyword=eval_gettextln
XGETTEXT_FLAGS_PERL = $(XGETTEXT_FLAGS) --language=Perl \
--keyword=__ --keyword=N__ --keyword="__n:1,2"
-LOCALIZED_C = $(C_OBJ:o=c) $(LIB_H) $(GENERATED_H)
-LOCALIZED_SH = $(SCRIPT_SH)
-LOCALIZED_SH += git-sh-setup.sh
-LOCALIZED_PERL = $(SCRIPT_PERL)
+MSGMERGE_FLAGS = --add-location --backup=off --update
+LOCALIZED_C = $(sort $(FOUND_C_SOURCES) $(FOUND_H_SOURCES) $(SCALAR_SOURCES) \
+ $(GENERATED_H))
+LOCALIZED_SH = $(sort $(SCRIPT_SH) git-sh-setup.sh)
+LOCALIZED_PERL = $(sort $(SCRIPT_PERL))
ifdef XGETTEXT_INCLUDE_TESTS
LOCALIZED_C += t/t0200/test.c
@@ -2724,38 +2745,129 @@ LOCALIZED_SH += t/t0200/test.sh
LOCALIZED_PERL += t/t0200/test.perl
endif
-## Note that this is meant to be run only by the localization coordinator
-## under a very controlled condition, i.e. (1) it is to be run in a
-## Git repository (not a tarball extract), (2) any local modifications
-## will be lost.
+## We generate intermediate .build/pot/po/%.po files containing an
+## extract of the translations we find in each file in the source
+## tree. We will assemble them using msgcat to create the final
+## "po/git.pot" file.
+LOCALIZED_ALL_GEN_PO =
+
+LOCALIZED_C_GEN_PO = $(LOCALIZED_C:%=.build/pot/po/%.po)
+LOCALIZED_ALL_GEN_PO += $(LOCALIZED_C_GEN_PO)
+
+LOCALIZED_SH_GEN_PO = $(LOCALIZED_SH:%=.build/pot/po/%.po)
+LOCALIZED_ALL_GEN_PO += $(LOCALIZED_SH_GEN_PO)
+
+LOCALIZED_PERL_GEN_PO = $(LOCALIZED_PERL:%=.build/pot/po/%.po)
+LOCALIZED_ALL_GEN_PO += $(LOCALIZED_PERL_GEN_PO)
+
## Gettext tools cannot work with our own custom PRItime type, so
## we replace PRItime with PRIuMAX. We need to update this to
## PRIdMAX if we switch to a signed type later.
+$(LOCALIZED_C_GEN_PO): .build/pot/po/%.po: %
+ $(call mkdir_p_parent_template)
+ $(QUIET_XGETTEXT) \
+ if grep -q PRItime $<; then \
+ (\
+ sed -e 's|PRItime|PRIuMAX|g' <$< \
+ >.build/pot/po/$< && \
+ cd .build/pot/po && \
+ $(XGETTEXT) --omit-header \
+ -o $(@:.build/pot/po/%=%) \
+ $(XGETTEXT_FLAGS_C) $< && \
+ rm $<; \
+ ); \
+ else \
+ $(XGETTEXT) --omit-header \
+ -o $@ $(XGETTEXT_FLAGS_C) $<; \
+ fi
-po/git.pot: $(GENERATED_H) FORCE
- # All modifications will be reverted at the end, so we do not
- # want to have any local change.
- git diff --quiet HEAD && git diff --quiet --cached
+$(LOCALIZED_SH_GEN_PO): .build/pot/po/%.po: %
+ $(call mkdir_p_parent_template)
+ $(QUIET_XGETTEXT)$(XGETTEXT) --omit-header \
+ -o$@ $(XGETTEXT_FLAGS_SH) $<
- @for s in $(LOCALIZED_C) $(LOCALIZED_SH) $(LOCALIZED_PERL); \
- do \
- sed -e 's|PRItime|PRIuMAX|g' <"$$s" >"$$s+" && \
- cat "$$s+" >"$$s" && rm "$$s+"; \
- done
+$(LOCALIZED_PERL_GEN_PO): .build/pot/po/%.po: %
+ $(call mkdir_p_parent_template)
+ $(QUIET_XGETTEXT)$(XGETTEXT) --omit-header \
+ -o$@ $(XGETTEXT_FLAGS_PERL) $<
+
+define gen_pot_header
+$(XGETTEXT) $(XGETTEXT_FLAGS_C) \
+ -o - /dev/null | \
+sed -e 's|charset=CHARSET|charset=UTF-8|' \
+ -e 's|\(Last-Translator: \)FULL NAME <.*>|\1make by the Makefile|' \
+ -e 's|\(Language-Team: \)LANGUAGE <.*>|\1Git Mailing List <git@vger.kernel.org>|' \
+ >$@ && \
+echo '"Plural-Forms: nplurals=INTEGER; plural=EXPRESSION;\\n"' >>$@
+endef
- $(QUIET_XGETTEXT)$(XGETTEXT) -o$@+ $(XGETTEXT_FLAGS_C) $(LOCALIZED_C)
- $(QUIET_XGETTEXT)$(XGETTEXT) -o$@+ --join-existing $(XGETTEXT_FLAGS_SH) \
- $(LOCALIZED_SH)
- $(QUIET_XGETTEXT)$(XGETTEXT) -o$@+ --join-existing $(XGETTEXT_FLAGS_PERL) \
- $(LOCALIZED_PERL)
+.build/pot/git.header: $(LOCALIZED_ALL_GEN_PO)
+ $(call mkdir_p_parent_template)
+ $(QUIET_GEN)$(gen_pot_header)
- # Reverting the munged source, leaving only the updated $@
- git reset --hard
- mv $@+ $@
+po/git.pot: .build/pot/git.header $(LOCALIZED_ALL_GEN_PO)
+ $(QUIET_GEN)$(MSGCAT) $^ >$@
.PHONY: pot
pot: po/git.pot
+define check_po_file_envvar
+ $(if $(PO_FILE), \
+ $(if $(filter po/%.po,$(PO_FILE)), , \
+ $(error PO_FILE should match pattern: "po/%.po")), \
+ $(error PO_FILE is not defined))
+endef
+
+.PHONY: po-update
+po-update: po/git.pot
+ $(check_po_file_envvar)
+ @if test ! -e $(PO_FILE); then \
+ echo >&2 "error: $(PO_FILE) does not exist"; \
+ echo >&2 'To create an initial po file, use: "make po-init PO_FILE=po/XX.po"'; \
+ exit 1; \
+ fi
+ $(QUIET_MSGMERGE)$(MSGMERGE) $(MSGMERGE_FLAGS) $(PO_FILE) po/git.pot
+
+.PHONY: check-pot
+check-pot: $(LOCALIZED_ALL_GEN_PO)
+
+### TODO FIXME: Translating everything in these files is a bad
+### heuristic for "core", as we'll translate obscure error() messages
+### along with commonly seen i18n messages. A better heuristic would
+### be to e.g. use spatch to first remove error/die/warning
+### etc. messages.
+LOCALIZED_C_CORE =
+LOCALIZED_C_CORE += builtin/checkout.c
+LOCALIZED_C_CORE += builtin/clone.c
+LOCALIZED_C_CORE += builtin/index-pack.c
+LOCALIZED_C_CORE += builtin/push.c
+LOCALIZED_C_CORE += builtin/reset.c
+LOCALIZED_C_CORE += remote.c
+LOCALIZED_C_CORE += wt-status.c
+
+LOCALIZED_C_CORE_GEN_PO = $(LOCALIZED_C_CORE:%=.build/pot/po/%.po)
+
+.build/pot/git-core.header: $(LOCALIZED_C_CORE_GEN_PO)
+ $(call mkdir_p_parent_template)
+ $(QUIET_GEN)$(gen_pot_header)
+
+po/git-core.pot: .build/pot/git-core.header $(LOCALIZED_C_CORE_GEN_PO)
+ $(QUIET_GEN)$(MSGCAT) $^ >$@
+
+.PHONY: po-init
+po-init: po/git-core.pot
+ $(check_po_file_envvar)
+ @if test -e $(PO_FILE); then \
+ echo >&2 "error: $(PO_FILE) exists already"; \
+ exit 1; \
+ fi
+ $(QUIET_MSGINIT)msginit \
+ --input=$< \
+ --output=$(PO_FILE) \
+ --no-translator \
+ --locale=$(PO_FILE:po/%.po=%)
+
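Taken together, the targets above support a workflow along these lines (a
sketch; `po/XX.po` stands for a concrete locale file):

------------
# regenerate the template from the current sources
make pot

# start a brand-new translation from the "core" template
make po-init PO_FILE=po/XX.po

# refresh an existing translation against po/git.pot
make po-update PO_FILE=po/XX.po
------------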
+## po/*.po files & their rules
ifdef NO_GETTEXT
POFILES :=
MOFILES :=
@@ -2862,10 +2974,14 @@ GIT-BUILD-OPTIONS: FORCE
@echo PAGER_ENV=\''$(subst ','\'',$(subst ','\'',$(PAGER_ENV)))'\' >>$@+
@echo DC_SHA1=\''$(subst ','\'',$(subst ','\'',$(DC_SHA1)))'\' >>$@+
@echo SANITIZE_LEAK=\''$(subst ','\'',$(subst ','\'',$(SANITIZE_LEAK)))'\' >>$@+
+ @echo SANITIZE_ADDRESS=\''$(subst ','\'',$(subst ','\'',$(SANITIZE_ADDRESS)))'\' >>$@+
@echo X=\'$(X)\' >>$@+
ifdef FSMONITOR_DAEMON_BACKEND
@echo FSMONITOR_DAEMON_BACKEND=\''$(subst ','\'',$(subst ','\'',$(FSMONITOR_DAEMON_BACKEND)))'\' >>$@+
endif
+ifdef FSMONITOR_OS_SETTINGS
+ @echo FSMONITOR_OS_SETTINGS=\''$(subst ','\'',$(subst ','\'',$(FSMONITOR_OS_SETTINGS)))'\' >>$@+
+endif
ifdef TEST_OUTPUT_DIRECTORY
@echo TEST_OUTPUT_DIRECTORY=\''$(subst ','\'',$(subst ','\'',$(TEST_OUTPUT_DIRECTORY)))'\' >>$@+
endif
@@ -3290,6 +3406,8 @@ cocciclean:
$(RM) contrib/coccinelle/*.cocci.patch*
clean: profile-clean coverage-clean cocciclean
+ $(RM) -r .build
+ $(RM) po/git.pot po/git-core.pot
$(RM) *.res
$(RM) $(OBJECTS)
$(RM) $(LIB_FILE) $(XDIFF_LIB) $(REFTABLE_LIB) $(REFTABLE_TEST_LIB)
@@ -3407,6 +3525,7 @@ coverage-clean-results:
$(RM) coverage-untested-functions
$(RM) -r cover_db/
$(RM) -r cover_db_html/
+ $(RM) coverage-test.made
coverage-clean: coverage-clean-results
$(RM) $(addsuffix *.gcno,$(object_dirs))
@@ -3421,13 +3540,17 @@ coverage-compile:
coverage-test: coverage-clean-results coverage-compile
$(MAKE) CFLAGS="$(COVERAGE_CFLAGS)" LDFLAGS="$(COVERAGE_LDFLAGS)" \
DEFAULT_TEST_TARGET=test -j1 test
+ touch coverage-test.made
+
+coverage-test.made:
+ $(MAKE) coverage-test
coverage-prove: coverage-clean-results coverage-compile
$(MAKE) CFLAGS="$(COVERAGE_CFLAGS)" LDFLAGS="$(COVERAGE_LDFLAGS)" \
DEFAULT_TEST_TARGET=prove GIT_PROVE_OPTS="$(GIT_PROVE_OPTS) -j1" \
-j1 test
-coverage-report:
+coverage-report: coverage-test.made
$(QUIET_GCOV)for dir in $(object_dirs); do \
$(GCOV) $(GCOVFLAGS) --object-directory=$$dir $$dir*.c || exit; \
done
diff --git a/RelNotes b/RelNotes
index 8105226..51144b6 120000
--- a/RelNotes
+++ b/RelNotes
@@ -1 +1 @@
-Documentation/RelNotes/2.36.0.txt \ No newline at end of file
+Documentation/RelNotes/2.37.0.txt \ No newline at end of file
diff --git a/add-interactive.c b/add-interactive.c
index 7247210..6047e8f 100644
--- a/add-interactive.c
+++ b/add-interactive.c
@@ -568,8 +568,7 @@ static int get_modified_files(struct repository *r,
run_diff_files(&rev, 0);
}
- if (ps)
- clear_pathspec(&rev.prune_data);
+ release_revisions(&rev);
}
hashmap_clear_and_free(&s.file_map, struct pathname_entry, ent);
if (unmerged_count)
diff --git a/add-patch.c b/add-patch.c
index 55d719f..509ca04 100644
--- a/add-patch.c
+++ b/add-patch.c
@@ -305,7 +305,7 @@ static void setup_child_process(struct add_p_state *s,
va_end(ap);
cp->git_cmd = 1;
- strvec_pushf(&cp->env_array,
+ strvec_pushf(&cp->env,
INDEX_ENVIRONMENT "=%s", s->s.r->index_file);
}
diff --git a/alloc.c b/alloc.c
index 957a0af..27f697e 100644
--- a/alloc.c
+++ b/alloc.c
@@ -27,7 +27,6 @@ union any_object {
};
struct alloc_state {
- int count; /* total number of nodes allocated */
int nr; /* number of nodes left in current allocation */
void *p; /* first free node in current allocation */
@@ -63,7 +62,6 @@ static inline void *alloc_node(struct alloc_state *s, size_t node_size)
s->slabs[s->slab_nr++] = s->p;
}
s->nr--;
- s->count++;
ret = s->p;
s->p = (char *)s->p + node_size;
memset(ret, 0, node_size);
@@ -122,22 +120,3 @@ void *alloc_commit_node(struct repository *r)
init_commit_node(c);
return c;
}
-
-static void report(const char *name, unsigned int count, size_t size)
-{
- fprintf(stderr, "%10s: %8u (%"PRIuMAX" kB)\n",
- name, count, (uintmax_t) size);
-}
-
-#define REPORT(name, type) \
- report(#name, r->parsed_objects->name##_state->count, \
- r->parsed_objects->name##_state->count * sizeof(type) >> 10)
-
-void alloc_report(struct repository *r)
-{
- REPORT(blob, struct blob);
- REPORT(tree, struct tree);
- REPORT(commit, struct commit);
- REPORT(tag, struct tag);
- REPORT(object, union any_object);
-}
diff --git a/alloc.h b/alloc.h
index 371d388..3f4a0ad 100644
--- a/alloc.h
+++ b/alloc.h
@@ -13,7 +13,6 @@ void init_commit_node(struct commit *c);
void *alloc_commit_node(struct repository *r);
void *alloc_tag_node(struct repository *r);
void *alloc_object_node(struct repository *r);
-void alloc_report(struct repository *r);
struct alloc_state *allocate_alloc_state(void);
void clear_alloc_state(struct alloc_state *s);
diff --git a/apply.c b/apply.c
index d19c26d..2b7cd93 100644
--- a/apply.c
+++ b/apply.c
@@ -3274,11 +3274,11 @@ static struct patch *in_fn_table(struct apply_state *state, const char *name)
{
struct string_list_item *item;
- if (name == NULL)
+ if (!name)
return NULL;
item = string_list_lookup(&state->fn_table, name);
- if (item != NULL)
+ if (item)
return (struct patch *)item->util;
return NULL;
@@ -3318,7 +3318,7 @@ static void add_to_fn_table(struct apply_state *state, struct patch *patch)
* This should cover the cases for normal diffs,
* file creations and copies
*/
- if (patch->new_name != NULL) {
+ if (patch->new_name) {
item = string_list_insert(&state->fn_table, patch->new_name);
item->util = patch;
}
diff --git a/archive.c b/archive.c
index e29d0e0..d5109ab 100644
--- a/archive.c
+++ b/archive.c
@@ -9,6 +9,7 @@
#include "parse-options.h"
#include "unpack-trees.h"
#include "dir.h"
+#include "quote.h"
static char const * const archive_usage[] = {
N_("git archive [<options>] <tree-ish> [<path>...]"),
@@ -263,6 +264,7 @@ static int queue_or_write_archive_entry(const struct object_id *oid,
struct extra_file_info {
char *base;
struct stat stat;
+ void *content;
};
int write_archive_entries(struct archiver_args *args,
@@ -331,19 +333,27 @@ int write_archive_entries(struct archiver_args *args,
put_be64(fake_oid.hash, i + 1);
- strbuf_reset(&path_in_archive);
- if (info->base)
- strbuf_addstr(&path_in_archive, info->base);
- strbuf_addstr(&path_in_archive, basename(path));
-
- strbuf_reset(&content);
- if (strbuf_read_file(&content, path, info->stat.st_size) < 0)
- err = error_errno(_("cannot read '%s'"), path);
- else
- err = write_entry(args, &fake_oid, path_in_archive.buf,
- path_in_archive.len,
- info->stat.st_mode,
- content.buf, content.len);
+ if (!info->content) {
+ strbuf_reset(&path_in_archive);
+ if (info->base)
+ strbuf_addstr(&path_in_archive, info->base);
+ strbuf_addstr(&path_in_archive, basename(path));
+
+ strbuf_reset(&content);
+ if (strbuf_read_file(&content, path, info->stat.st_size) < 0)
+ err = error_errno(_("cannot read '%s'"), path);
+ else
+ err = write_entry(args, &fake_oid, path_in_archive.buf,
+ path_in_archive.len,
+ canon_mode(info->stat.st_mode),
+ content.buf, content.len);
+ } else {
+ err = write_entry(args, &fake_oid,
+ path, strlen(path),
+ canon_mode(info->stat.st_mode),
+ info->content, info->stat.st_size);
+ }
+
if (err)
break;
}
@@ -465,7 +475,7 @@ static void parse_treeish_arg(const char **argv,
}
tree = parse_tree_indirect(&oid);
- if (tree == NULL)
+ if (!tree)
die(_("not a tree object: %s"), oid_to_hex(&oid));
if (prefix) {
@@ -493,6 +503,7 @@ static void extra_file_info_clear(void *util, const char *str)
{
struct extra_file_info *info = util;
free(info->base);
+ free(info->content);
free(info);
}
@@ -514,14 +525,49 @@ static int add_file_cb(const struct option *opt, const char *arg, int unset)
if (!arg)
return -1;
- path = prefix_filename(args->prefix, arg);
- item = string_list_append_nodup(&args->extra_files, path);
- item->util = info = xmalloc(sizeof(*info));
+ info = xmalloc(sizeof(*info));
info->base = xstrdup_or_null(base);
- if (stat(path, &info->stat))
- die(_("File not found: %s"), path);
- if (!S_ISREG(info->stat.st_mode))
- die(_("Not a regular file: %s"), path);
+
+ if (!strcmp(opt->long_name, "add-file")) {
+ path = prefix_filename(args->prefix, arg);
+ if (stat(path, &info->stat))
+ die(_("File not found: %s"), path);
+ if (!S_ISREG(info->stat.st_mode))
+ die(_("Not a regular file: %s"), path);
+ info->content = NULL; /* read the file later */
+ } else if (!strcmp(opt->long_name, "add-virtual-file")) {
+ struct strbuf buf = STRBUF_INIT;
+ const char *p = arg;
+
+ if (*p != '"')
+ p = strchr(p, ':');
+ else if (unquote_c_style(&buf, p, &p) < 0)
+ die(_("unclosed quote: '%s'"), arg);
+
+ if (!p || *p != ':')
+ die(_("missing colon: '%s'"), arg);
+
+ if (p == arg)
+ die(_("empty file name: '%s'"), arg);
+
+ path = buf.len ?
+ strbuf_detach(&buf, NULL) : xstrndup(arg, p - arg);
+
+ if (args->prefix) {
+ char *save = path;
+ path = prefix_filename(args->prefix, path);
+ free(save);
+ }
+ memset(&info->stat, 0, sizeof(info->stat));
+ info->stat.st_mode = S_IFREG | 0644;
+ info->content = xstrdup(p + 1);
+ info->stat.st_size = strlen(info->content);
+ } else {
+ BUG("add_file_cb() called for %s", opt->long_name);
+ }
+ item = string_list_append_nodup(&args->extra_files, path);
+ item->util = info;
+
return 0;
}
@@ -554,6 +600,9 @@ static int parse_archive_args(int argc, const char **argv,
{ OPTION_CALLBACK, 0, "add-file", args, N_("file"),
N_("add untracked file to archive"), 0, add_file_cb,
(intptr_t)&base },
+ { OPTION_CALLBACK, 0, "add-virtual-file", args,
+ N_("path:content"), N_("add untracked file to archive"), 0,
+ add_file_cb, (intptr_t)&base },
OPT_STRING('o', "output", &output, N_("file"),
N_("write the archive to this file")),
OPT_BOOL(0, "worktree-attributes", &worktree_attributes,
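Based on the parsing above, the new option can be exercised roughly like this
(a sketch; file names and contents are made up):

------------
# add an untracked on-disk file and a synthesized file to the archive
git archive --add-file=RELEASE_NOTES \
        --add-virtual-file='extra/BUILD_INFO:built from v2.37.0-rc2' \
        -o snapshot.tar HEAD

# a path containing a colon must be quoted C-style
git archive --add-virtual-file='"weird:name.txt":hello' -o out.tar HEAD
------------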
diff --git a/bisect.c b/bisect.c
index 9e6a2b7..b63669c 100644
--- a/bisect.c
+++ b/bisect.c
@@ -884,6 +884,7 @@ static int check_ancestors(struct repository *r, int rev_nr,
/* Clean up objects used, as they will be reused. */
clear_commit_marks_many(rev_nr, rev, ALL_REV_FLAGS);
+ release_revisions(&revs);
return res;
}
@@ -964,6 +965,7 @@ static void show_diff_tree(struct repository *r,
setup_revisions(ARRAY_SIZE(argv) - 1, argv, &opt, NULL);
log_tree_commit(&opt, commit);
+ release_revisions(&opt);
}
/*
@@ -1008,7 +1010,7 @@ void read_bisect_terms(const char **read_bad, const char **read_good)
*/
enum bisect_error bisect_next_all(struct repository *r, const char *prefix)
{
- struct rev_info revs;
+ struct rev_info revs = REV_INFO_INIT;
struct commit_list *tried;
int reaches = 0, all = 0, nr, steps;
enum bisect_error res = BISECT_OK;
@@ -1033,7 +1035,7 @@ enum bisect_error bisect_next_all(struct repository *r, const char *prefix)
res = check_good_are_ancestors_of_bad(r, prefix, no_checkout);
if (res)
- return res;
+ goto cleanup;
bisect_rev_setup(r, &revs, prefix, "%s", "^%s", 1);
@@ -1058,14 +1060,16 @@ enum bisect_error bisect_next_all(struct repository *r, const char *prefix)
term_good,
term_bad);
- return BISECT_FAILED;
+ res = BISECT_FAILED;
+ goto cleanup;
}
if (!all) {
fprintf(stderr, _("No testable commit found.\n"
"Maybe you started with bad path arguments?\n"));
- return BISECT_NO_TESTABLE_COMMIT;
+ res = BISECT_NO_TESTABLE_COMMIT;
+ goto cleanup;
}
bisect_rev = &revs.commits->item->object.oid;
@@ -1085,7 +1089,8 @@ enum bisect_error bisect_next_all(struct repository *r, const char *prefix)
* for negative return values for early returns up
* until the cmd_bisect__helper() caller.
*/
- return BISECT_INTERNAL_SUCCESS_1ST_BAD_FOUND;
+ res = BISECT_INTERNAL_SUCCESS_1ST_BAD_FOUND;
+ goto cleanup;
}
nr = all - reaches - 1;
@@ -1104,7 +1109,10 @@ enum bisect_error bisect_next_all(struct repository *r, const char *prefix)
/* Clean up objects used, as they will be reused. */
repo_clear_commit_marks(r, ALL_REV_FLAGS);
- return bisect_checkout(bisect_rev, no_checkout);
+ res = bisect_checkout(bisect_rev, no_checkout);
+cleanup:
+ release_revisions(&revs);
+ return res;
}
static inline int log2i(int n)
diff --git a/bisect.h b/bisect.h
index 1015aeb..ee3fd65 100644
--- a/bisect.h
+++ b/bisect.h
@@ -62,6 +62,15 @@ enum bisect_error {
BISECT_INTERNAL_SUCCESS_MERGE_BASE = -11
};
+/*
+ * Stores how many good/bad commits we have stored for a bisect. nr_bad can
+ * only be 0 or 1.
+ */
+struct bisect_state {
+ unsigned int nr_good;
+ unsigned int nr_bad;
+};
+
enum bisect_error bisect_next_all(struct repository *r, const char *prefix);
int estimate_bisect_steps(int all);
diff --git a/blame.c b/blame.c
index 186ad96..da1052a 100644
--- a/blame.c
+++ b/blame.c
@@ -1072,7 +1072,7 @@ static struct blame_entry *blame_merge(struct blame_entry *list1,
if (p1->s_lno <= p2->s_lno) {
do {
tail = &p1->next;
- if ((p1 = *tail) == NULL) {
+ if (!(p1 = *tail)) {
*tail = p2;
return list1;
}
@@ -1082,7 +1082,7 @@ static struct blame_entry *blame_merge(struct blame_entry *list1,
*tail = p2;
do {
tail = &p2->next;
- if ((p2 = *tail) == NULL) {
+ if (!(p2 = *tail)) {
*tail = p1;
return list1;
}
@@ -1090,7 +1090,7 @@ static struct blame_entry *blame_merge(struct blame_entry *list1,
*tail = p1;
do {
tail = &p1->next;
- if ((p1 = *tail) == NULL) {
+ if (!(p1 = *tail)) {
*tail = p2;
return list1;
}
diff --git a/branch.c b/branch.c
index 01ecb81..4c8523c 100644
--- a/branch.c
+++ b/branch.c
@@ -44,9 +44,9 @@ static int find_tracked_branch(struct remote *remote, void *priv)
string_list_clear(tracking->srcs, 0);
break;
}
+ /* remote_find_tracking() searches by src if present */
tracking->spec.src = NULL;
}
-
return 0;
}
@@ -264,15 +264,23 @@ static void setup_tracking(const char *new_ref, const char *orig_ref,
if (!tracking.matches)
switch (track) {
+ /* If ref is not remote, still use local */
case BRANCH_TRACK_ALWAYS:
case BRANCH_TRACK_EXPLICIT:
case BRANCH_TRACK_OVERRIDE:
+ /* Remote matches not evaluated */
case BRANCH_TRACK_INHERIT:
break;
+ /* Otherwise, if no remote don't track */
default:
goto cleanup;
}
+ /*
+ * This check does not apply to BRANCH_TRACK_INHERIT;
+ * that supports multiple entries in tracking_srcs but
+ * leaves tracking.matches at 0.
+ */
if (tracking.matches > 1) {
int status = die_message(_("not tracking: ambiguous information for ref '%s'"),
orig_ref);
@@ -307,6 +315,21 @@ static void setup_tracking(const char *new_ref, const char *orig_ref,
exit(status);
}
+ if (track == BRANCH_TRACK_SIMPLE) {
+ /*
+ * Only track if remote branch name matches.
+ * Reaching into items[0].string is safe because
+ * we know there is at least one and not more than
+ * one entry (because only BRANCH_TRACK_INHERIT can
+ * produce more than one entry).
+ */
+ const char *tracked_branch;
+ if (!skip_prefix(tracking.srcs->items[0].string,
+ "refs/heads/", &tracked_branch) ||
+ strcmp(tracked_branch, new_ref))
+ return;
+ }
+
if (tracking.srcs->nr < 1)
string_list_append(tracking.srcs, orig_ref);
if (install_branch_config_multiple_remotes(config_flags, new_ref,
@@ -466,7 +489,7 @@ static void dwim_branch_start(struct repository *r, const char *start_name,
break;
}
- if ((commit = lookup_commit_reference(r, &oid)) == NULL)
+ if (!(commit = lookup_commit_reference(r, &oid)))
die(_("not a valid branch point: '%s'"), start_name);
if (out_real_ref) {
*out_real_ref = real_ref;
@@ -564,7 +587,7 @@ static int submodule_create_branch(struct repository *r,
child.err = -1;
child.stdout_to_stderr = 1;
- prepare_other_repo_env(&child.env_array, r->gitdir);
+ prepare_other_repo_env(&child.env, r->gitdir);
/*
* submodule_create_branch() is indirectly invoked by "git
* branch", but we cannot invoke "git branch" in the child
@@ -603,6 +626,8 @@ static int submodule_create_branch(struct repository *r,
/* Default for "git checkout". Do not pass --track. */
case BRANCH_TRACK_REMOTE:
/* Default for "git branch". Do not pass --track. */
+ case BRANCH_TRACK_SIMPLE:
+ /* Config-driven only. Do not pass --track. */
break;
}
@@ -653,7 +678,7 @@ void create_branches_recursively(struct repository *r, const char *name,
* be created in every submodule.
*/
for (i = 0; i < submodule_entry_list.entry_nr; i++) {
- if (submodule_entry_list.entries[i].repo == NULL) {
+ if (!submodule_entry_list.entries[i].repo) {
int code = die_message(
_("submodule '%s': unable to find submodule"),
submodule_entry_list.entries[i].submodule->name);
diff --git a/branch.h b/branch.h
index 04df2aa5..560b6b9 100644
--- a/branch.h
+++ b/branch.h
@@ -12,6 +12,7 @@ enum branch_track {
BRANCH_TRACK_EXPLICIT,
BRANCH_TRACK_OVERRIDE,
BRANCH_TRACK_INHERIT,
+ BRANCH_TRACK_SIMPLE,
};
extern enum branch_track git_branch_track;
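A sketch of how the new value is exercised, assuming `BRANCH_TRACK_SIMPLE` is
selected via the `branch.autoSetupMerge=simple` configuration introduced
alongside it:

------------
git config branch.autoSetupMerge simple

# local and remote branch names match: tracking is configured
git checkout -b topic origin/topic

# names differ: no tracking is set up
git checkout -b wip origin/topic
------------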
diff --git a/builtin/add.c b/builtin/add.c
index 3ffb86a..f843729 100644
--- a/builtin/add.c
+++ b/builtin/add.c
@@ -141,8 +141,17 @@ int add_files_to_cache(const char *prefix,
rev.diffopt.format_callback_data = &data;
rev.diffopt.flags.override_submodule_config = 1;
rev.max_count = 0; /* do not compare unmerged paths with stage #2 */
+
+ /*
+ * Use an ODB transaction to optimize adding multiple objects.
+ * This function is invoked from commands other than 'add', which
+ * may not have their own transaction active.
+ */
+ begin_odb_transaction();
run_diff_files(&rev, DIFF_RACY_IS_MODIFIED);
- clear_pathspec(&rev.prune_data);
+ end_odb_transaction();
+
+ release_revisions(&rev);
return !!data.add_errors;
}
@@ -236,17 +245,12 @@ int run_add_interactive(const char *revision, const char *patch_mode,
int use_builtin_add_i =
git_env_bool("GIT_TEST_ADD_I_USE_BUILTIN", -1);
- if (use_builtin_add_i < 0) {
- int experimental;
- if (!git_config_get_bool("add.interactive.usebuiltin",
- &use_builtin_add_i))
- ; /* ok */
- else if (!git_config_get_bool("feature.experimental", &experimental) &&
- experimental)
- use_builtin_add_i = 1;
- }
+ if (use_builtin_add_i < 0 &&
+ git_config_get_bool("add.interactive.usebuiltin",
+ &use_builtin_add_i))
+ use_builtin_add_i = 1;
- if (use_builtin_add_i == 1) {
+ if (use_builtin_add_i != 0) {
enum add_p_mode mode;
if (!patch_mode)
@@ -340,6 +344,7 @@ static int edit_patch(int argc, const char **argv, const char *prefix)
unlink(file);
free(file);
+ release_revisions(&rev);
return 0;
}
@@ -670,7 +675,7 @@ int cmd_add(int argc, const char **argv, const char *prefix)
string_list_clear(&only_match_skip_worktree, 0);
}
- plug_bulk_checkin();
+ begin_odb_transaction();
if (add_renormalize)
exit_status |= renormalize_tracked_files(&pathspec, flags);
@@ -682,7 +687,7 @@ int cmd_add(int argc, const char **argv, const char *prefix)
if (chmod_arg && pathspec.nr)
exit_status |= chmod_pathspec(&pathspec, chmod_arg[0], show_only);
- unplug_bulk_checkin();
+ end_odb_transaction();
finish:
if (write_locked_index(&the_index, &lock_file,
diff --git a/builtin/am.c b/builtin/am.c
index 0f4111b..93bec62 100644
--- a/builtin/am.c
+++ b/builtin/am.c
@@ -1397,6 +1397,7 @@ static void write_commit_patch(const struct am_state *state, struct commit *comm
add_pending_object(&rev_info, &commit->object, "");
diff_setup_done(&rev_info.diffopt);
log_tree_commit(&rev_info, commit);
+ release_revisions(&rev_info);
}
/**
@@ -1429,6 +1430,7 @@ static void write_index_patch(const struct am_state *state)
add_pending_object(&rev_info, &tree->object, "");
diff_setup_done(&rev_info.diffopt);
run_diff_index(&rev_info, 1);
+ release_revisions(&rev_info);
}
/**
@@ -1582,6 +1584,7 @@ static int fall_back_threeway(const struct am_state *state, const char *index_pa
add_pending_oid(&rev_info, "HEAD", &our_tree, 0);
diff_setup_done(&rev_info.diffopt);
run_diff_index(&rev_info, 1);
+ release_revisions(&rev_info);
}
if (run_apply(state, index_path))
diff --git a/builtin/apply.c b/builtin/apply.c
index 3f099b9..555219d 100644
--- a/builtin/apply.c
+++ b/builtin/apply.c
@@ -1,7 +1,6 @@
#include "cache.h"
#include "builtin.h"
#include "parse-options.h"
-#include "lockfile.h"
#include "apply.h"
static const char * const apply_usage[] = {
diff --git a/builtin/bisect--helper.c b/builtin/bisect--helper.c
index 8b2b259..8a052c7 100644
--- a/builtin/bisect--helper.c
+++ b/builtin/bisect--helper.c
@@ -329,12 +329,12 @@ static int check_and_set_terms(struct bisect_terms *terms, const char *cmd)
return 0;
}
-static int mark_good(const char *refname, const struct object_id *oid,
- int flag, void *cb_data)
+static int inc_nr(const char *refname, const struct object_id *oid,
+ int flag, void *cb_data)
{
- int *m_good = (int *)cb_data;
- *m_good = 0;
- return 1;
+ unsigned int *nr = (unsigned int *)cb_data;
+ (*nr)++;
+ return 0;
}
static const char need_bad_and_good_revision_warning[] =
@@ -384,23 +384,64 @@ static int decide_next(const struct bisect_terms *terms,
vocab_good, vocab_bad, vocab_good, vocab_bad);
}
-static int bisect_next_check(const struct bisect_terms *terms,
- const char *current_term)
+static void bisect_status(struct bisect_state *state,
+ const struct bisect_terms *terms)
{
- int missing_good = 1, missing_bad = 1;
char *bad_ref = xstrfmt("refs/bisect/%s", terms->term_bad);
char *good_glob = xstrfmt("%s-*", terms->term_good);
if (ref_exists(bad_ref))
- missing_bad = 0;
+ state->nr_bad = 1;
- for_each_glob_ref_in(mark_good, good_glob, "refs/bisect/",
- (void *) &missing_good);
+ for_each_glob_ref_in(inc_nr, good_glob, "refs/bisect/",
+ (void *) &state->nr_good);
free(good_glob);
free(bad_ref);
+}
- return decide_next(terms, current_term, missing_good, missing_bad);
+__attribute__((format (printf, 1, 2)))
+static void bisect_log_printf(const char *fmt, ...)
+{
+ struct strbuf buf = STRBUF_INIT;
+ va_list ap;
+
+ va_start(ap, fmt);
+ strbuf_vaddf(&buf, fmt, ap);
+ va_end(ap);
+
+ printf("%s", buf.buf);
+ append_to_file(git_path_bisect_log(), "# %s", buf.buf);
+
+ strbuf_release(&buf);
+}
+
+static void bisect_print_status(const struct bisect_terms *terms)
+{
+ struct bisect_state state = { 0 };
+
+ bisect_status(&state, terms);
+
+ /* If we had both, we'd already be started, and shouldn't get here. */
+ if (state.nr_good && state.nr_bad)
+ return;
+
+ if (!state.nr_good && !state.nr_bad)
+ bisect_log_printf(_("status: waiting for both good and bad commits\n"));
+ else if (state.nr_good)
+ bisect_log_printf(Q_("status: waiting for bad commit, %d good commit known\n",
+ "status: waiting for bad commit, %d good commits known\n",
+ state.nr_good), state.nr_good);
+ else
+ bisect_log_printf(_("status: waiting for good commit(s), bad commit known\n"));
+}
+
+static int bisect_next_check(const struct bisect_terms *terms,
+ const char *current_term)
+{
+ struct bisect_state state = { 0 };
+ bisect_status(&state, terms);
+ return decide_next(terms, current_term, !state.nr_good, !state.nr_bad);
}
static int get_terms(struct bisect_terms *terms)
@@ -433,7 +474,7 @@ static int bisect_terms(struct bisect_terms *terms, const char *option)
if (get_terms(terms))
return error(_("no terms defined"));
- if (option == NULL) {
+ if (!option) {
printf(_("Your current terms are %s for the old state\n"
"and %s for the new state.\n"),
terms->term_good, terms->term_bad);
@@ -555,6 +596,7 @@ static int bisect_skipped_commits(struct bisect_terms *terms)
reset_revision_walk();
strbuf_release(&commit_name);
+ release_revisions(&revs);
fclose(fp);
return 0;
}
@@ -606,8 +648,10 @@ static enum bisect_error bisect_next(struct bisect_terms *terms, const char *pre
static enum bisect_error bisect_auto_next(struct bisect_terms *terms, const char *prefix)
{
- if (bisect_next_check(terms, NULL))
+ if (bisect_next_check(terms, NULL)) {
+ bisect_print_status(terms);
return BISECT_OK;
+ }
return bisect_next(terms, prefix);
}
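The effect of the new status output can be seen in an ordinary session (a
sketch; revisions are placeholders):

------------
$ git bisect start
status: waiting for both good and bad commits
$ git bisect good HEAD~20
status: waiting for bad commit, 1 good commit known
$ git bisect bad HEAD
# bisection proper begins once both terms are known
------------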
@@ -1041,6 +1085,7 @@ static enum bisect_error bisect_skip(struct bisect_terms *terms, const char **ar
oid_to_hex(&commit->object.oid));
reset_revision_walk();
+ release_revisions(&revs);
} else {
strvec_push(&argv_state, argv[i]);
}
diff --git a/builtin/blame.c b/builtin/blame.c
index 8d15b68..02e3942 100644
--- a/builtin/blame.c
+++ b/builtin/blame.c
@@ -898,6 +898,7 @@ int cmd_blame(int argc, const char **argv, const char *prefix)
unsigned int range_i;
long anchor;
const int hexsz = the_hash_algo->hexsz;
+ long num_lines = 0;
setup_default_color_by_age();
git_config(git_blame_config, &output_option);
@@ -1129,7 +1130,10 @@ parse_done:
for (range_i = ranges.nr; range_i > 0; --range_i) {
const struct range *r = &ranges.ranges[range_i - 1];
ent = blame_entry_prepend(ent, r->start, r->end, o);
+ num_lines += (r->end - r->start);
}
+ if (!num_lines)
+ num_lines = sb.num_lines;
o->suspects = ent;
prio_queue_put(&sb.commits, o->commit);
@@ -1158,7 +1162,7 @@ parse_done:
sb.found_guilty_entry = &found_guilty_entry;
sb.found_guilty_entry_data = &pi;
if (show_progress)
- pi.progress = start_delayed_progress(_("Blaming lines"), sb.num_lines);
+ pi.progress = start_delayed_progress(_("Blaming lines"), num_lines);
assign_blame(&sb, opt);
@@ -1167,7 +1171,7 @@ parse_done:
if (!incremental)
setup_pager();
else
- return 0;
+ goto cleanup;
blame_sort_final(&sb);
@@ -1201,6 +1205,8 @@ parse_done:
printf("num commits: %d\n", sb.num_commits);
}
+cleanup:
cleanup_scoreboard(&sb);
+ release_revisions(&revs);
return 0;
}
diff --git a/builtin/checkout.c b/builtin/checkout.c
index 7976814..2eefda8 100644
--- a/builtin/checkout.c
+++ b/builtin/checkout.c
@@ -629,7 +629,7 @@ static void show_local_changes(struct object *head,
diff_setup_done(&rev.diffopt);
add_pending_object(&rev, head, NULL);
run_diff_index(&rev, 0);
- object_array_clear(&rev.pending);
+ release_revisions(&rev);
}
static void describe_detached_head(const char *msg, struct commit *commit)
@@ -834,7 +834,7 @@ static int merge_working_tree(const struct checkout_opts *opts,
if (ret)
return ret;
o.ancestor = old_branch_info->name;
- if (old_branch_info->name == NULL) {
+ if (!old_branch_info->name) {
strbuf_add_unique_abbrev(&old_commit_shortname,
&old_branch_info->commit->object.oid,
DEFAULT_ABBREV);
@@ -1082,6 +1082,7 @@ static void orphaned_commit_warning(struct commit *old_commit, struct commit *ne
/* Clean up objects used, as they will be reused. */
repo_clear_commit_marks(the_repository, ALL_REV_FLAGS);
+ release_revisions(&revs);
}
static int switch_branches(const struct checkout_opts *opts,
diff --git a/builtin/clone.c b/builtin/clone.c
index 5231656..89a91b0 100644
--- a/builtin/clone.c
+++ b/builtin/clone.c
@@ -1106,10 +1106,12 @@ int cmd_clone(int argc, const char **argv, const char *prefix)
* apply the remote name provided by --origin only after this second
* call to git_config, to ensure it overrides all config-based values.
*/
- if (option_origin != NULL)
+ if (option_origin) {
+ free(remote_name);
remote_name = xstrdup(option_origin);
+ }
- if (remote_name == NULL)
+ if (!remote_name)
remote_name = xstrdup("origin");
if (!valid_remote_name(remote_name))
diff --git a/builtin/commit.c b/builtin/commit.c
index 009a1de..fcf9c85 100644
--- a/builtin/commit.c
+++ b/builtin/commit.c
@@ -861,7 +861,7 @@ static int prepare_to_commit(const char *index_file, const char *prefix,
}
s->fp = fopen_for_writing(git_path_commit_editmsg());
- if (s->fp == NULL)
+ if (!s->fp)
die_errno(_("could not open '%s'"), git_path_commit_editmsg());
/* Ignore status.displayCommentPrefix: we do need comments in COMMIT_EDITMSG. */
@@ -1100,7 +1100,6 @@ static const char *find_author_by_nickname(const char *name)
struct rev_info revs;
struct commit *commit;
struct strbuf buf = STRBUF_INIT;
- struct string_list mailmap = STRING_LIST_INIT_NODUP;
const char *av[20];
int ac = 0;
@@ -1111,7 +1110,8 @@ static const char *find_author_by_nickname(const char *name)
av[++ac] = buf.buf;
av[++ac] = NULL;
setup_revisions(ac, av, &revs, NULL);
- revs.mailmap = &mailmap;
+ revs.mailmap = xmalloc(sizeof(struct string_list));
+ string_list_init_nodup(revs.mailmap);
read_mailmap(revs.mailmap);
if (prepare_revision_walk(&revs))
@@ -1122,7 +1122,7 @@ static const char *find_author_by_nickname(const char *name)
ctx.date_mode.type = DATE_NORMAL;
strbuf_release(&buf);
format_commit_message(commit, "%aN <%aE>", &buf, &ctx);
- clear_mailmap(&mailmap);
+ release_revisions(&revs);
return strbuf_detach(&buf, NULL);
}
die(_("--author '%s' is not 'Name <email>' and matches no existing author"), name);
@@ -1687,6 +1687,7 @@ int cmd_commit(int argc, const char **argv, const char *prefix)
struct commit *current_head = NULL;
struct commit_extra_header *extra = NULL;
struct strbuf err = STRBUF_INIT;
+ int ret = 0;
if (argc == 2 && !strcmp(argv[1], "-h"))
usage_with_options(builtin_commit_usage, builtin_commit_options);
@@ -1721,8 +1722,9 @@ int cmd_commit(int argc, const char **argv, const char *prefix)
running hooks, writing the trees, and interacting with the user. */
if (!prepare_to_commit(index_file, prefix,
current_head, &s, &author_ident)) {
+ ret = 1;
rollback_index_files();
- return 1;
+ goto cleanup;
}
/* Determine parents */
@@ -1820,7 +1822,6 @@ int cmd_commit(int argc, const char **argv, const char *prefix)
rollback_index_files();
die(_("failed to write commit object"));
}
- strbuf_release(&author_ident);
free_commit_extra_headers(extra);
if (update_head_with_reflog(current_head, &oid, reflog_msg, &sb,
@@ -1862,7 +1863,9 @@ int cmd_commit(int argc, const char **argv, const char *prefix)
apply_autostash(git_path_merge_autostash(the_repository));
+cleanup:
+ UNLEAK(author_ident);
UNLEAK(err);
UNLEAK(sb);
- return 0;
+ return ret;
}
diff --git a/builtin/describe.c b/builtin/describe.c
index 42159cd..a76f1a1 100644
--- a/builtin/describe.c
+++ b/builtin/describe.c
@@ -517,6 +517,7 @@ static void describe_blob(struct object_id oid, struct strbuf *dst)
traverse_commit_list(&revs, process_commit, process_object, &pcd);
reset_revision_walk();
+ release_revisions(&revs);
}
static void describe(const char *arg, int last_one)
@@ -667,6 +668,7 @@ int cmd_describe(int argc, const char **argv, const char *prefix)
suffix = NULL;
else
suffix = dirty;
+ release_revisions(&revs);
}
describe("HEAD", 1);
} else if (dirty) {
diff --git a/builtin/diff-files.c b/builtin/diff-files.c
index 70103c4..2bfaf9b 100644
--- a/builtin/diff-files.c
+++ b/builtin/diff-files.c
@@ -77,8 +77,12 @@ int cmd_diff_files(int argc, const char **argv, const char *prefix)
if (read_cache_preload(&rev.diffopt.pathspec) < 0) {
perror("read_cache_preload");
- return -1;
+ result = -1;
+ goto cleanup;
}
result = run_diff_files(&rev, options);
- return diff_result_code(&rev.diffopt, result);
+ result = diff_result_code(&rev.diffopt, result);
+cleanup:
+ release_revisions(&rev);
+ return result;
}
diff --git a/builtin/diff-index.c b/builtin/diff-index.c
index 5fd23ab..7d158af 100644
--- a/builtin/diff-index.c
+++ b/builtin/diff-index.c
@@ -70,6 +70,7 @@ int cmd_diff_index(int argc, const char **argv, const char *prefix)
return -1;
}
result = run_diff_index(&rev, option);
- UNLEAK(rev);
- return diff_result_code(&rev.diffopt, result);
+ result = diff_result_code(&rev.diffopt, result);
+ release_revisions(&rev);
+ return result;
}
diff --git a/builtin/diff-tree.c b/builtin/diff-tree.c
index 0e0ac1f..116097a 100644
--- a/builtin/diff-tree.c
+++ b/builtin/diff-tree.c
@@ -195,6 +195,7 @@ int cmd_diff_tree(int argc, const char **argv, const char *prefix)
int saved_dcctc = 0;
opt->diffopt.rotate_to_strict = 0;
+ opt->diffopt.no_free = 1;
if (opt->diffopt.detect_rename) {
if (!the_index.cache)
repo_read_index(the_repository);
@@ -217,6 +218,8 @@ int cmd_diff_tree(int argc, const char **argv, const char *prefix)
}
opt->diffopt.degraded_cc_to_c = saved_dcctc;
opt->diffopt.needed_rename_limit = saved_nrl;
+ opt->diffopt.no_free = 0;
+ diff_free(&opt->diffopt);
}
return diff_result_code(&opt->diffopt, 0);
diff --git a/builtin/diff.c b/builtin/diff.c
index bb7fafc..54bb3de 100644
--- a/builtin/diff.c
+++ b/builtin/diff.c
@@ -352,7 +352,7 @@ static void symdiff_prepare(struct rev_info *rev, struct symdiff *sym)
othercount++;
continue;
}
- if (map == NULL)
+ if (!map)
map = bitmap_new();
bitmap_set(map, i);
}
@@ -594,7 +594,7 @@ int cmd_diff(int argc, const char **argv, const char *prefix)
result = diff_result_code(&rev.diffopt, result);
if (1 < rev.diffopt.skip_stat_unmatch)
refresh_index_quietly();
- UNLEAK(rev);
+ release_revisions(&rev);
UNLEAK(ent);
UNLEAK(blob);
return result;
diff --git a/builtin/difftool.c b/builtin/difftool.c
index faa3507..b3c509b 100644
--- a/builtin/difftool.c
+++ b/builtin/difftool.c
@@ -217,7 +217,7 @@ static void changed_files(struct hashmap *result, const char *index_path,
update_index.use_shell = 0;
update_index.clean_on_exit = 1;
update_index.dir = workdir;
- strvec_pushf(&update_index.env_array, "GIT_INDEX_FILE=%s", index_path);
+ strvec_pushf(&update_index.env, "GIT_INDEX_FILE=%s", index_path);
/* Ignore any errors of update-index */
run_command(&update_index);
@@ -230,7 +230,7 @@ static void changed_files(struct hashmap *result, const char *index_path,
diff_files.clean_on_exit = 1;
diff_files.out = -1;
diff_files.dir = workdir;
- strvec_pushf(&diff_files.env_array, "GIT_INDEX_FILE=%s", index_path);
+ strvec_pushf(&diff_files.env, "GIT_INDEX_FILE=%s", index_path);
if (start_command(&diff_files))
die("could not obtain raw diff");
fp = xfdopen(diff_files.out, "r");
@@ -675,7 +675,7 @@ static int run_file_diff(int prompt, const char *prefix,
child->git_cmd = 1;
child->dir = prefix;
- strvec_pushv(&child->env_array, env);
+ strvec_pushv(&child->env, env);
return run_command(child);
}
diff --git a/builtin/fast-export.c b/builtin/fast-export.c
index a7d7269..e1748fb 100644
--- a/builtin/fast-export.c
+++ b/builtin/fast-export.c
@@ -1261,6 +1261,7 @@ int cmd_fast_export(int argc, const char **argv, const char *prefix)
revs.diffopt.format_callback = show_filemodify;
revs.diffopt.format_callback_data = &paths_of_changed_objects;
revs.diffopt.flags.recursive = 1;
+ revs.diffopt.no_free = 1;
while ((commit = get_revision(&revs)))
handle_commit(commit, &revs, &paths_of_changed_objects);
@@ -1275,6 +1276,7 @@ int cmd_fast_export(int argc, const char **argv, const char *prefix)
printf("done\n");
refspec_clear(&refspecs);
+ release_revisions(&revs);
return 0;
}
diff --git a/builtin/fast-import.c b/builtin/fast-import.c
index 28d3193..14113cf 100644
--- a/builtin/fast-import.c
+++ b/builtin/fast-import.c
@@ -3465,7 +3465,7 @@ static void git_pack_config(void)
pack_idx_opts.version = indexversion_value;
if (pack_idx_opts.version > 2)
git_die_config("pack.indexversion",
- "bad pack.indexversion=%"PRIu32, pack_idx_opts.version);
+ "bad pack.indexVersion=%"PRIu32, pack_idx_opts.version);
}
if (!git_config_get_ulong("pack.packsizelimit", &packsizelimit_value))
max_packsize = packsizelimit_value;
diff --git a/builtin/fetch.c b/builtin/fetch.c
index e3791f0..ac29c2b 100644
--- a/builtin/fetch.c
+++ b/builtin/fetch.c
@@ -1440,6 +1440,7 @@ static void check_not_current_branch(struct ref *ref_map,
const struct worktree *wt;
for (; ref_map; ref_map = ref_map->next)
if (ref_map->peer_ref &&
+ starts_with(ref_map->peer_ref->name, "refs/heads/") &&
(wt = find_shared_symref(worktrees, "HEAD",
ref_map->peer_ref->name)) &&
!wt->is_bare)
@@ -2187,6 +2188,10 @@ int cmd_fetch(int argc, const char **argv, const char *prefix)
else if (argc > 1)
die(_("fetch --all does not make sense with refspecs"));
(void) for_each_remote(get_one_remote_for_fetch, &list);
+
+ /* do not do fetch_multiple() of one */
+ if (list.nr == 1)
+ remote = remote_get(list.items[0].string);
} else if (argc == 0) {
/* No arguments -- use default remote */
remote = remote_get(NULL);
@@ -2261,7 +2266,17 @@ int cmd_fetch(int argc, const char **argv, const char *prefix)
result = fetch_multiple(&list, max_children);
}
- if (!result && (recurse_submodules != RECURSE_SUBMODULES_OFF)) {
+
+ /*
+ * This is only needed after fetch_one(), which does not fetch
+ * submodules by itself.
+ *
+ * When we fetch from multiple remotes, fetch_multiple() has
+ * already updated submodules to grab commits necessary for
+ * the fetched history from each remote, so there is no need
+ * to fetch submodules from here.
+ */
+ if (!result && remote && (recurse_submodules != RECURSE_SUBMODULES_OFF)) {
struct strvec options = STRVEC_INIT;
int max_children = max_jobs;
diff --git a/builtin/fsmonitor--daemon.c b/builtin/fsmonitor--daemon.c
index 46be55a..2c109cf 100644
--- a/builtin/fsmonitor--daemon.c
+++ b/builtin/fsmonitor--daemon.c
@@ -3,6 +3,7 @@
#include "parse-options.h"
#include "fsmonitor.h"
#include "fsmonitor-ipc.h"
+#include "compat/fsmonitor/fsm-health.h"
#include "compat/fsmonitor/fsm-listen.h"
#include "fsmonitor--daemon.h"
#include "simple-ipc.h"
@@ -1136,6 +1137,18 @@ void fsmonitor_publish(struct fsmonitor_daemon_state *state,
pthread_mutex_unlock(&state->main_lock);
}
+static void *fsm_health__thread_proc(void *_state)
+{
+ struct fsmonitor_daemon_state *state = _state;
+
+ trace2_thread_start("fsm-health");
+
+ fsm_health__loop(state);
+
+ trace2_thread_exit();
+ return NULL;
+}
+
static void *fsm_listen__thread_proc(void *_state)
{
struct fsmonitor_daemon_state *state = _state;
@@ -1174,6 +1187,9 @@ static int fsmonitor_run_daemon_1(struct fsmonitor_daemon_state *state)
*/
.uds_disallow_chdir = 0
};
+ int health_started = 0;
+ int listener_started = 0;
+ int err = 0;
/*
* Start the IPC thread pool before we start the file system event
* listener thread, so that we have the IPC handle before we need it.
* before we need it.
*/
if (ipc_server_run_async(&state->ipc_server_data,
- fsmonitor_ipc__get_path(), &ipc_opts,
+ state->path_ipc.buf, &ipc_opts,
handle_client, state))
return error_errno(
_("could not start IPC thread pool on '%s'"),
- fsmonitor_ipc__get_path());
+ state->path_ipc.buf);
/*
* Start the fsmonitor listener thread to collect filesystem
@@ -1194,15 +1210,31 @@ static int fsmonitor_run_daemon_1(struct fsmonitor_daemon_state *state)
if (pthread_create(&state->listener_thread, NULL,
fsm_listen__thread_proc, state) < 0) {
ipc_server_stop_async(state->ipc_server_data);
- ipc_server_await(state->ipc_server_data);
+ err = error(_("could not start fsmonitor listener thread"));
+ goto cleanup;
+ }
+ listener_started = 1;
- return error(_("could not start fsmonitor listener thread"));
+ /*
+ * Start the health thread to watch over our process.
+ */
+ if (pthread_create(&state->health_thread, NULL,
+ fsm_health__thread_proc, state) < 0) {
+ ipc_server_stop_async(state->ipc_server_data);
+ err = error(_("could not start fsmonitor health thread"));
+ goto cleanup;
}
+ health_started = 1;
/*
* The daemon is now fully functional in background threads.
+ * Our primary thread should now just wait while the threads
+ * do all the work.
+ */
+cleanup:
+ /*
* Wait for the IPC thread pool to shutdown (whether by client
- * request or from filesystem activity).
+ * request, from filesystem activity, or an error).
*/
ipc_server_await(state->ipc_server_data);
@@ -1211,15 +1243,29 @@ static int fsmonitor_run_daemon_1(struct fsmonitor_daemon_state *state)
* event from the IPC thread pool, but it doesn't hurt to tell
* it again. And wait for it to shutdown.
*/
- fsm_listen__stop_async(state);
- pthread_join(state->listener_thread, NULL);
+ if (listener_started) {
+ fsm_listen__stop_async(state);
+ pthread_join(state->listener_thread, NULL);
+ }
- return state->error_code;
+ if (health_started) {
+ fsm_health__stop_async(state);
+ pthread_join(state->health_thread, NULL);
+ }
+
+ if (err)
+ return err;
+ if (state->listen_error_code)
+ return state->listen_error_code;
+ if (state->health_error_code)
+ return state->health_error_code;
+ return 0;
}
static int fsmonitor_run_daemon(void)
{
struct fsmonitor_daemon_state state;
+ const char *home;
int err;
memset(&state, 0, sizeof(state));
@@ -1227,7 +1273,8 @@ static int fsmonitor_run_daemon(void)
hashmap_init(&state.cookies, cookies_cmp, NULL, 0);
pthread_mutex_init(&state.main_lock, NULL);
pthread_cond_init(&state.cookies_cond, NULL);
- state.error_code = 0;
+ state.listen_error_code = 0;
+ state.health_error_code = 0;
state.current_token_data = fsmonitor_new_token_data();
/* Prepare to (recursively) watch the <worktree-root> directory. */
@@ -1290,6 +1337,15 @@ static int fsmonitor_run_daemon(void)
strbuf_addch(&state.path_cookie_prefix, '/');
/*
+ * We create a named-pipe or unix domain socket inside of the
+ * ".git" directory. (Well, on Windows, we base our named
+ * pipe in the NPFS on the absolute path of the git
+ * directory.)
+ */
+ strbuf_init(&state.path_ipc, 0);
+ strbuf_addstr(&state.path_ipc, absolute_path(fsmonitor_ipc__get_path()));
+
+ /*
* Confirm that we can create platform-specific resources for the
* filesystem listener before we bother starting all the threads.
*/
@@ -1298,18 +1354,42 @@ static int fsmonitor_run_daemon(void)
goto done;
}
+ if (fsm_health__ctor(&state)) {
+ err = error(_("could not initialize health thread"));
+ goto done;
+ }
+
+ /*
+ * CD out of the worktree root directory.
+ *
+ * The common Git startup mechanism causes our CWD to be the
+ * root of the worktree. On Windows, this causes our process
+ * to hold a locked handle on the CWD. This prevents the
+ * worktree from being moved or deleted while the daemon is
+ * running.
+ *
+ * We assume that our FS and IPC listener threads have either
+ * opened all of the handles that they need or will do
+ * everything using absolute paths.
+ */
+ home = getenv("HOME");
+ if (home && *home && chdir(home))
+ die_errno(_("could not cd home '%s'"), home);
+
err = fsmonitor_run_daemon_1(&state);
done:
pthread_cond_destroy(&state.cookies_cond);
pthread_mutex_destroy(&state.main_lock);
fsm_listen__dtor(&state);
+ fsm_health__dtor(&state);
ipc_server_free(state.ipc_server_data);
strbuf_release(&state.path_worktree_watch);
strbuf_release(&state.path_gitdir_watch);
strbuf_release(&state.path_cookie_prefix);
+ strbuf_release(&state.path_ipc);
return err;
}
@@ -1423,6 +1503,7 @@ static int try_to_start_background_daemon(void)
int cmd_fsmonitor__daemon(int argc, const char **argv, const char *prefix)
{
const char *subcmd;
+ enum fsmonitor_reason reason;
int detach_console = 0;
struct option options[] = {
@@ -1449,6 +1530,23 @@ int cmd_fsmonitor__daemon(int argc, const char **argv, const char *prefix)
die(_("invalid 'ipc-threads' value (%d)"),
fsmonitor__ipc_threads);
+ prepare_repo_settings(the_repository);
+ /*
+ * If the repo is fsmonitor-compatible, explicitly set IPC-mode
+ * (without bothering to load the `core.fsmonitor` config settings).
+ *
+ * If the repo is not compatible, the repo-settings will be set to
+ * incompatible rather than IPC, so we can use one of the __get
+ * routines to detect the discrepancy.
+ */
+ fsm_settings__set_ipc(the_repository);
+
+ reason = fsm_settings__get_reason(the_repository);
+ if (reason > FSMONITOR_REASON_OK)
+ die("%s",
+ fsm_settings__get_incompatible_msg(the_repository,
+ reason));
+
if (!strcmp(subcmd, "start"))
return !!try_to_start_background_daemon();
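A minimal sketch of driving the daemon changed above; it assumes the builtin FS monitor is available on this platform and that `core.fsmonitor` accepts a boolean here:

    # let Git start the daemon on demand for this repository
    git config core.fsmonitor true

    # or manage it explicitly
    git fsmonitor--daemon start
    git fsmonitor--daemon status
    git fsmonitor--daemon stop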
diff --git a/builtin/gc.c b/builtin/gc.c
index b335cff..021e925 100644
--- a/builtin/gc.c
+++ b/builtin/gc.c
@@ -42,6 +42,7 @@ static const char * const builtin_gc_usage[] = {
static int pack_refs = 1;
static int prune_reflogs = 1;
+static int cruft_packs = 0;
static int aggressive_depth = 50;
static int aggressive_window = 250;
static int gc_auto_threshold = 6700;
@@ -152,6 +153,7 @@ static void gc_config(void)
git_config_get_int("gc.auto", &gc_auto_threshold);
git_config_get_int("gc.autopacklimit", &gc_auto_pack_limit);
git_config_get_bool("gc.autodetach", &detach_auto);
+ git_config_get_bool("gc.cruftpacks", &cruft_packs);
git_config_get_expiry("gc.pruneexpire", &prune_expire);
git_config_get_expiry("gc.worktreepruneexpire", &prune_worktrees_expire);
git_config_get_expiry("gc.logexpiry", &gc_log_expire);
@@ -331,7 +333,11 @@ static void add_repack_all_option(struct string_list *keep_pack)
{
if (prune_expire && !strcmp(prune_expire, "now"))
strvec_push(&repack, "-a");
- else {
+ else if (cruft_packs) {
+ strvec_push(&repack, "--cruft");
+ if (prune_expire)
+ strvec_pushf(&repack, "--cruft-expiration=%s", prune_expire);
+ } else {
strvec_push(&repack, "-A");
if (prune_expire)
strvec_pushf(&repack, "--unpack-unreachable=%s", prune_expire);
@@ -446,7 +452,7 @@ static const char *lock_repo_for_gc(int force, pid_t* ret_pid)
fscanf(fp, scan_fmt, &pid, locking_host) == 2 &&
/* be gentle to concurrent "gc" on remote hosts */
(strcmp(locking_host, my_host) || !kill(pid, 0) || errno == EPERM);
- if (fp != NULL)
+ if (fp)
fclose(fp);
if (should_exit) {
if (fd >= 0)
@@ -551,6 +557,7 @@ int cmd_gc(int argc, const char **argv, const char *prefix)
{ OPTION_STRING, 0, "prune", &prune_expire, N_("date"),
N_("prune unreferenced objects"),
PARSE_OPT_OPTARG, NULL, (intptr_t)prune_expire },
+ OPT_BOOL(0, "cruft", &cruft_packs, N_("pack unreferenced objects separately")),
OPT_BOOL(0, "aggressive", &aggressive, N_("be more thorough (increased runtime)")),
OPT_BOOL_F(0, "auto", &auto_gc, N_("enable auto-gc mode"),
PARSE_OPT_NOCOMPLETE),
@@ -574,7 +581,7 @@ int cmd_gc(int argc, const char **argv, const char *prefix)
/* default expiry time, overwritten in gc_config */
gc_config();
if (parse_expiry_date(gc_log_expire, &gc_log_expire_time))
- die(_("failed to parse gc.logexpiry value %s"), gc_log_expire);
+ die(_("failed to parse gc.logExpiry value %s"), gc_log_expire);
if (pack_refs < 0)
pack_refs = !is_bare_repository();
@@ -670,6 +677,7 @@ int cmd_gc(int argc, const char **argv, const char *prefix)
die(FAILED_RUN, repack.v[0]);
if (prune_expire) {
+ /* run `git prune` even if using cruft packs */
strvec_push(&prune, prune_expire);
if (quiet)
strvec_push(&prune, "--no-progress");
@@ -2238,7 +2246,7 @@ static int systemd_timer_write_unit_templates(const char *exec_path)
goto error;
}
file = fopen_or_warn(filename, "w");
- if (file == NULL)
+ if (!file)
goto error;
unit = "# This file was created and is maintained by Git.\n"
@@ -2267,7 +2275,7 @@ static int systemd_timer_write_unit_templates(const char *exec_path)
filename = xdg_config_home_systemd("git-maintenance@.service");
file = fopen_or_warn(filename, "w");
- if (file == NULL)
+ if (!file)
goto error;
unit = "# This file was created and is maintained by Git.\n"
diff --git a/builtin/index-pack.c b/builtin/index-pack.c
index 680b66b..6648f2d 100644
--- a/builtin/index-pack.c
+++ b/builtin/index-pack.c
@@ -1575,7 +1575,7 @@ static int git_index_pack_config(const char *k, const char *v, void *cb)
if (!strcmp(k, "pack.indexversion")) {
opts->version = git_config_int(k, v);
if (opts->version > 2)
- die(_("bad pack.indexversion=%"PRIu32), opts->version);
+ die(_("bad pack.indexVersion=%"PRIu32), opts->version);
return 0;
}
if (!strcmp(k, "pack.threads")) {
@@ -1942,11 +1942,11 @@ int cmd_index_pack(int argc, const char **argv, const char *prefix)
free(objects);
strbuf_release(&index_name_buf);
strbuf_release(&rev_index_name_buf);
- if (pack_name == NULL)
+ if (!pack_name)
free((void *) curr_pack);
- if (index_name == NULL)
+ if (!index_name)
free((void *) curr_index);
- if (rev_index_name == NULL)
+ if (!rev_index_name)
free((void *) curr_rev_index);
/*
diff --git a/builtin/log.c b/builtin/log.c
index c211d66..88a5e98 100644
--- a/builtin/log.c
+++ b/builtin/log.c
@@ -231,7 +231,8 @@ static void cmd_log_init_finish(int argc, const char **argv, const char *prefix,
}
if (mailmap) {
- rev->mailmap = xcalloc(1, sizeof(struct string_list));
+ rev->mailmap = xmalloc(sizeof(struct string_list));
+ string_list_init_nodup(rev->mailmap);
read_mailmap(rev->mailmap);
}
@@ -294,6 +295,12 @@ static void cmd_log_init(int argc, const char **argv, const char *prefix,
cmd_log_init_finish(argc, argv, prefix, rev, opt);
}
+static int cmd_log_deinit(int ret, struct rev_info *rev)
+{
+ release_revisions(rev);
+ return ret;
+}
+
/*
* This gives a rough estimate for how many commits we
* will print out in the list.
@@ -417,7 +424,7 @@ static void finish_early_output(struct rev_info *rev)
show_early_header(rev, "done", n);
}
-static int cmd_log_walk(struct rev_info *rev)
+static int cmd_log_walk_no_free(struct rev_info *rev)
{
struct commit *commit;
int saved_nrl = 0;
@@ -444,7 +451,6 @@ static int cmd_log_walk(struct rev_info *rev)
* and HAS_CHANGES being accumulated in rev->diffopt, so be careful to
* retain that state information if replacing rev->diffopt in this loop
*/
- rev->diffopt.no_free = 1;
while ((commit = get_revision(rev)) != NULL) {
if (!log_tree_commit(rev, commit) && rev->max_count >= 0)
/*
@@ -469,8 +475,6 @@ static int cmd_log_walk(struct rev_info *rev)
}
rev->diffopt.degraded_cc_to_c = saved_dcctc;
rev->diffopt.needed_rename_limit = saved_nrl;
- rev->diffopt.no_free = 0;
- diff_free(&rev->diffopt);
if (rev->remerge_diff) {
tmp_objdir_destroy(rev->remerge_objdir);
@@ -484,6 +488,17 @@ static int cmd_log_walk(struct rev_info *rev)
return diff_result_code(&rev->diffopt, 0);
}
+static int cmd_log_walk(struct rev_info *rev)
+{
+ int retval;
+
+ rev->diffopt.no_free = 1;
+ retval = cmd_log_walk_no_free(rev);
+ rev->diffopt.no_free = 0;
+ diff_free(&rev->diffopt);
+ return retval;
+}
+
static int git_log_config(const char *var, const char *value, void *cb)
{
const char *slot_name;
@@ -557,7 +572,7 @@ int cmd_whatchanged(int argc, const char **argv, const char *prefix)
cmd_log_init(argc, argv, prefix, &rev, &opt);
if (!rev.diffopt.output_format)
rev.diffopt.output_format = DIFF_FORMAT_RAW;
- return cmd_log_walk(&rev);
+ return cmd_log_deinit(cmd_log_walk(&rev), &rev);
}
static void show_tagger(const char *buf, struct rev_info *rev)
@@ -661,6 +676,11 @@ int cmd_show(int argc, const char **argv, const char *prefix)
init_log_defaults();
git_config(git_log_config, NULL);
+ if (the_repository->gitdir) {
+ prepare_repo_settings(the_repository);
+ the_repository->settings.command_requires_full_index = 0;
+ }
+
memset(&match_all, 0, sizeof(match_all));
repo_init_revisions(the_repository, &rev, prefix);
git_config(grep_config, &rev.grep_filter);
@@ -676,10 +696,11 @@ int cmd_show(int argc, const char **argv, const char *prefix)
cmd_log_init(argc, argv, prefix, &rev, &opt);
if (!rev.no_walk)
- return cmd_log_walk(&rev);
+ return cmd_log_deinit(cmd_log_walk(&rev), &rev);
count = rev.pending.nr;
objects = rev.pending.objects;
+ rev.diffopt.no_free = 1;
for (i = 0; i < count && !ret; i++) {
struct object *o = objects[i].item;
const char *name = objects[i].name;
@@ -725,14 +746,17 @@ int cmd_show(int argc, const char **argv, const char *prefix)
rev.pending.nr = rev.pending.alloc = 0;
rev.pending.objects = NULL;
add_object_array(o, name, &rev.pending);
- ret = cmd_log_walk(&rev);
+ ret = cmd_log_walk_no_free(&rev);
break;
default:
ret = error(_("unknown type: %d"), o->type);
}
}
- free(objects);
- return ret;
+
+ rev.diffopt.no_free = 0;
+ diff_free(&rev.diffopt);
+
+ return cmd_log_deinit(ret, &rev);
}
/*
@@ -760,7 +784,7 @@ int cmd_log_reflog(int argc, const char **argv, const char *prefix)
rev.always_show_header = 1;
cmd_log_init_finish(argc, argv, prefix, &rev, &opt);
- return cmd_log_walk(&rev);
+ return cmd_log_deinit(cmd_log_walk(&rev), &rev);
}
static void log_setup_revisions_tweak(struct rev_info *rev,
@@ -791,7 +815,7 @@ int cmd_log(int argc, const char **argv, const char *prefix)
opt.revarg_opt = REVARG_COMMITTISH;
opt.tweak = log_setup_revisions_tweak;
cmd_log_init(argc, argv, prefix, &rev, &opt);
- return cmd_log_walk(&rev);
+ return cmd_log_deinit(cmd_log_walk(&rev), &rev);
}
/* format-patch */
@@ -1012,7 +1036,7 @@ static int open_next_file(struct commit *commit, const char *subject,
if (!quiet)
printf("%s\n", filename.buf + outdir_offset);
- if ((rev->diffopt.file = fopen(filename.buf, "w")) == NULL) {
+ if (!(rev->diffopt.file = fopen(filename.buf, "w"))) {
error_errno(_("cannot open patch file %s"), filename.buf);
strbuf_release(&filename);
return -1;
@@ -1746,6 +1770,7 @@ int cmd_format_patch(int argc, const char **argv, const char *prefix)
struct commit *commit;
struct commit **list = NULL;
struct rev_info rev;
+ char *to_free = NULL;
struct setup_revision_opt s_r_opt;
int nr = 0, total, i;
int use_stdout = 0;
@@ -1883,6 +1908,7 @@ int cmd_format_patch(int argc, const char **argv, const char *prefix)
rev.diff = 1;
rev.max_parents = 1;
rev.diffopt.flags.recursive = 1;
+ rev.diffopt.no_free = 1;
rev.subject_prefix = fmt_patch_subject_prefix;
memset(&s_r_opt, 0, sizeof(s_r_opt));
s_r_opt.def = "HEAD";
@@ -1946,7 +1972,7 @@ int cmd_format_patch(int argc, const char **argv, const char *prefix)
strbuf_addch(&buf, '\n');
}
- rev.extra_headers = strbuf_detach(&buf, NULL);
+ rev.extra_headers = to_free = strbuf_detach(&buf, NULL);
if (from) {
if (split_ident_line(&rev.from_ident, from, strlen(from)))
@@ -2008,13 +2034,7 @@ int cmd_format_patch(int argc, const char **argv, const char *prefix)
if (use_stdout) {
setup_pager();
- } else if (rev.diffopt.close_file) {
- /*
- * The diff code parsed --output; it has already opened the
- * file, but we must instruct it not to close after each diff.
- */
- rev.diffopt.no_free = 1;
- } else {
+ } else if (!rev.diffopt.close_file) {
int saved;
if (!output_directory)
@@ -2173,8 +2193,10 @@ int cmd_format_patch(int argc, const char **argv, const char *prefix)
prepare_bases(&bases, base, list, nr);
}
- if (in_reply_to || thread || cover_letter)
- rev.ref_message_ids = xcalloc(1, sizeof(struct string_list));
+ if (in_reply_to || thread || cover_letter) {
+ rev.ref_message_ids = xmalloc(sizeof(*rev.ref_message_ids));
+ string_list_init_nodup(rev.ref_message_ids);
+ }
if (in_reply_to) {
const char *msgid = clean_message_id(in_reply_to);
string_list_append(rev.ref_message_ids, msgid);
@@ -2281,8 +2303,11 @@ done:
strbuf_release(&rdiff1);
strbuf_release(&rdiff2);
strbuf_release(&rdiff_title);
- UNLEAK(rev);
- return 0;
+ free(to_free);
+ if (rev.ref_message_ids)
+ string_list_clear(rev.ref_message_ids, 0);
+ free(rev.ref_message_ids);
+ return cmd_log_deinit(0, &rev);
}
static int add_pending_commit(const char *arg, struct rev_info *revs, int flags)
diff --git a/builtin/ls-remote.c b/builtin/ls-remote.c
index d856085..df44e5c 100644
--- a/builtin/ls-remote.c
+++ b/builtin/ls-remote.c
@@ -114,7 +114,7 @@ int cmd_ls_remote(int argc, const char **argv, const char *prefix)
}
transport = transport_get(remote, NULL);
- if (uploadpack != NULL)
+ if (uploadpack)
transport_set_option(transport, TRANS_OPT_UPLOADPACK, uploadpack);
if (server_options.nr)
transport->server_options = &server_options;
diff --git a/builtin/mailsplit.c b/builtin/mailsplit.c
index 3095235..73509f6 100644
--- a/builtin/mailsplit.c
+++ b/builtin/mailsplit.c
@@ -120,7 +120,7 @@ static int populate_maildir_list(struct string_list *list, const char *path)
for (sub = subs; *sub; ++sub) {
free(name);
name = xstrfmt("%s/%s", path, *sub);
- if ((dir = opendir(name)) == NULL) {
+ if (!(dir = opendir(name))) {
if (errno == ENOENT)
continue;
error_errno("cannot opendir %s", name);
diff --git a/builtin/merge.c b/builtin/merge.c
index f178f5a..d9784d4 100644
--- a/builtin/merge.c
+++ b/builtin/merge.c
@@ -443,6 +443,7 @@ static void squash_message(struct commit *commit, struct commit_list *remotehead
}
write_file_buf(git_path_squash_msg(the_repository), out.buf, out.len);
strbuf_release(&out);
+ release_revisions(&rev);
}
static void finish(struct commit *head_commit,
@@ -998,6 +999,7 @@ static int evaluate_result(void)
*/
cnt += count_unmerged_entries();
+ release_revisions(&rev);
return cnt;
}
diff --git a/builtin/multi-pack-index.c b/builtin/multi-pack-index.c
index 4480ba3..5edbb7f 100644
--- a/builtin/multi-pack-index.c
+++ b/builtin/multi-pack-index.c
@@ -44,7 +44,7 @@ static char const * const builtin_multi_pack_index_usage[] = {
};
static struct opts_multi_pack_index {
- const char *object_dir;
+ char *object_dir;
const char *preferred_pack;
const char *refs_snapshot;
unsigned long batch_size;
@@ -52,9 +52,23 @@ static struct opts_multi_pack_index {
int stdin_packs;
} opts;
+
+static int parse_object_dir(const struct option *opt, const char *arg,
+ int unset)
+{
+ free(opts.object_dir);
+ if (unset)
+ opts.object_dir = xstrdup(get_object_directory());
+ else
+ opts.object_dir = real_pathdup(arg, 1);
+ return 0;
+}
+
static struct option common_opts[] = {
- OPT_FILENAME(0, "object-dir", &opts.object_dir,
- N_("object directory containing set of packfile and pack-index pairs")),
+ OPT_CALLBACK(0, "object-dir", &opts.object_dir,
+ N_("directory"),
+ N_("object directory containing set of packfile and pack-index pairs"),
+ parse_object_dir),
OPT_END(),
};
@@ -232,31 +246,40 @@ static int cmd_multi_pack_index_repack(int argc, const char **argv)
int cmd_multi_pack_index(int argc, const char **argv,
const char *prefix)
{
+ int res;
struct option *builtin_multi_pack_index_options = common_opts;
git_config(git_default_config, NULL);
+ if (the_repository &&
+ the_repository->objects &&
+ the_repository->objects->odb)
+ opts.object_dir = xstrdup(the_repository->objects->odb->path);
+
argc = parse_options(argc, argv, prefix,
builtin_multi_pack_index_options,
builtin_multi_pack_index_usage,
PARSE_OPT_STOP_AT_NON_OPTION);
- if (!opts.object_dir)
- opts.object_dir = get_object_directory();
-
if (!argc)
goto usage;
if (!strcmp(argv[0], "repack"))
- return cmd_multi_pack_index_repack(argc, argv);
+ res = cmd_multi_pack_index_repack(argc, argv);
else if (!strcmp(argv[0], "write"))
- return cmd_multi_pack_index_write(argc, argv);
+ res = cmd_multi_pack_index_write(argc, argv);
else if (!strcmp(argv[0], "verify"))
- return cmd_multi_pack_index_verify(argc, argv);
+ res = cmd_multi_pack_index_verify(argc, argv);
else if (!strcmp(argv[0], "expire"))
- return cmd_multi_pack_index_expire(argc, argv);
+ res = cmd_multi_pack_index_expire(argc, argv);
+ else {
+ error(_("unrecognized subcommand: %s"), argv[0]);
+ goto usage;
+ }
+
+ free(opts.object_dir);
+ return res;
- error(_("unrecognized subcommand: %s"), argv[0]);
usage:
usage_with_options(builtin_multi_pack_index_usage,
builtin_multi_pack_index_options);
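A short sketch of the option handling changed above; `/path/to/alt/objects` is a placeholder, and when `--object-dir` is omitted the command now defaults to the repository's own object directory:

    git multi-pack-index write
    git multi-pack-index --object-dir /path/to/alt/objects verify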
diff --git a/builtin/name-rev.c b/builtin/name-rev.c
index c59b569..580b1eb 100644
--- a/builtin/name-rev.c
+++ b/builtin/name-rev.c
@@ -18,7 +18,7 @@
#define CUTOFF_DATE_SLOP 86400
struct rev_name {
- char *tip_name;
+ const char *tip_name;
timestamp_t taggerdate;
int generation;
int distance;
@@ -84,7 +84,7 @@ static int commit_is_before_cutoff(struct commit *commit)
static int is_valid_rev_name(const struct rev_name *name)
{
- return name && (name->generation || name->tip_name);
+ return name && name->tip_name;
}
static struct rev_name *get_commit_rev_name(const struct commit *commit)
@@ -146,20 +146,9 @@ static struct rev_name *create_or_update_name(struct commit *commit,
{
struct rev_name *name = commit_rev_name_at(&rev_names, commit);
- if (is_valid_rev_name(name)) {
- if (!is_better_name(name, taggerdate, generation, distance, from_tag))
- return NULL;
-
- /*
- * This string might still be shared with ancestors
- * (generation > 0). We can release it here regardless,
- * because the new name that has just won will be better
- * for them as well, so name_rev() will replace these
- * stale pointers when it processes the parents.
- */
- if (!name->generation)
- free(name->tip_name);
- }
+ if (is_valid_rev_name(name) &&
+ !is_better_name(name, taggerdate, generation, distance, from_tag))
+ return NULL;
name->taggerdate = taggerdate;
name->generation = generation;
@@ -588,7 +577,7 @@ int cmd_name_rev(int argc, const char **argv, const char *prefix)
N_("ignore refs matching <pattern>")),
OPT_GROUP(""),
OPT_BOOL(0, "all", &all, N_("list all commits reachable from all refs")),
- OPT_BOOL(0, "stdin", &transform_stdin, N_("deprecated: use annotate-stdin instead")),
+ OPT_BOOL(0, "stdin", &transform_stdin, N_("deprecated: use --annotate-stdin instead")),
OPT_BOOL(0, "annotate-stdin", &annotate_stdin, N_("annotate text from stdin")),
OPT_BOOL(0, "undefined", &allow_undefined, N_("allow to print `undefined` names (default)")),
OPT_BOOL(0, "always", &always,
diff --git a/builtin/pack-objects.c b/builtin/pack-objects.c
index 014dcd4b..39e28cf 100644
--- a/builtin/pack-objects.c
+++ b/builtin/pack-objects.c
@@ -36,6 +36,7 @@
#include "trace2.h"
#include "shallow.h"
#include "promisor-remote.h"
+#include "pack-mtimes.h"
/*
* Objects we are going to pack are collected in the `to_pack` structure.
@@ -194,6 +195,8 @@ static int reuse_delta = 1, reuse_object = 1;
static int keep_unreachable, unpack_unreachable, include_tag;
static timestamp_t unpack_unreachable_expiration;
static int pack_loose_unreachable;
+static int cruft;
+static timestamp_t cruft_expiration;
static int local;
static int have_non_local_packs;
static int incremental;
@@ -1260,9 +1263,13 @@ static void write_pack_file(void)
&to_pack, written_list, nr_written);
}
+ if (cruft)
+ pack_idx_opts.flags |= WRITE_MTIMES;
+
stage_tmp_packfiles(&tmpname, pack_tmp_name,
written_list, nr_written,
- &pack_idx_opts, hash, &idx_tmp_name);
+ &to_pack, &pack_idx_opts, hash,
+ &idx_tmp_name);
if (write_bitmap_index) {
size_t tmpname_len = tmpname.len;
@@ -1357,6 +1364,9 @@ static int want_found_object(const struct object_id *oid, int exclude,
if (incremental)
return 0;
+ if (!is_pack_valid(p))
+ return -1;
+
/*
* When asked to do --local (do not include an object that appears in a
* pack we borrow from elsewhere) or --honor-pack-keep (do not include
@@ -1472,6 +1482,9 @@ static int want_object_in_pack(const struct object_id *oid,
want = want_found_object(oid, exclude, *found_pack);
if (want != -1)
return want;
+
+ *found_pack = NULL;
+ *found_offset = 0;
}
for (m = get_multi_pack_index(the_repository); m; m = m->next) {
@@ -1515,13 +1528,13 @@ static int want_object_in_pack(const struct object_id *oid,
return 1;
}
-static void create_object_entry(const struct object_id *oid,
- enum object_type type,
- uint32_t hash,
- int exclude,
- int no_try_delta,
- struct packed_git *found_pack,
- off_t found_offset)
+static struct object_entry *create_object_entry(const struct object_id *oid,
+ enum object_type type,
+ uint32_t hash,
+ int exclude,
+ int no_try_delta,
+ struct packed_git *found_pack,
+ off_t found_offset)
{
struct object_entry *entry;
@@ -1538,6 +1551,8 @@ static void create_object_entry(const struct object_id *oid,
}
entry->no_try_delta = no_try_delta;
+
+ return entry;
}
static const char no_closure_warning[] = N_(
@@ -3155,7 +3170,7 @@ static int git_pack_config(const char *k, const char *v, void *cb)
if (!strcmp(k, "pack.indexversion")) {
pack_idx_opts.version = git_config_int(k, v);
if (pack_idx_opts.version > 2)
- die(_("bad pack.indexversion=%"PRIu32),
+ die(_("bad pack.indexVersion=%"PRIu32),
pack_idx_opts.version);
return 0;
}
@@ -3201,10 +3216,8 @@ static int add_object_entry_from_pack(const struct object_id *oid,
uint32_t pos,
void *_data)
{
- struct rev_info *revs = _data;
- struct object_info oi = OBJECT_INFO_INIT;
off_t ofs;
- enum object_type type;
+ enum object_type type = OBJ_NONE;
display_progress(progress_state, ++nr_seen);
@@ -3215,19 +3228,24 @@ static int add_object_entry_from_pack(const struct object_id *oid,
if (!want_object_in_pack(oid, 0, &p, &ofs))
return 0;
- oi.typep = &type;
- if (packed_object_info(the_repository, p, ofs, &oi) < 0)
- die(_("could not get type of object %s in pack %s"),
- oid_to_hex(oid), p->pack_name);
- else if (type == OBJ_COMMIT) {
- /*
- * commits in included packs are used as starting points for the
- * subsequent revision walk
- */
- add_pending_oid(revs, NULL, oid, 0);
- }
+ if (p) {
+ struct rev_info *revs = _data;
+ struct object_info oi = OBJECT_INFO_INIT;
- stdin_packs_found_nr++;
+ oi.typep = &type;
+ if (packed_object_info(the_repository, p, ofs, &oi) < 0) {
+ die(_("could not get type of object %s in pack %s"),
+ oid_to_hex(oid), p->pack_name);
+ } else if (type == OBJ_COMMIT) {
+ /*
+ * commits in included packs are used as starting points for the
+ * subsequent revision walk
+ */
+ add_pending_oid(revs, NULL, oid, 0);
+ }
+
+ stdin_packs_found_nr++;
+ }
create_object_entry(oid, type, 0, 0, 0, p, ofs);
@@ -3346,6 +3364,8 @@ static void read_packs_list_from_stdin(void)
struct packed_git *p = item->util;
if (!p)
die(_("could not find pack '%s'"), item->string);
+ if (!is_pack_valid(p))
+ die(_("packfile %s cannot be accessed"), p->pack_name);
}
/*
@@ -3369,8 +3389,6 @@ static void read_packs_list_from_stdin(void)
for_each_string_list_item(item, &include_packs) {
struct packed_git *p = item->util;
- if (!p)
- die(_("could not find pack '%s'"), item->string);
for_each_object_in_pack(p,
add_object_entry_from_pack,
&revs,
@@ -3394,6 +3412,217 @@ static void read_packs_list_from_stdin(void)
string_list_clear(&exclude_packs, 0);
}
+static void add_cruft_object_entry(const struct object_id *oid, enum object_type type,
+ struct packed_git *pack, off_t offset,
+ const char *name, uint32_t mtime)
+{
+ struct object_entry *entry;
+
+ display_progress(progress_state, ++nr_seen);
+
+ entry = packlist_find(&to_pack, oid);
+ if (entry) {
+ if (name) {
+ entry->hash = pack_name_hash(name);
+ entry->no_try_delta = no_try_delta(name);
+ }
+ } else {
+ if (!want_object_in_pack(oid, 0, &pack, &offset))
+ return;
+ if (!pack && type == OBJ_BLOB && !has_loose_object(oid)) {
+ /*
+ * If a traversed tree has a missing blob then we want
+ * to avoid adding that missing object to our pack.
+ *
+ * This only applies to missing blobs, not trees,
+ * because the traversal needs to parse sub-trees but
+ * not blobs.
+ *
+ * Note we only perform this check when we couldn't
+ * already find the object in a pack, so we're really
+ * limited to "ensure non-tip blobs which don't exist in
+ * packs do exist via loose objects". Confused?
+ */
+ return;
+ }
+
+ entry = create_object_entry(oid, type, pack_name_hash(name),
+ 0, name && no_try_delta(name),
+ pack, offset);
+ }
+
+ if (mtime > oe_cruft_mtime(&to_pack, entry))
+ oe_set_cruft_mtime(&to_pack, entry, mtime);
+ return;
+}
+
+static void show_cruft_object(struct object *obj, const char *name, void *data)
+{
+ /*
+ * if we did not record it earlier, it's at least as old as our
+ * expiration value. Rather than find it exactly, just use that
+ * value. This may bump it forward from its real mtime, but it
+ * will still be "too old" next time we run with the same
+ * expiration.
+ *
+ * if obj does appear in the packing list, this call is a noop (or may
+ * set the namehash).
+ */
+ add_cruft_object_entry(&obj->oid, obj->type, NULL, 0, name, cruft_expiration);
+}
+
+static void show_cruft_commit(struct commit *commit, void *data)
+{
+ show_cruft_object((struct object*)commit, NULL, data);
+}
+
+static int cruft_include_check_obj(struct object *obj, void *data)
+{
+ return !has_object_kept_pack(&obj->oid, IN_CORE_KEEP_PACKS);
+}
+
+static int cruft_include_check(struct commit *commit, void *data)
+{
+ return cruft_include_check_obj((struct object*)commit, data);
+}
+
+static void set_cruft_mtime(const struct object *object,
+ struct packed_git *pack,
+ off_t offset, time_t mtime)
+{
+ add_cruft_object_entry(&object->oid, object->type, pack, offset, NULL,
+ mtime);
+}
+
+static void mark_pack_kept_in_core(struct string_list *packs, unsigned keep)
+{
+ struct string_list_item *item = NULL;
+ for_each_string_list_item(item, packs) {
+ struct packed_git *p = item->util;
+ if (!p)
+ die(_("could not find pack '%s'"), item->string);
+ p->pack_keep_in_core = keep;
+ }
+}
+
+static void add_unreachable_loose_objects(void);
+static void add_objects_in_unpacked_packs(void);
+
+static void enumerate_cruft_objects(void)
+{
+ if (progress)
+ progress_state = start_progress(_("Enumerating cruft objects"), 0);
+
+ add_objects_in_unpacked_packs();
+ add_unreachable_loose_objects();
+
+ stop_progress(&progress_state);
+}
+
+static void enumerate_and_traverse_cruft_objects(struct string_list *fresh_packs)
+{
+ struct packed_git *p;
+ struct rev_info revs;
+ int ret;
+
+ repo_init_revisions(the_repository, &revs, NULL);
+
+ revs.tag_objects = 1;
+ revs.tree_objects = 1;
+ revs.blob_objects = 1;
+
+ revs.include_check = cruft_include_check;
+ revs.include_check_obj = cruft_include_check_obj;
+
+ revs.ignore_missing_links = 1;
+
+ if (progress)
+ progress_state = start_progress(_("Enumerating cruft objects"), 0);
+ ret = add_unseen_recent_objects_to_traversal(&revs, cruft_expiration,
+ set_cruft_mtime, 1);
+ stop_progress(&progress_state);
+
+ if (ret)
+ die(_("unable to add cruft objects"));
+
+ /*
+ * Re-mark only the fresh packs as kept so that objects in
+ * unknown packs do not halt the reachability traversal early.
+ */
+ for (p = get_all_packs(the_repository); p; p = p->next)
+ p->pack_keep_in_core = 0;
+ mark_pack_kept_in_core(fresh_packs, 1);
+
+ if (prepare_revision_walk(&revs))
+ die(_("revision walk setup failed"));
+ if (progress)
+ progress_state = start_progress(_("Traversing cruft objects"), 0);
+ nr_seen = 0;
+ traverse_commit_list(&revs, show_cruft_commit, show_cruft_object, NULL);
+
+ stop_progress(&progress_state);
+}
+
+static void read_cruft_objects(void)
+{
+ struct strbuf buf = STRBUF_INIT;
+ struct string_list discard_packs = STRING_LIST_INIT_DUP;
+ struct string_list fresh_packs = STRING_LIST_INIT_DUP;
+ struct packed_git *p;
+
+ ignore_packed_keep_in_core = 1;
+
+ while (strbuf_getline(&buf, stdin) != EOF) {
+ if (!buf.len)
+ continue;
+
+ if (*buf.buf == '-')
+ string_list_append(&discard_packs, buf.buf + 1);
+ else
+ string_list_append(&fresh_packs, buf.buf);
+ strbuf_reset(&buf);
+ }
+
+ string_list_sort(&discard_packs);
+ string_list_sort(&fresh_packs);
+
+ for (p = get_all_packs(the_repository); p; p = p->next) {
+ const char *pack_name = pack_basename(p);
+ struct string_list_item *item;
+
+ item = string_list_lookup(&fresh_packs, pack_name);
+ if (!item)
+ item = string_list_lookup(&discard_packs, pack_name);
+
+ if (item) {
+ item->util = p;
+ } else {
+ /*
+ * This pack wasn't mentioned in either the "fresh" or
+ * "discard" list, so the caller didn't know about it.
+ *
+ * Mark it as kept so that its objects are ignored by
+ * add_unseen_recent_objects_to_traversal(). We'll
+ * unmark it before starting the traversal so it doesn't
+ * halt the traversal early.
+ */
+ p->pack_keep_in_core = 1;
+ }
+ }
+
+ mark_pack_kept_in_core(&fresh_packs, 1);
+ mark_pack_kept_in_core(&discard_packs, 0);
+
+ if (cruft_expiration)
+ enumerate_and_traverse_cruft_objects(&fresh_packs);
+ else
+ enumerate_cruft_objects();
+
+ strbuf_release(&buf);
+ string_list_clear(&discard_packs, 0);
+ string_list_clear(&fresh_packs, 0);
+}
+
static void read_object_list_from_stdin(void)
{
char line[GIT_MAX_HEXSZ + 1 + PATH_MAX + 2];
@@ -3526,7 +3755,24 @@ static int add_object_in_unpacked_pack(const struct object_id *oid,
uint32_t pos,
void *_data)
{
- add_object_entry(oid, OBJ_NONE, "", 0);
+ if (cruft) {
+ off_t offset;
+ time_t mtime;
+
+ if (pack->is_cruft) {
+ if (load_pack_mtimes(pack) < 0)
+ die(_("could not load cruft pack .mtimes"));
+ mtime = nth_packed_mtime(pack, pos);
+ } else {
+ mtime = pack->mtime;
+ }
+ offset = nth_packed_object_offset(pack, pos);
+
+ add_cruft_object_entry(oid, OBJ_NONE, pack, offset,
+ NULL, mtime);
+ } else {
+ add_object_entry(oid, OBJ_NONE, "", 0);
+ }
return 0;
}
@@ -3550,7 +3796,19 @@ static int add_loose_object(const struct object_id *oid, const char *path,
return 0;
}
- add_object_entry(oid, type, "", 0);
+ if (cruft) {
+ struct stat st;
+ if (stat(path, &st) < 0) {
+ if (errno == ENOENT)
+ return 0;
+ return error_errno("unable to stat %s", oid_to_hex(oid));
+ }
+
+ add_cruft_object_entry(oid, type, NULL, 0, NULL,
+ st.st_mtime);
+ } else {
+ add_object_entry(oid, type, "", 0);
+ }
return 0;
}
@@ -3790,7 +4048,7 @@ static void get_object_list(struct rev_info *revs, int ac, const char **av)
if (unpack_unreachable_expiration) {
revs->ignore_missing_links = 1;
if (add_unseen_recent_objects_to_traversal(revs,
- unpack_unreachable_expiration))
+ unpack_unreachable_expiration, NULL, 0))
die(_("unable to add recent objects"));
if (prepare_revision_walk(revs))
die(_("revision walk setup failed"));
@@ -3867,6 +4125,20 @@ static int option_parse_unpack_unreachable(const struct option *opt,
return 0;
}
+static int option_parse_cruft_expiration(const struct option *opt,
+ const char *arg, int unset)
+{
+ if (unset) {
+ cruft = 0;
+ cruft_expiration = 0;
+ } else {
+ cruft = 1;
+ if (arg)
+ cruft_expiration = approxidate(arg);
+ }
+ return 0;
+}
+
struct po_filter_data {
unsigned have_revs:1;
struct rev_info revs;
@@ -3956,6 +4228,10 @@ int cmd_pack_objects(int argc, const char **argv, const char *prefix)
OPT_CALLBACK_F(0, "unpack-unreachable", NULL, N_("time"),
N_("unpack unreachable objects newer than <time>"),
PARSE_OPT_OPTARG, option_parse_unpack_unreachable),
+ OPT_BOOL(0, "cruft", &cruft, N_("create a cruft pack")),
+ OPT_CALLBACK_F(0, "cruft-expiration", NULL, N_("time"),
+ N_("expire cruft objects older than <time>"),
+ PARSE_OPT_OPTARG, option_parse_cruft_expiration),
OPT_BOOL(0, "sparse", &sparse,
N_("use the sparse reachability algorithm")),
OPT_BOOL(0, "thin", &thin,
@@ -4082,7 +4358,7 @@ int cmd_pack_objects(int argc, const char **argv, const char *prefix)
if (!HAVE_THREADS && delta_search_threads != 1)
warning(_("no threads support, ignoring --threads"));
- if (!pack_to_stdout && !pack_size_limit)
+ if (!pack_to_stdout && !pack_size_limit && !cruft)
pack_size_limit = pack_size_limit_cfg;
if (pack_to_stdout && pack_size_limit)
die(_("--max-pack-size cannot be used to build a pack for transfer"));
@@ -4109,6 +4385,15 @@ int cmd_pack_objects(int argc, const char **argv, const char *prefix)
if (stdin_packs && use_internal_rev_list)
die(_("cannot use internal rev list with --stdin-packs"));
+ if (cruft) {
+ if (use_internal_rev_list)
+ die(_("cannot use internal rev list with --cruft"));
+ if (stdin_packs)
+ die(_("cannot use --stdin-packs with --cruft"));
+ if (pack_size_limit)
+ die(_("cannot use --max-pack-size with --cruft"));
+ }
+
/*
* "soft" reasons not to use bitmaps - for on-disk repack by default we want
*
@@ -4165,7 +4450,7 @@ int cmd_pack_objects(int argc, const char **argv, const char *prefix)
the_repository);
prepare_packing_data(the_repository, &to_pack);
- if (progress)
+ if (progress && !cruft)
progress_state = start_progress(_("Enumerating objects"), 0);
if (stdin_packs) {
/* avoids adding objects in excluded packs */
@@ -4173,15 +4458,19 @@ int cmd_pack_objects(int argc, const char **argv, const char *prefix)
read_packs_list_from_stdin();
if (rev_list_unpacked)
add_unreachable_loose_objects();
+ } else if (cruft) {
+ read_cruft_objects();
} else if (!use_internal_rev_list) {
read_object_list_from_stdin();
} else if (pfd.have_revs) {
get_object_list(&pfd.revs, rp.nr, rp.v);
+ release_revisions(&pfd.revs);
} else {
struct rev_info revs;
repo_init_revisions(the_repository, &revs, NULL);
get_object_list(&revs, rp.nr, rp.v);
+ release_revisions(&revs);
}
cleanup_preferred_base();
if (include_tag && nr_result)
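A sketch of the stdin protocol that read_cruft_objects() above implements; the pack names are hypothetical, and in practice `git repack --cruft` is expected to drive this rather than a user typing it:

    # fresh packs are listed as-is; a leading '-' marks packs about to be
    # discarded, whose objects become candidates for the cruft pack
    git pack-objects --cruft --cruft-expiration=2.weeks.ago .git/objects/pack/tmp-pack <<EOF
    pack-aaaa.pack
    -pack-bbbb.pack
    EOF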
diff --git a/builtin/pack-redundant.c b/builtin/pack-redundant.c
index 8bf5c0a..ed9b901 100644
--- a/builtin/pack-redundant.c
+++ b/builtin/pack-redundant.c
@@ -101,7 +101,7 @@ static inline struct llist_item *llist_insert(struct llist *list,
oidread(&new_item->oid, oid);
new_item->next = NULL;
- if (after != NULL) {
+ if (after) {
new_item->next = after->next;
after->next = new_item;
if (after == list->back)
@@ -157,7 +157,7 @@ redo_from_start:
if (cmp > 0) /* not in list, since sorted */
return prev;
if (!cmp) { /* found */
- if (prev == NULL) {
+ if (!prev) {
if (hint != NULL && hint != list->front) {
/* we don't know the previous element */
hint = NULL;
@@ -219,7 +219,7 @@ static struct pack_list * pack_list_difference(const struct pack_list *A,
struct pack_list *ret;
const struct pack_list *pl;
- if (A == NULL)
+ if (!A)
return NULL;
pl = B;
@@ -317,7 +317,7 @@ static size_t get_pack_redundancy(struct pack_list *pl)
struct pack_list *subset;
size_t ret = 0;
- if (pl == NULL)
+ if (!pl)
return 0;
while ((subset = pl->next)) {
@@ -611,7 +611,7 @@ int cmd_pack_redundant(int argc, const char **argv, const char *prefix)
while (*(argv + i) != NULL)
add_pack_file(*(argv + i++));
- if (local_packs == NULL)
+ if (!local_packs)
die("Zero packs found!");
load_all_objects();
diff --git a/builtin/prune.c b/builtin/prune.c
index c2bcdc0..df376b2 100644
--- a/builtin/prune.c
+++ b/builtin/prune.c
@@ -196,5 +196,6 @@ int cmd_prune(int argc, const char **argv, const char *prefix)
prune_shallow(show_only ? PRUNE_SHOW_ONLY : 0);
}
+ release_revisions(&revs);
return 0;
}
diff --git a/builtin/pull.c b/builtin/pull.c
index 4d667ab..01155ba 100644
--- a/builtin/pull.c
+++ b/builtin/pull.c
@@ -72,6 +72,7 @@ static const char * const pull_usage[] = {
static int opt_verbosity;
static char *opt_progress;
static int recurse_submodules = RECURSE_SUBMODULES_DEFAULT;
+static int recurse_submodules_cli = RECURSE_SUBMODULES_DEFAULT;
/* Options passed to git-merge or git-rebase */
static enum rebase_type opt_rebase = -1;
@@ -120,7 +121,7 @@ static struct option pull_options[] = {
N_("force progress reporting"),
PARSE_OPT_NOARG),
OPT_CALLBACK_F(0, "recurse-submodules",
- &recurse_submodules, N_("on-demand"),
+ &recurse_submodules_cli, N_("on-demand"),
N_("control for recursive fetching of submodules"),
PARSE_OPT_OPTARG, option_fetch_parse_recurse_submodules),
@@ -536,8 +537,8 @@ static int run_fetch(const char *repo, const char **refspecs)
strvec_push(&args, opt_tags);
if (opt_prune)
strvec_push(&args, opt_prune);
- if (recurse_submodules != RECURSE_SUBMODULES_DEFAULT)
- switch (recurse_submodules) {
+ if (recurse_submodules_cli != RECURSE_SUBMODULES_DEFAULT)
+ switch (recurse_submodules_cli) {
case RECURSE_SUBMODULES_ON:
strvec_push(&args, "--recurse-submodules=on");
break;
@@ -1001,6 +1002,9 @@ int cmd_pull(int argc, const char **argv, const char *prefix)
argc = parse_options(argc, argv, prefix, pull_options, pull_usage, 0);
+ if (recurse_submodules_cli != RECURSE_SUBMODULES_DEFAULT)
+ recurse_submodules = recurse_submodules_cli;
+
if (cleanup_arg)
/*
* this only checks the validity of cleanup_arg; we don't need
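A sketch of the interaction the new `recurse_submodules_cli` variable above guards: configuration supplies the default, and only an explicit command-line value is forwarded to the underlying fetch. The option and config names are from existing fetch/pull documentation:

    git config fetch.recurseSubmodules on-demand   # default from config
    git pull --recurse-submodules=no origin main   # explicit CLI value wins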
diff --git a/builtin/push.c b/builtin/push.c
index cad9979..df0d68e 100644
--- a/builtin/push.c
+++ b/builtin/push.c
@@ -2,6 +2,7 @@
* "git push"
*/
#include "cache.h"
+#include "branch.h"
#include "config.h"
#include "refs.h"
#include "refspec.h"
@@ -151,7 +152,8 @@ static NORETURN void die_push_simple(struct branch *branch,
* upstream to a non-branch, we should probably be showing
* them the big ugly fully qualified ref.
*/
- const char *advice_maybe = "";
+ const char *advice_pushdefault_maybe = "";
+ const char *advice_automergesimple_maybe = "";
const char *short_upstream = branch->merge[0]->src;
skip_prefix(short_upstream, "refs/heads/", &short_upstream);
@@ -161,9 +163,16 @@ static NORETURN void die_push_simple(struct branch *branch,
* push.default.
*/
if (push_default == PUSH_DEFAULT_UNSPECIFIED)
- advice_maybe = _("\n"
+ advice_pushdefault_maybe = _("\n"
"To choose either option permanently, "
- "see push.default in 'git help config'.");
+ "see push.default in 'git help config'.\n");
+ if (git_branch_track != BRANCH_TRACK_SIMPLE)
+ advice_automergesimple_maybe = _("\n"
+ "To avoid automatically configuring "
+ "upstream branches when their name\n"
+ "doesn't match the local branch, see option "
+ "'simple' of branch.autoSetupMerge\n"
+ "in 'git help config'.\n");
die(_("The upstream branch of your current branch does not match\n"
"the name of your current branch. To push to the upstream branch\n"
"on the remote, use\n"
@@ -173,9 +182,10 @@ static NORETURN void die_push_simple(struct branch *branch,
"To push to the branch of the same name on the remote, use\n"
"\n"
" git push %s HEAD\n"
- "%s"),
+ "%s%s"),
remote->name, short_upstream,
- remote->name, advice_maybe);
+ remote->name, advice_pushdefault_maybe,
+ advice_automergesimple_maybe);
}
static const char message_detached_head_die[] =
@@ -185,16 +195,32 @@ static const char message_detached_head_die[] =
"\n"
" git push %s HEAD:<name-of-remote-branch>\n");
-static const char *get_upstream_ref(struct branch *branch, const char *remote_name)
+static const char *get_upstream_ref(int flags, struct branch *branch, const char *remote_name)
{
- if (!branch->merge_nr || !branch->merge || !branch->remote_name)
+ if (branch->merge_nr == 0 && (flags & TRANSPORT_PUSH_AUTO_UPSTREAM)) {
+ /* if missing, assume same; set_upstream will be defined later */
+ return branch->refname;
+ }
+
+ if (!branch->merge_nr || !branch->merge || !branch->remote_name) {
+ const char *advice_autosetup_maybe = "";
+ if (!(flags & TRANSPORT_PUSH_AUTO_UPSTREAM)) {
+ advice_autosetup_maybe = _("\n"
+ "To have this happen automatically for "
+ "branches without a tracking\n"
+ "upstream, see 'push.autoSetupRemote' "
+ "in 'git help config'.\n");
+ }
die(_("The current branch %s has no upstream branch.\n"
"To push the current branch and set the remote as upstream, use\n"
"\n"
- " git push --set-upstream %s %s\n"),
+ " git push --set-upstream %s %s\n"
+ "%s"),
branch->name,
remote_name,
- branch->name);
+ branch->name,
+ advice_autosetup_maybe);
+ }
if (branch->merge_nr != 1)
die(_("The current branch %s has multiple upstream branches, "
"refusing to push."), branch->name);
@@ -202,7 +228,7 @@ static const char *get_upstream_ref(struct branch *branch, const char *remote_na
return branch->merge[0]->src;
}
-static void setup_default_push_refspecs(struct remote *remote)
+static void setup_default_push_refspecs(int *flags, struct remote *remote)
{
struct branch *branch;
const char *dst;
@@ -234,7 +260,7 @@ static void setup_default_push_refspecs(struct remote *remote)
case PUSH_DEFAULT_SIMPLE:
if (!same_remote)
break;
- if (strcmp(branch->refname, get_upstream_ref(branch, remote->name)))
+ if (strcmp(branch->refname, get_upstream_ref(*flags, branch, remote->name)))
die_push_simple(branch, remote);
break;
@@ -244,13 +270,21 @@ static void setup_default_push_refspecs(struct remote *remote)
"your current branch '%s', without telling me what to push\n"
"to update which remote branch."),
remote->name, branch->name);
- dst = get_upstream_ref(branch, remote->name);
+ dst = get_upstream_ref(*flags, branch, remote->name);
break;
case PUSH_DEFAULT_CURRENT:
break;
}
+ /*
+ * this is a default push - if auto-upstream is enabled and there is
+ * no upstream defined, then set it (with options 'simple', 'upstream',
+ * and 'current').
+ */
+ if ((*flags & TRANSPORT_PUSH_AUTO_UPSTREAM) && branch->merge_nr == 0)
+ *flags |= TRANSPORT_PUSH_SET_UPSTREAM;
+
refspec_appendf(&rs, "%s:%s", branch->refname, dst);
}
@@ -401,7 +435,7 @@ static int do_push(int flags,
if (remote->push.nr) {
push_refspec = &remote->push;
} else if (!(flags & TRANSPORT_PUSH_MIRROR))
- setup_default_push_refspecs(remote);
+ setup_default_push_refspecs(&flags, remote);
}
errs = 0;
url_nr = push_url_of_remote(remote, &url);
@@ -472,6 +506,10 @@ static int git_push_config(const char *k, const char *v, void *cb)
else
*flags &= ~TRANSPORT_PUSH_FOLLOW_TAGS;
return 0;
+ } else if (!strcmp(k, "push.autosetupremote")) {
+ if (git_config_bool(k, v))
+ *flags |= TRANSPORT_PUSH_AUTO_UPSTREAM;
+ return 0;
} else if (!strcmp(k, "push.gpgsign")) {
const char *value;
if (!git_config_get_value("push.gpgsign", &value)) {
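A usage sketch of the new `push.autoSetupRemote` handling above, assuming a freshly created branch with no upstream and `push.default` left at its default:

    git config push.autoSetupRemote true
    git switch -c topic
    git push      # pushes 'topic' and records the remote branch as its upstream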
diff --git a/builtin/rebase.c b/builtin/rebase.c
index 27fde7b..70aa7c8 100644
--- a/builtin/rebase.c
+++ b/builtin/rebase.c
@@ -1110,8 +1110,8 @@ int cmd_rebase(int argc, const char **argv, const char *prefix)
PARSE_OPT_NOARG | PARSE_OPT_NONEG,
parse_opt_interactive),
OPT_SET_INT_F('p', "preserve-merges", &preserve_merges_selected,
- N_("(DEPRECATED) try to recreate merges instead of "
- "ignoring them"),
+ N_("(REMOVED) was: try to recreate merges "
+ "instead of ignoring them"),
1, PARSE_OPT_HIDDEN),
OPT_RERERE_AUTOUPDATE(&options.allow_rerere_autoupdate),
OPT_CALLBACK_F(0, "empty", &options, "{drop,keep,ask}",
@@ -1182,16 +1182,16 @@ int cmd_rebase(int argc, const char **argv, const char *prefix)
} else if (is_directory(merge_dir())) {
strbuf_reset(&buf);
strbuf_addf(&buf, "%s/rewritten", merge_dir());
- if (is_directory(buf.buf)) {
- die("`rebase -p` is no longer supported");
+ if (!(action == ACTION_ABORT) && is_directory(buf.buf)) {
+ die(_("`rebase --preserve-merges` (-p) is no longer supported.\n"
+ "Use `git rebase --abort` to terminate current rebase.\n"
+ "Or downgrade to v2.33, or earlier, to complete the rebase."));
} else {
strbuf_reset(&buf);
strbuf_addf(&buf, "%s/interactive", merge_dir());
- if(file_exists(buf.buf)) {
- options.type = REBASE_MERGE;
+ options.type = REBASE_MERGE;
+ if (file_exists(buf.buf))
options.flags |= REBASE_INTERACTIVE_EXPLICIT;
- } else
- options.type = REBASE_MERGE;
}
options.state_dir = merge_dir();
}
@@ -1205,7 +1205,9 @@ int cmd_rebase(int argc, const char **argv, const char *prefix)
builtin_rebase_usage, 0);
if (preserve_merges_selected)
- die(_("--preserve-merges was replaced by --rebase-merges"));
+ die(_("--preserve-merges was replaced by --rebase-merges\n"
+ "Note: Your `pull.rebase` configuration may also be set to 'preserve',\n"
+ "which is no longer supported; use 'merges' instead"));
if (action != ACTION_NONE && total_argc != 2) {
usage_with_options(builtin_rebase_usage,
@@ -1583,33 +1585,6 @@ int cmd_rebase(int argc, const char **argv, const char *prefix)
options.upstream_arg = "--root";
}
- /* Make sure the branch to rebase onto is valid. */
- if (keep_base) {
- strbuf_reset(&buf);
- strbuf_addstr(&buf, options.upstream_name);
- strbuf_addstr(&buf, "...");
- options.onto_name = xstrdup(buf.buf);
- } else if (!options.onto_name)
- options.onto_name = options.upstream_name;
- if (strstr(options.onto_name, "...")) {
- if (get_oid_mb(options.onto_name, &merge_base) < 0) {
- if (keep_base)
- die(_("'%s': need exactly one merge base with branch"),
- options.upstream_name);
- else
- die(_("'%s': need exactly one merge base"),
- options.onto_name);
- }
- options.onto = lookup_commit_or_die(&merge_base,
- options.onto_name);
- } else {
- options.onto =
- lookup_commit_reference_by_name(options.onto_name);
- if (!options.onto)
- die(_("Does not point to a valid commit '%s'"),
- options.onto_name);
- }
-
/*
* If the branch to rebase is given, that is the branch we will rebase
* branch_name -- branch/commit being rebased, or
@@ -1659,6 +1634,34 @@ int cmd_rebase(int argc, const char **argv, const char *prefix)
} else
BUG("unexpected number of arguments left to parse");
+ /* Make sure the branch to rebase onto is valid. */
+ if (keep_base) {
+ strbuf_reset(&buf);
+ strbuf_addstr(&buf, options.upstream_name);
+ strbuf_addstr(&buf, "...");
+ strbuf_addstr(&buf, branch_name);
+ options.onto_name = xstrdup(buf.buf);
+ } else if (!options.onto_name)
+ options.onto_name = options.upstream_name;
+ if (strstr(options.onto_name, "...")) {
+ if (get_oid_mb(options.onto_name, &merge_base) < 0) {
+ if (keep_base)
+ die(_("'%s': need exactly one merge base with branch"),
+ options.upstream_name);
+ else
+ die(_("'%s': need exactly one merge base"),
+ options.onto_name);
+ }
+ options.onto = lookup_commit_or_die(&merge_base,
+ options.onto_name);
+ } else {
+ options.onto =
+ lookup_commit_reference_by_name(options.onto_name);
+ if (!options.onto)
+ die(_("Does not point to a valid commit '%s'"),
+ options.onto_name);
+ }
+
if (options.fork_point > 0) {
struct commit *head =
lookup_commit_reference(the_repository,
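A sketch of `--keep-base` after the reordering above, which lets the merge-base spec include the branch being rebased (`<upstream>...<branch>`); the branch names are placeholders:

    # replay 'topic' onto its existing merge base with origin/main instead of
    # onto the current tip of origin/main
    git rebase --keep-base origin/main topic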
diff --git a/builtin/receive-pack.c b/builtin/receive-pack.c
index 9aabffa..31b48e7 100644
--- a/builtin/receive-pack.c
+++ b/builtin/receive-pack.c
@@ -764,23 +764,23 @@ static void prepare_push_cert_sha1(struct child_process *proc)
nonce_status = check_nonce(push_cert.buf, bogs);
}
if (!is_null_oid(&push_cert_oid)) {
- strvec_pushf(&proc->env_array, "GIT_PUSH_CERT=%s",
+ strvec_pushf(&proc->env, "GIT_PUSH_CERT=%s",
oid_to_hex(&push_cert_oid));
- strvec_pushf(&proc->env_array, "GIT_PUSH_CERT_SIGNER=%s",
+ strvec_pushf(&proc->env, "GIT_PUSH_CERT_SIGNER=%s",
sigcheck.signer ? sigcheck.signer : "");
- strvec_pushf(&proc->env_array, "GIT_PUSH_CERT_KEY=%s",
+ strvec_pushf(&proc->env, "GIT_PUSH_CERT_KEY=%s",
sigcheck.key ? sigcheck.key : "");
- strvec_pushf(&proc->env_array, "GIT_PUSH_CERT_STATUS=%c",
+ strvec_pushf(&proc->env, "GIT_PUSH_CERT_STATUS=%c",
sigcheck.result);
if (push_cert_nonce) {
- strvec_pushf(&proc->env_array,
+ strvec_pushf(&proc->env,
"GIT_PUSH_CERT_NONCE=%s",
push_cert_nonce);
- strvec_pushf(&proc->env_array,
+ strvec_pushf(&proc->env,
"GIT_PUSH_CERT_NONCE_STATUS=%s",
nonce_status);
if (nonce_status == NONCE_SLOP)
- strvec_pushf(&proc->env_array,
+ strvec_pushf(&proc->env,
"GIT_PUSH_CERT_NONCE_SLOP=%ld",
nonce_stamp_slop);
}
@@ -815,17 +815,17 @@ static int run_and_feed_hook(const char *hook_name, feed_fn feed,
if (feed_state->push_options) {
size_t i;
for (i = 0; i < feed_state->push_options->nr; i++)
- strvec_pushf(&proc.env_array,
+ strvec_pushf(&proc.env,
"GIT_PUSH_OPTION_%"PRIuMAX"=%s",
(uintmax_t)i,
feed_state->push_options->items[i].string);
- strvec_pushf(&proc.env_array, "GIT_PUSH_OPTION_COUNT=%"PRIuMAX"",
+ strvec_pushf(&proc.env, "GIT_PUSH_OPTION_COUNT=%"PRIuMAX"",
(uintmax_t)feed_state->push_options->nr);
} else
- strvec_pushf(&proc.env_array, "GIT_PUSH_OPTION_COUNT");
+ strvec_pushf(&proc.env, "GIT_PUSH_OPTION_COUNT");
if (tmp_objdir)
- strvec_pushv(&proc.env_array, tmp_objdir_env(tmp_objdir));
+ strvec_pushv(&proc.env, tmp_objdir_env(tmp_objdir));
if (use_sideband) {
memset(&muxer, 0, sizeof(muxer));
@@ -1357,7 +1357,7 @@ static const char *push_to_deploy(unsigned char *sha1,
strvec_pushl(&child.args, "update-index", "-q", "--ignore-submodules",
"--refresh", NULL);
- strvec_pushv(&child.env_array, env->v);
+ strvec_pushv(&child.env, env->v);
child.dir = work_tree;
child.no_stdin = 1;
child.stdout_to_stderr = 1;
@@ -1369,7 +1369,7 @@ static const char *push_to_deploy(unsigned char *sha1,
child_process_init(&child);
strvec_pushl(&child.args, "diff-files", "--quiet",
"--ignore-submodules", "--", NULL);
- strvec_pushv(&child.env_array, env->v);
+ strvec_pushv(&child.env, env->v);
child.dir = work_tree;
child.no_stdin = 1;
child.stdout_to_stderr = 1;
@@ -1383,7 +1383,7 @@ static const char *push_to_deploy(unsigned char *sha1,
/* diff-index with either HEAD or an empty tree */
head_has_history() ? "HEAD" : empty_tree_oid_hex(),
"--", NULL);
- strvec_pushv(&child.env_array, env->v);
+ strvec_pushv(&child.env, env->v);
child.no_stdin = 1;
child.no_stdout = 1;
child.stdout_to_stderr = 0;
@@ -1394,7 +1394,7 @@ static const char *push_to_deploy(unsigned char *sha1,
child_process_init(&child);
strvec_pushl(&child.args, "read-tree", "-u", "-m", hash_to_hex(sha1),
NULL);
- strvec_pushv(&child.env_array, env->v);
+ strvec_pushv(&child.env, env->v);
child.dir = work_tree;
child.no_stdin = 1;
child.no_stdout = 1;
@@ -1664,7 +1664,7 @@ static void check_aliased_update_internal(struct command *cmd,
}
dst_name = strip_namespace(dst_name);
- if ((item = string_list_lookup(list, dst_name)) == NULL)
+ if (!(item = string_list_lookup(list, dst_name)))
return;
cmd->skip_update = 1;
@@ -1810,21 +1810,17 @@ static int should_process_cmd(struct command *cmd)
return !cmd->error_string && !cmd->skip_update;
}
-static void warn_if_skipped_connectivity_check(struct command *commands,
+static void BUG_if_skipped_connectivity_check(struct command *commands,
struct shallow_info *si)
{
struct command *cmd;
- int checked_connectivity = 1;
for (cmd = commands; cmd; cmd = cmd->next) {
- if (should_process_cmd(cmd) && si->shallow_ref[cmd->index]) {
- error("BUG: connectivity check has not been run on ref %s",
- cmd->ref_name);
- checked_connectivity = 0;
- }
+ if (should_process_cmd(cmd) && si->shallow_ref[cmd->index])
+ bug("connectivity check has not been run on ref %s",
+ cmd->ref_name);
}
- if (!checked_connectivity)
- BUG("connectivity check skipped???");
+ BUG_if_bug("connectivity check skipped???");
}
static void execute_commands_non_atomic(struct command *commands,
@@ -2005,7 +2001,7 @@ static void execute_commands(struct command *commands,
execute_commands_non_atomic(commands, si);
if (shallow_update)
- warn_if_skipped_connectivity_check(commands, si);
+ BUG_if_skipped_connectivity_check(commands, si);
}
static struct command **queue_command(struct command **tail,
@@ -2214,8 +2210,7 @@ static const char *unpack(int err_fd, struct shallow_info *si)
close(err_fd);
return "unable to create temporary object directory";
}
- if (tmp_objdir)
- strvec_pushv(&child.env_array, tmp_objdir_env(tmp_objdir));
+ strvec_pushv(&child.env, tmp_objdir_env(tmp_objdir));
/*
* Normally we just pass the tmp_objdir environment to the child
@@ -2538,7 +2533,7 @@ int cmd_receive_pack(int argc, const char **argv, const char *prefix)
PACKET_READ_CHOMP_NEWLINE |
PACKET_READ_DIE_ON_ERR_PACKET);
- if ((commands = read_head_info(&reader, &shallow)) != NULL) {
+ if ((commands = read_head_info(&reader, &shallow))) {
const char *unpack_status = NULL;
struct string_list push_options = STRING_LIST_INIT_DUP;
diff --git a/builtin/reflog.c b/builtin/reflog.c
index c943c2a..4dd297d 100644
--- a/builtin/reflog.c
+++ b/builtin/reflog.c
@@ -293,6 +293,7 @@ static int cmd_reflog_expire(int argc, const char **argv, const char *prefix)
if (verbose)
printf(_("Marking reachable objects..."));
mark_reachable_objects(&revs, 0, 0, NULL);
+ release_revisions(&revs);
if (verbose)
putchar('\n');
}
diff --git a/builtin/remote.c b/builtin/remote.c
index 5f4cde9..d4b69fe 100644
--- a/builtin/remote.c
+++ b/builtin/remote.c
@@ -1185,14 +1185,22 @@ static int show_push_info_item(struct string_list_item *item, void *cb_data)
static int get_one_entry(struct remote *remote, void *priv)
{
struct string_list *list = priv;
- struct strbuf url_buf = STRBUF_INIT;
+ struct strbuf remote_info_buf = STRBUF_INIT;
const char **url;
int i, url_nr;
if (remote->url_nr > 0) {
- strbuf_addf(&url_buf, "%s (fetch)", remote->url[0]);
+ struct strbuf promisor_config = STRBUF_INIT;
+ const char *partial_clone_filter = NULL;
+
+ strbuf_addf(&promisor_config, "remote.%s.partialclonefilter", remote->name);
+ strbuf_addf(&remote_info_buf, "%s (fetch)", remote->url[0]);
+ if (!git_config_get_string_tmp(promisor_config.buf, &partial_clone_filter))
+ strbuf_addf(&remote_info_buf, " [%s]", partial_clone_filter);
+
+ strbuf_release(&promisor_config);
string_list_append(list, remote->name)->util =
- strbuf_detach(&url_buf, NULL);
+ strbuf_detach(&remote_info_buf, NULL);
} else
string_list_append(list, remote->name)->util = NULL;
if (remote->pushurl_nr) {
@@ -1204,9 +1212,9 @@ static int get_one_entry(struct remote *remote, void *priv)
}
for (i = 0; i < url_nr; i++)
{
- strbuf_addf(&url_buf, "%s (push)", url[i]);
+ strbuf_addf(&remote_info_buf, "%s (push)", url[i]);
string_list_append(list, remote->name)->util =
- strbuf_detach(&url_buf, NULL);
+ strbuf_detach(&remote_info_buf, NULL);
}
return 0;
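Illustrative output for the `git remote -v` change above; the URL and filter are examples, and the bracketed suffix comes from the new `remote.<name>.partialclonefilter` lookup:

    $ git remote -v
    origin  https://example.com/repo.git (fetch) [blob:none]
    origin  https://example.com/repo.git (push)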
diff --git a/builtin/repack.c b/builtin/repack.c
index d1a563d..4a7ae4c 100644
--- a/builtin/repack.c
+++ b/builtin/repack.c
@@ -18,12 +18,21 @@
#include "pack-bitmap.h"
#include "refs.h"
+#define ALL_INTO_ONE 1
+#define LOOSEN_UNREACHABLE 2
+#define PACK_CRUFT 4
+
+#define DELETE_PACK 1
+#define CRUFT_PACK 2
+
+static int pack_everything;
static int delta_base_offset = 1;
static int pack_kept_objects = -1;
static int write_bitmaps = -1;
static int use_delta_islands;
static int run_update_server_info = 1;
static char *packdir, *packtmp_name, *packtmp;
+static char *cruft_expiration;
static const char *const git_repack_usage[] = {
N_("git repack [<options>]"),
@@ -32,12 +41,24 @@ static const char *const git_repack_usage[] = {
static const char incremental_bitmap_conflict_error[] = N_(
"Incremental repacks are incompatible with bitmap indexes. Use\n"
-"--no-write-bitmap-index or disable the pack.writebitmaps configuration."
+"--no-write-bitmap-index or disable the pack.writeBitmaps configuration."
);
+struct pack_objects_args {
+ const char *window;
+ const char *window_memory;
+ const char *depth;
+ const char *threads;
+ const char *max_pack_size;
+ int no_reuse_delta;
+ int no_reuse_object;
+ int quiet;
+ int local;
+};
static int repack_config(const char *var, const char *value, void *cb)
{
+ struct pack_objects_args *cruft_po_args = cb;
if (!strcmp(var, "repack.usedeltabaseoffset")) {
delta_base_offset = git_config_bool(var, value);
return 0;
@@ -59,6 +80,14 @@ static int repack_config(const char *var, const char *value, void *cb)
run_update_server_info = git_config_bool(var, value);
return 0;
}
+ if (!strcmp(var, "repack.cruftwindow"))
+ return git_config_string(&cruft_po_args->window, var, value);
+ if (!strcmp(var, "repack.cruftwindowmemory"))
+ return git_config_string(&cruft_po_args->window_memory, var, value);
+ if (!strcmp(var, "repack.cruftdepth"))
+ return git_config_string(&cruft_po_args->depth, var, value);
+ if (!strcmp(var, "repack.cruftthreads"))
+ return git_config_string(&cruft_po_args->threads, var, value);
return git_default_config(var, value, cb);
}
@@ -131,12 +160,19 @@ static void collect_pack_filenames(struct string_list *fname_nonkept_list,
fname = xmemdupz(e->d_name, len);
if ((extra_keep->nr > 0 && i < extra_keep->nr) ||
- (file_exists(mkpath("%s/%s.keep", packdir, fname))))
+ (file_exists(mkpath("%s/%s.keep", packdir, fname)))) {
string_list_append_nodup(fname_kept_list, fname);
- else
- string_list_append_nodup(fname_nonkept_list, fname);
+ } else {
+ struct string_list_item *item;
+ item = string_list_append_nodup(fname_nonkept_list,
+ fname);
+ if (file_exists(mkpath("%s/%s.mtimes", packdir, fname)))
+ item->util = (void*)(uintptr_t)CRUFT_PACK;
+ }
}
closedir(dir);
+
+ string_list_sort(fname_kept_list);
}
static void remove_redundant_pack(const char *dir_name, const char *base_name)
@@ -151,18 +187,6 @@ static void remove_redundant_pack(const char *dir_name, const char *base_name)
strbuf_release(&buf);
}
-struct pack_objects_args {
- const char *window;
- const char *window_memory;
- const char *depth;
- const char *threads;
- const char *max_pack_size;
- int no_reuse_delta;
- int no_reuse_object;
- int quiet;
- int local;
-};
-
static void prepare_pack_objects(struct child_process *cmd,
const struct pack_objects_args *args)
{
@@ -217,6 +241,7 @@ static struct {
} exts[] = {
{".pack"},
{".rev", 1},
+ {".mtimes", 1},
{".bitmap", 1},
{".promisor", 1},
{".idx"},
@@ -304,9 +329,6 @@ static void repack_promisor_objects(const struct pack_objects_args *args,
die(_("could not finish pack-objects to repack promisor objects"));
}
-#define ALL_INTO_ONE 1
-#define LOOSEN_UNREACHABLE 2
-
struct pack_geometry {
struct packed_git **pack;
uint32_t pack_nr, pack_alloc;
@@ -332,16 +354,39 @@ static int geometry_cmp(const void *va, const void *vb)
return 0;
}
-static void init_pack_geometry(struct pack_geometry **geometry_p)
+static void init_pack_geometry(struct pack_geometry **geometry_p,
+ struct string_list *existing_kept_packs)
{
struct packed_git *p;
struct pack_geometry *geometry;
+ struct strbuf buf = STRBUF_INIT;
*geometry_p = xcalloc(1, sizeof(struct pack_geometry));
geometry = *geometry_p;
for (p = get_all_packs(the_repository); p; p = p->next) {
- if (!pack_kept_objects && p->pack_keep)
+ if (!pack_kept_objects) {
+ /*
+ * Any pack that has its pack_keep bit set will appear
+ * in existing_kept_packs below, but this saves us from
+ * doing a more expensive check.
+ */
+ if (p->pack_keep)
+ continue;
+
+ /*
+ * The pack may be kept via the --keep-pack option;
+ * check 'existing_kept_packs' to determine whether to
+ * ignore it.
+ */
+ strbuf_reset(&buf);
+ strbuf_addstr(&buf, pack_basename(p));
+ strbuf_strip_suffix(&buf, ".pack");
+
+ if (string_list_has_string(existing_kept_packs, buf.buf))
+ continue;
+ }
+ if (p->is_cruft)
continue;
ALLOC_GROW(geometry->pack,
@@ -353,6 +398,7 @@ static void init_pack_geometry(struct pack_geometry **geometry_p)
}
QSORT(geometry->pack, geometry->pack_nr, geometry_cmp);
+ strbuf_release(&buf);
}
static void split_pack_geometry(struct pack_geometry *geometry, int factor)
@@ -548,9 +594,20 @@ static void midx_included_packs(struct string_list *include,
string_list_insert(include, strbuf_detach(&buf, NULL));
}
+
+ for_each_string_list_item(item, existing_nonkept_packs) {
+ if (!((uintptr_t)item->util & CRUFT_PACK)) {
+ /*
+ * no need to check DELETE_PACK, since we're not
+ * doing an ALL_INTO_ONE repack
+ */
+ continue;
+ }
+ string_list_insert(include, xstrfmt("%s.idx", item->string));
+ }
} else {
for_each_string_list_item(item, existing_nonkept_packs) {
- if (item->util)
+ if ((uintptr_t)item->util & DELETE_PACK)
continue;
string_list_insert(include, xstrfmt("%s.idx", item->string));
}
@@ -604,6 +661,67 @@ static int write_midx_included_packs(struct string_list *include,
return finish_command(&cmd);
}
+static int write_cruft_pack(const struct pack_objects_args *args,
+ const char *pack_prefix,
+ struct string_list *names,
+ struct string_list *existing_packs,
+ struct string_list *existing_kept_packs)
+{
+ struct child_process cmd = CHILD_PROCESS_INIT;
+ struct strbuf line = STRBUF_INIT;
+ struct string_list_item *item;
+ FILE *in, *out;
+ int ret;
+
+ prepare_pack_objects(&cmd, args);
+
+ strvec_push(&cmd.args, "--cruft");
+ if (cruft_expiration)
+ strvec_pushf(&cmd.args, "--cruft-expiration=%s",
+ cruft_expiration);
+
+ strvec_push(&cmd.args, "--honor-pack-keep");
+ strvec_push(&cmd.args, "--non-empty");
+ strvec_push(&cmd.args, "--max-pack-size=0");
+
+ cmd.in = -1;
+
+ ret = start_command(&cmd);
+ if (ret)
+ return ret;
+
+ /*
+ * names has a confusing double use: it both provides the list
+ * of just-written new packs, and accepts the name of the cruft
+ * pack we are writing.
+ *
+ * By the time it is read here, it contains only the pack(s)
+ * that were just written, which is exactly the set of packs we
+ * want to consider kept.
+ */
+ in = xfdopen(cmd.in, "w");
+ for_each_string_list_item(item, names)
+ fprintf(in, "%s-%s.pack\n", pack_prefix, item->string);
+ for_each_string_list_item(item, existing_packs)
+ fprintf(in, "-%s.pack\n", item->string);
+ for_each_string_list_item(item, existing_kept_packs)
+ fprintf(in, "%s.pack\n", item->string);
+ fclose(in);
+
+ out = xfdopen(cmd.out, "r");
+ while (strbuf_getline_lf(&line, out) != EOF) {
+ if (line.len != the_hash_algo->hexsz)
+ die(_("repack: Expecting full hex object ID lines only "
+ "from pack-objects."));
+ string_list_append(names, line.buf);
+ }
+ fclose(out);
+
+ strbuf_release(&line);
+
+ return finish_command(&cmd);
+}
+
int cmd_repack(int argc, const char **argv, const char *prefix)
{
struct child_process cmd = CHILD_PROCESS_INIT;
@@ -620,12 +738,12 @@ int cmd_repack(int argc, const char **argv, const char *prefix)
int show_progress;
/* variables to be filled by option parsing */
- int pack_everything = 0;
int delete_redundant = 0;
const char *unpack_unreachable = NULL;
int keep_unreachable = 0;
struct string_list keep_pack_list = STRING_LIST_INIT_NODUP;
struct pack_objects_args po_args = {NULL};
+ struct pack_objects_args cruft_po_args = {NULL};
int geometric_factor = 0;
int write_midx = 0;
@@ -635,6 +753,11 @@ int cmd_repack(int argc, const char **argv, const char *prefix)
OPT_BIT('A', NULL, &pack_everything,
N_("same as -a, and turn unreachable objects loose"),
LOOSEN_UNREACHABLE | ALL_INTO_ONE),
+ OPT_BIT(0, "cruft", &pack_everything,
+ N_("same as -a, pack unreachable cruft objects separately"),
+ PACK_CRUFT),
+ OPT_STRING(0, "cruft-expiration", &cruft_expiration, N_("approxidate"),
+ N_("with -C, expire objects older than this")),
OPT_BOOL('d', NULL, &delete_redundant,
N_("remove redundant packs, and run git-prune-packed")),
OPT_BOOL('f', NULL, &po_args.no_reuse_delta,
@@ -675,7 +798,7 @@ int cmd_repack(int argc, const char **argv, const char *prefix)
OPT_END()
};
- git_config(repack_config, NULL);
+ git_config(repack_config, &cruft_po_args);
argc = parse_options(argc, argv, prefix, builtin_repack_options,
git_repack_usage, 0);
@@ -687,6 +810,15 @@ int cmd_repack(int argc, const char **argv, const char *prefix)
(unpack_unreachable || (pack_everything & LOOSEN_UNREACHABLE)))
die(_("options '%s' and '%s' cannot be used together"), "--keep-unreachable", "-A");
+ if (pack_everything & PACK_CRUFT) {
+ pack_everything |= ALL_INTO_ONE;
+
+ if (unpack_unreachable || (pack_everything & LOOSEN_UNREACHABLE))
+ die(_("options '%s' and '%s' cannot be used together"), "--cruft", "-A");
+ if (keep_unreachable)
+ die(_("options '%s' and '%s' cannot be used together"), "--cruft", "-k");
+ }
+
if (write_bitmaps < 0) {
if (!write_midx &&
(!(pack_everything & ALL_INTO_ONE) || !is_bare_repository()))
@@ -714,17 +846,20 @@ int cmd_repack(int argc, const char **argv, const char *prefix)
strbuf_release(&path);
}
+ packdir = mkpathdup("%s/pack", get_object_directory());
+ packtmp_name = xstrfmt(".tmp-%d-pack", (int)getpid());
+ packtmp = mkpathdup("%s/%s", packdir, packtmp_name);
+
+ collect_pack_filenames(&existing_nonkept_packs, &existing_kept_packs,
+ &keep_pack_list);
+
if (geometric_factor) {
if (pack_everything)
die(_("options '%s' and '%s' cannot be used together"), "--geometric", "-A/-a");
- init_pack_geometry(&geometry);
+ init_pack_geometry(&geometry, &existing_kept_packs);
split_pack_geometry(geometry, geometric_factor);
}
- packdir = mkpathdup("%s/pack", get_object_directory());
- packtmp_name = xstrfmt(".tmp-%d-pack", (int)getpid());
- packtmp = mkpathdup("%s/%s", packdir, packtmp_name);
-
sigchain_push_common(remove_pack_on_signal);
prepare_pack_objects(&cmd, &po_args);
@@ -764,13 +899,11 @@ int cmd_repack(int argc, const char **argv, const char *prefix)
if (use_delta_islands)
strvec_push(&cmd.args, "--delta-islands");
- collect_pack_filenames(&existing_nonkept_packs, &existing_kept_packs,
- &keep_pack_list);
-
if (pack_everything & ALL_INTO_ONE) {
repack_promisor_objects(&po_args, &names);
- if (existing_nonkept_packs.nr && delete_redundant) {
+ if (existing_nonkept_packs.nr && delete_redundant &&
+ !(pack_everything & PACK_CRUFT)) {
for_each_string_list_item(item, &names) {
strvec_pushf(&cmd.args, "--keep-pack=%s-%s.pack",
packtmp_name, item->string);
@@ -832,6 +965,35 @@ int cmd_repack(int argc, const char **argv, const char *prefix)
if (!names.nr && !po_args.quiet)
printf_ln(_("Nothing new to pack."));
+ if (pack_everything & PACK_CRUFT) {
+ const char *pack_prefix;
+ if (!skip_prefix(packtmp, packdir, &pack_prefix))
+ die(_("pack prefix %s does not begin with objdir %s"),
+ packtmp, packdir);
+ if (*pack_prefix == '/')
+ pack_prefix++;
+
+ if (!cruft_po_args.window)
+ cruft_po_args.window = po_args.window;
+ if (!cruft_po_args.window_memory)
+ cruft_po_args.window_memory = po_args.window_memory;
+ if (!cruft_po_args.depth)
+ cruft_po_args.depth = po_args.depth;
+ if (!cruft_po_args.threads)
+ cruft_po_args.threads = po_args.threads;
+
+ cruft_po_args.local = po_args.local;
+ cruft_po_args.quiet = po_args.quiet;
+
+ ret = write_cruft_pack(&cruft_po_args, pack_prefix, &names,
+ &existing_nonkept_packs,
+ &existing_kept_packs);
+ if (ret)
+ return ret;
+ }
+
+ string_list_sort(&names);
+
for_each_string_list_item(item, &names) {
item->util = (void *)(uintptr_t)populate_pack_exts(item->string);
}
@@ -872,7 +1034,6 @@ int cmd_repack(int argc, const char **argv, const char *prefix)
if (delete_redundant && pack_everything & ALL_INTO_ONE) {
const int hexsz = the_hash_algo->hexsz;
- string_list_sort(&names);
for_each_string_list_item(item, &existing_nonkept_packs) {
char *sha1;
size_t len = strlen(item->string);
@@ -885,7 +1046,8 @@ int cmd_repack(int argc, const char **argv, const char *prefix)
* was given) and that we will actually delete this pack
* (if `-d` was given).
*/
- item->util = (void*)(intptr_t)!string_list_has_string(&names, sha1);
+ if (!string_list_has_string(&names, sha1))
+ item->util = (void*)(uintptr_t)((size_t)item->util | DELETE_PACK);
}
}
@@ -909,7 +1071,7 @@ int cmd_repack(int argc, const char **argv, const char *prefix)
if (delete_redundant) {
int opts = 0;
for_each_string_list_item(item, &existing_nonkept_packs) {
- if (!item->util)
+ if (!((uintptr_t)item->util & DELETE_PACK))
continue;
remove_redundant_pack(packdir, item->string);
}
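Annotation (not part of the patch): the repack.c hunks above stop treating each string_list item's `util` pointer as a plain boolean and instead store bit flags in it (CRUFT_PACK set while collecting pack filenames, DELETE_PACK set later when a pack is scheduled for removal). A minimal standalone sketch of that C idiom, independent of Git's headers, is below; the `struct item` type is purely illustrative.

/*
 * Standalone sketch (not Git code) of packing small bit flags into a
 * void *util pointer via uintptr_t, as the repack hunks above do.
 */
#include <stdint.h>
#include <stdio.h>

#define DELETE_PACK 1
#define CRUFT_PACK  2

struct item { const char *name; void *util; };

static void set_flag(struct item *it, uintptr_t flag)
{
	it->util = (void *)((uintptr_t)it->util | flag);
}

static int has_flag(const struct item *it, uintptr_t flag)
{
	return !!((uintptr_t)it->util & flag);
}

int main(void)
{
	struct item pack = { "pack-1234", NULL };

	set_flag(&pack, CRUFT_PACK);
	set_flag(&pack, DELETE_PACK);

	if (has_flag(&pack, DELETE_PACK))
		printf("%s would be deleted\n", pack.name);
	return 0;
}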
diff --git a/builtin/replace.c b/builtin/replace.c
index 5068f4f..583702a 100644
--- a/builtin/replace.c
+++ b/builtin/replace.c
@@ -72,7 +72,7 @@ static int list_replace_refs(const char *pattern, const char *format)
{
struct show_data data;
- if (pattern == NULL)
+ if (!pattern)
pattern = "*";
data.pattern = pattern;
diff --git a/builtin/rev-list.c b/builtin/rev-list.c
index 572da14..30fd8e8 100644
--- a/builtin/rev-list.c
+++ b/builtin/rev-list.c
@@ -213,10 +213,8 @@ static void show_commit(struct commit *commit, void *data)
static void finish_commit(struct commit *commit)
{
- if (commit->parents) {
- free_commit_list(commit->parents);
- commit->parents = NULL;
- }
+ free_commit_list(commit->parents);
+ commit->parents = NULL;
free_commit_buffer(the_repository->parsed_objects,
commit);
}
@@ -502,6 +500,7 @@ int cmd_rev_list(int argc, const char **argv, const char *prefix)
int use_bitmap_index = 0;
int filter_provided_objects = 0;
const char *show_progress = NULL;
+ int ret = 0;
if (argc == 2 && !strcmp(argv[1], "-h"))
usage(rev_list_usage);
@@ -585,7 +584,7 @@ int cmd_rev_list(int argc, const char **argv, const char *prefix)
}
if (!strcmp(arg, "--test-bitmap")) {
test_bitmap_walk(&revs);
- return 0;
+ goto cleanup;
}
if (skip_prefix(arg, "--progress=", &arg)) {
show_progress = arg;
@@ -674,11 +673,11 @@ int cmd_rev_list(int argc, const char **argv, const char *prefix)
if (use_bitmap_index) {
if (!try_bitmap_count(&revs, filter_provided_objects))
- return 0;
+ goto cleanup;
if (!try_bitmap_disk_usage(&revs, filter_provided_objects))
- return 0;
+ goto cleanup;
if (!try_bitmap_traversal(&revs, filter_provided_objects))
- return 0;
+ goto cleanup;
}
if (prepare_revision_walk(&revs))
@@ -698,8 +697,10 @@ int cmd_rev_list(int argc, const char **argv, const char *prefix)
find_bisection(&revs.commits, &reaches, &all, bisect_flags);
- if (bisect_show_vars)
- return show_bisect_vars(&info, reaches, all);
+ if (bisect_show_vars) {
+ ret = show_bisect_vars(&info, reaches, all);
+ goto cleanup;
+ }
}
if (filter_provided_objects) {
@@ -754,5 +755,7 @@ int cmd_rev_list(int argc, const char **argv, const char *prefix)
if (show_disk_usage)
printf("%"PRIuMAX"\n", (uintmax_t)total_disk_usage);
- return 0;
+cleanup:
+ release_revisions(&revs);
+ return ret;
}
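Annotation (not part of the patch): the rev-list hunks convert early `return 0` exits into jumps to a single `cleanup:` label so that release_revisions() runs on every path. A hedged sketch of that shape, using the internal revision API exactly as it appears in this patch, follows; do_walk() is a hypothetical traversal helper and the function assumes Git's internal "revision.h" declarations.

/*
 * Sketch of the single-exit shape the hunks above converge on.
 * Assumes Git's internal revision.h API; do_walk() is hypothetical.
 */
static int walk_with_cleanup(struct repository *r, int argc, const char **argv)
{
	struct rev_info revs;
	int ret = 0;

	repo_init_revisions(r, &revs, NULL);
	argc = setup_revisions(argc, argv, &revs, NULL);

	if (prepare_revision_walk(&revs)) {
		ret = error("revision walk setup failed");
		goto cleanup;
	}

	ret = do_walk(&revs);		/* hypothetical traversal helper */

cleanup:
	release_revisions(&revs);	/* frees pending objects, pathspecs, etc. */
	return ret;
}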
diff --git a/builtin/rev-parse.c b/builtin/rev-parse.c
index 8480a59..b259d89 100644
--- a/builtin/rev-parse.c
+++ b/builtin/rev-parse.c
@@ -476,7 +476,7 @@ static int cmd_parseopt(int argc, const char **argv, const char *prefix)
/* name(s) */
s = strpbrk(sb.buf, flag_chars);
- if (s == NULL)
+ if (!s)
s = help;
if (s - sb.buf == 1) /* short option only */
@@ -723,6 +723,9 @@ int cmd_rev_parse(int argc, const char **argv, const char *prefix)
prefix = setup_git_directory();
git_config(git_default_config, NULL);
did_repo_setup = 1;
+
+ prepare_repo_settings(the_repository);
+ the_repository->settings.command_requires_full_index = 0;
}
if (!strcmp(arg, "--")) {
diff --git a/builtin/shortlog.c b/builtin/shortlog.c
index 26c5c0c..35825f0 100644
--- a/builtin/shortlog.c
+++ b/builtin/shortlog.c
@@ -81,8 +81,10 @@ static void insert_one_record(struct shortlog *log,
format_subject(&subject, oneline, " ");
buffer = strbuf_detach(&subject, NULL);
- if (item->util == NULL)
- item->util = xcalloc(1, sizeof(struct string_list));
+ if (!item->util) {
+ item->util = xmalloc(sizeof(struct string_list));
+ string_list_init_nodup(item->util);
+ }
string_list_append(item->util, buffer);
}
}
@@ -420,6 +422,8 @@ parse_done:
else
get_from_rev(&rev, &log);
+ release_revisions(&rev);
+
shortlog_output(&log);
if (log.file != stdout)
fclose(log.file);
diff --git a/builtin/show-branch.c b/builtin/show-branch.c
index 330b055..64c649c 100644
--- a/builtin/show-branch.c
+++ b/builtin/show-branch.c
@@ -712,6 +712,10 @@ int cmd_show_branch(int ac, const char **av, const char *prefix)
"--all/--remotes/--independent/--merge-base");
}
+ if (with_current_branch && reflog)
+ die(_("options '%s' and '%s' cannot be used together"),
+ "--reflog", "--current");
+
/* If nothing is specified, show all branches by default */
if (ac <= topics && all_heads + all_remotes == 0)
all_heads = 1;
diff --git a/builtin/show-ref.c b/builtin/show-ref.c
index 7f8a533..5fa207a 100644
--- a/builtin/show-ref.c
+++ b/builtin/show-ref.c
@@ -52,14 +52,6 @@ static int show_ref(const char *refname, const struct object_id *oid,
if (show_head && !strcmp(refname, "HEAD"))
goto match;
- if (tags_only || heads_only) {
- int match;
-
- match = heads_only && starts_with(refname, "refs/heads/");
- match |= tags_only && starts_with(refname, "refs/tags/");
- if (!match)
- return 0;
- }
if (pattern) {
int reflen = strlen(refname);
const char **p = pattern, *m;
@@ -216,7 +208,14 @@ int cmd_show_ref(int argc, const char **argv, const char *prefix)
if (show_head)
head_ref(show_ref, NULL);
- for_each_ref(show_ref, NULL);
+ if (heads_only || tags_only) {
+ if (heads_only)
+ for_each_fullref_in("refs/heads/", show_ref, NULL);
+ if (tags_only)
+ for_each_fullref_in("refs/tags/", show_ref, NULL);
+ } else {
+ for_each_ref(show_ref, NULL);
+ }
if (!found_match) {
if (verify && !quiet)
die("No match");
diff --git a/builtin/sparse-checkout.c b/builtin/sparse-checkout.c
index 0217d44..f91e29b 100644
--- a/builtin/sparse-checkout.c
+++ b/builtin/sparse-checkout.c
@@ -128,7 +128,7 @@ static void clean_tracked_sparse_directories(struct repository *r)
* sparse index will not delete directories that contain
* conflicted entries or submodules.
*/
- if (!r->index->sparse_index) {
+ if (r->index->sparse_index == INDEX_EXPANDED) {
/*
* If something, such as a merge conflict or other concern,
* prevents us from converting to a sparse index, then do
@@ -395,7 +395,7 @@ static int update_modes(int *cone_mode, int *sparse_index)
/* Set cone/non-cone mode appropriately */
core_apply_sparse_checkout = 1;
- if (*cone_mode == 1) {
+ if (*cone_mode == 1 || *cone_mode == -1) {
mode = MODE_CONE_PATTERNS;
core_sparse_checkout_cone = 1;
} else {
@@ -413,6 +413,9 @@ static int update_modes(int *cone_mode, int *sparse_index)
/* force an index rewrite */
repo_read_index(the_repository);
the_repository->index->updated_workdir = 1;
+
+ if (!*sparse_index)
+ ensure_full_index(the_repository->index);
}
return 0;
@@ -934,6 +937,9 @@ int cmd_sparse_checkout(int argc, const char **argv, const char *prefix)
git_config(git_default_config, NULL);
+ prepare_repo_settings(the_repository);
+ the_repository->settings.command_requires_full_index = 0;
+
if (argc > 0) {
if (!strcmp(argv[0], "list"))
return sparse_checkout_list(argc, argv);
diff --git a/builtin/stash.c b/builtin/stash.c
index 0c7b6a9..30fa101 100644
--- a/builtin/stash.c
+++ b/builtin/stash.c
@@ -7,6 +7,7 @@
#include "cache-tree.h"
#include "unpack-trees.h"
#include "merge-recursive.h"
+#include "merge-ort-wrappers.h"
#include "strvec.h"
#include "run-command.h"
#include "dir.h"
@@ -116,6 +117,10 @@ struct stash_info {
int has_u;
};
+#define STASH_INFO_INIT { \
+ .revision = STRBUF_INIT, \
+}
+
static void free_stash_info(struct stash_info *info)
{
strbuf_release(&info->revision);
@@ -157,10 +162,8 @@ static int get_stash_info(struct stash_info *info, int argc, const char **argv)
if (argc == 1)
commit = argv[0];
- strbuf_init(&info->revision, 0);
if (!commit) {
if (!ref_exists(ref_stash)) {
- free_stash_info(info);
fprintf_ln(stderr, _("No stash entries found."));
return -1;
}
@@ -174,11 +177,8 @@ static int get_stash_info(struct stash_info *info, int argc, const char **argv)
revision = info->revision.buf;
- if (get_oid(revision, &info->w_commit)) {
- error(_("%s is not a valid reference"), revision);
- free_stash_info(info);
- return -1;
- }
+ if (get_oid(revision, &info->w_commit))
+ return error(_("%s is not a valid reference"), revision);
assert_stash_like(info, revision);
@@ -197,7 +197,7 @@ static int get_stash_info(struct stash_info *info, int argc, const char **argv)
info->is_stash_ref = !strcmp(expanded_ref, ref_stash);
break;
default: /* Invalid or ambiguous */
- free_stash_info(info);
+ break;
}
free(expanded_ref);
@@ -356,7 +356,7 @@ static int restore_untracked(struct object_id *u_tree)
cp.git_cmd = 1;
strvec_push(&cp.args, "read-tree");
strvec_push(&cp.args, oid_to_hex(u_tree));
- strvec_pushf(&cp.env_array, "GIT_INDEX_FILE=%s",
+ strvec_pushf(&cp.env, "GIT_INDEX_FILE=%s",
stash_index_path.buf);
if (run_command(&cp)) {
remove_path(stash_index_path.buf);
@@ -366,7 +366,7 @@ static int restore_untracked(struct object_id *u_tree)
child_process_init(&cp);
cp.git_cmd = 1;
strvec_pushl(&cp.args, "checkout-index", "--all", NULL);
- strvec_pushf(&cp.env_array, "GIT_INDEX_FILE=%s",
+ strvec_pushf(&cp.env, "GIT_INDEX_FILE=%s",
stash_index_path.buf);
res = run_command(&cp);
@@ -492,13 +492,13 @@ static void unstage_changes_unless_new(struct object_id *orig_tree)
static int do_apply_stash(const char *prefix, struct stash_info *info,
int index, int quiet)
{
- int ret;
+ int clean, ret;
int has_index = index;
struct merge_options o;
struct object_id c_tree;
struct object_id index_tree;
- struct commit *result;
- const struct object_id *bases[1];
+ struct tree *head, *merge, *merge_base;
+ struct lock_file lock = LOCK_INIT;
read_cache_preload(NULL);
if (refresh_and_write_cache(REFRESH_QUIET, 0, 0))
@@ -541,6 +541,7 @@ static int do_apply_stash(const char *prefix, struct stash_info *info,
o.branch1 = "Updated upstream";
o.branch2 = "Stashed changes";
+ o.ancestor = "Stash base";
if (oideq(&info->b_tree, &c_tree))
o.branch1 = "Version stash was based on";
@@ -551,10 +552,26 @@ static int do_apply_stash(const char *prefix, struct stash_info *info,
if (o.verbosity >= 3)
printf_ln(_("Merging %s with %s"), o.branch1, o.branch2);
- bases[0] = &info->b_tree;
+ head = lookup_tree(o.repo, &c_tree);
+ merge = lookup_tree(o.repo, &info->w_tree);
+ merge_base = lookup_tree(o.repo, &info->b_tree);
+
+ repo_hold_locked_index(o.repo, &lock, LOCK_DIE_ON_ERROR);
+ clean = merge_ort_nonrecursive(&o, head, merge, merge_base);
+
+ /*
+ * If 'clean' >= 0, reverse the value for 'ret' so 'ret' is 0 when the
+ * merge was clean, and nonzero if the merge was unclean or encountered
+ * an error.
+ */
+ ret = clean >= 0 ? !clean : clean;
+
+ if (ret < 0)
+ rollback_lock_file(&lock);
+ else if (write_locked_index(o.repo->index, &lock,
+ COMMIT_LOCK | SKIP_IF_UNCHANGED))
+ ret = error(_("could not write index"));
- ret = merge_recursive_generic(&o, &c_tree, &info->w_tree, 1, bases,
- &result);
if (ret) {
rerere(0);
@@ -585,9 +602,9 @@ restore_untracked:
*/
cp.git_cmd = 1;
cp.dir = prefix;
- strvec_pushf(&cp.env_array, GIT_WORK_TREE_ENVIRONMENT"=%s",
+ strvec_pushf(&cp.env, GIT_WORK_TREE_ENVIRONMENT"=%s",
absolute_path(get_git_work_tree()));
- strvec_pushf(&cp.env_array, GIT_DIR_ENVIRONMENT"=%s",
+ strvec_pushf(&cp.env, GIT_DIR_ENVIRONMENT"=%s",
absolute_path(get_git_dir()));
strvec_push(&cp.args, "status");
run_command(&cp);
@@ -598,10 +615,10 @@ restore_untracked:
static int apply_stash(int argc, const char **argv, const char *prefix)
{
- int ret;
+ int ret = -1;
int quiet = 0;
int index = 0;
- struct stash_info info;
+ struct stash_info info = STASH_INFO_INIT;
struct option options[] = {
OPT__QUIET(&quiet, N_("be quiet, only report errors")),
OPT_BOOL(0, "index", &index,
@@ -613,9 +630,10 @@ static int apply_stash(int argc, const char **argv, const char *prefix)
git_stash_apply_usage, 0);
if (get_stash_info(&info, argc, argv))
- return -1;
+ goto cleanup;
ret = do_apply_stash(prefix, &info, index, quiet);
+cleanup:
free_stash_info(&info);
return ret;
}
@@ -651,20 +669,25 @@ static int do_drop_stash(struct stash_info *info, int quiet)
return 0;
}
-static void assert_stash_ref(struct stash_info *info)
+static int get_stash_info_assert(struct stash_info *info, int argc,
+ const char **argv)
{
- if (!info->is_stash_ref) {
- error(_("'%s' is not a stash reference"), info->revision.buf);
- free_stash_info(info);
- exit(1);
- }
+ int ret = get_stash_info(info, argc, argv);
+
+ if (ret < 0)
+ return ret;
+
+ if (!info->is_stash_ref)
+ return error(_("'%s' is not a stash reference"), info->revision.buf);
+
+ return 0;
}
static int drop_stash(int argc, const char **argv, const char *prefix)
{
- int ret;
+ int ret = -1;
int quiet = 0;
- struct stash_info info;
+ struct stash_info info = STASH_INFO_INIT;
struct option options[] = {
OPT__QUIET(&quiet, N_("be quiet, only report errors")),
OPT_END()
@@ -673,22 +696,21 @@ static int drop_stash(int argc, const char **argv, const char *prefix)
argc = parse_options(argc, argv, prefix, options,
git_stash_drop_usage, 0);
- if (get_stash_info(&info, argc, argv))
- return -1;
-
- assert_stash_ref(&info);
+ if (get_stash_info_assert(&info, argc, argv))
+ goto cleanup;
ret = do_drop_stash(&info, quiet);
+cleanup:
free_stash_info(&info);
return ret;
}
static int pop_stash(int argc, const char **argv, const char *prefix)
{
- int ret;
+ int ret = -1;
int index = 0;
int quiet = 0;
- struct stash_info info;
+ struct stash_info info = STASH_INFO_INIT;
struct option options[] = {
OPT__QUIET(&quiet, N_("be quiet, only report errors")),
OPT_BOOL(0, "index", &index,
@@ -699,25 +721,25 @@ static int pop_stash(int argc, const char **argv, const char *prefix)
argc = parse_options(argc, argv, prefix, options,
git_stash_pop_usage, 0);
- if (get_stash_info(&info, argc, argv))
- return -1;
+ if (get_stash_info_assert(&info, argc, argv))
+ goto cleanup;
- assert_stash_ref(&info);
if ((ret = do_apply_stash(prefix, &info, index, quiet)))
printf_ln(_("The stash entry is kept in case "
"you need it again."));
else
ret = do_drop_stash(&info, quiet);
+cleanup:
free_stash_info(&info);
return ret;
}
static int branch_stash(int argc, const char **argv, const char *prefix)
{
- int ret;
+ int ret = -1;
const char *branch = NULL;
- struct stash_info info;
+ struct stash_info info = STASH_INFO_INIT;
struct child_process cp = CHILD_PROCESS_INIT;
struct option options[] = {
OPT_END()
@@ -734,7 +756,7 @@ static int branch_stash(int argc, const char **argv, const char *prefix)
branch = argv[0];
if (get_stash_info(&info, argc - 1, argv + 1))
- return -1;
+ goto cleanup;
cp.git_cmd = 1;
strvec_pushl(&cp.args, "checkout", "-b", NULL);
@@ -746,8 +768,8 @@ static int branch_stash(int argc, const char **argv, const char *prefix)
if (!ret && info.is_stash_ref)
ret = do_drop_stash(&info, 0);
+cleanup:
free_stash_info(&info);
-
return ret;
}
@@ -825,8 +847,8 @@ static void diff_include_untracked(const struct stash_info *info, struct diff_op
static int show_stash(int argc, const char **argv, const char *prefix)
{
int i;
- int ret = 0;
- struct stash_info info;
+ int ret = -1;
+ struct stash_info info = STASH_INFO_INIT;
struct rev_info rev;
struct strvec stash_args = STRVEC_INIT;
struct strvec revision_args = STRVEC_INIT;
@@ -844,6 +866,7 @@ static int show_stash(int argc, const char **argv, const char *prefix)
UNTRACKED_ONLY, PARSE_OPT_NONEG),
OPT_END()
};
+ int do_usage = 0;
init_diff_ui_defaults();
git_config(git_diff_ui_config, NULL);
@@ -861,10 +884,8 @@ static int show_stash(int argc, const char **argv, const char *prefix)
strvec_push(&revision_args, argv[i]);
}
- ret = get_stash_info(&info, stash_args.nr, stash_args.v);
- strvec_clear(&stash_args);
- if (ret)
- return -1;
+ if (get_stash_info(&info, stash_args.nr, stash_args.v))
+ goto cleanup;
/*
* The config settings are applied only if there are not passed
@@ -878,16 +899,14 @@ static int show_stash(int argc, const char **argv, const char *prefix)
rev.diffopt.output_format |= DIFF_FORMAT_PATCH;
if (!show_stat && !show_patch) {
- free_stash_info(&info);
- return 0;
+ ret = 0;
+ goto cleanup;
}
}
argc = setup_revisions(revision_args.nr, revision_args.v, &rev, NULL);
- if (argc > 1) {
- free_stash_info(&info);
- usage_with_options(git_stash_show_usage, options);
- }
+ if (argc > 1)
+ goto usage;
if (!rev.diffopt.output_format) {
rev.diffopt.output_format = DIFF_FORMAT_PATCH;
diff_setup_done(&rev.diffopt);
@@ -912,8 +931,17 @@ static int show_stash(int argc, const char **argv, const char *prefix)
}
log_tree_diff_flush(&rev);
+ ret = diff_result_code(&rev.diffopt, 0);
+cleanup:
+ strvec_clear(&stash_args);
free_stash_info(&info);
- return diff_result_code(&rev.diffopt, 0);
+ release_revisions(&rev);
+ if (do_usage)
+ usage_with_options(git_stash_show_usage, options);
+ return ret;
+usage:
+ do_usage = 1;
+ goto cleanup;
}
static int do_store_stash(const struct object_id *w_commit, const char *stash_msg,
@@ -1047,7 +1075,6 @@ static int check_changes_tracked_files(const struct pathspec *ps)
goto done;
}
- object_array_clear(&rev.pending);
result = run_diff_files(&rev, 0);
if (diff_result_code(&rev.diffopt, result)) {
ret = 1;
@@ -1055,7 +1082,7 @@ static int check_changes_tracked_files(const struct pathspec *ps)
}
done:
- clear_pathspec(&rev.prune_data);
+ release_revisions(&rev);
return ret;
}
@@ -1088,7 +1115,7 @@ static int save_untracked_files(struct stash_info *info, struct strbuf *msg,
cp_upd_index.git_cmd = 1;
strvec_pushl(&cp_upd_index.args, "update-index", "-z", "--add",
"--remove", "--stdin", NULL);
- strvec_pushf(&cp_upd_index.env_array, "GIT_INDEX_FILE=%s",
+ strvec_pushf(&cp_upd_index.env, "GIT_INDEX_FILE=%s",
stash_index_path.buf);
strbuf_addf(&untracked_msg, "untracked files on %s\n", msg->buf);
@@ -1162,7 +1189,7 @@ static int stash_patch(struct stash_info *info, const struct pathspec *ps,
cp_read_tree.git_cmd = 1;
strvec_pushl(&cp_read_tree.args, "read-tree", "HEAD", NULL);
- strvec_pushf(&cp_read_tree.env_array, "GIT_INDEX_FILE=%s",
+ strvec_pushf(&cp_read_tree.env, "GIT_INDEX_FILE=%s",
stash_index_path.buf);
if (run_command(&cp_read_tree)) {
ret = -1;
@@ -1249,7 +1276,7 @@ static int stash_working_tree(struct stash_info *info, const struct pathspec *ps
strvec_pushl(&cp_upd_index.args, "update-index",
"--ignore-skip-worktree-entries",
"-z", "--add", "--remove", "--stdin", NULL);
- strvec_pushf(&cp_upd_index.env_array, "GIT_INDEX_FILE=%s",
+ strvec_pushf(&cp_upd_index.env, "GIT_INDEX_FILE=%s",
stash_index_path.buf);
if (pipe_command(&cp_upd_index, diff_output.buf, diff_output.len,
@@ -1266,9 +1293,7 @@ static int stash_working_tree(struct stash_info *info, const struct pathspec *ps
done:
discard_index(&istate);
- UNLEAK(rev);
- object_array_clear(&rev.pending);
- clear_pathspec(&rev.prune_data);
+ release_revisions(&rev);
strbuf_release(&diff_output);
remove_path(stash_index_path.buf);
return ret;
@@ -1410,9 +1435,9 @@ done:
static int create_stash(int argc, const char **argv, const char *prefix)
{
- int ret = 0;
+ int ret;
struct strbuf stash_msg_buf = STRBUF_INIT;
- struct stash_info info;
+ struct stash_info info = STASH_INFO_INIT;
struct pathspec ps;
/* Starting with argv[1], since argv[0] is "create" */
@@ -1427,6 +1452,7 @@ static int create_stash(int argc, const char **argv, const char *prefix)
if (!ret)
printf_ln("%s", oid_to_hex(&info.w_commit));
+ free_stash_info(&info);
strbuf_release(&stash_msg_buf);
return ret;
}
@@ -1435,7 +1461,7 @@ static int do_push_stash(const struct pathspec *ps, const char *stash_msg, int q
int keep_index, int patch_mode, int include_untracked, int only_staged)
{
int ret = 0;
- struct stash_info info;
+ struct stash_info info = STASH_INFO_INIT;
struct strbuf patch = STRBUF_INIT;
struct strbuf stash_msg_buf = STRBUF_INIT;
struct strbuf untracked_files = STRBUF_INIT;
@@ -1525,7 +1551,7 @@ static int do_push_stash(const struct pathspec *ps, const char *stash_msg, int q
cp.git_cmd = 1;
if (startup_info->original_cwd) {
cp.dir = startup_info->original_cwd;
- strvec_pushf(&cp.env_array, "%s=%s",
+ strvec_pushf(&cp.env, "%s=%s",
GIT_WORK_TREE_ENVIRONMENT,
the_repository->worktree);
}
@@ -1634,6 +1660,7 @@ static int do_push_stash(const struct pathspec *ps, const char *stash_msg, int q
}
done:
+ free_stash_info(&info);
strbuf_release(&stash_msg_buf);
return ret;
}
@@ -1770,6 +1797,9 @@ int cmd_stash(int argc, const char **argv, const char *prefix)
argc = parse_options(argc, argv, prefix, options, git_stash_usage,
PARSE_OPT_KEEP_UNKNOWN | PARSE_OPT_KEEP_DASHDASH);
+ prepare_repo_settings(the_repository);
+ the_repository->settings.command_requires_full_index = 0;
+
index_file = get_index_file();
strbuf_addf(&stash_index_path, "%s.stash.%" PRIuMAX, index_file,
(uintmax_t)pid);
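Annotation (not part of the patch): the stash hunks replace per-error-path free_stash_info() calls with a STASH_INFO_INIT initializer plus one cleanup label, so the release function is safe to call even when get_stash_info() fails before filling anything in. A standalone sketch of that idiom (not Git code, using only the C standard library) is shown here.

/*
 * Standalone sketch (not Git code) of the INIT-macro plus single
 * cleanup-label idiom adopted by the stash hunks above.
 */
#include <stdlib.h>
#include <string.h>

struct info { char *revision; };
#define INFO_INIT { .revision = NULL }

static int fill_info(struct info *info, const char *arg)
{
	if (!arg)
		return -1;	/* early failure: info is still releasable */
	info->revision = strdup(arg);
	return info->revision ? 0 : -1;
}

static void release_info(struct info *info)
{
	free(info->revision);	/* free(NULL) is a no-op */
	info->revision = NULL;
}

int use_info(const char *arg)
{
	int ret = -1;
	struct info info = INFO_INIT;

	if (fill_info(&info, arg))
		goto cleanup;
	ret = 0;
cleanup:
	release_info(&info);
	return ret;
}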
diff --git a/builtin/submodule--helper.c b/builtin/submodule--helper.c
index 2c87ef9..c597df7 100644
--- a/builtin/submodule--helper.c
+++ b/builtin/submodule--helper.c
@@ -72,135 +72,6 @@ static char *get_default_remote(void)
return repo_get_default_remote(the_repository);
}
-static int starts_with_dot_slash(const char *str)
-{
- return str[0] == '.' && is_dir_sep(str[1]);
-}
-
-static int starts_with_dot_dot_slash(const char *str)
-{
- return str[0] == '.' && str[1] == '.' && is_dir_sep(str[2]);
-}
-
-/*
- * Returns 1 if it was the last chop before ':'.
- */
-static int chop_last_dir(char **remoteurl, int is_relative)
-{
- char *rfind = find_last_dir_sep(*remoteurl);
- if (rfind) {
- *rfind = '\0';
- return 0;
- }
-
- rfind = strrchr(*remoteurl, ':');
- if (rfind) {
- *rfind = '\0';
- return 1;
- }
-
- if (is_relative || !strcmp(".", *remoteurl))
- die(_("cannot strip one component off url '%s'"),
- *remoteurl);
-
- free(*remoteurl);
- *remoteurl = xstrdup(".");
- return 0;
-}
-
-/*
- * The `url` argument is the URL that navigates to the submodule origin
- * repo. When relative, this URL is relative to the superproject origin
- * URL repo. The `up_path` argument, if specified, is the relative
- * path that navigates from the submodule working tree to the superproject
- * working tree. Returns the origin URL of the submodule.
- *
- * Return either an absolute URL or filesystem path (if the superproject
- * origin URL is an absolute URL or filesystem path, respectively) or a
- * relative file system path (if the superproject origin URL is a relative
- * file system path).
- *
- * When the output is a relative file system path, the path is either
- * relative to the submodule working tree, if up_path is specified, or to
- * the superproject working tree otherwise.
- *
- * NEEDSWORK: This works incorrectly on the domain and protocol part.
- * remote_url url outcome expectation
- * http://a.com/b ../c http://a.com/c as is
- * http://a.com/b/ ../c http://a.com/c same as previous line, but
- * ignore trailing slash in url
- * http://a.com/b ../../c http://c error out
- * http://a.com/b ../../../c http:/c error out
- * http://a.com/b ../../../../c http:c error out
- * http://a.com/b ../../../../../c .:c error out
- * NEEDSWORK: Given how chop_last_dir() works, this function is broken
- * when a local part has a colon in its path component, too.
- */
-static char *relative_url(const char *remote_url,
- const char *url,
- const char *up_path)
-{
- int is_relative = 0;
- int colonsep = 0;
- char *out;
- char *remoteurl = xstrdup(remote_url);
- struct strbuf sb = STRBUF_INIT;
- size_t len = strlen(remoteurl);
-
- if (is_dir_sep(remoteurl[len-1]))
- remoteurl[len-1] = '\0';
-
- if (!url_is_local_not_ssh(remoteurl) || is_absolute_path(remoteurl))
- is_relative = 0;
- else {
- is_relative = 1;
- /*
- * Prepend a './' to ensure all relative
- * remoteurls start with './' or '../'
- */
- if (!starts_with_dot_slash(remoteurl) &&
- !starts_with_dot_dot_slash(remoteurl)) {
- strbuf_reset(&sb);
- strbuf_addf(&sb, "./%s", remoteurl);
- free(remoteurl);
- remoteurl = strbuf_detach(&sb, NULL);
- }
- }
- /*
- * When the url starts with '../', remove that and the
- * last directory in remoteurl.
- */
- while (url) {
- if (starts_with_dot_dot_slash(url)) {
- url += 3;
- colonsep |= chop_last_dir(&remoteurl, is_relative);
- } else if (starts_with_dot_slash(url))
- url += 2;
- else
- break;
- }
- strbuf_reset(&sb);
- strbuf_addf(&sb, "%s%s%s", remoteurl, colonsep ? ":" : "/", url);
- if (ends_with(url, "/"))
- strbuf_setlen(&sb, sb.len - 1);
- free(remoteurl);
-
- if (starts_with_dot_slash(sb.buf))
- out = xstrdup(sb.buf + 2);
- else
- out = xstrdup(sb.buf);
-
- if (!up_path || !is_relative) {
- strbuf_release(&sb);
- return out;
- }
-
- strbuf_reset(&sb);
- strbuf_addf(&sb, "%s%s", up_path, out);
- free(out);
- return strbuf_detach(&sb, NULL);
-}
-
static char *resolve_relative_url(const char *rel_url, const char *up_path, int quiet)
{
char *remoteurl, *resolved_url;
@@ -292,7 +163,7 @@ static char *compute_rev_name(const char *sub_path, const char* object_id)
for (d = describe_argv; *d; d++) {
struct child_process cp = CHILD_PROCESS_INIT;
- prepare_submodule_repo_env(&cp.env_array);
+ prepare_submodule_repo_env(&cp.env);
cp.dir = sub_path;
cp.git_cmd = 1;
cp.no_stderr = 1;
@@ -479,7 +350,7 @@ static void runcommand_in_submodule_cb(const struct cache_entry *list_item,
if (!is_submodule_populated_gently(path, NULL))
goto cleanup;
- prepare_submodule_repo_env(&cp.env_array);
+ prepare_submodule_repo_env(&cp.env);
/*
* For the purpose of executing <command> in the submodule,
@@ -499,12 +370,12 @@ static void runcommand_in_submodule_cb(const struct cache_entry *list_item,
char *toplevel = xgetcwd();
struct strbuf sb = STRBUF_INIT;
- strvec_pushf(&cp.env_array, "name=%s", sub->name);
- strvec_pushf(&cp.env_array, "sm_path=%s", path);
- strvec_pushf(&cp.env_array, "displaypath=%s", displaypath);
- strvec_pushf(&cp.env_array, "sha1=%s",
+ strvec_pushf(&cp.env, "name=%s", sub->name);
+ strvec_pushf(&cp.env, "sm_path=%s", path);
+ strvec_pushf(&cp.env, "displaypath=%s", displaypath);
+ strvec_pushf(&cp.env, "sha1=%s",
oid_to_hex(ce_oid));
- strvec_pushf(&cp.env_array, "toplevel=%s", toplevel);
+ strvec_pushf(&cp.env, "toplevel=%s", toplevel);
/*
* Since the path variable was accessible from the script
@@ -513,7 +384,7 @@ static void runcommand_in_submodule_cb(const struct cache_entry *list_item,
* on windows. And since environment variables are
* case-insensitive in windows, it interferes with the
* existing PATH variable. Hence, to avoid that, we expose
- * path via the args strvec and not via env_array.
+ * path via the args strvec and not via env.
*/
sq_quote_buf(&sb, path);
strvec_pushf(&cp.args, "path=%s; %s",
@@ -536,7 +407,7 @@ static void runcommand_in_submodule_cb(const struct cache_entry *list_item,
cpr.git_cmd = 1;
cpr.dir = path;
- prepare_submodule_repo_env(&cpr.env_array);
+ prepare_submodule_repo_env(&cpr.env);
strvec_pushl(&cpr.args, "--super-prefix", NULL);
strvec_pushf(&cpr.args, "%s/", displaypath);
@@ -592,6 +463,18 @@ static int module_foreach(int argc, const char **argv, const char *prefix)
return 0;
}
+static int starts_with_dot_slash(const char *const path)
+{
+ return path_match_flags(path, PATH_MATCH_STARTS_WITH_DOT_SLASH |
+ PATH_MATCH_XPLATFORM);
+}
+
+static int starts_with_dot_dot_slash(const char *const path)
+{
+ return path_match_flags(path, PATH_MATCH_STARTS_WITH_DOT_DOT_SLASH |
+ PATH_MATCH_XPLATFORM);
+}
+
struct init_cb {
const char *prefix;
const char *superprefix;
@@ -766,7 +649,7 @@ static void status_submodule(const char *path, const struct object_id *ce_oid,
{
char *displaypath;
struct strvec diff_files_args = STRVEC_INIT;
- struct rev_info rev;
+ struct rev_info rev = REV_INFO_INIT;
int diff_files_result;
struct strbuf buf = STRBUF_INIT;
const char *git_dir;
@@ -833,7 +716,7 @@ static void status_submodule(const char *path, const struct object_id *ce_oid,
cpr.git_cmd = 1;
cpr.dir = path;
- prepare_submodule_repo_env(&cpr.env_array);
+ prepare_submodule_repo_env(&cpr.env);
strvec_push(&cpr.args, "--super-prefix");
strvec_pushf(&cpr.args, "%s/", displaypath);
@@ -853,6 +736,7 @@ static void status_submodule(const char *path, const struct object_id *ce_oid,
cleanup:
strvec_clear(&diff_files_args);
free(displaypath);
+ release_revisions(&rev);
}
static void status_submodule_cb(const struct cache_entry *list_item,
@@ -955,7 +839,7 @@ static char *verify_submodule_committish(const char *sm_path,
cp_rev_parse.git_cmd = 1;
cp_rev_parse.dir = sm_path;
- prepare_submodule_repo_env(&cp_rev_parse.env_array);
+ prepare_submodule_repo_env(&cp_rev_parse.env);
strvec_pushl(&cp_rev_parse.args, "rev-parse", "-q", "--short", NULL);
strvec_pushf(&cp_rev_parse.args, "%s^0", committish);
strvec_push(&cp_rev_parse.args, "--");
@@ -996,7 +880,7 @@ static void print_submodule_summary(struct summary_cb *info, char *errmsg,
cp_log.git_cmd = 1;
cp_log.dir = p->sm_path;
- prepare_submodule_repo_env(&cp_log.env_array);
+ prepare_submodule_repo_env(&cp_log.env);
strvec_pushl(&cp_log.args, "log", NULL);
if (S_ISGITLINK(p->mod_src) && S_ISGITLINK(p->mod_dst)) {
@@ -1113,7 +997,7 @@ static void generate_submodule_summary(struct summary_cb *info,
cp_rev_list.git_cmd = 1;
cp_rev_list.dir = p->sm_path;
- prepare_submodule_repo_env(&cp_rev_list.env_array);
+ prepare_submodule_repo_env(&cp_rev_list.env);
if (!capture_command(&cp_rev_list, &sb_rev_list, 0))
total_commits = atoi(sb_rev_list.buf);
@@ -1231,6 +1115,7 @@ static int compute_summary_module_list(struct object_id *head_oid,
struct strvec diff_args = STRVEC_INIT;
struct rev_info rev;
struct module_cb_list list = MODULE_CB_LIST_INIT;
+ int ret = 0;
strvec_push(&diff_args, get_diff_cmd(diff_cmd));
if (info->cached)
@@ -1256,11 +1141,13 @@ static int compute_summary_module_list(struct object_id *head_oid,
setup_work_tree();
if (read_cache_preload(&rev.diffopt.pathspec) < 0) {
perror("read_cache_preload");
- return -1;
+ ret = -1;
+ goto cleanup;
}
} else if (read_cache() < 0) {
perror("read_cache");
- return -1;
+ ret = -1;
+ goto cleanup;
}
if (diff_cmd == DIFF_INDEX)
@@ -1268,8 +1155,10 @@ static int compute_summary_module_list(struct object_id *head_oid,
else
run_diff_files(&rev, 0);
prepare_submodule_summary(info, &list);
+cleanup:
strvec_clear(&diff_args);
- return 0;
+ release_revisions(&rev);
+ return ret;
}
static int module_summary(int argc, const char **argv, const char *prefix)
@@ -1414,7 +1303,7 @@ static void sync_submodule(const char *path, const char *prefix,
cpr.git_cmd = 1;
cpr.dir = path;
- prepare_submodule_repo_env(&cpr.env_array);
+ prepare_submodule_repo_env(&cpr.env);
strvec_push(&cpr.args, "--super-prefix");
strvec_pushf(&cpr.args, "%s/", displaypath);
@@ -1819,7 +1708,7 @@ static int clone_submodule(struct module_clone_data *clone_data)
strvec_push(&cp.args, clone_data->path);
cp.git_cmd = 1;
- prepare_submodule_repo_env(&cp.env_array);
+ prepare_submodule_repo_env(&cp.env);
cp.no_stdin = 1;
if(run_command(&cp))
@@ -2026,7 +1915,6 @@ struct update_data {
.references = STRING_LIST_INIT_DUP, \
.single_branch = -1, \
.max_jobs = 1, \
- .warn_if_uninitialized = 1, \
}
static void next_submodule_warn_missing(struct submodule_update_clone *suc,
@@ -2295,7 +2183,7 @@ static int is_tip_reachable(const char *path, struct object_id *oid)
cp.no_stderr = 1;
strvec_pushl(&cp.args, "rev-list", "-n", "1", hex, "--not", "--all", NULL);
- prepare_submodule_repo_env(&cp.env_array);
+ prepare_submodule_repo_env(&cp.env);
if (capture_command(&cp, &rev, GIT_MAX_HEXSZ + 1) || rev.len)
return 0;
@@ -2307,7 +2195,7 @@ static int fetch_in_submodule(const char *module_path, int depth, int quiet, str
{
struct child_process cp = CHILD_PROCESS_INIT;
- prepare_submodule_repo_env(&cp.env_array);
+ prepare_submodule_repo_env(&cp.env);
cp.git_cmd = 1;
cp.dir = xstrdup(module_path);
@@ -2320,6 +2208,7 @@ static int fetch_in_submodule(const char *module_path, int depth, int quiet, str
char *hex = oid_to_hex(oid);
char *remote = get_default_remote();
strvec_pushl(&cp.args, remote, hex, NULL);
+ free(remote);
}
return run_command(&cp);
@@ -2364,7 +2253,7 @@ static int run_update_command(struct update_data *ud, int subforce)
strvec_push(&cp.args, oid);
cp.dir = xstrdup(ud->sm_path);
- prepare_submodule_repo_env(&cp.env_array);
+ prepare_submodule_repo_env(&cp.env);
if (run_command(&cp)) {
switch (ud->update_strategy.type) {
case SM_UPDATE_CHECKOUT:
@@ -2630,7 +2519,7 @@ static int update_submodule(struct update_data *update_data)
cp.dir = update_data->sm_path;
cp.git_cmd = 1;
- prepare_submodule_repo_env(&cp.env_array);
+ prepare_submodule_repo_env(&cp.env);
update_data_to_args(&next, &cp.args);
/* die() if child process die()'d */
@@ -3123,9 +3012,9 @@ static void append_fetch_remotes(struct strbuf *msg, const char *git_dir_path)
struct strbuf sb_remote_out = STRBUF_INIT;
cp_remote.git_cmd = 1;
- strvec_pushf(&cp_remote.env_array,
+ strvec_pushf(&cp_remote.env,
"GIT_DIR=%s", git_dir_path);
- strvec_push(&cp_remote.env_array, "GIT_WORK_TREE=.");
+ strvec_push(&cp_remote.env, "GIT_WORK_TREE=.");
strvec_pushl(&cp_remote.args, "remote", "-v", NULL);
if (!capture_command(&cp_remote, &sb_remote_out, 0)) {
char *next_line;
@@ -3209,7 +3098,7 @@ static int add_submodule(const struct add_data *add_data)
if (clone_submodule(&clone_data))
return -1;
- prepare_submodule_repo_env(&cp.env_array);
+ prepare_submodule_repo_env(&cp.env);
cp.git_cmd = 1;
cp.dir = add_data->sm_path;
/*
@@ -3378,7 +3267,7 @@ static int module_add(int argc, const char **argv, const char *prefix)
N_("reference repository")),
OPT_BOOL(0, "dissociate", &dissociate, N_("borrow the objects from reference repositories")),
OPT_STRING(0, "name", &add_data.sm_name, N_("name"),
- N_("sets the submodule’s name to the given string "
+ N_("sets the submodule's name to the given string "
"instead of defaulting to its path")),
OPT_INTEGER(0, "depth", &add_data.depth, N_("depth for shallow clones")),
OPT_END()
diff --git a/builtin/tag.c b/builtin/tag.c
index e5a8f85..75dece0 100644
--- a/builtin/tag.c
+++ b/builtin/tag.c
@@ -364,7 +364,7 @@ static void create_reflog_msg(const struct object_id *oid, struct strbuf *sb)
strbuf_addstr(sb, "object of unknown type");
break;
case OBJ_COMMIT:
- if ((buf = read_object_file(oid, &type, &size)) != NULL) {
+ if ((buf = read_object_file(oid, &type, &size))) {
subject_len = find_commit_subject(buf, &subject_start);
strbuf_insert(sb, sb->len, subject_start, subject_len);
} else {
@@ -372,7 +372,7 @@ static void create_reflog_msg(const struct object_id *oid, struct strbuf *sb)
}
free(buf);
- if ((c = lookup_commit_reference(the_repository, oid)) != NULL)
+ if ((c = lookup_commit_reference(the_repository, oid)))
strbuf_addf(sb, ", %s", show_date(c->date, 0, DATE_MODE(SHORT)));
break;
case OBJ_TREE:
diff --git a/builtin/unpack-objects.c b/builtin/unpack-objects.c
index dbeb068..56d05e2 100644
--- a/builtin/unpack-objects.c
+++ b/builtin/unpack-objects.c
@@ -1,5 +1,6 @@
#include "builtin.h"
#include "cache.h"
+#include "bulk-checkin.h"
#include "config.h"
#include "object-store.h"
#include "object.h"
@@ -503,10 +504,12 @@ static void unpack_all(void)
if (!quiet)
progress = start_progress(_("Unpacking objects"), nr_objects);
CALLOC_ARRAY(obj_list, nr_objects);
+ begin_odb_transaction();
for (i = 0; i < nr_objects; i++) {
unpack_one(i);
display_progress(progress, i + 1);
}
+ end_odb_transaction();
stop_progress(&progress);
if (delta_list)
diff --git a/builtin/update-index.c b/builtin/update-index.c
index 876112a..b622499 100644
--- a/builtin/update-index.c
+++ b/builtin/update-index.c
@@ -5,6 +5,7 @@
*/
#define USE_THE_INDEX_COMPATIBILITY_MACROS
#include "cache.h"
+#include "bulk-checkin.h"
#include "config.h"
#include "lockfile.h"
#include "quote.h"
@@ -57,6 +58,14 @@ static void report(const char *fmt, ...)
if (!verbose)
return;
+ /*
+ * It is possible, though unlikely, that a caller could use the verbose
+ * output to synchronize with addition of objects to the object
+ * database. The current implementation of ODB transactions leaves
+ * objects invisible while a transaction is active, so flush the
+ * transaction here before reporting a change made by update-index.
+ */
+ flush_odb_transaction();
va_start(vp, fmt);
vprintf(fmt, vp);
putchar('\n');
@@ -1116,6 +1125,12 @@ int cmd_update_index(int argc, const char **argv, const char *prefix)
*/
parse_options_start(&ctx, argc, argv, prefix,
options, PARSE_OPT_STOP_AT_NON_OPTION);
+
+ /*
+ * Allow the object layer to optimize adding multiple objects in
+ * a batch.
+ */
+ begin_odb_transaction();
while (ctx.argc) {
if (parseopt_state != PARSE_OPT_DONE)
parseopt_state = parse_options_step(&ctx, options,
@@ -1190,6 +1205,11 @@ int cmd_update_index(int argc, const char **argv, const char *prefix)
strbuf_release(&buf);
}
+ /*
+ * By now we have added all of the new objects
+ */
+ end_odb_transaction();
+
if (split_index > 0) {
if (git_config_get_split_index() == 0)
warning(_("core.splitIndex is set to false; "
@@ -1237,6 +1257,22 @@ int cmd_update_index(int argc, const char **argv, const char *prefix)
if (fsmonitor > 0) {
enum fsmonitor_mode fsm_mode = fsm_settings__get_mode(r);
+ enum fsmonitor_reason reason = fsm_settings__get_reason(r);
+
+ /*
+ * The user wants to turn on FSMonitor using the command
+ * line argument. (We don't know (or care) whether that
+ * is the IPC or HOOK version.)
+ *
+ * Use one of the __get routines to force load the FSMonitor
+ * config settings into the repo-settings. That will detect
+ * whether the file system is compatible so that we can stop
+ * here with a nice error message.
+ */
+ if (reason > FSMONITOR_REASON_OK)
+ die("%s",
+ fsm_settings__get_incompatible_msg(r, reason));
+
if (fsm_mode == FSMONITOR_MODE_DISABLED) {
warning(_("core.fsmonitor is unset; "
"set it if you really want to "
diff --git a/builtin/worktree.c b/builtin/worktree.c
index 8b32cd1..cd62eef 100644
--- a/builtin/worktree.c
+++ b/builtin/worktree.c
@@ -301,7 +301,7 @@ static int checkout_worktree(const struct add_opts *opts,
strvec_pushl(&cp.args, "reset", "--hard", "--no-recurse-submodules", NULL);
if (opts->quiet)
strvec_push(&cp.args, "--quiet");
- strvec_pushv(&cp.env_array, child_env->v);
+ strvec_pushv(&cp.env, child_env->v);
return run_command(&cp);
}
@@ -433,7 +433,7 @@ static int add_worktree(const char *path, const char *refname,
strvec_push(&cp.args, "--quiet");
}
- strvec_pushv(&cp.env_array, child_env.v);
+ strvec_pushv(&cp.env, child_env.v);
ret = run_command(&cp);
if (ret)
goto done;
@@ -989,9 +989,9 @@ static void check_clean_worktree(struct worktree *wt,
validate_no_submodules(wt);
child_process_init(&cp);
- strvec_pushf(&cp.env_array, "%s=%s/.git",
+ strvec_pushf(&cp.env, "%s=%s/.git",
GIT_DIR_ENVIRONMENT, wt->path);
- strvec_pushf(&cp.env_array, "%s=%s",
+ strvec_pushf(&cp.env, "%s=%s",
GIT_WORK_TREE_ENVIRONMENT, wt->path);
strvec_pushl(&cp.args, "status",
"--porcelain", "--ignore-submodules=none",
diff --git a/bulk-checkin.c b/bulk-checkin.c
index 6d6c371..98ec893 100644
--- a/bulk-checkin.c
+++ b/bulk-checkin.c
@@ -3,16 +3,21 @@
*/
#include "cache.h"
#include "bulk-checkin.h"
+#include "lockfile.h"
#include "repository.h"
#include "csum-file.h"
#include "pack.h"
#include "strbuf.h"
+#include "string-list.h"
+#include "tmp-objdir.h"
#include "packfile.h"
#include "object-store.h"
-static struct bulk_checkin_state {
- unsigned plugged:1;
+static int odb_transaction_nesting;
+static struct tmp_objdir *bulk_fsync_objdir;
+
+static struct bulk_checkin_packfile {
char *pack_tmp_name;
struct hashfile *f;
off_t offset;
@@ -21,7 +26,7 @@ static struct bulk_checkin_state {
struct pack_idx_entry **written;
uint32_t alloc_written;
uint32_t nr_written;
-} state;
+} bulk_checkin_packfile;
static void finish_tmp_packfile(struct strbuf *basename,
const char *pack_tmp_name,
@@ -33,13 +38,13 @@ static void finish_tmp_packfile(struct strbuf *basename,
char *idx_tmp_name = NULL;
stage_tmp_packfiles(basename, pack_tmp_name, written_list, nr_written,
- pack_idx_opts, hash, &idx_tmp_name);
+ NULL, pack_idx_opts, hash, &idx_tmp_name);
rename_tmp_packfile_idx(basename, &idx_tmp_name);
free(idx_tmp_name);
}
-static void finish_bulk_checkin(struct bulk_checkin_state *state)
+static void flush_bulk_checkin_packfile(struct bulk_checkin_packfile *state)
{
unsigned char hash[GIT_MAX_RAWSZ];
struct strbuf packname = STRBUF_INIT;
@@ -80,7 +85,41 @@ clear_exit:
reprepare_packed_git(the_repository);
}
-static int already_written(struct bulk_checkin_state *state, struct object_id *oid)
+/*
+ * Cleanup after batch-mode fsync_object_files.
+ */
+static void flush_batch_fsync(void)
+{
+ struct strbuf temp_path = STRBUF_INIT;
+ struct tempfile *temp;
+
+ if (!bulk_fsync_objdir)
+ return;
+
+ /*
+ * Issue a full hardware flush against a temporary file to ensure
+ * that all objects are durable before any renames occur. The code in
+ * fsync_loose_object_bulk_checkin has already issued a writeout
+ * request, but it has not flushed any writeback cache in the storage
+ * hardware or any filesystem logs. This fsync call acts as a barrier
+ * to ensure that the data in each new object file is durable before
+ * the final name is visible.
+ */
+ strbuf_addf(&temp_path, "%s/bulk_fsync_XXXXXX", get_object_directory());
+ temp = xmks_tempfile(temp_path.buf);
+ fsync_or_die(get_tempfile_fd(temp), get_tempfile_path(temp));
+ delete_tempfile(&temp);
+ strbuf_release(&temp_path);
+
+ /*
+ * Make the object files visible in the primary ODB after their data is
+ * fully durable.
+ */
+ tmp_objdir_migrate(bulk_fsync_objdir);
+ bulk_fsync_objdir = NULL;
+}
+
+static int already_written(struct bulk_checkin_packfile *state, struct object_id *oid)
{
int i;
@@ -112,7 +151,7 @@ static int already_written(struct bulk_checkin_state *state, struct object_id *o
* status before calling us just in case we ask it to call us again
* with a new pack.
*/
-static int stream_to_pack(struct bulk_checkin_state *state,
+static int stream_to_pack(struct bulk_checkin_packfile *state,
git_hash_ctx *ctx, off_t *already_hashed_to,
int fd, size_t size, enum object_type type,
const char *path, unsigned flags)
@@ -189,7 +228,7 @@ static int stream_to_pack(struct bulk_checkin_state *state,
}
/* Lazily create backing packfile for the state */
-static void prepare_to_stream(struct bulk_checkin_state *state,
+static void prepare_to_stream(struct bulk_checkin_packfile *state,
unsigned flags)
{
if (!(flags & HASH_WRITE_OBJECT) || state->f)
@@ -204,7 +243,7 @@ static void prepare_to_stream(struct bulk_checkin_state *state,
die_errno("unable to write pack header");
}
-static int deflate_to_pack(struct bulk_checkin_state *state,
+static int deflate_to_pack(struct bulk_checkin_packfile *state,
struct object_id *result_oid,
int fd, size_t size,
enum object_type type, const char *path,
@@ -251,7 +290,7 @@ static int deflate_to_pack(struct bulk_checkin_state *state,
BUG("should not happen");
hashfile_truncate(state->f, &checkpoint);
state->offset = checkpoint.offset;
- finish_bulk_checkin(state);
+ flush_bulk_checkin_packfile(state);
if (lseek(fd, seekback, SEEK_SET) == (off_t) -1)
return error("cannot seek back");
}
@@ -274,25 +313,67 @@ static int deflate_to_pack(struct bulk_checkin_state *state,
return 0;
}
+void prepare_loose_object_bulk_checkin(void)
+{
+ /*
+ * We lazily create the temporary object directory
+ * the first time an object might be added, since
+ * callers may not know whether any objects will be
+ * added at the time they call begin_odb_transaction.
+ */
+ if (!odb_transaction_nesting || bulk_fsync_objdir)
+ return;
+
+ bulk_fsync_objdir = tmp_objdir_create("bulk-fsync");
+ if (bulk_fsync_objdir)
+ tmp_objdir_replace_primary_odb(bulk_fsync_objdir, 0);
+}
+
+void fsync_loose_object_bulk_checkin(int fd, const char *filename)
+{
+ /*
+ * If we have an active ODB transaction, we issue a call that
+ * cleans the filesystem page cache but avoids a hardware flush
+ * command. Later on we will issue a single hardware flush
+ * before renaming the objects to their final names as part of
+ * flush_batch_fsync.
+ */
+ if (!bulk_fsync_objdir ||
+ git_fsync(fd, FSYNC_WRITEOUT_ONLY) < 0) {
+ fsync_or_die(fd, filename);
+ }
+}
+
int index_bulk_checkin(struct object_id *oid,
int fd, size_t size, enum object_type type,
const char *path, unsigned flags)
{
- int status = deflate_to_pack(&state, oid, fd, size, type,
+ int status = deflate_to_pack(&bulk_checkin_packfile, oid, fd, size, type,
path, flags);
- if (!state.plugged)
- finish_bulk_checkin(&state);
+ if (!odb_transaction_nesting)
+ flush_bulk_checkin_packfile(&bulk_checkin_packfile);
return status;
}
-void plug_bulk_checkin(void)
+void begin_odb_transaction(void)
{
- state.plugged = 1;
+ odb_transaction_nesting += 1;
}
-void unplug_bulk_checkin(void)
+void flush_odb_transaction(void)
{
- state.plugged = 0;
- if (state.f)
- finish_bulk_checkin(&state);
+ flush_batch_fsync();
+ flush_bulk_checkin_packfile(&bulk_checkin_packfile);
+}
+
+void end_odb_transaction(void)
+{
+ odb_transaction_nesting -= 1;
+ if (odb_transaction_nesting < 0)
+ BUG("Unbalanced ODB transaction nesting");
+
+ if (odb_transaction_nesting)
+ return;
+
+ flush_odb_transaction();
}
diff --git a/bulk-checkin.h b/bulk-checkin.h
index b26f3dc..8281b9c 100644
--- a/bulk-checkin.h
+++ b/bulk-checkin.h
@@ -6,11 +6,34 @@
#include "cache.h"
+void prepare_loose_object_bulk_checkin(void);
+void fsync_loose_object_bulk_checkin(int fd, const char *filename);
+
int index_bulk_checkin(struct object_id *oid,
int fd, size_t size, enum object_type type,
const char *path, unsigned flags);
-void plug_bulk_checkin(void);
-void unplug_bulk_checkin(void);
+/*
+ * Tell the object database to optimize for adding
+ * multiple objects. end_odb_transaction must be called
+ * to make new objects visible. Transactions can be nested,
+ * and objects are only visible after the outermost transaction
+ * is complete or the transaction is flushed.
+ */
+void begin_odb_transaction(void);
+
+/*
+ * Make any objects that are currently part of a pending object
+ * database transaction visible. It is valid to call this function
+ * even if no transaction is active.
+ */
+void flush_odb_transaction(void);
+
+/*
+ * Tell the object database to make any objects from the
+ * current transaction visible if this is the final nested
+ * transaction.
+ */
+void end_odb_transaction(void);
#endif
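
The transaction API declared above is meant to bracket many object additions so the bulk-checkin pack (and any batched fsync) is flushed only once. A rough, illustrative sketch of a caller — not a call site from this patch — assuming the usual cache.h helpers (xopen(), die_errno()) and the HASH_WRITE_OBJECT flag:

/*
 * Sketch: stream several large files into the object database under
 * one transaction; the pack is flushed and the objects become visible
 * when the outermost transaction ends.
 */
static void bulk_add_files(const char **paths, int nr)
{
	int i;

	begin_odb_transaction();
	for (i = 0; i < nr; i++) {
		struct object_id oid;
		struct stat st;
		int fd = xopen(paths[i], O_RDONLY);

		if (fstat(fd, &st) < 0)
			die_errno("could not stat '%s'", paths[i]);
		/* streams the file into the bulk-checkin packfile */
		if (index_bulk_checkin(&oid, fd, st.st_size, OBJ_BLOB,
				       paths[i], HASH_WRITE_OBJECT))
			die("could not index '%s'", paths[i]);
		close(fd);
	}
	end_odb_transaction();	/* outermost end: objects become visible */
}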
diff --git a/bundle.c b/bundle.c
index d50cfb5..0208e6d 100644
--- a/bundle.c
+++ b/bundle.c
@@ -66,8 +66,8 @@ static int parse_bundle_signature(struct bundle_header *header, const char *line
return -1;
}
-static int parse_bundle_header(int fd, struct bundle_header *header,
- const char *report_path)
+int read_bundle_header_fd(int fd, struct bundle_header *header,
+ const char *report_path)
{
struct strbuf buf = STRBUF_INIT;
int status = 0;
@@ -143,7 +143,7 @@ int read_bundle_header(const char *path, struct bundle_header *header)
if (fd < 0)
return error(_("could not open '%s'"), path);
- return parse_bundle_header(fd, header, path);
+ return read_bundle_header_fd(fd, header, path);
}
int is_bundle(const char *path, int quiet)
@@ -153,7 +153,7 @@ int is_bundle(const char *path, int quiet)
if (fd < 0)
return 0;
- fd = parse_bundle_header(fd, &header, quiet ? NULL : path);
+ fd = read_bundle_header_fd(fd, &header, quiet ? NULL : path);
if (fd >= 0)
close(fd);
bundle_header_release(&header);
@@ -196,14 +196,16 @@ int verify_bundle(struct repository *r,
* to be verbose about the errors
*/
struct string_list *p = &header->prerequisites;
- struct rev_info revs;
+ struct rev_info revs = REV_INFO_INIT;
const char *argv[] = {NULL, "--all", NULL};
struct commit *commit;
int i, ret = 0, req_nr;
const char *message = _("Repository lacks these prerequisite commits:");
- if (!r || !r->objects || !r->objects->odb)
- return error(_("need a repository to verify a bundle"));
+ if (!r || !r->objects || !r->objects->odb) {
+ ret = error(_("need a repository to verify a bundle"));
+ goto cleanup;
+ }
repo_init_revisions(r, &revs, NULL);
for (i = 0; i < p->nr; i++) {
@@ -221,7 +223,7 @@ int verify_bundle(struct repository *r,
error("%s %s", oid_to_hex(oid), name);
}
if (revs.pending.nr != p->nr)
- return ret;
+ goto cleanup;
req_nr = revs.pending.nr;
setup_revisions(2, argv, &revs, NULL);
@@ -284,6 +286,8 @@ int verify_bundle(struct repository *r,
printf_ln("The bundle uses this filter: %s",
list_objects_filter_spec(&header->filter));
}
+cleanup:
+ release_revisions(&revs);
return ret;
}
diff --git a/bundle.h b/bundle.h
index 7fef210..0c052f5 100644
--- a/bundle.h
+++ b/bundle.h
@@ -24,6 +24,8 @@ void bundle_header_release(struct bundle_header *header);
int is_bundle(const char *path, int quiet);
int read_bundle_header(const char *path, struct bundle_header *header);
+int read_bundle_header_fd(int fd, struct bundle_header *header,
+ const char *report_path);
int create_bundle(struct repository *r, const char *path,
int argc, const char **argv, struct strvec *pack_options,
int version);
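
Because read_bundle_header_fd() is now exported, a caller that already holds an open descriptor (for example standard input) can parse a bundle header without a path on disk. A hypothetical sketch, assuming a bundle_header the caller has initialized (e.g. with BUNDLE_HEADER_INIT):

static int peek_bundle_header_from_stdin(struct bundle_header *header)
{
	/* returns the fd on success, a negative value on error */
	int fd = read_bundle_header_fd(0, header, "<stdin>");

	if (fd < 0)
		return -1;	/* error() has already been reported */
	close(fd);
	return 0;
}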
diff --git a/cache-tree.c b/cache-tree.c
index 6752f69..56db0b5 100644
--- a/cache-tree.c
+++ b/cache-tree.c
@@ -3,6 +3,7 @@
#include "tree.h"
#include "tree-walk.h"
#include "cache-tree.h"
+#include "bulk-checkin.h"
#include "object-store.h"
#include "replace-object.h"
#include "promisor-remote.h"
@@ -474,8 +475,10 @@ int cache_tree_update(struct index_state *istate, int flags)
trace_performance_enter();
trace2_region_enter("cache_tree", "update", the_repository);
+ begin_odb_transaction();
i = update_one(istate->cache_tree, istate->cache, istate->cache_nr,
"", 0, &skip, flags);
+ end_odb_transaction();
trace2_region_leave("cache_tree", "update", the_repository);
trace_performance_leave("cache_tree_update");
if (i < 0)
@@ -692,14 +695,14 @@ struct tree* write_in_core_index_as_tree(struct repository *repo) {
ret = write_index_as_tree_internal(&o, index_state, was_valid, 0, NULL);
if (ret == WRITE_TREE_UNMERGED_INDEX) {
int i;
- fprintf(stderr, "BUG: There are unmerged index entries:\n");
+ bug("there are unmerged index entries:");
for (i = 0; i < index_state->cache_nr; i++) {
const struct cache_entry *ce = index_state->cache[i];
if (ce_stage(ce))
- fprintf(stderr, "BUG: %d %.*s\n", ce_stage(ce),
- (int)ce_namelen(ce), ce->name);
+ bug("%d %.*s", ce_stage(ce),
+ (int)ce_namelen(ce), ce->name);
}
- BUG("unmerged index entries when writing inmemory index");
+ BUG("unmerged index entries when writing in-core index");
}
return lookup_tree(repo, &index_state->cache_tree->oid);
diff --git a/cache.h b/cache.h
index 6226f6a..ac5ab4e 100644
--- a/cache.h
+++ b/cache.h
@@ -310,6 +310,29 @@ struct untracked_cache;
struct progress;
struct pattern_list;
+enum sparse_index_mode {
+ /*
+ * There are no sparse directories in the index at all.
+ *
+ * Repositories that don't use cone-mode sparse-checkout will
+ * always have their indexes in this mode.
+ */
+ INDEX_EXPANDED = 0,
+
+ /*
+ * The index has already been collapsed to sparse directories
+	 * wherever possible.
+ */
+ INDEX_COLLAPSED,
+
+ /*
+ * The sparse directories that exist are outside the
+ * sparse-checkout boundary, but it is possible that some file
+ * entries could collapse to sparse directory entries.
+ */
+ INDEX_PARTIALLY_SPARSE,
+};
+
struct index_state {
struct cache_entry **cache;
unsigned int version;
@@ -323,14 +346,8 @@ struct index_state {
drop_cache_tree : 1,
updated_workdir : 1,
updated_skipworktree : 1,
- fsmonitor_has_run_once : 1,
-
- /*
- * sparse_index == 1 when sparse-directory
- * entries exist. Requires sparse-checkout
- * in cone mode.
- */
- sparse_index : 1;
+ fsmonitor_has_run_once : 1;
+ enum sparse_index_mode sparse_index;
struct hashmap name_hash;
struct hashmap dir_hash;
struct object_id oid;
@@ -566,7 +583,7 @@ extern char *git_work_tree_cfg;
int is_inside_work_tree(void);
const char *get_git_dir(void);
const char *get_git_common_dir(void);
-char *get_object_directory(void);
+const char *get_object_directory(void);
char *get_index_file(void);
char *get_graft_file(struct repository *r);
void set_git_dir(const char *path, int make_realpath);
@@ -1031,6 +1048,10 @@ enum fsync_component {
FSYNC_COMPONENT_INDEX | \
FSYNC_COMPONENT_REFERENCE)
+#ifndef FSYNC_COMPONENTS_PLATFORM_DEFAULT
+#define FSYNC_COMPONENTS_PLATFORM_DEFAULT FSYNC_COMPONENTS_DEFAULT
+#endif
+
/*
* A bitmask indicating which components of the repo should be fsynced.
*/
@@ -1040,7 +1061,8 @@ extern int use_fsync;
enum fsync_method {
FSYNC_METHOD_FSYNC,
- FSYNC_METHOD_WRITEOUT_ONLY
+ FSYNC_METHOD_WRITEOUT_ONLY,
+ FSYNC_METHOD_BATCH,
};
extern enum fsync_method fsync_method;
@@ -1766,6 +1788,11 @@ void fsync_or_die(int fd, const char *);
int fsync_component(enum fsync_component component, int fd);
void fsync_component_or_die(enum fsync_component component, int fd, const char *msg);
+static inline int batch_fsync_enabled(enum fsync_component component)
+{
+ return (fsync_components & component) && (fsync_method == FSYNC_METHOD_BATCH);
+}
+
ssize_t read_in_full(int fd, void *buf, size_t count);
ssize_t write_in_full(int fd, const void *buf, size_t count);
ssize_t pread_in_full(int fd, void *buf, size_t count, off_t offset);
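
With batch_fsync_enabled() above and the bulk-checkin declarations earlier in this patch, a loose-object writer can choose between a cheap page-cache writeout (deferring the single hardware flush to the end of the ODB transaction) and an immediate fsync. The real call sites are in the loose-object writing code rather than in this hunk; this is only a simplified sketch:

static void sync_loose_object_file(int fd, const char *tmpfile)
{
	if (batch_fsync_enabled(FSYNC_COMPONENT_LOOSE_OBJECT))
		/*
		 * Writeout now; the one hardware flush happens when
		 * the enclosing ODB transaction is flushed.
		 */
		fsync_loose_object_bulk_checkin(fd, tmpfile);
	else
		fsync_component_or_die(FSYNC_COMPONENT_LOOSE_OBJECT,
				       fd, tmpfile);
}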
diff --git a/chunk-format.c b/chunk-format.c
index 1c3dca6..0275b74 100644
--- a/chunk-format.c
+++ b/chunk-format.c
@@ -181,3 +181,15 @@ int read_chunk(struct chunkfile *cf,
return CHUNK_NOT_FOUND;
}
+
+uint8_t oid_version(const struct git_hash_algo *algop)
+{
+ switch (hash_algo_by_ptr(algop)) {
+ case GIT_HASH_SHA1:
+ return 1;
+ case GIT_HASH_SHA256:
+ return 2;
+ default:
+ die(_("invalid hash version"));
+ }
+}
diff --git a/chunk-format.h b/chunk-format.h
index 9ccbe00..7885aa0 100644
--- a/chunk-format.h
+++ b/chunk-format.h
@@ -2,6 +2,7 @@
#define CHUNK_FORMAT_H
#include "git-compat-util.h"
+#include "hash.h"
struct hashfile;
struct chunkfile;
@@ -65,4 +66,6 @@ int read_chunk(struct chunkfile *cf,
chunk_read_fn fn,
void *data);
+uint8_t oid_version(const struct git_hash_algo *algop);
+
#endif
diff --git a/ci/install-dependencies.sh b/ci/install-dependencies.sh
index dbcebad..107757a 100755
--- a/ci/install-dependencies.sh
+++ b/ci/install-dependencies.sh
@@ -5,7 +5,7 @@
. ${0%/*}/lib.sh
-P4WHENCE=http://filehost.perforce.com/perforce/r$LINUX_P4_VERSION
+P4WHENCE=https://cdist2.perforce.com/perforce/r$LINUX_P4_VERSION
LFSWHENCE=https://github.com/github/git-lfs/releases/download/v$LINUX_GIT_LFS_VERSION
UBUNTU_COMMON_PKGS="make libssl-dev libcurl4-openssl-dev libexpat-dev
tcl tk gettext zlib1g-dev perl-modules liberror-perl libauthen-sasl-perl
@@ -37,13 +37,15 @@ macos-latest)
test -z "$BREW_INSTALL_PACKAGES" ||
brew install $BREW_INSTALL_PACKAGES
brew link --force gettext
- brew install --cask --no-quarantine perforce || {
- # Update the definitions and try again
- cask_repo="$(brew --repository)"/Library/Taps/homebrew/homebrew-cask &&
- git -C "$cask_repo" pull --no-stat --ff-only &&
- brew install --cask --no-quarantine perforce
- } ||
- brew install homebrew/cask/perforce
+ mkdir -p $HOME/bin
+ (
+ cd $HOME/bin
+ wget -q "https://cdist2.perforce.com/perforce/r21.2/bin.macosx1015x86_64/helix-core-server.tgz" &&
+ tar -xf helix-core-server.tgz &&
+ sudo xattr -d com.apple.quarantine p4 p4d 2>/dev/null || true
+ )
+ PATH="$PATH:${HOME}/bin"
+ export PATH
if test -n "$CC_PACKAGE"
then
@@ -78,15 +80,19 @@ linux-gcc-default)
;;
esac
-if type p4d >/dev/null && type p4 >/dev/null
+if type p4d >/dev/null 2>&1 && type p4 >/dev/null 2>&1
then
echo "$(tput setaf 6)Perforce Server Version$(tput sgr0)"
p4d -V | grep Rev.
echo "$(tput setaf 6)Perforce Client Version$(tput sgr0)"
p4 -V | grep Rev.
+else
+ echo >&2 "WARNING: perforce wasn't installed, see above for clues why"
fi
-if type git-lfs >/dev/null
+if type git-lfs >/dev/null 2>&1
then
echo "$(tput setaf 6)Git-LFS Version$(tput sgr0)"
git-lfs version
+else
+ echo >&2 "WARNING: git-lfs wasn't installed, see above for clues why"
fi
diff --git a/ci/lib.sh b/ci/lib.sh
index cbc2f8f..f095519 100755
--- a/ci/lib.sh
+++ b/ci/lib.sh
@@ -1,5 +1,56 @@
# Library of functions shared by all CI scripts
+if test true != "$GITHUB_ACTIONS"
+then
+ begin_group () { :; }
+ end_group () { :; }
+
+ group () {
+ shift
+ "$@"
+ }
+ set -x
+else
+ begin_group () {
+ need_to_end_group=t
+ echo "::group::$1" >&2
+ set -x
+ }
+
+ end_group () {
+ test -n "$need_to_end_group" || return 0
+ set +x
+ need_to_end_group=
+ echo '::endgroup::' >&2
+ }
+ trap end_group EXIT
+
+ group () {
+ set +x
+ begin_group "$1"
+ shift
+ # work around `dash` not supporting `set -o pipefail`
+ (
+ "$@" 2>&1
+ echo $? >exit.status
+ ) |
+ sed 's/^\(\([^ ]*\):\([0-9]*\):\([0-9]*:\) \)\(error\|warning\): /::\5 file=\2,line=\3::\1/'
+ res=$(cat exit.status)
+ rm exit.status
+ end_group
+ return $res
+ }
+
+ begin_group "CI setup"
+fi
+
+# Set 'exit on error' for all CI scripts to let the caller know that
+# something went wrong.
+#
+# We already enabled tracing executed commands earlier. This helps by showing
+# how environment variables are set and dependencies are installed.
+set -e
+
skip_branch_tip_with_tag () {
# Sometimes, a branch is pushed at the same time the tag that points
# at the same commit as the tip of the branch is pushed, and building
@@ -69,8 +120,7 @@ skip_good_tree () {
exit 0
}
-check_unignored_build_artifacts ()
-{
+check_unignored_build_artifacts () {
! git ls-files --other --exclude-standard --error-unmatch \
-- ':/*' 2>/dev/null ||
{
@@ -79,18 +129,16 @@ check_unignored_build_artifacts ()
}
}
+handle_failed_tests () {
+ return 1
+}
+
# GitHub Action doesn't set TERM, which is required by tput
export TERM=${TERM:-dumb}
# Clear MAKEFLAGS that may come from the outside world.
export MAKEFLAGS=
-# Set 'exit on error' for all CI scripts to let the caller know that
-# something went wrong.
-# Set tracing executed commands, primarily setting environment variables
-# and installing dependencies.
-set -ex
-
if test -n "$SYSTEM_COLLECTIONURI" || test -n "$SYSTEM_TASKDEFINITIONSURI"
then
CI_TYPE=azure-pipelines
@@ -122,13 +170,34 @@ then
test macos != "$CI_OS_NAME" || CI_OS_NAME=osx
CI_REPO_SLUG="$GITHUB_REPOSITORY"
CI_JOB_ID="$GITHUB_RUN_ID"
- CC="${CC:-gcc}"
+ CC="${CC_PACKAGE:-${CC:-gcc}}"
DONT_SKIP_TAGS=t
+ handle_failed_tests () {
+ mkdir -p t/failed-test-artifacts
+ echo "FAILED_TEST_ARTIFACTS=t/failed-test-artifacts" >>$GITHUB_ENV
+
+ for test_exit in t/test-results/*.exit
+ do
+ test 0 != "$(cat "$test_exit")" || continue
+
+ test_name="${test_exit%.exit}"
+ test_name="${test_name##*/}"
+ printf "\\e[33m\\e[1m=== Failed test: ${test_name} ===\\e[m\\n"
+ echo "The full logs are in the 'print test failures' step below."
+ echo "See also the 'failed-tests-*' artifacts attached to this run."
+ cat "t/test-results/$test_name.markup"
+
+ trash_dir="t/trash directory.$test_name"
+ cp "t/test-results/$test_name.out" t/failed-test-artifacts/
+ tar czf t/failed-test-artifacts/"$test_name".trash.tar.gz "$trash_dir"
+ done
+ return 1
+ }
cache_dir="$HOME/none"
export GIT_PROVE_OPTS="--timer --jobs 10"
- export GIT_TEST_OPTS="--verbose-log -x"
+ export GIT_TEST_OPTS="--verbose-log -x --github-workflow-markup"
MAKEFLAGS="$MAKEFLAGS --jobs=10"
test windows != "$CI_OS_NAME" ||
GIT_TEST_OPTS="--no-chain-lint --no-bin-wrappers $GIT_TEST_OPTS"
@@ -211,3 +280,6 @@ linux-leaks)
esac
MAKEFLAGS="$MAKEFLAGS CC=${CC:-cc}"
+
+end_group
+set -x
diff --git a/ci/make-test-artifacts.sh b/ci/make-test-artifacts.sh
index 6469674..74141af 100755
--- a/ci/make-test-artifacts.sh
+++ b/ci/make-test-artifacts.sh
@@ -7,6 +7,6 @@ mkdir -p "$1" # in case ci/lib.sh decides to quit early
. ${0%/*}/lib.sh
-make artifacts-tar ARTIFACTS_DIRECTORY="$1"
+group Build make artifacts-tar ARTIFACTS_DIRECTORY="$1"
check_unignored_build_artifacts
diff --git a/ci/run-build-and-tests.sh b/ci/run-build-and-tests.sh
index 280dda7..8ebff42 100755
--- a/ci/run-build-and-tests.sh
+++ b/ci/run-build-and-tests.sh
@@ -10,7 +10,7 @@ windows*) cmd //c mklink //j t\\.prove "$(cygpath -aw "$cache_dir/.prove")";;
*) ln -s "$cache_dir/.prove" t/.prove;;
esac
-export MAKE_TARGETS="all test"
+run_tests=t
case "$jobname" in
linux-gcc)
@@ -26,7 +26,7 @@ linux-TEST-vars)
export GIT_TEST_COMMIT_GRAPH_CHANGED_PATHS=1
export GIT_TEST_MULTI_PACK_INDEX=1
export GIT_TEST_MULTI_PACK_INDEX_WRITE_BITMAP=1
- export GIT_TEST_ADD_I_USE_BUILTIN=1
+ export GIT_TEST_ADD_I_USE_BUILTIN=0
export GIT_TEST_DEFAULT_INITIAL_BRANCH_NAME=master
export GIT_TEST_WRITE_REV_INDEX=1
export GIT_TEST_CHECKOUT_WORKERS=2
@@ -41,14 +41,16 @@ pedantic)
# Don't run the tests; we only care about whether Git can be
# built.
export DEVOPTS=pedantic
- export MAKE_TARGETS=all
+ run_tests=
;;
esac
-# Any new "test" targets should not go after this "make", but should
-# adjust $MAKE_TARGETS. Otherwise compilation-only targets above will
-# start running tests.
-make $MAKE_TARGETS
+group Build make
+if test -n "$run_tests"
+then
+ group "Run tests" make test ||
+ handle_failed_tests
+fi
check_unignored_build_artifacts
save_good_tree
diff --git a/ci/run-static-analysis.sh b/ci/run-static-analysis.sh
index 65bcebd..0d51e5c 100755
--- a/ci/run-static-analysis.sh
+++ b/ci/run-static-analysis.sh
@@ -29,4 +29,6 @@ fi
make hdr-check ||
exit 1
+make check-pot
+
save_good_tree
diff --git a/ci/run-test-slice.sh b/ci/run-test-slice.sh
index f8c2c31..a3c6795 100755
--- a/ci/run-test-slice.sh
+++ b/ci/run-test-slice.sh
@@ -10,8 +10,9 @@ windows*) cmd //c mklink //j t\\.prove "$(cygpath -aw "$cache_dir/.prove")";;
*) ln -s "$cache_dir/.prove" t/.prove;;
esac
-make --quiet -C t T="$(cd t &&
+group "Run tests" make --quiet -C t T="$(cd t &&
./helper/test-tool path-utils slice-tests "$1" "$2" t[0-9]*.sh |
- tr '\n' ' ')"
+ tr '\n' ' ')" ||
+handle_failed_tests
check_unignored_build_artifacts
diff --git a/combine-diff.c b/combine-diff.c
index d93782d..b724f02 100644
--- a/combine-diff.c
+++ b/combine-diff.c
@@ -195,10 +195,10 @@ static struct lline *coalesce_lines(struct lline *base, int *lenbase,
struct lline *baseend, *newend = NULL;
int i, j, origbaselen = *lenbase;
- if (newline == NULL)
+ if (!newline)
return base;
- if (base == NULL) {
+ if (!base) {
*lenbase = lennew;
return newline;
}
diff --git a/commit-graph.c b/commit-graph.c
index 441b360..92d4503 100644
--- a/commit-graph.c
+++ b/commit-graph.c
@@ -193,18 +193,6 @@ char *get_commit_graph_chain_filename(struct object_directory *odb)
return xstrfmt("%s/info/commit-graphs/commit-graph-chain", odb->path);
}
-static uint8_t oid_version(void)
-{
- switch (hash_algo_by_ptr(the_hash_algo)) {
- case GIT_HASH_SHA1:
- return 1;
- case GIT_HASH_SHA256:
- return 2;
- default:
- die(_("invalid hash version"));
- }
-}
-
static struct commit_graph *alloc_commit_graph(void)
{
struct commit_graph *g = xcalloc(1, sizeof(*g));
@@ -365,9 +353,9 @@ struct commit_graph *parse_commit_graph(struct repository *r,
}
hash_version = *(unsigned char*)(data + 5);
- if (hash_version != oid_version()) {
+ if (hash_version != oid_version(the_hash_algo)) {
error(_("commit-graph hash version %X does not match version %X"),
- hash_version, oid_version());
+ hash_version, oid_version(the_hash_algo));
return NULL;
}
@@ -523,10 +511,13 @@ static struct commit_graph *load_commit_graph_chain(struct repository *r,
stat_res = stat(chain_name, &st);
free(chain_name);
- if (!fp ||
- stat_res ||
- st.st_size <= the_hash_algo->hexsz)
+ if (!fp)
return NULL;
+ if (stat_res ||
+ st.st_size <= the_hash_algo->hexsz) {
+ fclose(fp);
+ return NULL;
+ }
count = st.st_size / (the_hash_algo->hexsz + 1);
CALLOC_ARRAY(oids, count);
@@ -1921,7 +1912,7 @@ static int write_commit_graph_file(struct write_commit_graph_context *ctx)
hashwrite_be32(f, GRAPH_SIGNATURE);
hashwrite_u8(f, GRAPH_VERSION);
- hashwrite_u8(f, oid_version());
+ hashwrite_u8(f, oid_version(the_hash_algo));
hashwrite_u8(f, get_num_chunks(cf));
hashwrite_u8(f, ctx->num_commit_graphs_after - 1);
@@ -2206,7 +2197,8 @@ static void mark_commit_graphs(struct write_commit_graph_context *ctx)
struct stat st;
struct utimbuf updated_time;
- stat(ctx->commit_graph_filenames_before[i], &st);
+ if (stat(ctx->commit_graph_filenames_before[i], &st) < 0)
+ continue;
updated_time.actime = st.st_atime;
updated_time.modtime = now;
@@ -2247,7 +2239,8 @@ static void expire_commit_graphs(struct write_commit_graph_context *ctx)
strbuf_setlen(&path, dirnamelen);
strbuf_addstr(&path, de->d_name);
- stat(path.buf, &st);
+ if (stat(path.buf, &st) < 0)
+ continue;
if (st.st_mtime > expire_time)
continue;
@@ -2567,7 +2560,7 @@ int verify_commit_graph(struct repository *r, struct commit_graph *g, int flags)
odb_parents = odb_commit->parents;
while (graph_parents) {
- if (odb_parents == NULL) {
+ if (!odb_parents) {
graph_report(_("commit-graph parent list for commit %s is too long"),
oid_to_hex(&cur_oid));
break;
@@ -2590,7 +2583,7 @@ int verify_commit_graph(struct repository *r, struct commit_graph *g, int flags)
odb_parents = odb_parents->next;
}
- if (odb_parents != NULL)
+ if (odb_parents)
graph_report(_("commit-graph parent list for commit %s terminates early"),
oid_to_hex(&cur_oid));
diff --git a/commit.c b/commit.c
index 59b6c3e..1fb1b2e 100644
--- a/commit.c
+++ b/commit.c
@@ -120,6 +120,17 @@ int commit_graft_pos(struct repository *r, const struct object_id *oid)
commit_graft_oid_access);
}
+static void unparse_commit(struct repository *r, const struct object_id *oid)
+{
+ struct commit *c = lookup_commit(r, oid);
+
+ if (!c->object.parsed)
+ return;
+ free_commit_list(c->parents);
+ c->parents = NULL;
+ c->object.parsed = 0;
+}
+
int register_commit_graft(struct repository *r, struct commit_graft *graft,
int ignore_dups)
{
@@ -145,6 +156,7 @@ int register_commit_graft(struct repository *r, struct commit_graft *graft,
(r->parsed_objects->grafts_nr - pos - 1) *
sizeof(*r->parsed_objects->grafts));
r->parsed_objects->grafts[pos] = graft;
+ unparse_commit(r, &graft->oid);
return 0;
}
@@ -253,8 +265,10 @@ void reset_commit_grafts(struct repository *r)
{
int i;
- for (i = 0; i < r->parsed_objects->grafts_nr; i++)
+ for (i = 0; i < r->parsed_objects->grafts_nr; i++) {
+ unparse_commit(r, &r->parsed_objects->grafts[i]->oid);
free(r->parsed_objects->grafts[i]);
+ }
r->parsed_objects->grafts_nr = 0;
r->parsed_objects->commit_graft_prepared = 0;
}
@@ -407,17 +421,14 @@ int parse_commit_buffer(struct repository *r, struct commit *item, const void *b
if (item->object.parsed)
return 0;
-
- if (item->parents) {
- /*
- * Presumably this is leftover from an earlier failed parse;
- * clear it out in preparation for us re-parsing (we'll hit the
- * same error, but that's good, since it lets our caller know
- * the result cannot be trusted.
- */
- free_commit_list(item->parents);
- item->parents = NULL;
- }
+ /*
+ * Presumably this is leftover from an earlier failed parse;
+ * clear it out in preparation for us re-parsing (we'll hit the
+ * same error, but that's good, since it lets our caller know
+ * the result cannot be trusted.
+ */
+ free_commit_list(item->parents);
+ item->parents = NULL;
tail += size;
if (tail <= bufptr + tree_entry_len + 1 || memcmp(bufptr, "tree ", 5) ||
@@ -1515,7 +1526,7 @@ static int verify_utf8(struct strbuf *buf)
static const char commit_utf8_warn[] =
N_("Warning: commit message did not conform to UTF-8.\n"
"You may want to amend it after fixing the message, or set the config\n"
- "variable i18n.commitencoding to the encoding your project uses.\n");
+ "variable i18n.commitEncoding to the encoding your project uses.\n");
int commit_tree_extended(const char *msg, size_t msg_len,
const struct object_id *tree,
diff --git a/common-main.c b/common-main.c
index 29fb745..c531372 100644
--- a/common-main.c
+++ b/common-main.c
@@ -55,10 +55,30 @@ int main(int argc, const char **argv)
result = cmd_main(argc, argv);
+ /* Not exit(3), but a wrapper calling our common_exit() */
+ exit(result);
+}
+
+static void check_bug_if_BUG(void)
+{
+ if (!bug_called_must_BUG)
+ return;
+ BUG("on exit(): had bug() call(s) in this process without explicit BUG_if_bug()");
+}
+
+/* We wrap exit() to call common_exit() in git-compat-util.h */
+int common_exit(const char *file, int line, int code)
+{
/*
- * We define exit() to call trace2_cmd_exit_fl() in
- * git-compat-util.h. Whether we reach this or exit()
- * elsewhere we'll always run our trace2 exit handler.
+ * For non-POSIX systems: Take the lowest 8 bits of the "code"
+ * to e.g. turn -1 into 255. On a POSIX system this is
+ * redundant, see exit(3) and wait(2), but as it doesn't harm
+ * anything there we don't need to guard this with an "ifdef".
*/
- exit(result);
+ code &= 0xff;
+
+ check_bug_if_BUG();
+ trace2_cmd_exit_fl(file, line, code);
+
+ return code;
}
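
The macro that routes exit() through this wrapper is not part of this hunk; based on the comments above it presumably lives in git-compat-util.h and looks roughly like the following (an assumption, not a quote of the header):

int common_exit(const char *file, int line, int code);

/* expands to the real exit(3), fed with common_exit()'s return value */
#define exit(code) exit(common_exit(__FILE__, __LINE__, (code)))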
diff --git a/compat/fsmonitor/fsm-health-darwin.c b/compat/fsmonitor/fsm-health-darwin.c
new file mode 100644
index 0000000..b9f709e
--- /dev/null
+++ b/compat/fsmonitor/fsm-health-darwin.c
@@ -0,0 +1,24 @@
+#include "cache.h"
+#include "config.h"
+#include "fsmonitor.h"
+#include "fsm-health.h"
+#include "fsmonitor--daemon.h"
+
+int fsm_health__ctor(struct fsmonitor_daemon_state *state)
+{
+ return 0;
+}
+
+void fsm_health__dtor(struct fsmonitor_daemon_state *state)
+{
+ return;
+}
+
+void fsm_health__loop(struct fsmonitor_daemon_state *state)
+{
+ return;
+}
+
+void fsm_health__stop_async(struct fsmonitor_daemon_state *state)
+{
+}
diff --git a/compat/fsmonitor/fsm-health-win32.c b/compat/fsmonitor/fsm-health-win32.c
new file mode 100644
index 0000000..2ea08c1
--- /dev/null
+++ b/compat/fsmonitor/fsm-health-win32.c
@@ -0,0 +1,278 @@
+#include "cache.h"
+#include "config.h"
+#include "fsmonitor.h"
+#include "fsm-health.h"
+#include "fsmonitor--daemon.h"
+
+/*
+ * Every minute wake up and test our health.
+ */
+#define WAIT_FREQ_MS (60 * 1000)
+
+/*
+ * State machine states for each of the interval functions
+ * used for polling our health.
+ */
+enum interval_fn_ctx {
+ CTX_INIT = 0,
+ CTX_TERM,
+ CTX_TIMER
+};
+
+typedef int (interval_fn)(struct fsmonitor_daemon_state *state,
+ enum interval_fn_ctx ctx);
+
+struct fsm_health_data
+{
+ HANDLE hEventShutdown;
+
+ HANDLE hHandles[1]; /* the array does not own these handles */
+#define HEALTH_SHUTDOWN 0
+ int nr_handles; /* number of active event handles */
+
+ struct wt_moved
+ {
+ wchar_t wpath[MAX_PATH + 1];
+ BY_HANDLE_FILE_INFORMATION bhfi;
+ } wt_moved;
+};
+
+/*
+ * Lookup the system unique ID for the path. This is as close as
+ * we get to an inode number, but this also contains volume info,
+ * so it is a little stronger.
+ */
+static int lookup_bhfi(wchar_t *wpath,
+ BY_HANDLE_FILE_INFORMATION *bhfi)
+{
+ DWORD desired_access = FILE_LIST_DIRECTORY;
+ DWORD share_mode =
+ FILE_SHARE_WRITE | FILE_SHARE_READ | FILE_SHARE_DELETE;
+ HANDLE hDir;
+
+ hDir = CreateFileW(wpath, desired_access, share_mode, NULL,
+ OPEN_EXISTING, FILE_FLAG_BACKUP_SEMANTICS, NULL);
+ if (hDir == INVALID_HANDLE_VALUE) {
+ error(_("[GLE %ld] health thread could not open '%ls'"),
+ GetLastError(), wpath);
+ return -1;
+ }
+
+ if (!GetFileInformationByHandle(hDir, bhfi)) {
+ error(_("[GLE %ld] health thread getting BHFI for '%ls'"),
+ GetLastError(), wpath);
+ CloseHandle(hDir);
+ return -1;
+ }
+
+ CloseHandle(hDir);
+ return 0;
+}
+
+/*
+ * Compare the relevant fields from two system unique IDs.
+ * We use this to see if two different handles to the same
+ * path actually refer to the same *instance* of the file
+ * or directory.
+ */
+static int bhfi_eq(const BY_HANDLE_FILE_INFORMATION *bhfi_1,
+ const BY_HANDLE_FILE_INFORMATION *bhfi_2)
+{
+ return (bhfi_1->dwVolumeSerialNumber == bhfi_2->dwVolumeSerialNumber &&
+ bhfi_1->nFileIndexHigh == bhfi_2->nFileIndexHigh &&
+ bhfi_1->nFileIndexLow == bhfi_2->nFileIndexLow);
+}
+
+/*
+ * Shut down if the original worktree root directory has been
+ * deleted, moved, or renamed.
+ *
+ * Since the main thread did a "chdir(getenv($HOME))" and our CWD
+ * is not in the worktree root directory and because the listener
+ * thread added FILE_SHARE_DELETE to the watch handle, it is possible
+ * for the root directory to be moved or deleted while we are still
+ * watching it. We want to detect that here and force a shutdown.
+ *
+ * Granted, a delete MAY cause some operations to fail, such as
+ * GetOverlappedResult(), but it is not guaranteed. And because
+ * ReadDirectoryChangesW() only reports on changes *WITHIN* the
+ * directory, not changes *ON* the directory, our watch will not
+ * receive a delete event for it.
+ *
+ * A move/rename of the worktree root will also not generate an event.
+ * And since the listener thread already has an open handle, it may
+ * continue to receive events for changes within the directory.
+ * However, the pathname of the named-pipe was constructed using the
+ * original location of the worktree root. (Remember named-pipes are
+ * stored in the NPFS and not in the actual file system.) Clients
+ * trying to talk to the worktree after the move/rename will not
+ * reach our daemon process, since we're still listening on the
+ * pipe with the original path.
+ *
+ * Furthermore, if the user does something like:
+ *
+ * $ mv repo repo.old
+ * $ git init repo
+ *
+ * A new daemon cannot be started in the new instance of "repo"
+ * because the named-pipe is still being used by the daemon on
+ * the original instance.
+ *
+ * So, detect move/rename/delete and shutdown. This should also
+ * handle unsafe drive removal.
+ *
+ * We use the file system unique ID to distinguish the original
+ * directory instance from a new instance and force a shutdown
+ * if the unique ID changes.
+ *
+ * Since a worktree move/rename/delete/unmount doesn't happen
+ * that often (and we can't get an immediate event anyway), we
+ * use a timeout and periodically poll it.
+ */
+static int has_worktree_moved(struct fsmonitor_daemon_state *state,
+ enum interval_fn_ctx ctx)
+{
+ struct fsm_health_data *data = state->health_data;
+ BY_HANDLE_FILE_INFORMATION bhfi;
+ int r;
+
+ switch (ctx) {
+ case CTX_TERM:
+ return 0;
+
+ case CTX_INIT:
+ if (xutftowcs_path(data->wt_moved.wpath,
+ state->path_worktree_watch.buf) < 0) {
+ error(_("could not convert to wide characters: '%s'"),
+ state->path_worktree_watch.buf);
+ return -1;
+ }
+
+ /*
+ * On the first call we lookup the unique sequence ID for
+ * the worktree root directory.
+ */
+ return lookup_bhfi(data->wt_moved.wpath, &data->wt_moved.bhfi);
+
+ case CTX_TIMER:
+ r = lookup_bhfi(data->wt_moved.wpath, &bhfi);
+ if (r)
+ return r;
+ if (!bhfi_eq(&data->wt_moved.bhfi, &bhfi)) {
+ error(_("BHFI changed '%ls'"), data->wt_moved.wpath);
+ return -1;
+ }
+ return 0;
+
+ default:
+ die(_("unhandled case in 'has_worktree_moved': %d"),
+ (int)ctx);
+ }
+
+ return 0;
+}
+
+
+int fsm_health__ctor(struct fsmonitor_daemon_state *state)
+{
+ struct fsm_health_data *data;
+
+ CALLOC_ARRAY(data, 1);
+
+ data->hEventShutdown = CreateEvent(NULL, TRUE, FALSE, NULL);
+
+ data->hHandles[HEALTH_SHUTDOWN] = data->hEventShutdown;
+ data->nr_handles++;
+
+ state->health_data = data;
+ return 0;
+}
+
+void fsm_health__dtor(struct fsmonitor_daemon_state *state)
+{
+ struct fsm_health_data *data;
+
+ if (!state || !state->health_data)
+ return;
+
+ data = state->health_data;
+
+ CloseHandle(data->hEventShutdown);
+
+ FREE_AND_NULL(state->health_data);
+}
+
+/*
+ * A table of the polling functions.
+ */
+static interval_fn *table[] = {
+ has_worktree_moved,
+ NULL, /* must be last */
+};
+
+/*
+ * Call all of the polling functions in the table.
+ * Shortcut and return first error.
+ *
+ * Return 0 if all succeeded.
+ */
+static int call_all(struct fsmonitor_daemon_state *state,
+ enum interval_fn_ctx ctx)
+{
+ int k;
+
+ for (k = 0; table[k]; k++) {
+ int r = table[k](state, ctx);
+ if (r)
+ return r;
+ }
+
+ return 0;
+}
+
+void fsm_health__loop(struct fsmonitor_daemon_state *state)
+{
+ struct fsm_health_data *data = state->health_data;
+ int r;
+
+ r = call_all(state, CTX_INIT);
+ if (r < 0)
+ goto force_error_stop;
+ if (r > 0)
+ goto force_shutdown;
+
+ for (;;) {
+ DWORD dwWait = WaitForMultipleObjects(data->nr_handles,
+ data->hHandles,
+ FALSE, WAIT_FREQ_MS);
+
+ if (dwWait == WAIT_OBJECT_0 + HEALTH_SHUTDOWN)
+ goto clean_shutdown;
+
+ if (dwWait == WAIT_TIMEOUT) {
+ r = call_all(state, CTX_TIMER);
+ if (r < 0)
+ goto force_error_stop;
+ if (r > 0)
+ goto force_shutdown;
+ continue;
+ }
+
+ error(_("health thread wait failed [GLE %ld]"),
+ GetLastError());
+ goto force_error_stop;
+ }
+
+force_error_stop:
+ state->health_error_code = -1;
+force_shutdown:
+ ipc_server_stop_async(state->ipc_server_data);
+clean_shutdown:
+ call_all(state, CTX_TERM);
+ return;
+}
+
+void fsm_health__stop_async(struct fsmonitor_daemon_state *state)
+{
+ SetEvent(state->health_data->hHandles[HEALTH_SHUTDOWN]);
+}
diff --git a/compat/fsmonitor/fsm-health.h b/compat/fsmonitor/fsm-health.h
new file mode 100644
index 0000000..45547ba
--- /dev/null
+++ b/compat/fsmonitor/fsm-health.h
@@ -0,0 +1,47 @@
+#ifndef FSM_HEALTH_H
+#define FSM_HEALTH_H
+
+/* This needs to be implemented by each backend */
+
+#ifdef HAVE_FSMONITOR_DAEMON_BACKEND
+
+struct fsmonitor_daemon_state;
+
+/*
+ * Initialize platform-specific data for the fsmonitor health thread.
+ * This will be called from the main thread PRIOR to starting the
+ * thread.
+ *
+ * Returns 0 if successful.
+ * Returns -1 otherwise.
+ */
+int fsm_health__ctor(struct fsmonitor_daemon_state *state);
+
+/*
+ * Cleanup platform-specific data for the health thread.
+ * This will be called from the main thread AFTER joining the thread.
+ */
+void fsm_health__dtor(struct fsmonitor_daemon_state *state);
+
+/*
+ * The main body of the platform-specific event loop to monitor the
+ * health of the daemon process. This will run in the health thread.
+ *
+ * The health thread should call `ipc_server_stop_async()` if it needs
+ * to cause a shutdown. (It should NOT do so if it receives a normal
+ * shutdown signal.)
+ *
+ * It should set `state->health_error_code` to -1 if the daemon should exit
+ * with an error.
+ */
+void fsm_health__loop(struct fsmonitor_daemon_state *state);
+
+/*
+ * Gently request that the health thread shutdown.
+ * It does not wait for it to stop. The caller should do a JOIN
+ * to wait for it.
+ */
+void fsm_health__stop_async(struct fsmonitor_daemon_state *state);
+
+#endif /* HAVE_FSMONITOR_DAEMON_BACKEND */
+#endif /* FSM_HEALTH_H */
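
Pieced together from the comments above, the intended lifecycle is: construct on the main thread, run the loop on a dedicated thread, signal it to stop, join, then destroy. A hypothetical wiring sketch — the real wiring lives in the fsmonitor daemon, and the health_error_code field is assumed from the comment above:

static void *health_thread_proc(void *_state)
{
	struct fsmonitor_daemon_state *state = _state;

	fsm_health__loop(state);
	return NULL;
}

static int run_health_thread(struct fsmonitor_daemon_state *state)
{
	pthread_t tid;

	if (fsm_health__ctor(state) < 0)
		return -1;
	if (pthread_create(&tid, NULL, health_thread_proc, state))
		return -1;

	/* ... the daemon services requests; later, during shutdown ... */

	fsm_health__stop_async(state);
	pthread_join(tid, NULL);
	fsm_health__dtor(state);
	return state->health_error_code;
}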
diff --git a/compat/fsmonitor/fsm-listen-darwin.c b/compat/fsmonitor/fsm-listen-darwin.c
index 0741fe8..8e208e8 100644
--- a/compat/fsmonitor/fsm-listen-darwin.c
+++ b/compat/fsmonitor/fsm-listen-darwin.c
@@ -27,7 +27,7 @@
#include "fsm-listen.h"
#include "fsmonitor--daemon.h"
-struct fsmonitor_daemon_backend_data
+struct fsm_listen_data
{
CFStringRef cfsr_worktree_path;
CFStringRef cfsr_gitdir_path;
@@ -100,12 +100,17 @@ static void log_flags_set(const char *path, const FSEventStreamEventFlags flag)
if (flag & kFSEventStreamEventFlagItemCloned)
strbuf_addstr(&msg, "ItemCloned|");
- trace_printf_key(&trace_fsmonitor, "fsevent: '%s', flags=%u %s",
+ trace_printf_key(&trace_fsmonitor, "fsevent: '%s', flags=0x%x %s",
path, flag, msg.buf);
strbuf_release(&msg);
}
+static int ef_is_root_changed(const FSEventStreamEventFlags ef)
+{
+ return (ef & kFSEventStreamEventFlagRootChanged);
+}
+
static int ef_is_root_delete(const FSEventStreamEventFlags ef)
{
return (ef & kFSEventStreamEventFlagItemIsDir &&
@@ -125,6 +130,60 @@ static int ef_is_dropped(const FSEventStreamEventFlags ef)
ef & kFSEventStreamEventFlagUserDropped);
}
+/*
+ * If an `xattr` change is the only reason we received this event,
+ * then silently ignore it. Git doesn't care about xattr's. We
+ * have to be careful here because the kernel can combine multiple
+ * events for a single path. And because events always have certain
+ * bits set, such as `ItemIsFile` or `ItemIsDir`.
+ *
+ * Return 1 if we should ignore it.
+ */
+static int ef_ignore_xattr(const FSEventStreamEventFlags ef)
+{
+ static const FSEventStreamEventFlags mask =
+ kFSEventStreamEventFlagItemChangeOwner |
+ kFSEventStreamEventFlagItemCreated |
+ kFSEventStreamEventFlagItemFinderInfoMod |
+ kFSEventStreamEventFlagItemInodeMetaMod |
+ kFSEventStreamEventFlagItemModified |
+ kFSEventStreamEventFlagItemRemoved |
+ kFSEventStreamEventFlagItemRenamed |
+ kFSEventStreamEventFlagItemXattrMod |
+ kFSEventStreamEventFlagItemCloned;
+
+ return ((ef & mask) == kFSEventStreamEventFlagItemXattrMod);
+}
+
+/*
+ * On MacOS we have to adjust for Unicode composition insensitivity
+ * (where NFC and NFD spellings are not respected). The different
+ * spellings are essentially aliases regardless of how the path is
+ * actually stored on the disk.
+ *
+ * This is related to "core.precomposeUnicode" (which wants to try
+ * to hide NFD completely and treat everything as NFC). Here, we
+ * don't know what value the client has (or will have) for this
+ * config setting when they make a query, so assume the worst and
+ * emit both when the OS gives us an NFD path.
+ */
+static void my_add_path(struct fsmonitor_batch *batch, const char *path)
+{
+ char *composed;
+
+ /* add the NFC or NFD path as received from the OS */
+ fsmonitor_batch__add_path(batch, path);
+
+ /* if NFD, also add the corresponding NFC spelling */
+ composed = (char *)precompose_string_if_needed(path);
+ if (!composed || composed == path)
+ return;
+
+ fsmonitor_batch__add_path(batch, composed);
+ free(composed);
+}
+
+
static void fsevent_callback(ConstFSEventStreamRef streamRef,
void *ctx,
size_t num_of_events,
@@ -133,7 +192,7 @@ static void fsevent_callback(ConstFSEventStreamRef streamRef,
const FSEventStreamEventId event_ids[])
{
struct fsmonitor_daemon_state *state = ctx;
- struct fsmonitor_daemon_backend_data *data = state->backend_data;
+ struct fsm_listen_data *data = state->listen_data;
char **paths = (char **)event_paths;
struct fsmonitor_batch *batch = NULL;
struct string_list cookie_list = STRING_LIST_INIT_DUP;
@@ -190,6 +249,33 @@ static void fsevent_callback(ConstFSEventStreamRef streamRef,
continue;
}
+ if (ef_is_root_changed(event_flags[k])) {
+ /*
+ * The spelling of the pathname of the root directory
+ * has changed. This includes the name of the root
+ * directory itself or of any parent directory in the
+ * path.
+ *
+ * (There may be other conditions that throw this,
+ * but I couldn't find any information on it.)
+ *
+ * Force a shutdown now and avoid things getting
+ * out of sync. The Unix domain socket is inside
+ * the .git directory and a spelling change will make
+ * it hard for clients to rendezvous with us.
+ */
+ trace_printf_key(&trace_fsmonitor,
+ "event: root changed");
+ goto force_shutdown;
+ }
+
+ if (ef_ignore_xattr(event_flags[k])) {
+ trace_printf_key(&trace_fsmonitor,
+ "ignore-xattr: '%s', flags=0x%x",
+ path_k, event_flags[k]);
+ continue;
+ }
+
switch (fsmonitor_classify_path_absolute(state, path_k)) {
case IS_INSIDE_DOT_GIT_WITH_COOKIE_PREFIX:
@@ -248,7 +334,7 @@ static void fsevent_callback(ConstFSEventStreamRef streamRef,
if (!batch)
batch = fsmonitor_batch__new();
- fsmonitor_batch__add_path(batch, rel);
+ my_add_path(batch, rel);
}
if (event_flags[k] & kFSEventStreamEventFlagItemIsDir) {
@@ -261,7 +347,7 @@ static void fsevent_callback(ConstFSEventStreamRef streamRef,
if (!batch)
batch = fsmonitor_batch__new();
- fsmonitor_batch__add_path(batch, tmp.buf);
+ my_add_path(batch, tmp.buf);
}
break;
@@ -318,11 +404,11 @@ int fsm_listen__ctor(struct fsmonitor_daemon_state *state)
NULL,
NULL
};
- struct fsmonitor_daemon_backend_data *data;
+ struct fsm_listen_data *data;
const void *dir_array[2];
CALLOC_ARRAY(data, 1);
- state->backend_data = data;
+ state->listen_data = data;
data->cfsr_worktree_path = CFStringCreateWithCString(
NULL, state->path_worktree_watch.buf, kCFStringEncodingUTF8);
@@ -342,7 +428,7 @@ int fsm_listen__ctor(struct fsmonitor_daemon_state *state)
data->cfar_paths_to_watch,
kFSEventStreamEventIdSinceNow,
0.001, flags);
- if (data->stream == NULL)
+ if (!data->stream)
goto failed;
/*
@@ -354,18 +440,18 @@ int fsm_listen__ctor(struct fsmonitor_daemon_state *state)
failed:
error(_("Unable to create FSEventStream."));
- FREE_AND_NULL(state->backend_data);
+ FREE_AND_NULL(state->listen_data);
return -1;
}
void fsm_listen__dtor(struct fsmonitor_daemon_state *state)
{
- struct fsmonitor_daemon_backend_data *data;
+ struct fsm_listen_data *data;
- if (!state || !state->backend_data)
+ if (!state || !state->listen_data)
return;
- data = state->backend_data;
+ data = state->listen_data;
if (data->stream) {
if (data->stream_started)
@@ -375,14 +461,14 @@ void fsm_listen__dtor(struct fsmonitor_daemon_state *state)
FSEventStreamRelease(data->stream);
}
- FREE_AND_NULL(state->backend_data);
+ FREE_AND_NULL(state->listen_data);
}
void fsm_listen__stop_async(struct fsmonitor_daemon_state *state)
{
- struct fsmonitor_daemon_backend_data *data;
+ struct fsm_listen_data *data;
- data = state->backend_data;
+ data = state->listen_data;
data->shutdown_style = SHUTDOWN_EVENT;
CFRunLoopStop(data->rl);
@@ -390,9 +476,9 @@ void fsm_listen__stop_async(struct fsmonitor_daemon_state *state)
void fsm_listen__loop(struct fsmonitor_daemon_state *state)
{
- struct fsmonitor_daemon_backend_data *data;
+ struct fsm_listen_data *data;
- data = state->backend_data;
+ data = state->listen_data;
data->rl = CFRunLoopGetCurrent();
@@ -409,7 +495,7 @@ void fsm_listen__loop(struct fsmonitor_daemon_state *state)
switch (data->shutdown_style) {
case FORCE_ERROR_STOP:
- state->error_code = -1;
+ state->listen_error_code = -1;
/* fall thru */
case FORCE_SHUTDOWN:
ipc_server_stop_async(state->ipc_server_data);
@@ -421,7 +507,7 @@ void fsm_listen__loop(struct fsmonitor_daemon_state *state)
return;
force_error_stop_without_loop:
- state->error_code = -1;
+ state->listen_error_code = -1;
ipc_server_stop_async(state->ipc_server_data);
return;
}
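
To see why the ef_ignore_xattr() mask comparison above works, note that the always-set kind bits (ItemIsFile, ItemIsDir) are deliberately left out of the mask, so they cannot disturb the test. Two illustrative combinations, using the standard FSEvents constants:

/* only the xattr bit survives the mask: ef_ignore_xattr() returns 1 */
static const FSEventStreamEventFlags ex_xattr_only =
	kFSEventStreamEventFlagItemIsFile |
	kFSEventStreamEventFlagItemXattrMod;

/* a content modification also survives: ef_ignore_xattr() returns 0 */
static const FSEventStreamEventFlags ex_xattr_plus_modify =
	kFSEventStreamEventFlagItemIsFile |
	kFSEventStreamEventFlagItemXattrMod |
	kFSEventStreamEventFlagItemModified;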
diff --git a/compat/fsmonitor/fsm-listen-win32.c b/compat/fsmonitor/fsm-listen-win32.c
index 5b928ab..03df8d9 100644
--- a/compat/fsmonitor/fsm-listen-win32.c
+++ b/compat/fsmonitor/fsm-listen-win32.c
@@ -25,6 +25,9 @@ struct one_watch
DWORD count;
struct strbuf path;
+ wchar_t wpath_longname[MAX_PATH + 1];
+ DWORD wpath_longname_len;
+
HANDLE hDir;
HANDLE hEvent;
OVERLAPPED overlapped;
@@ -34,9 +37,24 @@ struct one_watch
* need to later call GetOverlappedResult() and possibly CancelIoEx().
*/
BOOL is_active;
+
+ /*
+ * Are shortnames enabled on the containing drive? This is
+ * always true for "C:/" drives and usually never true for
+ * other drives.
+ *
+ * We only set this for the worktree because we only need to
+ * convert shortname paths to longname paths for items we send
+ * to clients. (We don't care about shortname expansion for
+ * paths inside a GITDIR because we never send them to
+ * clients.)
+ */
+ BOOL has_shortnames;
+ BOOL has_tilde;
+ wchar_t dotgit_shortname[16]; /* for 8.3 name */
};
-struct fsmonitor_daemon_backend_data
+struct fsm_listen_data
{
struct one_watch *watch_worktree;
struct one_watch *watch_gitdir;
@@ -51,17 +69,18 @@ struct fsmonitor_daemon_backend_data
};
/*
- * Convert the WCHAR path from the notification into UTF8 and
- * then normalize it.
+ * Convert the WCHAR path from the event into UTF8 and normalize it.
+ *
+ * `wpath_len` is in WCHARS not bytes.
*/
-static int normalize_path_in_utf8(FILE_NOTIFY_INFORMATION *info,
+static int normalize_path_in_utf8(wchar_t *wpath, DWORD wpath_len,
struct strbuf *normalized_path)
{
int reserve;
int len = 0;
strbuf_reset(normalized_path);
- if (!info->FileNameLength)
+ if (!wpath_len)
goto normalize;
/*
@@ -70,12 +89,12 @@ static int normalize_path_in_utf8(FILE_NOTIFY_INFORMATION *info,
* sequence of 2 UTF8 characters. That should let us
* avoid ERROR_INSUFFICIENT_BUFFER 99.9+% of the time.
*/
- reserve = info->FileNameLength + 1;
+ reserve = 2 * wpath_len + 1;
strbuf_grow(normalized_path, reserve);
for (;;) {
- len = WideCharToMultiByte(CP_UTF8, 0, info->FileName,
- info->FileNameLength / sizeof(WCHAR),
+ len = WideCharToMultiByte(CP_UTF8, 0,
+ wpath, wpath_len,
normalized_path->buf,
strbuf_avail(normalized_path) - 1,
NULL, NULL);
@@ -83,9 +102,7 @@ static int normalize_path_in_utf8(FILE_NOTIFY_INFORMATION *info,
goto normalize;
if (GetLastError() != ERROR_INSUFFICIENT_BUFFER) {
error(_("[GLE %ld] could not convert path to UTF-8: '%.*ls'"),
- GetLastError(),
- (int)(info->FileNameLength / sizeof(WCHAR)),
- info->FileName);
+ GetLastError(), (int)wpath_len, wpath);
return -1;
}
@@ -98,9 +115,176 @@ normalize:
return strbuf_normalize_path(normalized_path);
}
+/*
+ * See if the worktree root directory has shortnames enabled.
+ * This will help us decide if we need to do an expensive shortname
+ * to longname conversion on every notification event.
+ *
+ * We do not want to create a file to test this, so we assume that the
+ * root directory contains a ".git" file or directory. (Our caller
+ * only calls us for the worktree root, so this should be fine.)
+ *
+ * Remember the spelling of the shortname for ".git" if it exists.
+ */
+static void check_for_shortnames(struct one_watch *watch)
+{
+ wchar_t buf_in[MAX_PATH + 1];
+ wchar_t buf_out[MAX_PATH + 1];
+ wchar_t *last;
+ wchar_t *p;
+
+ /* build L"<wt-root-path>/.git" */
+ swprintf(buf_in, ARRAY_SIZE(buf_in) - 1, L"%ls.git",
+ watch->wpath_longname);
+
+ if (!GetShortPathNameW(buf_in, buf_out, ARRAY_SIZE(buf_out)))
+ return;
+
+ /*
+ * Get the final filename component of the shortpath.
+ * We know that the path does not have a final slash.
+ */
+ for (last = p = buf_out; *p; p++)
+ if (*p == L'/' || *p == '\\')
+ last = p + 1;
+
+ if (!wcscmp(last, L".git"))
+ return;
+
+ watch->has_shortnames = 1;
+ wcsncpy(watch->dotgit_shortname, last,
+ ARRAY_SIZE(watch->dotgit_shortname));
+
+ /*
+ * The shortname for ".git" is usually of the form "GIT~1", so
+ * we should be able to avoid shortname to longname mapping on
+ * every notification event if the source string does not
+ * contain a "~".
+ *
+ * However, the documentation for GetLongPathNameW() says
+ * that there are filesystems that don't follow that pattern
+ * and warns against this optimization.
+ *
+	 * Let's test this.
+ */
+ if (wcschr(watch->dotgit_shortname, L'~'))
+ watch->has_tilde = 1;
+}
+
+enum get_relative_result {
+ GRR_NO_CONVERSION_NEEDED,
+ GRR_HAVE_CONVERSION,
+ GRR_SHUTDOWN,
+};
+
+/*
+ * Info notification paths are relative to the root of the watch.
+ * If our CWD is still at the root, then we can use relative paths
+ * to convert from shortnames to longnames. If our process has a
+ * different CWD, then we need to construct an absolute path, do
+ * the conversion, and then return the root-relative portion.
+ *
+ * We use the longname form of the root as our basis and assume that
+ * it already has a trailing slash.
+ *
+ * `wpath_len` is in WCHARS not bytes.
+ */
+static enum get_relative_result get_relative_longname(
+ struct one_watch *watch,
+ const wchar_t *wpath, DWORD wpath_len,
+ wchar_t *wpath_longname, size_t bufsize_wpath_longname)
+{
+ wchar_t buf_in[2 * MAX_PATH + 1];
+ wchar_t buf_out[MAX_PATH + 1];
+ DWORD root_len;
+ DWORD out_len;
+
+ /*
+ * Build L"<wt-root-path>/<event-rel-path>"
+ * Note that the <event-rel-path> might not be null terminated
+ * so we avoid swprintf() constructions.
+ */
+ root_len = watch->wpath_longname_len;
+ if (root_len + wpath_len >= ARRAY_SIZE(buf_in)) {
+ /*
+ * This should not happen. We cannot append the observed
+ * relative path onto the end of the worktree root path
+ * without overflowing the buffer. Just give up.
+ */
+ return GRR_SHUTDOWN;
+ }
+ wcsncpy(buf_in, watch->wpath_longname, root_len);
+ wcsncpy(buf_in + root_len, wpath, wpath_len);
+ buf_in[root_len + wpath_len] = 0;
+
+ /*
+ * We don't actually know if the source pathname is a
+ * shortname or a longname. This Windows routine allows
+ * either to be given as input.
+ */
+ out_len = GetLongPathNameW(buf_in, buf_out, ARRAY_SIZE(buf_out));
+ if (!out_len) {
+ /*
+ * The shortname to longname conversion can fail for
+ * various reasons, for example if the file has been
+ * deleted. (That is, if we just received a
+ * delete-file notification event and the file is
+ * already gone, we can't ask the file system to
+ * lookup the longname for it. Likewise, for moves
+ * and renames where we are given the old name.)
+ *
+ * Since deleting or moving a file or directory by its
+	 * shortname is rather obscure, I'm going to ignore the
+ * failure and ask the caller to report the original
+ * relative path. This seems kinder than failing here
+ * and forcing a resync. Besides, forcing a resync on
+ * every file/directory delete would effectively
+ * cripple monitoring.
+ *
+ * We might revisit this in the future.
+ */
+ return GRR_NO_CONVERSION_NEEDED;
+ }
+
+ if (!wcscmp(buf_in, buf_out)) {
+ /*
+ * The path does not have a shortname alias.
+ */
+ return GRR_NO_CONVERSION_NEEDED;
+ }
+
+ if (wcsncmp(buf_in, buf_out, root_len)) {
+ /*
+ * The spelling of the root directory portion of the computed
+ * longname has changed. This should not happen. Basically,
+ * it means that we don't know where (without recomputing the
+ * longname of just the root directory) to split out the
+ * relative path. Since this should not happen, I'm just
+ * going to let this fail and force a shutdown (because all
+ * subsequent events are probably going to see the same
+ * mismatch).
+ */
+ return GRR_SHUTDOWN;
+ }
+
+ if (out_len - root_len >= bufsize_wpath_longname) {
+ /*
+ * This should not happen. We cannot copy the root-relative
+ * portion of the path into the provided buffer without an
+ * overrun. Just give up.
+ */
+ return GRR_SHUTDOWN;
+ }
+
+ /* Return the worktree root-relative portion of the longname. */
+
+ wcscpy(wpath_longname, buf_out + root_len);
+ return GRR_HAVE_CONVERSION;
+}
+
void fsm_listen__stop_async(struct fsmonitor_daemon_state *state)
{
- SetEvent(state->backend_data->hListener[LISTENER_SHUTDOWN]);
+ SetEvent(state->listen_data->hListener[LISTENER_SHUTDOWN]);
}
static struct one_watch *create_watch(struct fsmonitor_daemon_state *state,
@@ -111,7 +295,9 @@ static struct one_watch *create_watch(struct fsmonitor_daemon_state *state,
DWORD share_mode =
FILE_SHARE_WRITE | FILE_SHARE_READ | FILE_SHARE_DELETE;
HANDLE hDir;
- wchar_t wpath[MAX_PATH];
+ DWORD len_longname;
+ wchar_t wpath[MAX_PATH + 1];
+ wchar_t wpath_longname[MAX_PATH + 1];
if (xutftowcs_path(wpath, path) < 0) {
error(_("could not convert to wide characters: '%s'"), path);
@@ -128,6 +314,21 @@ static struct one_watch *create_watch(struct fsmonitor_daemon_state *state,
return NULL;
}
+ len_longname = GetLongPathNameW(wpath, wpath_longname,
+ ARRAY_SIZE(wpath_longname));
+ if (!len_longname) {
+ error(_("[GLE %ld] could not get longname of '%s'"),
+ GetLastError(), path);
+ CloseHandle(hDir);
+ return NULL;
+ }
+
+ if (wpath_longname[len_longname - 1] != L'/' &&
+ wpath_longname[len_longname - 1] != L'\\') {
+ wpath_longname[len_longname++] = L'/';
+ wpath_longname[len_longname] = 0;
+ }
+
CALLOC_ARRAY(watch, 1);
watch->buf_len = sizeof(watch->buffer); /* assume full MAX_RDCW_BUF */
@@ -135,6 +336,9 @@ static struct one_watch *create_watch(struct fsmonitor_daemon_state *state,
strbuf_init(&watch->path, 0);
strbuf_addstr(&watch->path, path);
+ wcscpy(watch->wpath_longname, wpath_longname);
+ watch->wpath_longname_len = len_longname;
+
watch->hDir = hDir;
watch->hEvent = CreateEvent(NULL, TRUE, FALSE, NULL);
@@ -155,7 +359,7 @@ static void destroy_watch(struct one_watch *watch)
free(watch);
}
-static int start_rdcw_watch(struct fsmonitor_daemon_backend_data *data,
+static int start_rdcw_watch(struct fsm_listen_data *data,
struct one_watch *watch)
{
DWORD dwNotifyFilter =
@@ -220,12 +424,22 @@ static int recv_rdcw_watch(struct one_watch *watch)
}
/*
- * NEEDSWORK: If an external <gitdir> is deleted, the above
- * returns an error. I'm not sure that there's anything that
- * we can do here other than failing -- the <worktree>/.git
- * link file would be broken anyway. We might try to check
- * for that and return a better error message, but I'm not
- * sure it is worth it.
+ * GetOverlappedResult() fails if the watched directory is
+ * deleted while we were waiting for an overlapped IO to
+ * complete. The documentation did not list specific errors,
+ * but I observed ERROR_ACCESS_DENIED (0x05) errors during
+ * testing.
+ *
+	 * Note that we only get notification events for changes
+	 * *within* the directory, not *on* the directory itself.
+	 * (These might be properties of the parent directory, for
+ * example).
+ *
+ * NEEDSWORK: We might try to check for the deleted directory
+ * case and return a better error message, but I'm not sure it
+ * is worth it.
+ *
+ * Shutdown if we get any error.
*/
error(_("GetOverlappedResult failed on '%s' [GLE %ld]"),
@@ -259,6 +473,62 @@ static void cancel_rdcw_watch(struct one_watch *watch)
}
/*
+ * Process a single relative pathname event.
+ * Return 1 if we should shutdown.
+ */
+static int process_1_worktree_event(
+ struct string_list *cookie_list,
+ struct fsmonitor_batch **batch,
+ const struct strbuf *path,
+ enum fsmonitor_path_type t,
+ DWORD info_action)
+{
+ const char *slash;
+
+ switch (t) {
+ case IS_INSIDE_DOT_GIT_WITH_COOKIE_PREFIX:
+ /* special case cookie files within .git */
+
+ /* Use just the filename of the cookie file. */
+ slash = find_last_dir_sep(path->buf);
+ string_list_append(cookie_list,
+ slash ? slash + 1 : path->buf);
+ break;
+
+ case IS_INSIDE_DOT_GIT:
+ /* ignore everything inside of "<worktree>/.git/" */
+ break;
+
+ case IS_DOT_GIT:
+ /* "<worktree>/.git" was deleted (or renamed away) */
+ if ((info_action == FILE_ACTION_REMOVED) ||
+ (info_action == FILE_ACTION_RENAMED_OLD_NAME)) {
+ trace2_data_string("fsmonitor", NULL,
+ "fsm-listen/dotgit",
+ "removed");
+ return 1;
+ }
+ break;
+
+ case IS_WORKDIR_PATH:
+ /* queue normal pathname */
+ if (!*batch)
+ *batch = fsmonitor_batch__new();
+ fsmonitor_batch__add_path(*batch, path->buf);
+ break;
+
+ case IS_GITDIR:
+ case IS_INSIDE_GITDIR:
+ case IS_INSIDE_GITDIR_WITH_COOKIE_PREFIX:
+ default:
+ BUG("unexpected path classification '%d' for '%s'",
+ t, path->buf);
+ }
+
+ return 0;
+}
+
+/*
* Process filesystem events that happen anywhere (recursively) under the
* <worktree> root directory. For a normal working directory, this includes
* both version controlled files and the contents of the .git/ directory.
@@ -268,12 +538,13 @@ static void cancel_rdcw_watch(struct one_watch *watch)
*/
static int process_worktree_events(struct fsmonitor_daemon_state *state)
{
- struct fsmonitor_daemon_backend_data *data = state->backend_data;
+ struct fsm_listen_data *data = state->listen_data;
struct one_watch *watch = data->watch_worktree;
struct strbuf path = STRBUF_INIT;
struct string_list cookie_list = STRING_LIST_INIT_DUP;
struct fsmonitor_batch *batch = NULL;
const char *p = watch->buffer;
+ wchar_t wpath_longname[MAX_PATH + 1];
/*
* If the kernel gets more events than will fit in the kernel
@@ -306,54 +577,64 @@ static int process_worktree_events(struct fsmonitor_daemon_state *state)
*/
for (;;) {
FILE_NOTIFY_INFORMATION *info = (void *)p;
- const char *slash;
+ wchar_t *wpath = info->FileName;
+ DWORD wpath_len = info->FileNameLength / sizeof(WCHAR);
enum fsmonitor_path_type t;
+ enum get_relative_result grr;
+
+ if (watch->has_shortnames) {
+ if (!wcscmp(wpath, watch->dotgit_shortname)) {
+ /*
+ * This event exactly matches the
+ * spelling of the shortname of
+ * ".git", so we can skip some steps.
+ *
+ * (This case is odd because the user
+ * can "rm -rf GIT~1" and we cannot
+ * use the filesystem to map it back
+ * to ".git".)
+ */
+ strbuf_reset(&path);
+ strbuf_addstr(&path, ".git");
+ t = IS_DOT_GIT;
+ goto process_it;
+ }
- strbuf_reset(&path);
- if (normalize_path_in_utf8(info, &path) == -1)
- goto skip_this_path;
-
- t = fsmonitor_classify_path_workdir_relative(path.buf);
-
- switch (t) {
- case IS_INSIDE_DOT_GIT_WITH_COOKIE_PREFIX:
- /* special case cookie files within .git */
-
- /* Use just the filename of the cookie file. */
- slash = find_last_dir_sep(path.buf);
- string_list_append(&cookie_list,
- slash ? slash + 1 : path.buf);
- break;
-
- case IS_INSIDE_DOT_GIT:
- /* ignore everything inside of "<worktree>/.git/" */
- break;
+ if (watch->has_tilde && !wcschr(wpath, L'~')) {
+ /*
+ * Shortnames on this filesystem have tildes
+ * and the notification path does not have
+ * one, so we assume that it is a longname.
+ */
+ goto normalize_it;
+ }
- case IS_DOT_GIT:
- /* "<worktree>/.git" was deleted (or renamed away) */
- if ((info->Action == FILE_ACTION_REMOVED) ||
- (info->Action == FILE_ACTION_RENAMED_OLD_NAME)) {
- trace2_data_string("fsmonitor", NULL,
- "fsm-listen/dotgit",
- "removed");
+ grr = get_relative_longname(watch, wpath, wpath_len,
+ wpath_longname,
+ ARRAY_SIZE(wpath_longname));
+ switch (grr) {
+ case GRR_NO_CONVERSION_NEEDED: /* use info buffer as is */
+ break;
+ case GRR_HAVE_CONVERSION:
+ wpath = wpath_longname;
+ wpath_len = wcslen(wpath);
+ break;
+ default:
+ case GRR_SHUTDOWN:
goto force_shutdown;
}
- break;
+ }
- case IS_WORKDIR_PATH:
- /* queue normal pathname */
- if (!batch)
- batch = fsmonitor_batch__new();
- fsmonitor_batch__add_path(batch, path.buf);
- break;
+normalize_it:
+ if (normalize_path_in_utf8(wpath, wpath_len, &path) == -1)
+ goto skip_this_path;
- case IS_GITDIR:
- case IS_INSIDE_GITDIR:
- case IS_INSIDE_GITDIR_WITH_COOKIE_PREFIX:
- default:
- BUG("unexpected path classification '%d' for '%s'",
- t, path.buf);
- }
+ t = fsmonitor_classify_path_workdir_relative(path.buf);
+
+process_it:
+ if (process_1_worktree_event(&cookie_list, &batch, &path, t,
+ info->Action))
+ goto force_shutdown;
skip_this_path:
if (!info->NextEntryOffset)
@@ -382,10 +663,13 @@ force_shutdown:
* Note that we DO NOT get filesystem events on the external <gitdir>
* itself (it is not inside something that we are watching). In particular,
* we do not get an event if the external <gitdir> is deleted.
+ *
+ * Also, we do not care about shortnames within the external <gitdir>, since
+ * we never send these paths to clients.
*/
static int process_gitdir_events(struct fsmonitor_daemon_state *state)
{
- struct fsmonitor_daemon_backend_data *data = state->backend_data;
+ struct fsm_listen_data *data = state->listen_data;
struct one_watch *watch = data->watch_gitdir;
struct strbuf path = STRBUF_INIT;
struct string_list cookie_list = STRING_LIST_INIT_DUP;
@@ -403,8 +687,10 @@ static int process_gitdir_events(struct fsmonitor_daemon_state *state)
const char *slash;
enum fsmonitor_path_type t;
- strbuf_reset(&path);
- if (normalize_path_in_utf8(info, &path) == -1)
+ if (normalize_path_in_utf8(
+ info->FileName,
+ info->FileNameLength / sizeof(WCHAR),
+ &path) == -1)
goto skip_this_path;
t = fsmonitor_classify_path_gitdir_relative(path.buf);
@@ -441,11 +727,11 @@ skip_this_path:
void fsm_listen__loop(struct fsmonitor_daemon_state *state)
{
- struct fsmonitor_daemon_backend_data *data = state->backend_data;
+ struct fsm_listen_data *data = state->listen_data;
DWORD dwWait;
int result;
- state->error_code = 0;
+ state->listen_error_code = 0;
if (start_rdcw_watch(data, data->watch_worktree) == -1)
goto force_error_stop;
@@ -510,7 +796,7 @@ void fsm_listen__loop(struct fsmonitor_daemon_state *state)
}
force_error_stop:
- state->error_code = -1;
+ state->listen_error_code = -1;
force_shutdown:
/*
@@ -527,7 +813,7 @@ clean_shutdown:
int fsm_listen__ctor(struct fsmonitor_daemon_state *state)
{
- struct fsmonitor_daemon_backend_data *data;
+ struct fsm_listen_data *data;
CALLOC_ARRAY(data, 1);
@@ -538,6 +824,8 @@ int fsm_listen__ctor(struct fsmonitor_daemon_state *state)
if (!data->watch_worktree)
goto failed;
+ check_for_shortnames(data->watch_worktree);
+
if (state->nr_paths_watching > 1) {
data->watch_gitdir = create_watch(state,
state->path_gitdir_watch.buf);
@@ -558,7 +846,7 @@ int fsm_listen__ctor(struct fsmonitor_daemon_state *state)
data->nr_listener_handles++;
}
- state->backend_data = data;
+ state->listen_data = data;
return 0;
failed:
@@ -571,16 +859,16 @@ failed:
void fsm_listen__dtor(struct fsmonitor_daemon_state *state)
{
- struct fsmonitor_daemon_backend_data *data;
+ struct fsm_listen_data *data;
- if (!state || !state->backend_data)
+ if (!state || !state->listen_data)
return;
- data = state->backend_data;
+ data = state->listen_data;
CloseHandle(data->hEventShutdown);
destroy_watch(data->watch_worktree);
destroy_watch(data->watch_gitdir);
- FREE_AND_NULL(state->backend_data);
+ FREE_AND_NULL(state->listen_data);
}
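
The shortname handling above depends on expanding a possible 8.3 alias (e.g. "GIT~1") back to its long form before the path is classified. A minimal standalone Win32 sketch of that expansion using GetLongPathNameW(); the hunk does not show whether get_relative_longname() is built on exactly this call, so treat it purely as an illustration:

#include <stdio.h>
#include <windows.h>

int main(void)
{
	wchar_t longname[MAX_PATH];
	/* "C:\PROGRA~1" is a typical 8.3 alias for "C:\Program Files". */
	DWORD len = GetLongPathNameW(L"C:\\PROGRA~1", longname, MAX_PATH);

	if (!len || len >= MAX_PATH)
		return 1;	/* path missing or buffer too small */
	wprintf(L"%ls\n", longname);
	return 0;
}
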
diff --git a/compat/fsmonitor/fsm-listen.h b/compat/fsmonitor/fsm-listen.h
index f053934..41650bf 100644
--- a/compat/fsmonitor/fsm-listen.h
+++ b/compat/fsmonitor/fsm-listen.h
@@ -33,7 +33,7 @@ void fsm_listen__dtor(struct fsmonitor_daemon_state *state);
* do so if the listener thread receives a normal shutdown signal from
* the IPC layer.)
*
- * It should set `state->error_code` to -1 if the daemon should exit
+ * It should set `state->listen_error_code` to -1 if the daemon should exit
* with an error.
*/
void fsm_listen__loop(struct fsmonitor_daemon_state *state);
diff --git a/compat/fsmonitor/fsm-settings-darwin.c b/compat/fsmonitor/fsm-settings-darwin.c
new file mode 100644
index 0000000..efc732c
--- /dev/null
+++ b/compat/fsmonitor/fsm-settings-darwin.c
@@ -0,0 +1,89 @@
+#include "cache.h"
+#include "config.h"
+#include "repository.h"
+#include "fsmonitor-settings.h"
+#include "fsmonitor.h"
+#include <sys/param.h>
+#include <sys/mount.h>
+
+/*
+ * [1] Remote working directories are problematic for FSMonitor.
+ *
+ * The underlying file system on the server machine and/or the remote
+ * mount type (NFS, SAMBA, etc.) dictates whether notification events
+ * are available at all to remote client machines.
+ *
+ * Kernel differences between the server and client machines also
+ * dictate how (buffering, frequency, de-dup) the events are
+ * delivered to client machine processes.
+ *
+ * A client machine (such as a laptop) may choose to suspend/resume
+ * and it is unclear (without lots of testing) whether the watcher can
+ * resync after a resume. We might be able to treat this as a normal
+ * "events were dropped by the kernel" event and do our normal "flush
+ * and resync" --or-- we might need to close the existing (zombie?)
+ * notification fd and create a new one.
+ *
+ * In theory, the above issues need to be addressed whether we are
+ * using the Hook or IPC API.
+ *
+ * For the builtin FSMonitor, we create the Unix domain socket for the
+ * IPC in the .git directory. If the working directory is remote,
+ * then the socket will be created on the remote file system. This
+ * can fail if the remote file system does not support UDS file types
+ * (e.g. smbfs to a Windows server) or if the remote kernel does not
+ * allow a non-local process to bind() the socket. (These problems
+ * could be fixed by moving the UDS out of the .git directory and to a
+ * well-known local directory on the client machine, but care should
+ * be taken to ensure that $HOME is actually local and not a managed
+ * file share.)
+ *
+ * So (for now at least), mark remote working directories as
+ * incompatible.
+ *
+ *
+ * [2] FAT32 and NTFS working directories are problematic too.
+ *
+ * The builtin FSMonitor uses a Unix domain socket in the .git
+ * directory for IPC. These Windows drive formats do not support
+ * Unix domain sockets, so mark them as incompatible for the daemon.
+ *
+ */
+static enum fsmonitor_reason check_volume(struct repository *r)
+{
+ struct statfs fs;
+
+ if (statfs(r->worktree, &fs) == -1) {
+ int saved_errno = errno;
+ trace_printf_key(&trace_fsmonitor, "statfs('%s') failed: %s",
+ r->worktree, strerror(saved_errno));
+ errno = saved_errno;
+ return FSMONITOR_REASON_ERROR;
+ }
+
+ trace_printf_key(&trace_fsmonitor,
+ "statfs('%s') [type 0x%08x][flags 0x%08x] '%s'",
+ r->worktree, fs.f_type, fs.f_flags, fs.f_fstypename);
+
+ if (!(fs.f_flags & MNT_LOCAL))
+ return FSMONITOR_REASON_REMOTE;
+
+ if (!strcmp(fs.f_fstypename, "msdos")) /* aka FAT32 */
+ return FSMONITOR_REASON_NOSOCKETS;
+
+ if (!strcmp(fs.f_fstypename, "ntfs"))
+ return FSMONITOR_REASON_NOSOCKETS;
+
+ return FSMONITOR_REASON_OK;
+}
+
+enum fsmonitor_reason fsm_os__incompatible(struct repository *r)
+{
+ enum fsmonitor_reason reason;
+
+ reason = check_volume(r);
+ if (reason != FSMONITOR_REASON_OK)
+ return reason;
+
+ return FSMONITOR_REASON_OK;
+}
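
The Darwin-side compatibility check reduces to a single statfs() call: a mount without MNT_LOCAL is treated as remote, and volume formats that cannot host Unix domain sockets (msdos/FAT32, ntfs) are rejected as well. A minimal standalone macOS sketch of that probe, outside the fsmonitor plumbing:

#include <stdio.h>
#include <string.h>
#include <sys/param.h>
#include <sys/mount.h>

int main(int argc, char **argv)
{
	const char *path = argc > 1 ? argv[1] : ".";
	struct statfs fs;

	if (statfs(path, &fs) == -1) {
		perror("statfs");
		return 1;
	}
	if (!(fs.f_flags & MNT_LOCAL))
		printf("%s: remote volume (%s)\n", path, fs.f_fstypename);
	else if (!strcmp(fs.f_fstypename, "msdos") ||
		 !strcmp(fs.f_fstypename, "ntfs"))
		printf("%s: no Unix-socket support (%s)\n", path, fs.f_fstypename);
	else
		printf("%s: ok (%s)\n", path, fs.f_fstypename);
	return 0;
}
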
diff --git a/compat/fsmonitor/fsm-settings-win32.c b/compat/fsmonitor/fsm-settings-win32.c
new file mode 100644
index 0000000..9076557
--- /dev/null
+++ b/compat/fsmonitor/fsm-settings-win32.c
@@ -0,0 +1,137 @@
+#include "cache.h"
+#include "config.h"
+#include "repository.h"
+#include "fsmonitor-settings.h"
+#include "fsmonitor.h"
+
+/*
+ * VFS for Git is incompatible with FSMonitor.
+ *
+ * Granted, core Git does not know anything about VFS for Git and we
+ * shouldn't make assumptions about a downstream feature, but users
+ * can install both versions. And this can lead to incorrect results
+ * from core Git commands. So, without bringing in any of the VFS for
+ * Git code, do a simple config test for a published config setting.
+ * (We do not look at the various *_TEST_* environment variables.)
+ */
+static enum fsmonitor_reason check_vfs4git(struct repository *r)
+{
+ const char *const_str;
+
+ if (!repo_config_get_value(r, "core.virtualfilesystem", &const_str))
+ return FSMONITOR_REASON_VFS4GIT;
+
+ return FSMONITOR_REASON_OK;
+}
+
+/*
+ * Remote working directories are problematic for FSMonitor.
+ *
+ * The underlying file system on the server machine and/or the remote
+ * mount type dictates whether notification events are available at
+ * all to remote client machines.
+ *
+ * Kernel differences between the server and client machines also
+ * dictate how (buffering, frequency, de-dup) the events are
+ * delivered to client machine processes.
+ *
+ * A client machine (such as a laptop) may choose to suspend/resume
+ * and it is unclear (without lots of testing) whether the watcher can
+ * resync after a resume. We might be able to treat this as a normal
+ * "events were dropped by the kernel" event and do our normal "flush
+ * and resync" --or-- we might need to close the existing (zombie?)
+ * notification fd and create a new one.
+ *
+ * In theory, the above issues need to be addressed whether we are
+ * using the Hook or IPC API.
+ *
+ * So (for now at least), mark remote working directories as
+ * incompatible.
+ *
+ * Notes for testing:
+ *
+ * (a) Windows allows a network share to be mapped to a drive letter.
+ * (This is the normal method to access it.)
+ *
+ * $ NET USE Z: \\server\share
+ * $ git -C Z:/repo status
+ *
+ * (b) Windows allows a network share to be referenced WITHOUT mapping
+ * it to a drive letter.
+ *
+ * $ NET USE \\server\share\dir
+ * $ git -C //server/share/repo status
+ *
+ * (c) Windows allows "SUBST" to create a fake drive mapping to an
+ * arbitrary path (which may be remote)
+ *
+ * $ SUBST Q: Z:\repo
+ * $ git -C Q:/ status
+ *
+ * (d) Windows allows a directory symlink to be created on a local
+ * file system that points to a remote repo.
+ *
+ * $ mklink /d ./link //server/share/repo
+ * $ git -C ./link status
+ */
+static enum fsmonitor_reason check_remote(struct repository *r)
+{
+ wchar_t wpath[MAX_PATH];
+ wchar_t wfullpath[MAX_PATH];
+ size_t wlen;
+ UINT driveType;
+
+ /*
+ * Do everything in wide chars because the drive letter might be
+ * a multi-byte sequence. See win32_has_dos_drive_prefix().
+ */
+ if (xutftowcs_path(wpath, r->worktree) < 0)
+ return FSMONITOR_REASON_ERROR;
+
+ /*
+ * GetDriveTypeW() requires a final slash. We assume that the
+ * worktree pathname points to an actual directory.
+ */
+ wlen = wcslen(wpath);
+ if (wpath[wlen - 1] != L'\\' && wpath[wlen - 1] != L'/') {
+ wpath[wlen++] = L'\\';
+ wpath[wlen] = 0;
+ }
+
+ /*
+ * Normalize the path. If nothing else, this converts forward
+ * slashes to backslashes. This is essential for GetDriveTypeW() to
+ * correctly handle some UNC "\\server\share\..." paths.
+ */
+ if (!GetFullPathNameW(wpath, MAX_PATH, wfullpath, NULL))
+ return FSMONITOR_REASON_ERROR;
+
+ driveType = GetDriveTypeW(wfullpath);
+ trace_printf_key(&trace_fsmonitor,
+ "DriveType '%s' L'%ls' (%u)",
+ r->worktree, wfullpath, driveType);
+
+ if (driveType == DRIVE_REMOTE) {
+ trace_printf_key(&trace_fsmonitor,
+ "check_remote('%s') true",
+ r->worktree);
+ return FSMONITOR_REASON_REMOTE;
+ }
+
+ return FSMONITOR_REASON_OK;
+}
+
+enum fsmonitor_reason fsm_os__incompatible(struct repository *r)
+{
+ enum fsmonitor_reason reason;
+
+ reason = check_vfs4git(r);
+ if (reason != FSMONITOR_REASON_OK)
+ return reason;
+
+ reason = check_remote(r);
+ if (reason != FSMONITOR_REASON_OK)
+ return reason;
+
+ return FSMONITOR_REASON_OK;
+}
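
On Windows the equivalent probe normalizes the worktree path and asks GetDriveTypeW() whether it lives on a DRIVE_REMOTE volume. A standalone sketch of just that probe, with the current directory standing in for the worktree:

#include <stdio.h>
#include <windows.h>

int main(void)
{
	wchar_t full[MAX_PATH];
	UINT type;

	/* GetDriveTypeW() wants a trailing (back)slash; normalize first. */
	if (!GetFullPathNameW(L".\\", MAX_PATH, full, NULL))
		return 1;
	type = GetDriveTypeW(full);
	wprintf(L"%ls -> drive type %u (remote? %d)\n",
		full, type, type == DRIVE_REMOTE);
	return 0;
}
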
diff --git a/compat/mingw.c b/compat/mingw.c
index 6fe80fd..2607de9 100644
--- a/compat/mingw.c
+++ b/compat/mingw.c
@@ -1060,7 +1060,7 @@ char *mingw_mktemp(char *template)
int mkstemp(char *template)
{
char *filename = mktemp(template);
- if (filename == NULL)
+ if (!filename)
return -1;
return open(filename, O_RDWR | O_CREAT, 0600);
}
@@ -2332,7 +2332,7 @@ int setitimer(int type, struct itimerval *in, struct itimerval *out)
static const struct timeval zero;
static int atexit_done;
- if (out != NULL)
+ if (out)
return errno = EINVAL,
error("setitimer param 3 != NULL not implemented");
if (!is_timeval_eq(&in->it_interval, &zero) &&
@@ -2361,7 +2361,7 @@ int sigaction(int sig, struct sigaction *in, struct sigaction *out)
if (sig != SIGALRM)
return errno = EINVAL,
error("sigaction only implemented for SIGALRM");
- if (out != NULL)
+ if (out)
return errno = EINVAL,
error("sigaction: param 3 != NULL not implemented");
@@ -2830,7 +2830,7 @@ not_a_reserved_name:
}
c = path[i];
- if (c && c != '.' && c != ':' && c != '/' && c != '\\')
+ if (c && c != '.' && c != ':' && !is_xplatform_dir_sep(c))
goto not_a_reserved_name;
/* contains reserved name */
diff --git a/compat/mingw.h b/compat/mingw.h
index 494cc8d..a74da68 100644
--- a/compat/mingw.h
+++ b/compat/mingw.h
@@ -332,6 +332,9 @@ int mingw_getpagesize(void);
int win32_fsync_no_flush(int fd);
#define fsync_no_flush win32_fsync_no_flush
+#define FSYNC_COMPONENTS_PLATFORM_DEFAULT (FSYNC_COMPONENTS_DEFAULT | FSYNC_COMPONENT_LOOSE_OBJECT)
+#define FSYNC_METHOD_DEFAULT (FSYNC_METHOD_BATCH)
+
struct rlimit {
unsigned int rlim_cur;
};
diff --git a/compat/mkdir.c b/compat/mkdir.c
index 9e253fb..02aea3b 100644
--- a/compat/mkdir.c
+++ b/compat/mkdir.c
@@ -9,7 +9,7 @@ int compat_mkdir_wo_trailing_slash(const char *dir, mode_t mode)
size_t len = strlen(dir);
if (len && dir[len-1] == '/') {
- if ((tmp_dir = strdup(dir)) == NULL)
+ if (!(tmp_dir = strdup(dir)))
return -1;
tmp_dir[len-1] = '\0';
}
diff --git a/compat/mmap.c b/compat/mmap.c
index 8d6c02d..2fe1c77 100644
--- a/compat/mmap.c
+++ b/compat/mmap.c
@@ -13,7 +13,7 @@ void *git_mmap(void *start, size_t length, int prot, int flags, int fd, off_t of
}
start = malloc(length);
- if (start == NULL) {
+ if (!start) {
errno = ENOMEM;
return MAP_FAILED;
}
diff --git a/compat/nedmalloc/nedmalloc.c b/compat/nedmalloc/nedmalloc.c
index edb438a..2c0ace7 100644
--- a/compat/nedmalloc/nedmalloc.c
+++ b/compat/nedmalloc/nedmalloc.c
@@ -323,7 +323,6 @@ static NOINLINE void RemoveCacheEntries(nedpool *p, threadcache *tc, unsigned in
}
static void DestroyCaches(nedpool *p) THROWSPEC
{
- if(p->caches)
{
threadcache *tc;
int n;
diff --git a/compat/win32/path-utils.h b/compat/win32/path-utils.h
index bba2b64..65fa3b9 100644
--- a/compat/win32/path-utils.h
+++ b/compat/win32/path-utils.h
@@ -6,11 +6,7 @@ int win32_has_dos_drive_prefix(const char *path);
int win32_skip_dos_drive_prefix(char **path);
#define skip_dos_drive_prefix win32_skip_dos_drive_prefix
-static inline int win32_is_dir_sep(int c)
-{
- return c == '/' || c == '\\';
-}
-#define is_dir_sep win32_is_dir_sep
+#define is_dir_sep is_xplatform_dir_sep
static inline char *win32_find_last_dir_sep(const char *path)
{
char *ret = NULL;
diff --git a/compat/win32/syslog.c b/compat/win32/syslog.c
index 161978d..1f8d893 100644
--- a/compat/win32/syslog.c
+++ b/compat/win32/syslog.c
@@ -43,6 +43,7 @@ void syslog(int priority, const char *fmt, ...)
va_end(ap);
while ((pos = strstr(str, "%1")) != NULL) {
+ size_t offset = pos - str;
char *oldstr = str;
str = realloc(str, st_add(++str_len, 1));
if (!str) {
@@ -50,6 +51,7 @@ void syslog(int priority, const char *fmt, ...)
warning_errno("realloc failed");
return;
}
+ pos = str + offset;
memmove(pos + 2, pos + 1, strlen(pos));
pos[1] = ' ';
}
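
The syslog.c fix is the standard guard against a dangling pointer across realloc(): `pos` points into `str`, so its offset is saved before the call and the pointer is recomputed afterwards. The same pattern in isolation (a hypothetical helper, not Git code):

#include <stdlib.h>

/* Grow `buf` to `new_size` while keeping `*cursor` pointing at the same
 * logical position inside it. */
static char *grow_keeping_cursor(char *buf, size_t new_size, char **cursor)
{
	size_t offset = *cursor - buf;		/* save before realloc */
	char *tmp = realloc(buf, new_size);

	if (!tmp)
		return NULL;			/* old buffer still valid */
	*cursor = tmp + offset;			/* recompute afterwards */
	return tmp;
}

int main(void)
{
	char *s = calloc(16, 1);
	char *at, *grown;

	if (!s)
		return 1;
	at = s + 3;
	grown = grow_keeping_cursor(s, 64, &at);
	free(grown ? grown : s);
	return 0;
}
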
diff --git a/config.c b/config.c
index a5e11aa..9b0e9c9 100644
--- a/config.c
+++ b/config.c
@@ -1342,7 +1342,7 @@ static const struct fsync_component_name {
static enum fsync_component parse_fsync_components(const char *var, const char *string)
{
- enum fsync_component current = FSYNC_COMPONENTS_DEFAULT;
+ enum fsync_component current = FSYNC_COMPONENTS_PLATFORM_DEFAULT;
enum fsync_component positive = 0, negative = 0;
while (string) {
@@ -1688,6 +1688,8 @@ static int git_default_core_config(const char *var, const char *value, void *cb)
fsync_method = FSYNC_METHOD_FSYNC;
else if (!strcmp(value, "writeout-only"))
fsync_method = FSYNC_METHOD_WRITEOUT_ONLY;
+ else if (!strcmp(value, "batch"))
+ fsync_method = FSYNC_METHOD_BATCH;
else
warning(_("ignoring unknown core.fsyncMethod value '%s'"), value);
@@ -1781,6 +1783,9 @@ static int git_default_branch_config(const char *var, const char *value)
} else if (value && !strcmp(value, "inherit")) {
git_branch_track = BRANCH_TRACK_INHERIT;
return 0;
+ } else if (value && !strcmp(value, "simple")) {
+ git_branch_track = BRANCH_TRACK_SIMPLE;
+ return 0;
}
git_branch_track = git_config_bool(var, value);
return 0;
@@ -3190,7 +3195,7 @@ int git_config_set_multivar_in_file_gently(const char *config_filename,
goto out_free;
}
/* if nothing to unset, error out */
- if (value == NULL) {
+ if (!value) {
ret = CONFIG_NOTHING_SET;
goto out_free;
}
@@ -3206,7 +3211,7 @@ int git_config_set_multivar_in_file_gently(const char *config_filename,
int i, new_line = 0;
struct config_options opts;
- if (value_pattern == NULL)
+ if (!value_pattern)
store.value_pattern = NULL;
else if (value_pattern == CONFIG_REGEX_NONE)
store.value_pattern = CONFIG_REGEX_NONE;
@@ -3346,7 +3351,7 @@ int git_config_set_multivar_in_file_gently(const char *config_filename,
}
/* write the pair (value == NULL means unset) */
- if (value != NULL) {
+ if (value) {
if (!store.section_seen) {
if (write_section(fd, key, &store) < 0)
goto write_err_out;
@@ -3567,7 +3572,7 @@ static int git_config_copy_or_rename_section_in_file(const char *config_filename
offset = section_name_match(&buf[i], old_name);
if (offset > 0) {
ret++;
- if (new_name == NULL) {
+ if (!new_name) {
remove = 1;
continue;
}
diff --git a/config.mak.dev b/config.mak.dev
index c3104f4..335efd4 100644
--- a/config.mak.dev
+++ b/config.mak.dev
@@ -68,7 +68,6 @@ endif
# https://bugzilla.redhat.com/show_bug.cgi?id=2075786
ifneq ($(filter gcc12,$(COMPILER_FEATURES)),)
DEVELOPER_CFLAGS += -Wno-error=stringop-overread
-DEVELOPER_CFLAGS += -Wno-error=dangling-pointer
endif
GIT_TEST_PERL_FATAL_WARNINGS = YesPlease
diff --git a/config.mak.uname b/config.mak.uname
index 259d151..ce83cad 100644
--- a/config.mak.uname
+++ b/config.mak.uname
@@ -164,6 +164,7 @@ ifeq ($(uname_S),Darwin)
ifndef NO_PTHREADS
ifndef NO_UNIX_SOCKETS
FSMONITOR_DAEMON_BACKEND = darwin
+ FSMONITOR_OS_SETTINGS = darwin
endif
endif
@@ -451,6 +452,8 @@ ifeq ($(uname_S),Windows)
# These are always available, so we do not have to conditionally
# support it.
FSMONITOR_DAEMON_BACKEND = win32
+ FSMONITOR_OS_SETTINGS = win32
+
NO_SVN_TESTS = YesPlease
RUNTIME_PREFIX = YesPlease
HAVE_WPGMPTR = YesWeDo
@@ -641,6 +644,8 @@ ifeq ($(uname_S),MINGW)
# These are always available, so we do not have to conditionally
# support it.
FSMONITOR_DAEMON_BACKEND = win32
+ FSMONITOR_OS_SETTINGS = win32
+
RUNTIME_PREFIX = YesPlease
HAVE_WPGMPTR = YesWeDo
NO_ST_BLOCKS_IN_STRUCT_STAT = YesPlease
diff --git a/configure.ac b/configure.ac
index 316a31d..7dcd048 100644
--- a/configure.ac
+++ b/configure.ac
@@ -1189,9 +1189,6 @@ AC_COMPILE_IFELSE([BSD_SYSCTL_SRC],
GIT_CONF_SUBST([HAVE_BSD_SYSCTL])
## Other checks.
-# Define USE_PIC if you need the main git objects to be built with -fPIC
-# in order to build and link perl/Git.so. x86-64 seems to need this.
-#
# Define NO_SYMLINK_HEAD if you never want .git/HEAD to be a symbolic link.
# Enable it on Windows. By default, symrefs are still used.
#
diff --git a/connect.c b/connect.c
index afc79a6..5ea53de 100644
--- a/connect.c
+++ b/connect.c
@@ -473,6 +473,24 @@ void check_stateless_delimiter(int stateless_rpc,
die("%s", error);
}
+static void send_capabilities(int fd_out, struct packet_reader *reader)
+{
+ const char *hash_name;
+
+ if (server_supports_v2("agent", 0))
+ packet_write_fmt(fd_out, "agent=%s", git_user_agent_sanitized());
+
+ if (server_feature_v2("object-format", &hash_name)) {
+ int hash_algo = hash_algo_by_name(hash_name);
+ if (hash_algo == GIT_HASH_UNKNOWN)
+ die(_("unknown object format '%s' specified by server"), hash_name);
+ reader->hash_algo = &hash_algos[hash_algo];
+ packet_write_fmt(fd_out, "object-format=%s", reader->hash_algo->name);
+ } else {
+ reader->hash_algo = &hash_algos[GIT_HASH_SHA1];
+ }
+}
+
struct ref **get_remote_refs(int fd_out, struct packet_reader *reader,
struct ref **list, int for_push,
struct transport_ls_refs_options *transport_options,
@@ -480,7 +498,6 @@ struct ref **get_remote_refs(int fd_out, struct packet_reader *reader,
int stateless_rpc)
{
int i;
- const char *hash_name;
struct strvec *ref_prefixes = transport_options ?
&transport_options->ref_prefixes : NULL;
const char **unborn_head_target = transport_options ?
@@ -490,18 +507,8 @@ struct ref **get_remote_refs(int fd_out, struct packet_reader *reader,
if (server_supports_v2("ls-refs", 1))
packet_write_fmt(fd_out, "command=ls-refs\n");
- if (server_supports_v2("agent", 0))
- packet_write_fmt(fd_out, "agent=%s", git_user_agent_sanitized());
-
- if (server_feature_v2("object-format", &hash_name)) {
- int hash_algo = hash_algo_by_name(hash_name);
- if (hash_algo == GIT_HASH_UNKNOWN)
- die(_("unknown object format '%s' specified by server"), hash_name);
- reader->hash_algo = &hash_algos[hash_algo];
- packet_write_fmt(fd_out, "object-format=%s", reader->hash_algo->name);
- } else {
- reader->hash_algo = &hash_algos[GIT_HASH_SHA1];
- }
+ /* Send capabilities */
+ send_capabilities(fd_out, reader);
if (server_options && server_options->nr &&
server_supports_v2("server-option", 1))
@@ -1327,7 +1334,7 @@ static void fill_ssh_args(struct child_process *conn, const char *ssh_host,
strvec_push(&detect.args, ssh);
strvec_push(&detect.args, "-G");
- push_ssh_options(&detect.args, &detect.env_array,
+ push_ssh_options(&detect.args, &detect.env,
VARIANT_SSH, port, version, flags);
strvec_push(&detect.args, ssh_host);
@@ -1335,7 +1342,8 @@ static void fill_ssh_args(struct child_process *conn, const char *ssh_host,
}
strvec_push(&conn->args, ssh);
- push_ssh_options(&conn->args, &conn->env_array, variant, port, version, flags);
+ push_ssh_options(&conn->args, &conn->env, variant, port, version,
+ flags);
strvec_push(&conn->args, ssh_host);
}
@@ -1397,7 +1405,7 @@ struct child_process *git_connect(int fd[2], const char *url,
/* remove repo-local variables from the environment */
for (var = local_repo_env; *var; var++)
- strvec_push(&conn->env_array, *var);
+ strvec_push(&conn->env, *var);
conn->use_shell = 1;
conn->in = conn->out = -1;
@@ -1429,7 +1437,7 @@ struct child_process *git_connect(int fd[2], const char *url,
transport_check_allowed("file");
conn->trace2_child_class = "transport/file";
if (version > 0) {
- strvec_pushf(&conn->env_array,
+ strvec_pushf(&conn->env,
GIT_PROTOCOL_ENVIRONMENT "=version=%d",
version);
}
diff --git a/connected.c b/connected.c
index ed3025e..74a20cb 100644
--- a/connected.c
+++ b/connected.c
@@ -110,7 +110,7 @@ no_promisor_pack_found:
rev_list.git_cmd = 1;
if (opt->env)
- strvec_pushv(&rev_list.env_array, opt->env);
+ strvec_pushv(&rev_list.env, opt->env);
rev_list.in = -1;
rev_list.no_stdout = 1;
if (opt->err_fd)
diff --git a/contrib/buildsystems/CMakeLists.txt b/contrib/buildsystems/CMakeLists.txt
index 185f56f..1b23f24 100644
--- a/contrib/buildsystems/CMakeLists.txt
+++ b/contrib/buildsystems/CMakeLists.txt
@@ -54,7 +54,7 @@ set(CMAKE_SOURCE_DIR ${CMAKE_CURRENT_LIST_DIR}/../..)
option(USE_VCPKG "Whether or not to use vcpkg for obtaining dependencies. Only applicable to Windows platforms" ON)
if(NOT WIN32)
- set(USE_VCPKG OFF CACHE BOOL FORCE)
+ set(USE_VCPKG OFF CACHE BOOL "" FORCE)
endif()
if(NOT DEFINED CMAKE_EXPORT_COMPILE_COMMANDS)
@@ -108,7 +108,6 @@ project(git
#TODO gitk git-gui gitweb
#TODO Enable NLS on windows natively
-#TODO Add pcre support
#macros for parsing the Makefile for sources and scripts
macro(parse_makefile_for_sources list_var regex)
@@ -160,6 +159,14 @@ if(NOT (WIN32 AND (CMAKE_C_COMPILER_ID STREQUAL "MSVC" OR CMAKE_C_COMPILER_ID ST
find_package(Intl)
endif()
+find_package(PkgConfig)
+if(PkgConfig_FOUND)
+ pkg_check_modules(PCRE2 libpcre2-8)
+ if(PCRE2_FOUND)
+ add_compile_definitions(USE_LIBPCRE2)
+ endif()
+endif()
+
if(NOT Intl_FOUND)
add_compile_definitions(NO_GETTEXT)
if(NOT Iconv_FOUND)
@@ -180,6 +187,9 @@ endif()
if(Intl_FOUND)
include_directories(SYSTEM ${Intl_INCLUDE_DIRS})
endif()
+if(PCRE2_FOUND)
+ include_directories(SYSTEM ${PCRE2_INCLUDE_DIRS})
+endif()
if(WIN32 AND NOT MSVC)#not required for visual studio builds
@@ -260,7 +270,7 @@ if(CMAKE_SYSTEM_NAME STREQUAL "Windows")
_CONSOLE DETECT_MSYS_TTY STRIP_EXTENSION=".exe" NO_SYMLINK_HEAD UNRELIABLE_FSTAT
NOGDI OBJECT_CREATION_MODE=1 __USE_MINGW_ANSI_STDIO=0
USE_NED_ALLOCATOR OVERRIDE_STRDUP MMAP_PREVENTS_DELETE USE_WIN32_MMAP
- UNICODE _UNICODE HAVE_WPGMPTR ENSURE_MSYSTEM_IS_SET HAVE_RTLGENRANDOM)
+ HAVE_WPGMPTR ENSURE_MSYSTEM_IS_SET HAVE_RTLGENRANDOM)
list(APPEND compat_SOURCES
compat/mingw.c
compat/winansi.c
@@ -277,7 +287,7 @@ if(CMAKE_SYSTEM_NAME STREQUAL "Windows")
elseif(CMAKE_SYSTEM_NAME STREQUAL "Linux")
add_compile_definitions(PROCFS_EXECUTABLE_PATH="/proc/self/exe" HAVE_DEV_TTY )
- list(APPEND compat_SOURCES unix-socket.c unix-stream-server.c)
+ list(APPEND compat_SOURCES unix-socket.c unix-stream-server.c compat/linux/procinfo.c)
endif()
if(CMAKE_SYSTEM_NAME STREQUAL "Windows")
@@ -297,9 +307,17 @@ if(SUPPORTS_SIMPLE_IPC)
if(CMAKE_SYSTEM_NAME STREQUAL "Windows")
add_compile_definitions(HAVE_FSMONITOR_DAEMON_BACKEND)
list(APPEND compat_SOURCES compat/fsmonitor/fsm-listen-win32.c)
+ list(APPEND compat_SOURCES compat/fsmonitor/fsm-health-win32.c)
+
+ add_compile_definitions(HAVE_FSMONITOR_OS_SETTINGS)
+ list(APPEND compat_SOURCES compat/fsmonitor/fsm-settings-win32.c)
elseif(CMAKE_SYSTEM_NAME STREQUAL "Darwin")
add_compile_definitions(HAVE_FSMONITOR_DAEMON_BACKEND)
list(APPEND compat_SOURCES compat/fsmonitor/fsm-listen-darwin.c)
+ list(APPEND compat_SOURCES compat/fsmonitor/fsm-health-darwin.c)
+
+ add_compile_definitions(HAVE_FSMONITOR_OS_SETTINGS)
+ list(APPEND compat_SOURCES compat/fsmonitor/fsm-settings-darwin.c)
endif()
endif()
@@ -700,6 +718,10 @@ endif()
if(Iconv_FOUND)
target_link_libraries(common-main ${Iconv_LIBRARIES})
endif()
+if(PCRE2_FOUND)
+ target_link_libraries(common-main ${PCRE2_LIBRARIES})
+ target_link_directories(common-main PUBLIC ${PCRE2_LIBRARY_DIRS})
+endif()
if(WIN32)
target_link_libraries(common-main ws2_32 ntdll ${CMAKE_BINARY_DIR}/git.res)
add_dependencies(common-main git-rc)
diff --git a/contrib/coccinelle/equals-null.cocci b/contrib/coccinelle/equals-null.cocci
new file mode 100644
index 0000000..92c7054
--- /dev/null
+++ b/contrib/coccinelle/equals-null.cocci
@@ -0,0 +1,30 @@
+/* SPDX-License-Identifier: LGPL-2.1-or-later */
+@@
+expression e;
+statement s;
+@@
+if (
+(
+!e
+|
+- e == NULL
++ !e
+)
+ )
+ {...}
+else s
+
+@@
+expression e;
+statement s;
+@@
+if (
+(
+e
+|
+- e != NULL
++ e
+)
+ )
+ {...}
+else s
diff --git a/contrib/coccinelle/free.cocci b/contrib/coccinelle/free.cocci
index 4490069..6fb9eb6 100644
--- a/contrib/coccinelle/free.cocci
+++ b/contrib/coccinelle/free.cocci
@@ -2,13 +2,21 @@
expression E;
@@
- if (E)
+(
free(E);
+|
+ free_commit_list(E);
+)
@@
expression E;
@@
- if (!E)
+(
free(E);
+|
+ free_commit_list(E);
+)
@@
expression E;
@@ -16,3 +24,22 @@ expression E;
- free(E);
+ FREE_AND_NULL(E);
- E = NULL;
+
+@@
+expression E;
+@@
+- if (E)
+- {
+ free_commit_list(E);
+ E = NULL;
+- }
+
+@@
+expression E;
+statement S;
+@@
+- if (E) {
++ if (E)
+ S
+ free_commit_list(E);
+- }
diff --git a/contrib/coccinelle/object_id.cocci b/contrib/coccinelle/object_id.cocci
index ddf4f22..01f8d69 100644
--- a/contrib/coccinelle/object_id.cocci
+++ b/contrib/coccinelle/object_id.cocci
@@ -1,18 +1,6 @@
@@
struct object_id OID;
@@
-- is_null_sha1(OID.hash)
-+ is_null_oid(&OID)
-
-@@
-struct object_id *OIDPTR;
-@@
-- is_null_sha1(OIDPTR->hash)
-+ is_null_oid(OIDPTR)
-
-@@
-struct object_id OID;
-@@
- hashclr(OID.hash)
+ oidclr(&OID)
diff --git a/contrib/coccinelle/the_repository.pending.cocci b/contrib/coccinelle/the_repository.pending.cocci
index 2ee702e..072ea0d 100644
--- a/contrib/coccinelle/the_repository.pending.cocci
+++ b/contrib/coccinelle/the_repository.pending.cocci
@@ -14,21 +14,6 @@ expression G;
@@
expression E;
@@
-- has_sha1_file(
-+ repo_has_sha1_file(the_repository,
- E)
-
-@@
-expression E;
-expression F;
-@@
-- has_sha1_file_with_flags(
-+ repo_has_sha1_file_with_flags(the_repository,
- E)
-
-@@
-expression E;
-@@
- has_object_file(
+ repo_has_object_file(the_repository,
E)
diff --git a/contrib/coccinelle/xstrdup_or_null.cocci b/contrib/coccinelle/xstrdup_or_null.cocci
index 8e05d1c..9c1d293 100644
--- a/contrib/coccinelle/xstrdup_or_null.cocci
+++ b/contrib/coccinelle/xstrdup_or_null.cocci
@@ -1,13 +1,5 @@
@@
expression E;
-expression V;
-@@
-- if (E)
-- V = xstrdup(E);
-+ V = xstrdup_or_null(E);
-
-@@
-expression E;
@@
- xstrdup(absolute_path(E))
+ absolute_pathdup(E)
diff --git a/contrib/completion/git-prompt.sh b/contrib/completion/git-prompt.sh
index 87b2b91..1435548 100644
--- a/contrib/completion/git-prompt.sh
+++ b/contrib/completion/git-prompt.sh
@@ -245,7 +245,8 @@ __git_ps1_show_upstream ()
# Helper function that is meant to be called from __git_ps1. It
# injects color codes into the appropriate gitstring variables used
-# to build a gitstring.
+# to build a gitstring. Colored variables are responsible for clearing
+# their own color.
__git_ps1_colorize_gitstring ()
{
if [[ -n ${ZSH_VERSION-} ]]; then
@@ -271,22 +272,23 @@ __git_ps1_colorize_gitstring ()
else
branch_color="$bad_color"
fi
- c="$branch_color$c"
+ if [ -n "$c" ]; then
+ c="$branch_color$c$c_clear"
+ fi
+ b="$branch_color$b$c_clear"
- z="$c_clear$z"
- if [ "$w" = "*" ]; then
- w="$bad_color$w"
+ if [ -n "$w" ]; then
+ w="$bad_color$w$c_clear"
fi
if [ -n "$i" ]; then
- i="$ok_color$i"
+ i="$ok_color$i$c_clear"
fi
if [ -n "$s" ]; then
- s="$flags_color$s"
+ s="$flags_color$s$c_clear"
fi
if [ -n "$u" ]; then
- u="$bad_color$u"
+ u="$bad_color$u$c_clear"
fi
- r="$c_clear$r"
}
# Helper function to read the first line of a file into a variable.
@@ -556,6 +558,12 @@ __git_ps1 ()
local z="${GIT_PS1_STATESEPARATOR-" "}"
+ b=${b##refs/heads/}
+ if [ $pcmode = yes ] && [ $ps1_expanded = yes ]; then
+ __git_ps1_branch_name=$b
+ b="\${__git_ps1_branch_name}"
+ fi
+
# NO color option unless in PROMPT_COMMAND mode or it's Zsh
if [ -n "${GIT_PS1_SHOWCOLORHINTS-}" ]; then
if [ $pcmode = yes ] || [ -n "${ZSH_VERSION-}" ]; then
@@ -563,12 +571,6 @@ __git_ps1 ()
fi
fi
- b=${b##refs/heads/}
- if [ $pcmode = yes ] && [ $ps1_expanded = yes ]; then
- __git_ps1_branch_name=$b
- b="\${__git_ps1_branch_name}"
- fi
-
local f="$h$w$i$s$u$p"
local gitstring="$c$b${f:+$z$f}${sparse}$r${upstream}"
diff --git a/contrib/scalar/scalar.c b/contrib/scalar/scalar.c
index 58ca0e5..2817691 100644
--- a/contrib/scalar/scalar.c
+++ b/contrib/scalar/scalar.c
@@ -11,6 +11,8 @@
#include "dir.h"
#include "packfile.h"
#include "help.h"
+#include "archive.h"
+#include "object-store.h"
/*
* Remove the deepest subdirectory in the provided path string. Path must not
@@ -43,9 +45,11 @@ static void setup_enlistment_directory(int argc, const char **argv,
usage_with_options(usagestr, options);
/* find the worktree, determine its corresponding root */
- if (argc == 1)
+ if (argc == 1) {
strbuf_add_absolute_path(&path, argv[0]);
- else if (strbuf_getcwd(&path) < 0)
+ if (!is_directory(path.buf))
+ die(_("'%s' does not exist"), path.buf);
+ } else if (strbuf_getcwd(&path) < 0)
die(_("need a working directory"));
strbuf_trim_trailing_dir_sep(&path);
@@ -258,6 +262,99 @@ static int unregister_dir(void)
return res;
}
+static int add_directory_to_archiver(struct strvec *archiver_args,
+ const char *path, int recurse)
+{
+ int at_root = !*path;
+ DIR *dir = opendir(at_root ? "." : path);
+ struct dirent *e;
+ struct strbuf buf = STRBUF_INIT;
+ size_t len;
+ int res = 0;
+
+ if (!dir)
+ return error_errno(_("could not open directory '%s'"), path);
+
+ if (!at_root)
+ strbuf_addf(&buf, "%s/", path);
+ len = buf.len;
+ strvec_pushf(archiver_args, "--prefix=%s", buf.buf);
+
+ while (!res && (e = readdir(dir))) {
+ if (!strcmp(".", e->d_name) || !strcmp("..", e->d_name))
+ continue;
+
+ strbuf_setlen(&buf, len);
+ strbuf_addstr(&buf, e->d_name);
+
+ if (e->d_type == DT_REG)
+ strvec_pushf(archiver_args, "--add-file=%s", buf.buf);
+ else if (e->d_type != DT_DIR)
+ warning(_("skipping '%s', which is neither file nor "
+ "directory"), buf.buf);
+ else if (recurse &&
+ add_directory_to_archiver(archiver_args,
+ buf.buf, recurse) < 0)
+ res = -1;
+ }
+
+ closedir(dir);
+ strbuf_release(&buf);
+ return res;
+}
+
+#ifndef WIN32
+#include <sys/statvfs.h>
+#endif
+
+static int get_disk_info(struct strbuf *out)
+{
+#ifdef WIN32
+ struct strbuf buf = STRBUF_INIT;
+ char volume_name[MAX_PATH], fs_name[MAX_PATH];
+ DWORD serial_number, component_length, flags;
+ ULARGE_INTEGER avail2caller, total, avail;
+
+ strbuf_realpath(&buf, ".", 1);
+ if (!GetDiskFreeSpaceExA(buf.buf, &avail2caller, &total, &avail)) {
+ error(_("could not determine free disk size for '%s'"),
+ buf.buf);
+ strbuf_release(&buf);
+ return -1;
+ }
+
+ strbuf_setlen(&buf, offset_1st_component(buf.buf));
+ if (!GetVolumeInformationA(buf.buf, volume_name, sizeof(volume_name),
+ &serial_number, &component_length, &flags,
+ fs_name, sizeof(fs_name))) {
+ error(_("could not get info for '%s'"), buf.buf);
+ strbuf_release(&buf);
+ return -1;
+ }
+ strbuf_addf(out, "Available space on '%s': ", buf.buf);
+ strbuf_humanise_bytes(out, avail2caller.QuadPart);
+ strbuf_addch(out, '\n');
+ strbuf_release(&buf);
+#else
+ struct strbuf buf = STRBUF_INIT;
+ struct statvfs stat;
+
+ strbuf_realpath(&buf, ".", 1);
+ if (statvfs(buf.buf, &stat) < 0) {
+ error_errno(_("could not determine free disk size for '%s'"),
+ buf.buf);
+ strbuf_release(&buf);
+ return -1;
+ }
+
+ strbuf_addf(out, "Available space on '%s': ", buf.buf);
+ strbuf_humanise_bytes(out, st_mult(stat.f_bsize, stat.f_bavail));
+ strbuf_addf(out, " (mount flags 0x%lx)\n", stat.f_flag);
+ strbuf_release(&buf);
+#endif
+ return 0;
+}
+
/* printf-style interface, expects `<key>=<value>` argument */
static int set_config(const char *fmt, ...)
{
@@ -498,6 +595,196 @@ cleanup:
return res;
}
+static void dir_file_stats_objects(const char *full_path, size_t full_path_len,
+ const char *file_name, void *data)
+{
+ struct strbuf *buf = data;
+ struct stat st;
+
+ if (!stat(full_path, &st))
+ strbuf_addf(buf, "%-70s %16" PRIuMAX "\n", file_name,
+ (uintmax_t)st.st_size);
+}
+
+static int dir_file_stats(struct object_directory *object_dir, void *data)
+{
+ struct strbuf *buf = data;
+
+ strbuf_addf(buf, "Contents of %s:\n", object_dir->path);
+
+ for_each_file_in_pack_dir(object_dir->path, dir_file_stats_objects,
+ data);
+
+ return 0;
+}
+
+static int count_files(char *path)
+{
+ DIR *dir = opendir(path);
+ struct dirent *e;
+ int count = 0;
+
+ if (!dir)
+ return 0;
+
+ while ((e = readdir(dir)) != NULL)
+ if (!is_dot_or_dotdot(e->d_name) && e->d_type == DT_REG)
+ count++;
+
+ closedir(dir);
+ return count;
+}
+
+static void loose_objs_stats(struct strbuf *buf, const char *path)
+{
+ DIR *dir = opendir(path);
+ struct dirent *e;
+ int count;
+ int total = 0;
+ unsigned char c;
+ struct strbuf count_path = STRBUF_INIT;
+ size_t base_path_len;
+
+ if (!dir)
+ return;
+
+ strbuf_addstr(buf, "Object directory stats for ");
+ strbuf_add_absolute_path(buf, path);
+ strbuf_addstr(buf, ":\n");
+
+ strbuf_add_absolute_path(&count_path, path);
+ strbuf_addch(&count_path, '/');
+ base_path_len = count_path.len;
+
+ while ((e = readdir(dir)) != NULL)
+ if (!is_dot_or_dotdot(e->d_name) &&
+ e->d_type == DT_DIR && strlen(e->d_name) == 2 &&
+ !hex_to_bytes(&c, e->d_name, 1)) {
+ strbuf_setlen(&count_path, base_path_len);
+ strbuf_addstr(&count_path, e->d_name);
+ total += (count = count_files(count_path.buf));
+ strbuf_addf(buf, "%s : %7d files\n", e->d_name, count);
+ }
+
+ strbuf_addf(buf, "Total: %d loose objects", total);
+
+ strbuf_release(&count_path);
+ closedir(dir);
+}
+
+static int cmd_diagnose(int argc, const char **argv)
+{
+ struct option options[] = {
+ OPT_END(),
+ };
+ const char * const usage[] = {
+ N_("scalar diagnose [<enlistment>]"),
+ NULL
+ };
+ struct strbuf zip_path = STRBUF_INIT;
+ struct strvec archiver_args = STRVEC_INIT;
+ char **argv_copy = NULL;
+ int stdout_fd = -1, archiver_fd = -1;
+ time_t now = time(NULL);
+ struct tm tm;
+ struct strbuf path = STRBUF_INIT, buf = STRBUF_INIT;
+ int res = 0;
+
+ argc = parse_options(argc, argv, NULL, options,
+ usage, 0);
+
+ setup_enlistment_directory(argc, argv, usage, options, &zip_path);
+
+ strbuf_addstr(&zip_path, "/.scalarDiagnostics/scalar_");
+ strbuf_addftime(&zip_path,
+ "%Y%m%d_%H%M%S", localtime_r(&now, &tm), 0, 0);
+ strbuf_addstr(&zip_path, ".zip");
+ switch (safe_create_leading_directories(zip_path.buf)) {
+ case SCLD_EXISTS:
+ case SCLD_OK:
+ break;
+ default:
+ error_errno(_("could not create directory for '%s'"),
+ zip_path.buf);
+ goto diagnose_cleanup;
+ }
+ stdout_fd = dup(1);
+ if (stdout_fd < 0) {
+ res = error_errno(_("could not duplicate stdout"));
+ goto diagnose_cleanup;
+ }
+
+ archiver_fd = xopen(zip_path.buf, O_CREAT | O_WRONLY | O_TRUNC, 0666);
+ if (archiver_fd < 0 || dup2(archiver_fd, 1) < 0) {
+ res = error_errno(_("could not redirect output"));
+ goto diagnose_cleanup;
+ }
+
+ init_zip_archiver();
+ strvec_pushl(&archiver_args, "scalar-diagnose", "--format=zip", NULL);
+
+ strbuf_reset(&buf);
+ strbuf_addstr(&buf, "Collecting diagnostic info\n\n");
+ get_version_info(&buf, 1);
+
+ strbuf_addf(&buf, "Enlistment root: %s\n", the_repository->worktree);
+ get_disk_info(&buf);
+ write_or_die(stdout_fd, buf.buf, buf.len);
+ strvec_pushf(&archiver_args,
+ "--add-virtual-file=diagnostics.log:%.*s",
+ (int)buf.len, buf.buf);
+
+ strbuf_reset(&buf);
+ strbuf_addstr(&buf, "--add-virtual-file=packs-local.txt:");
+ dir_file_stats(the_repository->objects->odb, &buf);
+ foreach_alt_odb(dir_file_stats, &buf);
+ strvec_push(&archiver_args, buf.buf);
+
+ strbuf_reset(&buf);
+ strbuf_addstr(&buf, "--add-virtual-file=objects-local.txt:");
+ loose_objs_stats(&buf, ".git/objects");
+ strvec_push(&archiver_args, buf.buf);
+
+ if ((res = add_directory_to_archiver(&archiver_args, ".git", 0)) ||
+ (res = add_directory_to_archiver(&archiver_args, ".git/hooks", 0)) ||
+ (res = add_directory_to_archiver(&archiver_args, ".git/info", 0)) ||
+ (res = add_directory_to_archiver(&archiver_args, ".git/logs", 1)) ||
+ (res = add_directory_to_archiver(&archiver_args, ".git/objects/info", 0)))
+ goto diagnose_cleanup;
+
+ strvec_pushl(&archiver_args, "--prefix=",
+ oid_to_hex(the_hash_algo->empty_tree), "--", NULL);
+
+ /* `write_archive()` modifies the `argv` passed to it. Let it. */
+ argv_copy = xmemdupz(archiver_args.v,
+ sizeof(char *) * archiver_args.nr);
+ res = write_archive(archiver_args.nr, (const char **)argv_copy, NULL,
+ the_repository, NULL, 0);
+ if (res) {
+ error(_("failed to write archive"));
+ goto diagnose_cleanup;
+ }
+
+ if (!res)
+ fprintf(stderr, "\n"
+ "Diagnostics complete.\n"
+ "All of the gathered info is captured in '%s'\n",
+ zip_path.buf);
+
+diagnose_cleanup:
+ if (archiver_fd >= 0) {
+ close(1);
+ dup2(stdout_fd, 1);
+ }
+ free(argv_copy);
+ strvec_clear(&archiver_args);
+ strbuf_release(&zip_path);
+ strbuf_release(&path);
+ strbuf_release(&buf);
+
+ return res;
+}
+
static int cmd_list(int argc, const char **argv)
{
if (argc != 1)
@@ -799,6 +1086,7 @@ static struct {
{ "reconfigure", cmd_reconfigure },
{ "delete", cmd_delete },
{ "version", cmd_version },
+ { "diagnose", cmd_diagnose },
{ NULL, NULL},
};
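
For reference, get_disk_info() above boils down to GetDiskFreeSpaceExA() on Windows and, on POSIX systems, `f_bsize * f_bavail` from statvfs(). The POSIX half, reduced to a standalone sketch:

#include <stdio.h>
#include <sys/statvfs.h>

int main(void)
{
	struct statvfs st;

	if (statvfs(".", &st) < 0) {
		perror("statvfs");
		return 1;
	}
	/* block size times available blocks, as in get_disk_info() */
	printf("available: %llu bytes (mount flags 0x%lx)\n",
	       (unsigned long long)st.f_bsize * st.f_bavail,
	       (unsigned long)st.f_flag);
	return 0;
}
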
diff --git a/contrib/scalar/scalar.txt b/contrib/scalar/scalar.txt
index cf4e5b8..c0425e0 100644
--- a/contrib/scalar/scalar.txt
+++ b/contrib/scalar/scalar.txt
@@ -14,6 +14,7 @@ scalar register [<enlistment>]
scalar unregister [<enlistment>]
scalar run ( all | config | commit-graph | fetch | loose-objects | pack-files ) [<enlistment>]
scalar reconfigure [ --all | <enlistment> ]
+scalar diagnose [<enlistment>]
scalar delete <enlistment>
DESCRIPTION
@@ -139,6 +140,17 @@ reconfigure the enlistment.
With the `--all` option, all enlistments currently registered with Scalar
will be reconfigured. Use this option after each Scalar upgrade.
+Diagnose
+~~~~~~~~
+
+diagnose [<enlistment>]::
+ When reporting issues with Scalar, it is often helpful to provide the
+ information gathered by this command, including logs and certain
+ statistics describing the data shape of the current enlistment.
++
+The output of this command is a `.zip` file that is written into
+a directory adjacent to the worktree in the `src` directory.
+
Delete
~~~~~~
diff --git a/contrib/scalar/t/t9099-scalar.sh b/contrib/scalar/t/t9099-scalar.sh
index 8978156..10b1172 100755
--- a/contrib/scalar/t/t9099-scalar.sh
+++ b/contrib/scalar/t/t9099-scalar.sh
@@ -93,4 +93,31 @@ test_expect_success 'scalar supports -c/-C' '
test true = "$(git -C sub config core.preloadIndex)"
'
+test_expect_success '`scalar [...] <dir>` errors out when dir is missing' '
+ ! scalar run config cloned 2>err &&
+ grep "cloned. does not exist" err
+'
+
+SQ="'"
+test_expect_success UNZIP 'scalar diagnose' '
+ scalar clone "file://$(pwd)" cloned --single-branch &&
+ git repack &&
+ echo "$(pwd)/.git/objects/" >>cloned/src/.git/objects/info/alternates &&
+ test_commit -C cloned/src loose &&
+ scalar diagnose cloned >out 2>err &&
+ grep "Available space" out &&
+ sed -n "s/.*$SQ\\(.*\\.zip\\)$SQ.*/\\1/p" <err >zip_path &&
+ zip_path=$(cat zip_path) &&
+ test -n "$zip_path" &&
+ unzip -v "$zip_path" &&
+ folder=${zip_path%.zip} &&
+ test_path_is_missing "$folder" &&
+ unzip -p "$zip_path" diagnostics.log >out &&
+ test_file_not_empty out &&
+ unzip -p "$zip_path" packs-local.txt >out &&
+ grep "$(pwd)/.git/objects" out &&
+ unzip -p "$zip_path" objects-local.txt >out &&
+ grep "^Total: [1-9]" out
+'
+
test_done
diff --git a/contrib/vscode/README.md b/contrib/vscode/README.md
index 8202d62..f383c95 100644
--- a/contrib/vscode/README.md
+++ b/contrib/vscode/README.md
@@ -6,7 +6,11 @@ code editor which runs on your desktop and is available for
[Windows](https://code.visualstudio.com/docs/setup/windows),
[macOS](https://code.visualstudio.com/docs/setup/mac) and
[Linux](https://code.visualstudio.com/docs/setup/linux). Among other languages,
-it has [support for C/C++ via an extension](https://github.com/Microsoft/vscode-cpptools).
+it has [support for C/C++ via an extension](https://github.com/Microsoft/vscode-cpptools) with
+[debugging support](https://code.visualstudio.com/docs/editor/debugging).
+
+To learn how to personalize your settings, read:
+[How to set up your settings](https://code.visualstudio.com/docs/getstarted/settings)
To start developing Git with VS Code, simply run the Unix shell script called
`init.sh` in this directory, which creates the configuration files in
diff --git a/contrib/vscode/init.sh b/contrib/vscode/init.sh
index 27de949..f139fd8 100755
--- a/contrib/vscode/init.sh
+++ b/contrib/vscode/init.sh
@@ -271,7 +271,6 @@ cat >.vscode/launch.json.new <<EOF ||
"stopAtEntry": false,
"cwd": "\${workspaceFolder}",
"environment": [],
- "externalConsole": true,
"MIMode": "gdb",
"miDebuggerPath": "$GDBPATH",
"setupCommands": [
diff --git a/convert.c b/convert.c
index 8e39731..4d15372 100644
--- a/convert.c
+++ b/convert.c
@@ -195,9 +195,9 @@ static void check_global_conv_flags_eol(const char *path,
if (conv_flags & CONV_EOL_RNDTRP_DIE)
die(_("CRLF would be replaced by LF in %s"), path);
else if (conv_flags & CONV_EOL_RNDTRP_WARN)
- warning(_("CRLF will be replaced by LF in %s.\n"
- "The file will have its original line"
- " endings in your working directory"), path);
+ warning(_("in the working copy of '%s', CRLF will be"
+ " replaced by LF the next time Git touches"
+ " it"), path);
} else if (old_stats->lonelf && !new_stats->lonelf ) {
/*
* CRLFs would be added by checkout
@@ -205,9 +205,9 @@ static void check_global_conv_flags_eol(const char *path,
if (conv_flags & CONV_EOL_RNDTRP_DIE)
die(_("LF would be replaced by CRLF in %s"), path);
else if (conv_flags & CONV_EOL_RNDTRP_WARN)
- warning(_("LF will be replaced by CRLF in %s.\n"
- "The file will have its original line"
- " endings in your working directory"), path);
+ warning(_("in the working copy of '%s', LF will be"
+ " replaced by CRLF the next time Git touches"
+ " it"), path);
}
}
diff --git a/daemon.c b/daemon.c
index 94a5b8a..58f1077 100644
--- a/daemon.c
+++ b/daemon.c
@@ -447,7 +447,7 @@ static void copy_to_log(int fd)
FILE *fp;
fp = fdopen(fd, "r");
- if (fp == NULL) {
+ if (!fp) {
logerror("fdopen of error channel failed");
close(fd);
return;
@@ -484,7 +484,7 @@ static int upload_pack(const struct strvec *env)
strvec_pushl(&cld.args, "upload-pack", "--strict", NULL);
strvec_pushf(&cld.args, "--timeout=%u", timeout);
- strvec_pushv(&cld.env_array, env->v);
+ strvec_pushv(&cld.env, env->v);
return run_service_command(&cld);
}
@@ -494,7 +494,7 @@ static int upload_archive(const struct strvec *env)
struct child_process cld = CHILD_PROCESS_INIT;
strvec_push(&cld.args, "upload-archive");
- strvec_pushv(&cld.env_array, env->v);
+ strvec_pushv(&cld.env, env->v);
return run_service_command(&cld);
}
@@ -504,7 +504,7 @@ static int receive_pack(const struct strvec *env)
struct child_process cld = CHILD_PROCESS_INIT;
strvec_push(&cld.args, "receive-pack");
- strvec_pushv(&cld.env_array, env->v);
+ strvec_pushv(&cld.env, env->v);
return run_service_command(&cld);
}
@@ -904,16 +904,16 @@ static void handle(int incoming, struct sockaddr *addr, socklen_t addrlen)
char buf[128] = "";
struct sockaddr_in *sin_addr = (void *) addr;
inet_ntop(addr->sa_family, &sin_addr->sin_addr, buf, sizeof(buf));
- strvec_pushf(&cld.env_array, "REMOTE_ADDR=%s", buf);
- strvec_pushf(&cld.env_array, "REMOTE_PORT=%d",
+ strvec_pushf(&cld.env, "REMOTE_ADDR=%s", buf);
+ strvec_pushf(&cld.env, "REMOTE_PORT=%d",
ntohs(sin_addr->sin_port));
#ifndef NO_IPV6
} else if (addr->sa_family == AF_INET6) {
char buf[128] = "";
struct sockaddr_in6 *sin6_addr = (void *) addr;
inet_ntop(AF_INET6, &sin6_addr->sin6_addr, buf, sizeof(buf));
- strvec_pushf(&cld.env_array, "REMOTE_ADDR=[%s]", buf);
- strvec_pushf(&cld.env_array, "REMOTE_PORT=%d",
+ strvec_pushf(&cld.env, "REMOTE_ADDR=[%s]", buf);
+ strvec_pushf(&cld.env, "REMOTE_PORT=%d",
ntohs(sin6_addr->sin6_port));
#endif
}
diff --git a/detect-compiler b/detect-compiler
index 11d60da..50087f5 100755
--- a/detect-compiler
+++ b/detect-compiler
@@ -9,7 +9,7 @@ CC="$*"
#
# FreeBSD clang version 3.4.1 (tags/RELEASE...)
get_version_line() {
- $CC -v 2>&1 | grep ' version '
+ LANG=C LC_ALL=C $CC -v 2>&1 | grep ' version '
}
get_family() {
diff --git a/diff-lib.c b/diff-lib.c
index ca085a0..7eb66a4 100644
--- a/diff-lib.c
+++ b/diff-lib.c
@@ -641,7 +641,7 @@ int do_diff_cache(const struct object_id *tree_oid, struct diff_options *opt)
if (diff_cache(&revs, tree_oid, NULL, 1))
exit(128);
- clear_pathspec(&revs.prune_data);
+ release_revisions(&revs);
return 0;
}
@@ -651,6 +651,7 @@ int index_differs_from(struct repository *r,
{
struct rev_info rev;
struct setup_revision_opt opt;
+ unsigned has_changes;
repo_init_revisions(r, &rev, NULL);
memset(&opt, 0, sizeof(opt));
@@ -662,8 +663,9 @@ int index_differs_from(struct repository *r,
diff_flags_or(&rev.diffopt.flags, flags);
rev.diffopt.ita_invisible_in_index = ita_invisible_in_index;
run_diff_index(&rev, 1);
- object_array_clear(&rev.pending);
- return (rev.diffopt.flags.has_changes != 0);
+ has_changes = rev.diffopt.flags.has_changes;
+ release_revisions(&rev);
+ return (has_changes != 0);
}
static struct strbuf *idiff_prefix_cb(struct diff_options *opt, void *data)
diff --git a/diff.c b/diff.c
index ef71599..e71cf75 100644
--- a/diff.c
+++ b/diff.c
@@ -4136,18 +4136,13 @@ static void prep_temp_blob(struct index_state *istate,
int mode)
{
struct strbuf buf = STRBUF_INIT;
- struct strbuf tempfile = STRBUF_INIT;
char *path_dup = xstrdup(path);
const char *base = basename(path_dup);
struct checkout_metadata meta;
init_checkout_metadata(&meta, NULL, NULL, oid);
- /* Generate "XXXXXX_basename.ext" */
- strbuf_addstr(&tempfile, "XXXXXX_");
- strbuf_addstr(&tempfile, base);
-
- temp->tempfile = mks_tempfile_ts(tempfile.buf, strlen(base) + 1);
+ temp->tempfile = mks_tempfile_dt("git-blob-XXXXXX", base);
if (!temp->tempfile)
die_errno("unable to create temp-file");
if (convert_to_working_tree(istate, path,
@@ -4162,7 +4157,6 @@ static void prep_temp_blob(struct index_state *istate,
oid_to_hex_r(temp->hex, oid);
xsnprintf(temp->mode, sizeof(temp->mode), "%06o", mode);
strbuf_release(&buf);
- strbuf_release(&tempfile);
free(path_dup);
}
diff --git a/dir.c b/dir.c
index f2b0f24..6ca2ef5 100644
--- a/dir.c
+++ b/dir.c
@@ -2747,13 +2747,33 @@ static void set_untracked_ident(struct untracked_cache *uc)
strbuf_addch(&uc->ident, 0);
}
-static void new_untracked_cache(struct index_state *istate)
+static unsigned new_untracked_cache_flags(struct index_state *istate)
+{
+ struct repository *repo = istate->repo;
+ char *val;
+
+ /*
+ * This logic is coordinated with the setting of these flags in
+ * wt-status.c#wt_status_collect_untracked(), and the evaluation
+ * of the config setting in commit.c#git_status_config()
+ */
+ if (!repo_config_get_string(repo, "status.showuntrackedfiles", &val) &&
+ !strcmp(val, "all"))
+ return 0;
+
+ /*
+ * The default, if "all" is not set, is "normal" - leading us here.
+ * If the value is "none" then it really doesn't matter.
+ */
+ return DIR_SHOW_OTHER_DIRECTORIES | DIR_HIDE_EMPTY_DIRECTORIES;
+}
+
+static void new_untracked_cache(struct index_state *istate, int flags)
{
struct untracked_cache *uc = xcalloc(1, sizeof(*uc));
strbuf_init(&uc->ident, 100);
uc->exclude_per_dir = ".gitignore";
- /* should be the same flags used by git-status */
- uc->dir_flags = DIR_SHOW_OTHER_DIRECTORIES | DIR_HIDE_EMPTY_DIRECTORIES;
+ uc->dir_flags = flags >= 0 ? flags : new_untracked_cache_flags(istate);
set_untracked_ident(uc);
istate->untracked = uc;
istate->cache_changed |= UNTRACKED_CHANGED;
@@ -2762,11 +2782,11 @@ static void new_untracked_cache(struct index_state *istate)
void add_untracked_cache(struct index_state *istate)
{
if (!istate->untracked) {
- new_untracked_cache(istate);
+ new_untracked_cache(istate, -1);
} else {
if (!ident_in_untracked(istate->untracked)) {
free_untracked_cache(istate->untracked);
- new_untracked_cache(istate);
+ new_untracked_cache(istate, -1);
}
}
}
@@ -2814,17 +2834,9 @@ static struct untracked_cache_dir *validate_untracked_cache(struct dir_struct *d
if (base_len || (pathspec && pathspec->nr))
return NULL;
- /* Different set of flags may produce different results */
- if (dir->flags != dir->untracked->dir_flags ||
- /*
- * See treat_directory(), case index_nonexistent. Without
- * this flag, we may need to also cache .git file content
- * for the resolve_gitlink_ref() call, which we don't.
- */
- !(dir->flags & DIR_SHOW_OTHER_DIRECTORIES) ||
- /* We don't support collecting ignore files */
- (dir->flags & (DIR_SHOW_IGNORED | DIR_SHOW_IGNORED_TOO |
- DIR_COLLECT_IGNORED)))
+ /* We don't support collecting ignore files */
+ if (dir->flags & (DIR_SHOW_IGNORED | DIR_SHOW_IGNORED_TOO |
+ DIR_COLLECT_IGNORED))
return NULL;
/*
@@ -2847,6 +2859,50 @@ static struct untracked_cache_dir *validate_untracked_cache(struct dir_struct *d
return NULL;
}
+ /*
+ * If the untracked structure we received does not have the same flags
+ * as requested in this run, we're going to need to either discard the
+ * existing structure (and potentially later recreate), or bypass the
+ * untracked cache mechanism for this run.
+ */
+ if (dir->flags != dir->untracked->dir_flags) {
+ /*
+ * If the untracked structure we received does not have the same flags
+ * as configured, then we need to reset / create a new "untracked"
+ * structure to match the new config.
+ *
+ * Keeping the saved and used untracked cache consistent with the
+ * configuration provides an opportunity for frequent users of
+ * "git status -uall" to leverage the untracked cache by aligning their
+ * configuration - setting "status.showuntrackedfiles" to "all" or
+ * "normal" as appropriate.
+ *
+ * Previously using -uall (or setting "status.showuntrackedfiles" to
+ * "all") was incompatible with untracked cache and *consistently*
+ * caused surprisingly bad performance (with fscache and fsmonitor
+ * enabled) on Windows.
+ *
+ * IMPROVEMENT OPPORTUNITY: If we reworked the untracked cache storage
+ * to not be as bound up with the desired output in a given run,
+ * and instead iterated through and stored enough information to
+ * correctly serve both "modes", then users could get peak performance
+ * with or without '-uall' regardless of their
+ * "status.showuntrackedfiles" config.
+ */
+ if (dir->untracked->dir_flags != new_untracked_cache_flags(istate)) {
+ free_untracked_cache(istate->untracked);
+ new_untracked_cache(istate, dir->flags);
+ dir->untracked = istate->untracked;
+ }
+ else {
+ /*
+ * Current untracked cache data is consistent with config, but not
+ * usable in this request/run; just bypass untracked cache.
+ */
+ return NULL;
+ }
+ }
+
if (!dir->untracked->root) {
/* Untracked cache existed but is not initialized; fix that */
FLEX_ALLOC_STR(dir->untracked->root, name, "");
@@ -3054,7 +3110,7 @@ char *git_url_basename(const char *repo, int is_bundle, int is_bare)
* Skip scheme.
*/
start = strstr(repo, "://");
- if (start == NULL)
+ if (!start)
start = repo;
else
start += 3;
@@ -3082,6 +3138,15 @@ char *git_url_basename(const char *repo, int is_bundle, int is_bare)
}
/*
+ * It should not be possible to overflow `ptrdiff_t` by passing in an
+ * insanely long URL, but GCC does not know that and will complain
+ * without this check.
+ */
+ if (end - start < 0)
+ die(_("No directory name could be guessed.\n"
+ "Please specify a directory on the command line"));
+
+ /*
* Strip trailing port number if we've got only a
* hostname (that is, there is no dir separator but a
* colon). This check is required such that we do not
@@ -3890,3 +3955,32 @@ void relocate_gitdir(const char *path, const char *old_git_dir, const char *new_
connect_work_tree_and_git_dir(path, new_git_dir, 0);
}
+
+int path_match_flags(const char *const str, const enum path_match_flags flags)
+{
+ const char *p = str;
+
+ if (flags & PATH_MATCH_NATIVE &&
+ flags & PATH_MATCH_XPLATFORM)
+ BUG("path_match_flags() must get one match kind, not multiple!");
+ else if (!(flags & PATH_MATCH_KINDS_MASK))
+ BUG("path_match_flags() must get at least one match kind!");
+
+ if (flags & PATH_MATCH_STARTS_WITH_DOT_SLASH &&
+ flags & PATH_MATCH_STARTS_WITH_DOT_DOT_SLASH)
+ BUG("path_match_flags() must get one platform kind, not multiple!");
+ else if (!(flags & PATH_MATCH_PLATFORM_MASK))
+ BUG("path_match_flags() must get at least one platform kind!");
+
+ if (*p++ != '.')
+ return 0;
+ if (flags & PATH_MATCH_STARTS_WITH_DOT_DOT_SLASH &&
+ *p++ != '.')
+ return 0;
+
+ if (flags & PATH_MATCH_NATIVE)
+ return is_dir_sep(*p);
+ else if (flags & PATH_MATCH_XPLATFORM)
+ return is_xplatform_dir_sep(*p);
+ BUG("unreachable");
+}
diff --git a/dir.h b/dir.h
index 8e02dfb..7bc8620 100644
--- a/dir.h
+++ b/dir.h
@@ -578,4 +578,67 @@ void connect_work_tree_and_git_dir(const char *work_tree,
void relocate_gitdir(const char *path,
const char *old_git_dir,
const char *new_git_dir);
+
+/**
+ * The "enum path_matches_kind" determines how path_match_flags() will
+ * behave. The flags come in sets, and one (and only one) must be
+ * provided out of each "set":
+ *
+ * PATH_MATCH_NATIVE:
+ * Path separator is is_dir_sep()
+ * PATH_MATCH_XPLATFORM:
+ * Path separator is is_xplatform_dir_sep()
+ *
+ * Do we use is_dir_sep() to check for a directory separator
+ * (*_NATIVE), or do we always check for '/' or '\' (*_XPLATFORM). The
+ * "*_NATIVE" version on Windows is the same as "*_XPLATFORM",
+ * everywhere else "*_NATIVE" means "only /".
+ *
+ * PATH_MATCH_STARTS_WITH_DOT_SLASH:
+ * Match a path starting with "./"
+ * PATH_MATCH_STARTS_WITH_DOT_DOT_SLASH:
+ * Match a path starting with "../"
+ *
+ * The "/" in the above is adjusted based on the "*_NATIVE" and
+ * "*_XPLATFORM" flags.
+ */
+enum path_match_flags {
+ PATH_MATCH_NATIVE = 1 << 0,
+ PATH_MATCH_XPLATFORM = 1 << 1,
+ PATH_MATCH_STARTS_WITH_DOT_SLASH = 1 << 2,
+ PATH_MATCH_STARTS_WITH_DOT_DOT_SLASH = 1 << 3,
+};
+#define PATH_MATCH_KINDS_MASK (PATH_MATCH_STARTS_WITH_DOT_SLASH | \
+ PATH_MATCH_STARTS_WITH_DOT_DOT_SLASH)
+#define PATH_MATCH_PLATFORM_MASK (PATH_MATCH_NATIVE | PATH_MATCH_XPLATFORM)
+
+/**
+ * path_match_flags() checks if a given "path" matches a given "enum
+ * path_match_flags" criteria.
+ */
+int path_match_flags(const char *const path, const enum path_match_flags f);
+
+/**
+ * starts_with_dot_slash_native(): convenience wrapper for
+ * path_match_flags() with PATH_MATCH_STARTS_WITH_DOT_SLASH and
+ * PATH_MATCH_NATIVE.
+ */
+static inline int starts_with_dot_slash_native(const char *const path)
+{
+ const enum path_match_flags what = PATH_MATCH_STARTS_WITH_DOT_SLASH;
+
+ return path_match_flags(path, what | PATH_MATCH_NATIVE);
+}
+
+/**
+ * starts_with_dot_dot_slash_native(): convenience wrapper for
+ * path_match_flags() with PATH_MATCH_STARTS_WITH_DOT_DOT_SLASH and
+ * PATH_MATCH_NATIVE.
+ */
+static inline int starts_with_dot_dot_slash_native(const char *const path)
+{
+ const enum path_match_flags what = PATH_MATCH_STARTS_WITH_DOT_DOT_SLASH;
+
+ return path_match_flags(path, what | PATH_MATCH_NATIVE);
+}
#endif
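
The dir.h helpers above all answer one question: does the path begin with "./" (or "../"), where what counts as a separator depends on the platform flags? A simplified standalone rendering of that check; the real code routes through path_match_flags() and is_dir_sep():

#include <stdio.h>

static int is_sep(char c)
{
#ifdef _WIN32
	return c == '/' || c == '\\';	/* roughly the "xplatform" set */
#else
	return c == '/';		/* "native" on non-Windows */
#endif
}

static int starts_with_dot_slash(const char *p)
{
	return p[0] == '.' && is_sep(p[1]);
}

static int starts_with_dot_dot_slash(const char *p)
{
	return p[0] == '.' && p[1] == '.' && is_sep(p[2]);
}

int main(void)
{
	printf("%d %d %d\n",
	       starts_with_dot_slash("./x"),		/* 1 */
	       starts_with_dot_dot_slash("../x"),	/* 1 */
	       starts_with_dot_slash("x/y"));		/* 0 */
	return 0;
}
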
diff --git a/editor.c b/editor.c
index 8b96482..008c04f 100644
--- a/editor.c
+++ b/editor.c
@@ -80,7 +80,7 @@ static int launch_specified_editor(const char *editor, const char *path,
strvec_pushl(&p.args, editor, realpath.buf, NULL);
if (env)
- strvec_pushv(&p.env_array, (const char **)env);
+ strvec_pushv(&p.env, (const char **)env);
p.use_shell = 1;
p.trace2_child_class = "editor";
if (start_command(&p) < 0) {
diff --git a/environment.c b/environment.c
index 5bff1b3..b3296ce 100644
--- a/environment.c
+++ b/environment.c
@@ -273,7 +273,7 @@ const char *get_git_work_tree(void)
return the_repository->worktree;
}
-char *get_object_directory(void)
+const char *get_object_directory(void)
{
if (!the_repository->objects->odb)
BUG("git environment hasn't been setup");
diff --git a/ewah/bitmap.c b/ewah/bitmap.c
index 38a47c4..ac61864 100644
--- a/ewah/bitmap.c
+++ b/ewah/bitmap.c
@@ -216,14 +216,9 @@ int bitmap_is_subset(struct bitmap *self, struct bitmap *other)
return 0;
}
-void bitmap_reset(struct bitmap *bitmap)
-{
- memset(bitmap->words, 0x0, bitmap->word_alloc * sizeof(eword_t));
-}
-
void bitmap_free(struct bitmap *bitmap)
{
- if (bitmap == NULL)
+ if (!bitmap)
return;
free(bitmap->words);
diff --git a/ewah/ewah_bitmap.c b/ewah/ewah_bitmap.c
index 2a8c7c5..6fe48d3 100644
--- a/ewah/ewah_bitmap.c
+++ b/ewah/ewah_bitmap.c
@@ -451,7 +451,7 @@ struct ewah_bitmap *ewah_pool_new(void)
void ewah_pool_free(struct ewah_bitmap *self)
{
- if (self == NULL)
+ if (!self)
return;
if (bitmap_pool_size == BITMAP_POOL_MAX ||
diff --git a/ewah/ewok.h b/ewah/ewok.h
index 6692096..7eb8b9b 100644
--- a/ewah/ewok.h
+++ b/ewah/ewok.h
@@ -177,7 +177,6 @@ struct bitmap *bitmap_dup(const struct bitmap *src);
void bitmap_set(struct bitmap *self, size_t pos);
void bitmap_unset(struct bitmap *self, size_t pos);
int bitmap_get(struct bitmap *self, size_t pos);
-void bitmap_reset(struct bitmap *self);
void bitmap_free(struct bitmap *self);
int bitmap_equals(struct bitmap *self, struct bitmap *other);
int bitmap_is_subset(struct bitmap *self, struct bitmap *other);
diff --git a/fetch-pack.c b/fetch-pack.c
index 4e1e88e..cb6647d 100644
--- a/fetch-pack.c
+++ b/fetch-pack.c
@@ -115,11 +115,12 @@ static void for_each_cached_alternate(struct fetch_negotiator *negotiator,
cb(negotiator, cache.items[i]);
}
-static struct commit *deref_without_lazy_fetch(const struct object_id *oid,
- int mark_tags_complete)
+static struct commit *deref_without_lazy_fetch_extended(const struct object_id *oid,
+ int mark_tags_complete,
+ enum object_type *type,
+ unsigned int oi_flags)
{
- enum object_type type;
- struct object_info info = { .typep = &type };
+ struct object_info info = { .typep = type };
struct commit *commit;
commit = lookup_commit_in_graph(the_repository, oid);
@@ -128,9 +129,9 @@ static struct commit *deref_without_lazy_fetch(const struct object_id *oid,
while (1) {
if (oid_object_info_extended(the_repository, oid, &info,
- OBJECT_INFO_SKIP_FETCH_OBJECT | OBJECT_INFO_QUICK))
+ oi_flags))
return NULL;
- if (type == OBJ_TAG) {
+ if (*type == OBJ_TAG) {
struct tag *tag = (struct tag *)
parse_object(the_repository, oid);
@@ -144,7 +145,7 @@ static struct commit *deref_without_lazy_fetch(const struct object_id *oid,
}
}
- if (type == OBJ_COMMIT) {
+ if (*type == OBJ_COMMIT) {
struct commit *commit = lookup_commit(the_repository, oid);
if (!commit || repo_parse_commit(the_repository, commit))
return NULL;
@@ -154,6 +155,16 @@ static struct commit *deref_without_lazy_fetch(const struct object_id *oid,
return NULL;
}
+
+static struct commit *deref_without_lazy_fetch(const struct object_id *oid,
+ int mark_tags_complete)
+{
+ enum object_type type;
+ unsigned flags = OBJECT_INFO_SKIP_FETCH_OBJECT | OBJECT_INFO_QUICK;
+ return deref_without_lazy_fetch_extended(oid, mark_tags_complete,
+ &type, flags);
+}
+
static int rev_list_insert_ref(struct fetch_negotiator *negotiator,
const struct object_id *oid)
{
@@ -836,6 +847,16 @@ static void parse_gitmodules_oids(int fd, struct oidset *gitmodules_oids)
} while (1);
}
+static void add_index_pack_keep_option(struct strvec *args)
+{
+ char hostname[HOST_NAME_MAX + 1];
+
+ if (xgethostname(hostname, sizeof(hostname)))
+ xsnprintf(hostname, sizeof(hostname), "localhost");
+ strvec_pushf(args, "--keep=fetch-pack %"PRIuMAX " on %s",
+ (uintmax_t)getpid(), hostname);
+}
+
/*
* If packfile URIs were provided, pass a non-NULL pointer to index_pack_args.
* The strings to pass as the --index-pack-arg arguments to http-fetch will be
@@ -905,14 +926,8 @@ static int get_pack(struct fetch_pack_args *args,
strvec_push(&cmd.args, "-v");
if (args->use_thin_pack)
strvec_push(&cmd.args, "--fix-thin");
- if ((do_keep || index_pack_args) && (args->lock_pack || unpack_limit)) {
- char hostname[HOST_NAME_MAX + 1];
- if (xgethostname(hostname, sizeof(hostname)))
- xsnprintf(hostname, sizeof(hostname), "localhost");
- strvec_pushf(&cmd.args,
- "--keep=fetch-pack %"PRIuMAX " on %s",
- (uintmax_t)getpid(), hostname);
- }
+ if ((do_keep || index_pack_args) && (args->lock_pack || unpack_limit))
+ add_index_pack_keep_option(&cmd.args);
if (!index_pack_args && args->check_self_contained_and_connected)
strvec_push(&cmd.args, "--check-self-contained-and-connected");
else
@@ -1370,17 +1385,20 @@ static int send_fetch_request(struct fetch_negotiator *negotiator, int fd_out,
static int process_section_header(struct packet_reader *reader,
const char *section, int peek)
{
- int ret;
-
- if (packet_reader_peek(reader) != PACKET_READ_NORMAL)
- die(_("error reading section header '%s'"), section);
+ int ret = 0;
- ret = !strcmp(reader->line, section);
+ if (packet_reader_peek(reader) == PACKET_READ_NORMAL &&
+ !strcmp(reader->line, section))
+ ret = 1;
if (!peek) {
- if (!ret)
- die(_("expected '%s', received '%s'"),
- section, reader->line);
+ if (!ret) {
+ if (reader->line)
+ die(_("expected '%s', received '%s'"),
+ section, reader->line);
+ else
+ die(_("expected '%s'"), section);
+ }
packet_reader_read(reader);
}
diff --git a/fmt-merge-msg.c b/fmt-merge-msg.c
index baca57d..f48f44f 100644
--- a/fmt-merge-msg.c
+++ b/fmt-merge-msg.c
@@ -699,6 +699,7 @@ int fmt_merge_msg(struct strbuf *in, struct strbuf *out,
shortlog(origins.items[i].string,
origins.items[i].util,
head, &rev, opts, out);
+ release_revisions(&rev);
}
strbuf_complete_line(out);
diff --git a/fsck.c b/fsck.c
index 3ec500d..dd4822b 100644
--- a/fsck.c
+++ b/fsck.c
@@ -975,27 +975,16 @@ done:
return ret;
}
-/*
- * Like builtin/submodule--helper.c's starts_with_dot_slash, but without
- * relying on the platform-dependent is_dir_sep helper.
- *
- * This is for use in checking whether a submodule URL is interpreted as
- * relative to the current directory on any platform, since \ is a
- * directory separator on Windows but not on other platforms.
- */
-static int starts_with_dot_slash(const char *str)
+static int starts_with_dot_slash(const char *const path)
{
- return str[0] == '.' && (str[1] == '/' || str[1] == '\\');
+ return path_match_flags(path, PATH_MATCH_STARTS_WITH_DOT_SLASH |
+ PATH_MATCH_XPLATFORM);
}
-/*
- * Like starts_with_dot_slash, this is a variant of submodule--helper's
- * helper of the same name with the twist that it accepts backslash as a
- * directory separator even on non-Windows platforms.
- */
-static int starts_with_dot_dot_slash(const char *str)
+static int starts_with_dot_dot_slash(const char *const path)
{
- return str[0] == '.' && starts_with_dot_slash(str + 1);
+ return path_match_flags(path, PATH_MATCH_STARTS_WITH_DOT_DOT_SLASH |
+ PATH_MATCH_XPLATFORM);
}
static int submodule_url_is_relative(const char *url)
diff --git a/fsmonitor--daemon.h b/fsmonitor--daemon.h
index bd09fff..2102a5c 100644
--- a/fsmonitor--daemon.h
+++ b/fsmonitor--daemon.h
@@ -33,10 +33,12 @@ void fsmonitor_batch__free_list(struct fsmonitor_batch *batch);
*/
void fsmonitor_batch__add_path(struct fsmonitor_batch *batch, const char *path);
-struct fsmonitor_daemon_backend_data; /* opaque platform-specific data */
+struct fsm_listen_data; /* opaque platform-specific data for listener thread */
+struct fsm_health_data; /* opaque platform-specific data for health thread */
struct fsmonitor_daemon_state {
pthread_t listener_thread;
+ pthread_t health_thread;
pthread_mutex_t main_lock;
struct strbuf path_worktree_watch;
@@ -50,10 +52,13 @@ struct fsmonitor_daemon_state {
int cookie_seq;
struct hashmap cookies;
- int error_code;
- struct fsmonitor_daemon_backend_data *backend_data;
+ int listen_error_code;
+ int health_error_code;
+ struct fsm_listen_data *listen_data;
+ struct fsm_health_data *health_data;
struct ipc_server_data *ipc_server_data;
+ struct strbuf path_ipc;
};
/*
diff --git a/fsmonitor-settings.c b/fsmonitor-settings.c
index 757d230..464424a 100644
--- a/fsmonitor-settings.c
+++ b/fsmonitor-settings.c
@@ -9,23 +9,52 @@
*/
struct fsmonitor_settings {
enum fsmonitor_mode mode;
+ enum fsmonitor_reason reason;
char *hook_path;
};
-static void lookup_fsmonitor_settings(struct repository *r)
+static enum fsmonitor_reason check_for_incompatible(struct repository *r)
+{
+ if (!r->worktree) {
+ /*
+ * Bare repositories don't have a working directory and
+ * therefore have nothing to watch.
+ */
+ return FSMONITOR_REASON_BARE;
+ }
+
+#ifdef HAVE_FSMONITOR_OS_SETTINGS
+ {
+ enum fsmonitor_reason reason;
+
+ reason = fsm_os__incompatible(r);
+ if (reason != FSMONITOR_REASON_OK)
+ return reason;
+ }
+#endif
+
+ return FSMONITOR_REASON_OK;
+}
+
+static struct fsmonitor_settings *alloc_settings(void)
{
struct fsmonitor_settings *s;
+
+ CALLOC_ARRAY(s, 1);
+ s->mode = FSMONITOR_MODE_DISABLED;
+ s->reason = FSMONITOR_REASON_UNTESTED;
+
+ return s;
+}
+
+static void lookup_fsmonitor_settings(struct repository *r)
+{
const char *const_str;
int bool_value;
if (r->settings.fsmonitor)
return;
- CALLOC_ARRAY(s, 1);
- s->mode = FSMONITOR_MODE_DISABLED;
-
- r->settings.fsmonitor = s;
-
/*
* Overload the existing "core.fsmonitor" config setting (which
* has historically been either unset or a hook pathname) to
@@ -38,6 +67,8 @@ static void lookup_fsmonitor_settings(struct repository *r)
case 0: /* config value was set to <bool> */
if (bool_value)
fsm_settings__set_ipc(r);
+ else
+ fsm_settings__set_disabled(r);
return;
case 1: /* config value was unset */
@@ -53,18 +84,18 @@ static void lookup_fsmonitor_settings(struct repository *r)
return;
}
- if (!const_str || !*const_str)
- return;
-
- fsm_settings__set_hook(r, const_str);
+ if (const_str && *const_str)
+ fsm_settings__set_hook(r, const_str);
+ else
+ fsm_settings__set_disabled(r);
}
enum fsmonitor_mode fsm_settings__get_mode(struct repository *r)
{
if (!r)
r = the_repository;
-
- lookup_fsmonitor_settings(r);
+ if (!r->settings.fsmonitor)
+ lookup_fsmonitor_settings(r);
return r->settings.fsmonitor->mode;
}
@@ -73,31 +104,55 @@ const char *fsm_settings__get_hook_path(struct repository *r)
{
if (!r)
r = the_repository;
-
- lookup_fsmonitor_settings(r);
+ if (!r->settings.fsmonitor)
+ lookup_fsmonitor_settings(r);
return r->settings.fsmonitor->hook_path;
}
void fsm_settings__set_ipc(struct repository *r)
{
+ enum fsmonitor_reason reason = check_for_incompatible(r);
+
+ if (reason != FSMONITOR_REASON_OK) {
+ fsm_settings__set_incompatible(r, reason);
+ return;
+ }
+
+ /*
+ * Caller requested IPC explicitly, so avoid (possibly
+ * recursive) config lookup.
+ */
if (!r)
r = the_repository;
-
- lookup_fsmonitor_settings(r);
+ if (!r->settings.fsmonitor)
+ r->settings.fsmonitor = alloc_settings();
r->settings.fsmonitor->mode = FSMONITOR_MODE_IPC;
+ r->settings.fsmonitor->reason = reason;
FREE_AND_NULL(r->settings.fsmonitor->hook_path);
}
void fsm_settings__set_hook(struct repository *r, const char *path)
{
+ enum fsmonitor_reason reason = check_for_incompatible(r);
+
+ if (reason != FSMONITOR_REASON_OK) {
+ fsm_settings__set_incompatible(r, reason);
+ return;
+ }
+
+ /*
+ * Caller requested hook explicitly, so avoid (possibly
+ * recursive) config lookup.
+ */
if (!r)
r = the_repository;
-
- lookup_fsmonitor_settings(r);
+ if (!r->settings.fsmonitor)
+ r->settings.fsmonitor = alloc_settings();
r->settings.fsmonitor->mode = FSMONITOR_MODE_HOOK;
+ r->settings.fsmonitor->reason = reason;
FREE_AND_NULL(r->settings.fsmonitor->hook_path);
r->settings.fsmonitor->hook_path = strdup(path);
}
@@ -106,9 +161,85 @@ void fsm_settings__set_disabled(struct repository *r)
{
if (!r)
r = the_repository;
-
- lookup_fsmonitor_settings(r);
+ if (!r->settings.fsmonitor)
+ r->settings.fsmonitor = alloc_settings();
r->settings.fsmonitor->mode = FSMONITOR_MODE_DISABLED;
+ r->settings.fsmonitor->reason = FSMONITOR_REASON_OK;
FREE_AND_NULL(r->settings.fsmonitor->hook_path);
}
+
+void fsm_settings__set_incompatible(struct repository *r,
+ enum fsmonitor_reason reason)
+{
+ if (!r)
+ r = the_repository;
+ if (!r->settings.fsmonitor)
+ r->settings.fsmonitor = alloc_settings();
+
+ r->settings.fsmonitor->mode = FSMONITOR_MODE_INCOMPATIBLE;
+ r->settings.fsmonitor->reason = reason;
+ FREE_AND_NULL(r->settings.fsmonitor->hook_path);
+}
+
+enum fsmonitor_reason fsm_settings__get_reason(struct repository *r)
+{
+ if (!r)
+ r = the_repository;
+ if (!r->settings.fsmonitor)
+ lookup_fsmonitor_settings(r);
+
+ return r->settings.fsmonitor->reason;
+}
+
+char *fsm_settings__get_incompatible_msg(const struct repository *r,
+ enum fsmonitor_reason reason)
+{
+ struct strbuf msg = STRBUF_INIT;
+
+ switch (reason) {
+ case FSMONITOR_REASON_UNTESTED:
+ case FSMONITOR_REASON_OK:
+ goto done;
+
+ case FSMONITOR_REASON_BARE: {
+ char *cwd = xgetcwd();
+
+ strbuf_addf(&msg,
+ _("bare repository '%s' is incompatible with fsmonitor"),
+ cwd);
+ free(cwd);
+ goto done;
+ }
+
+ case FSMONITOR_REASON_ERROR:
+ strbuf_addf(&msg,
+ _("repository '%s' is incompatible with fsmonitor due to errors"),
+ r->worktree);
+ goto done;
+
+ case FSMONITOR_REASON_REMOTE:
+ strbuf_addf(&msg,
+ _("remote repository '%s' is incompatible with fsmonitor"),
+ r->worktree);
+ goto done;
+
+ case FSMONITOR_REASON_VFS4GIT:
+ strbuf_addf(&msg,
+ _("virtual repository '%s' is incompatible with fsmonitor"),
+ r->worktree);
+ goto done;
+
+ case FSMONITOR_REASON_NOSOCKETS:
+ strbuf_addf(&msg,
+ _("repository '%s' is incompatible with fsmonitor due to lack of Unix sockets"),
+ r->worktree);
+ goto done;
+ }
+
+ BUG("Unhandled case in fsm_settings__get_incompatible_msg: '%d'",
+ reason);
+
+done:
+ return strbuf_detach(&msg, NULL);
+}
diff --git a/fsmonitor-settings.h b/fsmonitor-settings.h
index a4c5d7b..d9c2605 100644
--- a/fsmonitor-settings.h
+++ b/fsmonitor-settings.h
@@ -4,18 +4,51 @@
struct repository;
enum fsmonitor_mode {
+ FSMONITOR_MODE_INCOMPATIBLE = -1, /* see _reason */
FSMONITOR_MODE_DISABLED = 0,
FSMONITOR_MODE_HOOK = 1, /* core.fsmonitor=<hook_path> */
FSMONITOR_MODE_IPC = 2, /* core.fsmonitor=<true> */
};
+/*
+ * Incompatibility reasons.
+ */
+enum fsmonitor_reason {
+ FSMONITOR_REASON_UNTESTED = 0,
+ FSMONITOR_REASON_OK, /* no incompatibility or when disabled */
+ FSMONITOR_REASON_BARE,
+ FSMONITOR_REASON_ERROR, /* FS error probing for compatibility */
+ FSMONITOR_REASON_REMOTE,
+ FSMONITOR_REASON_VFS4GIT, /* VFS for Git virtualization */
+ FSMONITOR_REASON_NOSOCKETS, /* NTFS,FAT32 do not support Unix sockets */
+};
+
void fsm_settings__set_ipc(struct repository *r);
void fsm_settings__set_hook(struct repository *r, const char *path);
void fsm_settings__set_disabled(struct repository *r);
+void fsm_settings__set_incompatible(struct repository *r,
+ enum fsmonitor_reason reason);
enum fsmonitor_mode fsm_settings__get_mode(struct repository *r);
const char *fsm_settings__get_hook_path(struct repository *r);
+enum fsmonitor_reason fsm_settings__get_reason(struct repository *r);
+char *fsm_settings__get_incompatible_msg(const struct repository *r,
+ enum fsmonitor_reason reason);
+
struct fsmonitor_settings;
+#ifdef HAVE_FSMONITOR_OS_SETTINGS
+/*
+ * Ask platform-specific code whether the repository is incompatible
+ * with fsmonitor (both hook and ipc modes). For example, if the working
+ * directory is on a remote volume and mounted via a technology that does
+ * not support notification events, then we should not pretend to watch it.
+ *
+ * fsm_os__* routines should be considered private to fsm_settings__
+ * routines.
+ */
+enum fsmonitor_reason fsm_os__incompatible(struct repository *r);
+#endif /* HAVE_FSMONITOR_OS_SETTINGS */
+
#endif /* FSMONITOR_SETTINGS_H */
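
A rough sketch of how the settings API above could be consumed; report_fsmonitor_status() is invented for illustration and assumes compilation inside the git source tree:

#include "cache.h"
#include "fsmonitor-settings.h"

static void report_fsmonitor_status(struct repository *r)
{
	if (fsm_settings__get_mode(r) == FSMONITOR_MODE_INCOMPATIBLE) {
		enum fsmonitor_reason reason = fsm_settings__get_reason(r);
		char *msg = fsm_settings__get_incompatible_msg(r, reason);

		warning("%s", msg);
		free(msg);
	}
}
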
diff --git a/fsmonitor.c b/fsmonitor.c
index 292a674..57d6a48 100644
--- a/fsmonitor.c
+++ b/fsmonitor.c
@@ -184,30 +184,68 @@ static int query_fsmonitor_hook(struct repository *r,
static void fsmonitor_refresh_callback(struct index_state *istate, char *name)
{
int i, len = strlen(name);
- if (name[len - 1] == '/') {
+ int pos = index_name_pos(istate, name, len);
+
+ trace_printf_key(&trace_fsmonitor,
+ "fsmonitor_refresh_callback '%s' (pos %d)",
+ name, pos);
+ if (name[len - 1] == '/') {
/*
- * TODO We should binary search to find the first path with
- * TODO this directory prefix. Then linearly update entries
- * TODO while the prefix matches. Taking care to search without
- * TODO the trailing slash -- because '/' sorts after a few
- * TODO interesting special chars, like '.' and ' '.
+ * The daemon can decorate directory events, such as
+ * moves or renames, with a trailing slash if the OS
+ * FS Event contains sufficient information, such as
+ * MacOS.
+ *
+ * Use this to invalidate the entire cone under that
+ * directory.
+ *
+ * We do not expect an exact match because the index
+ * does not normally contain directory entries, so we
+ * start at the insertion point and scan.
*/
+ if (pos < 0)
+ pos = -pos - 1;
/* Mark all entries for the folder invalid */
- for (i = 0; i < istate->cache_nr; i++) {
- if (istate->cache[i]->ce_flags & CE_FSMONITOR_VALID &&
- starts_with(istate->cache[i]->name, name))
- istate->cache[i]->ce_flags &= ~CE_FSMONITOR_VALID;
+ for (i = pos; i < istate->cache_nr; i++) {
+ if (!starts_with(istate->cache[i]->name, name))
+ break;
+ istate->cache[i]->ce_flags &= ~CE_FSMONITOR_VALID;
}
- /* Need to remove the / from the path for the untracked cache */
+
+ /*
+ * We need to remove the trailing "/" from the path
+ * for the untracked cache.
+ */
name[len - 1] = '\0';
+ } else if (pos >= 0) {
+ /*
+ * We have an exact match for this path and can just
+ * invalidate it.
+ */
+ istate->cache[pos]->ce_flags &= ~CE_FSMONITOR_VALID;
} else {
- int pos = index_name_pos(istate, name, strlen(name));
-
- if (pos >= 0) {
- struct cache_entry *ce = istate->cache[pos];
- ce->ce_flags &= ~CE_FSMONITOR_VALID;
+ /*
+ * The path is not a tracked file -or- it is a
+ * directory event on a platform that cannot
+ * distinguish between file and directory events in
+ * the event handler, such as Windows.
+ *
+ * Scan as if it is a directory and invalidate the
+ * cone under it. (But remember to ignore items
+ * between "name" and "name/", such as "name-" and
+ * "name.").
+ */
+ pos = -pos - 1;
+
+ for (i = pos; i < istate->cache_nr; i++) {
+ if (!starts_with(istate->cache[i]->name, name))
+ break;
+ if ((unsigned char)istate->cache[i]->name[len] > '/')
+ break;
+ if (istate->cache[i]->name[len] == '/')
+ istate->cache[i]->ce_flags &= ~CE_FSMONITOR_VALID;
}
}
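
The scan above relies on index_name_pos() returning -(insertion point) - 1 when there is no exact match. A standalone toy program (toy_name_pos() and the sample names are invented; this is not git code) showing how that convention drives the cone invalidation:

#include <stdio.h>
#include <string.h>

static const char *names[] = { "dir.c", "dir/a.c", "dir/b.c", "dirz" };
#define NR ((int)(sizeof(names) / sizeof(*names)))

/*
 * Toy stand-in for index_name_pos(): position of an exact match,
 * or -(insertion point) - 1 when the name is not present.
 */
static int toy_name_pos(const char *name)
{
	int lo = 0, hi = NR;

	while (lo < hi) {
		int mid = lo + (hi - lo) / 2;
		int cmp = strcmp(names[mid], name);

		if (!cmp)
			return mid;
		if (cmp < 0)
			lo = mid + 1;
		else
			hi = mid;
	}
	return -lo - 1;
}

int main(void)
{
	const char *dir = "dir/";	/* directory event, trailing slash */
	int pos = toy_name_pos(dir);

	if (pos < 0)
		pos = -pos - 1;	/* first entry sorting at or after "dir/" */
	for (; pos < NR; pos++) {
		if (strncmp(names[pos], dir, strlen(dir)))
			break;
		printf("would invalidate %s\n", names[pos]);
	}
	return 0;
}

Running it prints dir/a.c and dir/b.c; "dir.c" and "dirz" sort outside the "dir/" cone and are left alone.
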
@@ -215,7 +253,6 @@ static void fsmonitor_refresh_callback(struct index_state *istate, char *name)
* Mark the untracked cache dirty even if it wasn't found in the index
* as it could be a new untracked file.
*/
- trace_printf_key(&trace_fsmonitor, "fsmonitor_refresh_callback '%s'", name);
untracked_cache_invalidate_path(istate, name, 0);
}
@@ -543,6 +580,8 @@ void tweak_fsmonitor(struct index_state *istate)
if (fsmonitor_enabled) {
/* Mark all entries valid */
for (i = 0; i < istate->cache_nr; i++) {
+ if (S_ISGITLINK(istate->cache[i]->ce_mode))
+ continue;
istate->cache[i]->ce_flags |= CE_FSMONITOR_VALID;
}
diff --git a/fsmonitor.h b/fsmonitor.h
index 3f41f65..edf7ce5 100644
--- a/fsmonitor.h
+++ b/fsmonitor.h
@@ -68,6 +68,15 @@ static inline int is_fsmonitor_refreshed(const struct index_state *istate)
* Set the given cache entries CE_FSMONITOR_VALID bit. This should be
* called any time the cache entry has been updated to reflect the
* current state of the file on disk.
+ *
+ * However, never mark submodules as valid. When commands like "git
+ * status" run they might need to recurse into the submodule (using a
+ * child process) to get a summary of the submodule state. We don't
+ * have (and don't want to create) the facility to translate every
+ * FS event that we receive and that happens to be deep inside of a
+ * submodule back to the submodule root, so we cannot correctly keep
+ * track of this bit on the gitlink directory. Therefore, we never
+ * set it on submodules.
*/
static inline void mark_fsmonitor_valid(struct index_state *istate, struct cache_entry *ce)
{
@@ -75,6 +84,8 @@ static inline void mark_fsmonitor_valid(struct index_state *istate, struct cache
if (fsm_mode > FSMONITOR_MODE_DISABLED &&
!(ce->ce_flags & CE_FSMONITOR_VALID)) {
+ if (S_ISGITLINK(ce->ce_mode))
+ return;
istate->cache_changed = 1;
ce->ce_flags |= CE_FSMONITOR_VALID;
trace_printf_key(&trace_fsmonitor, "mark_fsmonitor_clean '%s'", ce->name);
diff --git a/git-compat-util.h b/git-compat-util.h
index 58fd813..58d7708 100644
--- a/git-compat-util.h
+++ b/git-compat-util.h
@@ -236,6 +236,12 @@
#include <sys/sysctl.h>
#endif
+/* Used by compat/win32/path-utils.h, and more */
+static inline int is_xplatform_dir_sep(int c)
+{
+ return c == '/' || c == '\\';
+}
+
#if defined(__CYGWIN__)
#include "compat/win32/path-utils.h"
#endif
@@ -416,11 +422,11 @@ static inline int git_skip_dos_drive_prefix(char **path)
#define skip_dos_drive_prefix git_skip_dos_drive_prefix
#endif
-#ifndef is_dir_sep
static inline int git_is_dir_sep(int c)
{
return c == '/';
}
+#ifndef is_dir_sep
#define is_dir_sep git_is_dir_sep
#endif
@@ -437,12 +443,68 @@ static inline int git_offset_1st_component(const char *path)
#endif
#ifndef is_path_owned_by_current_user
+
+#ifdef __TANDEM
+#define ROOT_UID 65535
+#else
+#define ROOT_UID 0
+#endif
+
+/*
+ * Do not use this function when
+ * (1) geteuid() did not say we are running as 'root', or
+ * (2) using this function will compromise the system.
+ *
+ * PORTABILITY WARNING:
+ * This code assumes uid_t is unsigned because that is what sudo does.
+ * If your uid_t type is signed and all your ids are positive then it
+ * should all work fine.
+ * If your version of sudo uses negative values for uid_t or it is
+ * buggy and returns an overflowed value in SUDO_UID, then git might
+ * fail to grant access to your repository properly or even mistakenly
+ * grant access to someone else.
+ * In the unlikely scenario this happened to you, and that is how you
+ * got to this message, we would like to know about it; so send us an
+ * email to git@vger.kernel.org indicating which platform you are
+ * using and which version of sudo, so we can improve this logic and
+ * maybe provide you with a patch that would prevent this issue again
+ * in the future.
+ */
+static inline void extract_id_from_env(const char *env, uid_t *id)
+{
+ const char *real_uid = getenv(env);
+
+ /* discard anything empty to avoid a more complex check below */
+ if (real_uid && *real_uid) {
+ char *endptr = NULL;
+ unsigned long env_id;
+
+ errno = 0;
+ /* silent overflow errors could trigger a bug here */
+ env_id = strtoul(real_uid, &endptr, 10);
+ if (!*endptr && !errno)
+ *id = env_id;
+ }
+}
+
static inline int is_path_owned_by_current_uid(const char *path)
{
struct stat st;
+ uid_t euid;
+
if (lstat(path, &st))
return 0;
- return st.st_uid == geteuid();
+
+ euid = geteuid();
+ if (euid == ROOT_UID)
+ {
+ if (st.st_uid == ROOT_UID)
+ return 1;
+ else
+ extract_id_from_env("SUDO_UID", &euid);
+ }
+
+ return st.st_uid == euid;
}
#define is_path_owned_by_current_user is_path_owned_by_current_uid
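
To see what extract_id_from_env() accepts, here is a small standalone program that mirrors its parsing (it re-implements the logic shown above rather than linking against git):

#include <errno.h>
#include <stdio.h>
#include <stdlib.h>
#include <sys/types.h>

/*
 * Mirrors extract_id_from_env(): only overwrite *id when the variable
 * holds a clean, non-overflowing unsigned decimal number.
 */
static void extract_id_from_env(const char *env, uid_t *id)
{
	const char *real_uid = getenv(env);

	if (real_uid && *real_uid) {
		char *endptr = NULL;
		unsigned long env_id;

		errno = 0;
		env_id = strtoul(real_uid, &endptr, 10);
		if (!*endptr && !errno)
			*id = env_id;
	}
}

int main(void)
{
	uid_t uid = 0;	/* pretend geteuid() reported ROOT_UID */

	extract_id_from_env("SUDO_UID", &uid);
	printf("uid used for the ownership check: %lu\n", (unsigned long)uid);
	return 0;
}

With SUDO_UID=1000 it prints 1000; an unset, empty, or malformed value such as SUDO_UID=1000x leaves the uid at 0.
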
@@ -1269,15 +1331,27 @@ static inline int regexec_buf(const regex_t *preg, const char *buf, size_t size,
/* usage.c: only to be used for testing BUG() implementation (see test-tool) */
extern int BUG_exit_code;
+/* usage.c: if bug() is called we should have a BUG_if_bug() afterwards */
+extern int bug_called_must_BUG;
+
__attribute__((format (printf, 3, 4))) NORETURN
void BUG_fl(const char *file, int line, const char *fmt, ...);
#define BUG(...) BUG_fl(__FILE__, __LINE__, __VA_ARGS__)
+__attribute__((format (printf, 3, 4)))
+void bug_fl(const char *file, int line, const char *fmt, ...);
+#define bug(...) bug_fl(__FILE__, __LINE__, __VA_ARGS__)
+#define BUG_if_bug(...) do { \
+ if (bug_called_must_BUG) \
+ BUG_fl(__FILE__, __LINE__, __VA_ARGS__); \
+} while (0)
+#ifndef FSYNC_METHOD_DEFAULT
#ifdef __APPLE__
#define FSYNC_METHOD_DEFAULT FSYNC_METHOD_WRITEOUT_ONLY
#else
#define FSYNC_METHOD_DEFAULT FSYNC_METHOD_FSYNC
#endif
+#endif
enum fsync_action {
FSYNC_WRITEOUT_ONLY,
@@ -1400,8 +1474,8 @@ int cmd_main(int, const char **);
* Intercept all calls to exit() and route them to trace2 to
* optionally emit a message before calling the real exit().
*/
-int trace2_cmd_exit_fl(const char *file, int line, int code);
-#define exit(code) exit(trace2_cmd_exit_fl(__FILE__, __LINE__, (code)))
+int common_exit(const char *file, int line, int code);
+#define exit(code) exit(common_exit(__FILE__, __LINE__, (code)))
/*
* You can mark a stack variable with UNLEAK(var) to avoid it being
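
The bug()/BUG_if_bug() pair added to git-compat-util.h above is meant for reporting several problems before dying once. A sketch of the intended calling pattern (validate_flags() and its flag bits are invented for illustration):

static void validate_flags(unsigned flags)
{
	/* hypothetical flag bits, purely for illustration */
	const unsigned KNOWN_FLAGS = (1 << 0) | (1 << 1);

	if (flags & ~KNOWN_FLAGS)
		bug("unknown flag bits: 0x%x", flags & ~KNOWN_FLAGS);
	if (!flags)
		bug("at least one flag must be set");

	/* dies here if any bug() call above fired */
	BUG_if_bug("invalid flags passed to validate_flags()");
}
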
diff --git a/git-mergetool--lib.sh b/git-mergetool--lib.sh
index 542a6a7..9f99201 100644
--- a/git-mergetool--lib.sh
+++ b/git-mergetool--lib.sh
@@ -63,7 +63,7 @@ $(list_tool_variants)"
preamble=
fi
shown_any=yes
- printf "%s%s\n" "$per_line_prefix" "$toolname"
+ printf "%s%-15s %s\n" "$per_line_prefix" "$toolname" $(diff_mode && diff_cmd_help "$toolname" || merge_cmd_help "$toolname")
fi
done
@@ -162,10 +162,18 @@ setup_tool () {
return 1
}
+ diff_cmd_help () {
+ return 0
+ }
+
merge_cmd () {
return 1
}
+ merge_cmd_help () {
+ return 0
+ }
+
hide_resolved_enabled () {
return 0
}
diff --git a/git-p4.py b/git-p4.py
index a9b1f90..8fbf6eb 100755
--- a/git-p4.py
+++ b/git-p4.py
@@ -7,34 +7,52 @@
# 2007 Trolltech ASA
# License: MIT <http://www.opensource.org/licenses/mit-license.php>
#
-# pylint: disable=invalid-name,missing-docstring,too-many-arguments,broad-except
-# pylint: disable=no-self-use,wrong-import-position,consider-iterating-dictionary
-# pylint: disable=wrong-import-order,unused-import,too-few-public-methods
-# pylint: disable=too-many-lines,ungrouped-imports,fixme,too-many-locals
-# pylint: disable=line-too-long,bad-whitespace,superfluous-parens
-# pylint: disable=too-many-statements,too-many-instance-attributes
-# pylint: disable=too-many-branches,too-many-nested-blocks
+# pylint: disable=bad-whitespace
+# pylint: disable=broad-except
+# pylint: disable=consider-iterating-dictionary
+# pylint: disable=disable
+# pylint: disable=fixme
+# pylint: disable=invalid-name
+# pylint: disable=line-too-long
+# pylint: disable=missing-docstring
+# pylint: disable=no-self-use
+# pylint: disable=superfluous-parens
+# pylint: disable=too-few-public-methods
+# pylint: disable=too-many-arguments
+# pylint: disable=too-many-branches
+# pylint: disable=too-many-instance-attributes
+# pylint: disable=too-many-lines
+# pylint: disable=too-many-locals
+# pylint: disable=too-many-nested-blocks
+# pylint: disable=too-many-statements
+# pylint: disable=ungrouped-imports
+# pylint: disable=unused-import
+# pylint: disable=wrong-import-order
+# pylint: disable=wrong-import-position
#
+
+import struct
import sys
if sys.version_info.major < 3 and sys.version_info.minor < 7:
sys.stderr.write("git-p4: requires Python 2.7 or later.\n")
sys.exit(1)
-import os
-import optparse
+
+import ctypes
+import errno
import functools
+import glob
import marshal
-import subprocess
-import tempfile
-import time
+import optparse
+import os
import platform
import re
import shutil
import stat
+import subprocess
+import tempfile
+import time
import zipfile
import zlib
-import ctypes
-import errno
-import glob
# On python2.7 where raw_input() and input() are both available,
# we want raw_input's semantics, but aliased to input for python3
@@ -52,17 +70,21 @@ verbose = False
defaultLabelRegexp = r'[a-zA-Z0-9_\-.]+$'
# The block size is reduced automatically if required
-defaultBlockSize = 1<<20
+defaultBlockSize = 1 << 20
+
+defaultMetadataDecodingStrategy = 'passthrough' if sys.version_info.major == 2 else 'fallback'
+defaultFallbackMetadataEncoding = 'cp1252'
p4_access_checked = False
re_ko_keywords = re.compile(br'\$(Id|Header)(:[^$\n]+)?\$')
re_k_keywords = re.compile(br'\$(Id|Header|Author|Date|DateTime|Change|File|Revision)(:[^$\n]+)?\$')
+
def format_size_human_readable(num):
- """ Returns a number of units (typically bytes) formatted as a human-readable
- string.
- """
+ """Returns a number of units (typically bytes) formatted as a
+ human-readable string.
+ """
if num < 1024:
return '{:d} B'.format(num)
for unit in ["Ki", "Mi", "Gi", "Ti", "Pi", "Ei", "Zi"]:
@@ -71,18 +93,19 @@ def format_size_human_readable(num):
return "{:3.1f} {}B".format(num, unit)
return "{:.1f} YiB".format(num)
+
def p4_build_cmd(cmd):
"""Build a suitable p4 command line.
- This consolidates building and returning a p4 command line into one
- location. It means that hooking into the environment, or other configuration
- can be done more easily.
- """
+ This consolidates building and returning a p4 command line into one
+ location. It means that hooking into the environment, or other
+ configuration can be done more easily.
+ """
real_cmd = ["p4"]
user = gitConfig("git-p4.user")
if len(user) > 0:
- real_cmd += ["-u",user]
+ real_cmd += ["-u", user]
password = gitConfig("git-p4.password")
if len(password) > 0:
@@ -118,31 +141,31 @@ def p4_build_cmd(cmd):
return real_cmd
+
def git_dir(path):
- """ Return TRUE if the given path is a git directory (/path/to/dir/.git).
- This won't automatically add ".git" to a directory.
- """
+ """Return TRUE if the given path is a git directory (/path/to/dir/.git).
+ This won't automatically add ".git" to a directory.
+ """
d = read_pipe(["git", "--git-dir", path, "rev-parse", "--git-dir"], True).strip()
if not d or len(d) == 0:
return None
else:
return d
+
def chdir(path, is_client_path=False):
- """Do chdir to the given path, and set the PWD environment
- variable for use by P4. It does not look at getcwd() output.
- Since we're not using the shell, it is necessary to set the
- PWD environment variable explicitly.
-
- Normally, expand the path to force it to be absolute. This
- addresses the use of relative path names inside P4 settings,
- e.g. P4CONFIG=.p4config. P4 does not simply open the filename
- as given; it looks for .p4config using PWD.
-
- If is_client_path, the path was handed to us directly by p4,
- and may be a symbolic link. Do not call os.getcwd() in this
- case, because it will cause p4 to think that PWD is not inside
- the client path.
+ """Do chdir to the given path, and set the PWD environment variable for use
+ by P4. It does not look at getcwd() output. Since we're not using the
+ shell, it is necessary to set the PWD environment variable explicitly.
+
+ Normally, expand the path to force it to be absolute. This addresses
+ the use of relative path names inside P4 settings, e.g.
+ P4CONFIG=.p4config. P4 does not simply open the filename as given; it
+ looks for .p4config using PWD.
+
+ If is_client_path, the path was handed to us directly by p4, and may be
+ a symbolic link. Do not call os.getcwd() in this case, because it will
+ cause p4 to think that PWD is not inside the client path.
"""
os.chdir(path)
@@ -150,6 +173,7 @@ def chdir(path, is_client_path=False):
path = os.getcwd()
os.environ['PWD'] = path
+
def calcDiskFree():
"""Return free space in bytes on the disk of the given dirname."""
if platform.system() == 'Windows':
@@ -160,52 +184,124 @@ def calcDiskFree():
st = os.statvfs(os.getcwd())
return st.f_bavail * st.f_frsize
+
def die(msg):
- """ Terminate execution. Make sure that any running child processes have been wait()ed for before
- calling this.
- """
+ """Terminate execution. Make sure that any running child processes have
+ been wait()ed for before calling this.
+ """
if verbose:
raise Exception(msg)
else:
sys.stderr.write(msg + "\n")
sys.exit(1)
+
def prompt(prompt_text):
- """ Prompt the user to choose one of the choices
+ """Prompt the user to choose one of the choices.
- Choices are identified in the prompt_text by square brackets around
- a single letter option.
- """
+ Choices are identified in the prompt_text by square brackets around a
+ single letter option.
+ """
choices = set(m.group(1) for m in re.finditer(r"\[(.)\]", prompt_text))
while True:
sys.stderr.flush()
sys.stdout.write(prompt_text)
sys.stdout.flush()
- response=sys.stdin.readline().strip().lower()
+ response = sys.stdin.readline().strip().lower()
if not response:
continue
response = response[0]
if response in choices:
return response
+
# We need different encoding/decoding strategies for text data being passed
# around in pipes depending on python version
if bytes is not str:
# For python3, always encode and decode as appropriate
def decode_text_stream(s):
return s.decode() if isinstance(s, bytes) else s
+
def encode_text_stream(s):
return s.encode() if isinstance(s, str) else s
else:
# For python2.7, pass read strings as-is, but also allow writing unicode
def decode_text_stream(s):
return s
+
def encode_text_stream(s):
return s.encode('utf_8') if isinstance(s, unicode) else s
+
+class MetadataDecodingException(Exception):
+ def __init__(self, input_string):
+ self.input_string = input_string
+
+ def __str__(self):
+ return """Decoding perforce metadata failed!
+The failing string was:
+---
+{}
+---
+Consider setting the git-p4.metadataDecodingStrategy config option to
+'fallback', to allow metadata to be decoded using a fallback encoding,
+defaulting to cp1252.""".format(self.input_string)
+
+
+encoding_fallback_warning_issued = False
+encoding_escape_warning_issued = False
+def metadata_stream_to_writable_bytes(s):
+ encodingStrategy = gitConfig('git-p4.metadataDecodingStrategy') or defaultMetadataDecodingStrategy
+ fallbackEncoding = gitConfig('git-p4.metadataFallbackEncoding') or defaultFallbackMetadataEncoding
+ if not isinstance(s, bytes):
+ return s.encode('utf_8')
+ if encodingStrategy == 'passthrough':
+ return s
+ try:
+ s.decode('utf_8')
+ return s
+ except UnicodeDecodeError:
+ if encodingStrategy == 'fallback' and fallbackEncoding:
+ global encoding_fallback_warning_issued
+ global encoding_escape_warning_issued
+ try:
+ if not encoding_fallback_warning_issued:
+ print("\nCould not decode value as utf-8; using configured fallback encoding %s: %s" % (fallbackEncoding, s))
+ print("\n(this warning is only displayed once during an import)")
+ encoding_fallback_warning_issued = True
+ return s.decode(fallbackEncoding).encode('utf_8')
+ except Exception as exc:
+ if not encoding_escape_warning_issued:
+ print("\nCould not decode value with configured fallback encoding %s; escaping bytes over 127: %s" % (fallbackEncoding, s))
+ print("\n(this warning is only displayed once during an import)")
+ encoding_escape_warning_issued = True
+ escaped_bytes = b''
+ # bytes and strings work very differently in python2 vs python3...
+ if str is bytes:
+ for byte in s:
+ byte_number = struct.unpack('>B', byte)[0]
+ if byte_number > 127:
+ escaped_bytes += b'%'
+ escaped_bytes += hex(byte_number)[2:].upper()
+ else:
+ escaped_bytes += byte
+ else:
+ for byte_number in s:
+ if byte_number > 127:
+ escaped_bytes += b'%'
+ escaped_bytes += hex(byte_number).upper().encode()[2:]
+ else:
+ escaped_bytes += bytes([byte_number])
+ return escaped_bytes
+
+ raise MetadataDecodingException(s)
+
+
def decode_path(path):
- """Decode a given string (bytes or otherwise) using configured path encoding options
- """
+ """Decode a given string (bytes or otherwise) using configured path
+ encoding options.
+ """
+
encoding = gitConfig('git-p4.pathEncoding') or 'utf_8'
if bytes is not str:
return path.decode(encoding, errors='replace') if isinstance(path, bytes) else path
@@ -218,6 +314,7 @@ def decode_path(path):
print('Path with non-ASCII characters detected. Used {} to decode: {}'.format(encoding, path))
return path
+
def run_git_hook(cmd, param=[]):
"""Execute a hook if the hook exists."""
args = ['git', 'hook', 'run', '--ignore-missing', cmd]
@@ -227,6 +324,7 @@ def run_git_hook(cmd, param=[]):
args.append(p)
return subprocess.call(args) == 0
+
def write_pipe(c, stdin, *k, **kw):
if verbose:
sys.stderr.write('Writing pipe: {}\n'.format(' '.join(c)))
@@ -240,33 +338,35 @@ def write_pipe(c, stdin, *k, **kw):
return val
+
def p4_write_pipe(c, stdin, *k, **kw):
real_cmd = p4_build_cmd(c)
if bytes is not str and isinstance(stdin, str):
stdin = encode_text_stream(stdin)
return write_pipe(real_cmd, stdin, *k, **kw)
+
def read_pipe_full(c, *k, **kw):
- """ Read output from command. Returns a tuple
- of the return status, stdout text and stderr
- text.
- """
+ """Read output from command. Returns a tuple of the return status, stdout
+ text and stderr text.
+ """
if verbose:
sys.stderr.write('Reading pipe: {}\n'.format(' '.join(c)))
p = subprocess.Popen(
c, stdout=subprocess.PIPE, stderr=subprocess.PIPE, *k, **kw)
- (out, err) = p.communicate()
+ out, err = p.communicate()
return (p.returncode, out, decode_text_stream(err))
+
def read_pipe(c, ignore_error=False, raw=False, *k, **kw):
- """ Read output from command. Returns the output text on
- success. On failure, terminates execution, unless
- ignore_error is True, when it returns an empty string.
+ """Read output from command. Returns the output text on success. On
+ failure, terminates execution, unless ignore_error is True, when it
+ returns an empty string.
- If raw is True, do not attempt to decode output text.
- """
- (retcode, out, err) = read_pipe_full(c, *k, **kw)
+ If raw is True, do not attempt to decode output text.
+ """
+ retcode, out, err = read_pipe_full(c, *k, **kw)
if retcode != 0:
if ignore_error:
out = ""
@@ -276,20 +376,23 @@ def read_pipe(c, ignore_error=False, raw=False, *k, **kw):
out = decode_text_stream(out)
return out
+
def read_pipe_text(c, *k, **kw):
- """ Read output from a command with trailing whitespace stripped.
- On error, returns None.
- """
- (retcode, out, err) = read_pipe_full(c, *k, **kw)
+ """Read output from a command with trailing whitespace stripped. On error,
+ returns None.
+ """
+ retcode, out, err = read_pipe_full(c, *k, **kw)
if retcode != 0:
return None
else:
return decode_text_stream(out).rstrip()
+
def p4_read_pipe(c, ignore_error=False, raw=False, *k, **kw):
real_cmd = p4_build_cmd(c)
return read_pipe(real_cmd, ignore_error, raw=raw, *k, **kw)
+
def read_pipe_lines(c, raw=False, *k, **kw):
if verbose:
sys.stderr.write('Reading pipe: {}\n'.format(' '.join(c)))
@@ -303,31 +406,36 @@ def read_pipe_lines(c, raw=False, *k, **kw):
die('Command failed: {}'.format(' '.join(c)))
return lines
+
def p4_read_pipe_lines(c, *k, **kw):
- """Specifically invoke p4 on the command supplied. """
+ """Specifically invoke p4 on the command supplied."""
real_cmd = p4_build_cmd(c)
return read_pipe_lines(real_cmd, *k, **kw)
+
def p4_has_command(cmd):
- """Ask p4 for help on this command. If it returns an error, the
- command does not exist in this version of p4."""
+ """Ask p4 for help on this command. If it returns an error, the command
+ does not exist in this version of p4.
+ """
real_cmd = p4_build_cmd(["help", cmd])
p = subprocess.Popen(real_cmd, stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
p.communicate()
return p.returncode == 0
+
def p4_has_move_command():
- """See if the move command exists, that it supports -k, and that
- it has not been administratively disabled. The arguments
- must be correct, but the filenames do not have to exist. Use
- ones with wildcards so even if they exist, it will fail."""
+ """See if the move command exists, that it supports -k, and that it has not
+ been administratively disabled. The arguments must be correct, but the
+ filenames do not have to exist. Use ones with wildcards so even if they
+ exist, it will fail.
+ """
if not p4_has_command("move"):
return False
cmd = p4_build_cmd(["move", "-k", "@from", "@to"])
p = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
- (out, err) = p.communicate()
+ out, err = p.communicate()
err = decode_text_stream(err)
# return code will be 1 in either case
if err.find("Invalid option") >= 0:
@@ -337,6 +445,7 @@ def p4_has_move_command():
# assume it failed because @... was invalid changelist
return True
+
def system(cmd, ignore_error=False, *k, **kw):
if verbose:
sys.stderr.write("executing {}\n".format(
@@ -347,19 +456,22 @@ def system(cmd, ignore_error=False, *k, **kw):
return retcode
+
def p4_system(cmd, *k, **kw):
- """Specifically invoke p4 as the system command. """
+ """Specifically invoke p4 as the system command."""
real_cmd = p4_build_cmd(cmd)
retcode = subprocess.call(real_cmd, *k, **kw)
if retcode:
raise subprocess.CalledProcessError(retcode, real_cmd)
+
def die_bad_access(s):
die("failure accessing depot: {0}".format(s.rstrip()))
+
def p4_check_access(min_expiration=1):
- """ Check if we can access Perforce - account still logged in
- """
+ """Check if we can access Perforce - account still logged in."""
+
results = p4CmdList(["login", "-s"])
if len(results) == 0:
@@ -402,62 +514,78 @@ def p4_check_access(min_expiration=1):
else:
die_bad_access("unknown error code {0}".format(code))
+
_p4_version_string = None
+
+
def p4_version_string():
- """Read the version string, showing just the last line, which
- hopefully is the interesting version bit.
+ """Read the version string, showing just the last line, which hopefully is
+ the interesting version bit.
$ p4 -V
Perforce - The Fast Software Configuration Management System.
Copyright 1995-2011 Perforce Software. All rights reserved.
Rev. P4/NTX86/2011.1/393975 (2011/12/16).
- """
+ """
global _p4_version_string
if not _p4_version_string:
a = p4_read_pipe_lines(["-V"])
_p4_version_string = a[-1].rstrip()
return _p4_version_string
+
def p4_integrate(src, dest):
p4_system(["integrate", "-Dt", wildcard_encode(src), wildcard_encode(dest)])
+
def p4_sync(f, *options):
p4_system(["sync"] + list(options) + [wildcard_encode(f)])
+
def p4_add(f):
- # forcibly add file names with wildcards
+ """Forcibly add file names with wildcards."""
if wildcard_present(f):
p4_system(["add", "-f", f])
else:
p4_system(["add", f])
+
def p4_delete(f):
p4_system(["delete", wildcard_encode(f)])
+
def p4_edit(f, *options):
p4_system(["edit"] + list(options) + [wildcard_encode(f)])
+
def p4_revert(f):
p4_system(["revert", wildcard_encode(f)])
+
def p4_reopen(type, f):
p4_system(["reopen", "-t", type, wildcard_encode(f)])
+
def p4_reopen_in_change(changelist, files):
cmd = ["reopen", "-c", str(changelist)] + files
p4_system(cmd)
+
def p4_move(src, dest):
p4_system(["move", "-k", wildcard_encode(src), wildcard_encode(dest)])
+
def p4_last_change():
results = p4CmdList(["changes", "-m", "1"], skip_info=True)
return int(results[0]['change'])
+
def p4_describe(change, shelved=False):
- """Make sure it returns a valid result by checking for
- the presence of field "time". Return a dict of the
- results."""
+ """Make sure it returns a valid result by checking for the presence of
+ field "time".
+
+ Return a dict of the results.
+ """
cmd = ["describe", "-s"]
if shelved:
@@ -482,12 +610,11 @@ def p4_describe(change, shelved=False):
return d
-#
-# Canonicalize the p4 type and return a tuple of the
-# base type, plus any modifiers. See "p4 help filetypes"
-# for a list and explanation.
-#
+
def split_p4_type(p4type):
+ """Canonicalize the p4 type and return a tuple of the base type, plus any
+ modifiers. See "p4 help filetypes" for a list and explanation.
+ """
p4_filetypes_historical = {
"ctempobj": "binary+Sw",
@@ -517,18 +644,19 @@ def split_p4_type(p4type):
mods = s[1]
return (base, mods)
-#
-# return the raw p4 type of a file (text, text+ko, etc)
-#
+
def p4_type(f):
+ """Return the raw p4 type of a file (text, text+ko, etc)."""
+
results = p4CmdList(["fstat", "-T", "headType", wildcard_encode(f)])
return results[0]['headType']
-#
-# Given a type base and modifier, return a regexp matching
-# the keywords that can be expanded in the file
-#
+
def p4_keywords_regexp_for_type(base, type_mods):
+ """Given a type base and modifier, return a regexp matching the keywords
+ that can be expanded in the file.
+ """
+
if base in ("text", "unicode", "binary"):
if "ko" in type_mods:
return re_ko_keywords
@@ -539,21 +667,23 @@ def p4_keywords_regexp_for_type(base, type_mods):
else:
return None
-#
-# Given a file, return a regexp matching the possible
-# RCS keywords that will be expanded, or None for files
-# with kw expansion turned off.
-#
+
def p4_keywords_regexp_for_file(file):
+ """Given a file, return a regexp matching the possible RCS keywords that
+ will be expanded, or None for files with kw expansion turned off.
+ """
+
if not os.path.exists(file):
return None
else:
- (type_base, type_mods) = split_p4_type(p4_type(file))
+ type_base, type_mods = split_p4_type(p4_type(file))
return p4_keywords_regexp_for_type(type_base, type_mods)
+
def setP4ExecBit(file, mode):
- # Reopens an already open file and changes the execute bit to match
- # the execute bit setting in the passed in mode.
+ """Reopens an already open file and changes the execute bit to match the
+ execute bit setting in the passed in mode.
+ """
p4Type = "+x"
@@ -566,8 +696,9 @@ def setP4ExecBit(file, mode):
p4_reopen(p4Type, file)
+
def getP4OpenedType(file):
- # Returns the perforce file type for the given file.
+ """Returns the perforce file type for the given file."""
result = p4_read_pipe(["opened", wildcard_encode(file)])
match = re.match(".*\((.+)\)( \*exclusive\*)?\r?$", result)
@@ -576,8 +707,10 @@ def getP4OpenedType(file):
else:
die("Could not determine file type for %s (result: '%s')" % (file, result))
-# Return the set of all p4 labels
+
def getP4Labels(depotPaths):
+ """Return the set of all p4 labels."""
+
labels = set()
if not isinstance(depotPaths, list):
depotPaths = [depotPaths]
@@ -588,34 +721,39 @@ def getP4Labels(depotPaths):
return labels
-# Return the set of all git tags
+
def getGitTags():
+ """Return the set of all git tags."""
+
gitTags = set()
for line in read_pipe_lines(["git", "tag"]):
tag = line.strip()
gitTags.add(tag)
return gitTags
+
_diff_tree_pattern = None
+
def parseDiffTreeEntry(entry):
"""Parses a single diff tree entry into its component elements.
- See git-diff-tree(1) manpage for details about the format of the diff
- output. This method returns a dictionary with the following elements:
-
- src_mode - The mode of the source file
- dst_mode - The mode of the destination file
- src_sha1 - The sha1 for the source file
- dst_sha1 - The sha1 fr the destination file
- status - The one letter status of the diff (i.e. 'A', 'M', 'D', etc)
- status_score - The score for the status (applicable for 'C' and 'R'
- statuses). This is None if there is no score.
- src - The path for the source file.
- dst - The path for the destination file. This is only present for
- copy or renames. If it is not present, this is None.
-
- If the pattern is not matched, None is returned."""
+ See git-diff-tree(1) manpage for details about the format of the diff
+ output. This method returns a dictionary with the following elements:
+
+ src_mode - The mode of the source file
+ dst_mode - The mode of the destination file
+ src_sha1 - The sha1 for the source file
+ dst_sha1 - The sha1 for the destination file
+ status - The one letter status of the diff (i.e. 'A', 'M', 'D', etc)
+ status_score - The score for the status (applicable for 'C' and 'R'
+ statuses). This is None if there is no score.
+ src - The path for the source file.
+ dst - The path for the destination file. This is only present for
+ copy or renames. If it is not present, this is None.
+
+ If the pattern is not matched, None is returned.
+ """
global _diff_tree_pattern
if not _diff_tree_pattern:
@@ -635,41 +773,55 @@ def parseDiffTreeEntry(entry):
}
return None
+
def isModeExec(mode):
- # Returns True if the given git mode represents an executable file,
- # otherwise False.
+ """Returns True if the given git mode represents an executable file,
+ otherwise False.
+ """
return mode[-3:] == "755"
+
class P4Exception(Exception):
- """ Base class for exceptions from the p4 client """
+ """Base class for exceptions from the p4 client."""
+
def __init__(self, exit_code):
self.p4ExitCode = exit_code
+
class P4ServerException(P4Exception):
- """ Base class for exceptions where we get some kind of marshalled up result from the server """
+ """Base class for exceptions where we get some kind of marshalled up result
+ from the server.
+ """
+
def __init__(self, exit_code, p4_result):
super(P4ServerException, self).__init__(exit_code)
self.p4_result = p4_result
self.code = p4_result[0]['code']
self.data = p4_result[0]['data']
+
class P4RequestSizeException(P4ServerException):
- """ One of the maxresults or maxscanrows errors """
+ """One of the maxresults or maxscanrows errors."""
+
def __init__(self, exit_code, p4_result, limit):
super(P4RequestSizeException, self).__init__(exit_code, p4_result)
self.limit = limit
+
class P4CommandException(P4Exception):
- """ Something went wrong calling p4 which means we have to give up """
+ """Something went wrong calling p4 which means we have to give up."""
+
def __init__(self, msg):
self.msg = msg
def __str__(self):
return self.msg
+
def isModeExecChanged(src_mode, dst_mode):
return isModeExec(src_mode) != isModeExec(dst_mode)
+
def p4CmdList(cmd, stdin=None, stdin_mode='w+b', cb=None, skip_info=False,
errors_as_exceptions=False, *k, **kw):
@@ -702,11 +854,12 @@ def p4CmdList(cmd, stdin=None, stdin_mode='w+b', cb=None, skip_info=False,
if bytes is not str:
# Decode unmarshalled dict to use str keys and values, except for:
# - `data` which may contain arbitrary binary data
- # - `depotFile[0-9]*`, `path`, or `clientFile` which may contain non-UTF8 encoded text
+ # - `desc` or `FullName` which may contain non-UTF8 encoded text handled below, eagerly converted to bytes
+ # - `depotFile[0-9]*`, `path`, or `clientFile` which may contain non-UTF8 encoded text, handled by decode_path()
decoded_entry = {}
for key, value in entry.items():
key = key.decode()
- if isinstance(value, bytes) and not (key in ('data', 'path', 'clientFile') or key.startswith('depotFile')):
+ if isinstance(value, bytes) and not (key in ('data', 'desc', 'FullName', 'path', 'clientFile') or key.startswith('depotFile')):
value = value.decode()
decoded_entry[key] = value
# Parse out data if it's an error response
@@ -716,6 +869,10 @@ def p4CmdList(cmd, stdin=None, stdin_mode='w+b', cb=None, skip_info=False,
if skip_info:
if 'code' in entry and entry['code'] == 'info':
continue
+ if 'desc' in entry:
+ entry['desc'] = metadata_stream_to_writable_bytes(entry['desc'])
+ if 'FullName' in entry:
+ entry['FullName'] = metadata_stream_to_writable_bytes(entry['FullName'])
if cb is not None:
cb(entry)
else:
@@ -746,12 +903,14 @@ def p4CmdList(cmd, stdin=None, stdin_mode='w+b', cb=None, skip_info=False,
return result
+
def p4Cmd(cmd, *k, **kw):
list = p4CmdList(cmd, *k, **kw)
result = {}
for entry in list:
result.update(entry)
- return result;
+ return result
+
def p4Where(depotPath):
if not depotPath.endswith("/"):
@@ -773,7 +932,7 @@ def p4Where(depotPath):
if data[:space] == depotPath:
output = entry
break
- if output == None:
+ if output is None:
return ""
if output["code"] == "error":
return ""
@@ -789,48 +948,54 @@ def p4Where(depotPath):
clientPath = clientPath[:-3]
return clientPath
+
def currentGitBranch():
return read_pipe_text(["git", "symbolic-ref", "--short", "-q", "HEAD"])
+
def isValidGitDir(path):
- return git_dir(path) != None
+ return git_dir(path) is not None
+
def parseRevision(ref):
return read_pipe(["git", "rev-parse", ref]).strip()
+
def branchExists(ref):
rev = read_pipe(["git", "rev-parse", "-q", "--verify", ref],
ignore_error=True)
return len(rev) > 0
+
def extractLogMessageFromGitCommit(commit):
logMessage = ""
- ## fixme: title is first line of commit, not 1st paragraph.
+ # fixme: title is first line of commit, not 1st paragraph.
foundTitle = False
for log in read_pipe_lines(["git", "cat-file", "commit", commit]):
- if not foundTitle:
- if len(log) == 1:
- foundTitle = True
- continue
+ if not foundTitle:
+ if len(log) == 1:
+ foundTitle = True
+ continue
- logMessage += log
+ logMessage += log
return logMessage
+
def extractSettingsGitLog(log):
values = {}
for line in log.split("\n"):
line = line.strip()
- m = re.search (r"^ *\[git-p4: (.*)\]$", line)
+ m = re.search(r"^ *\[git-p4: (.*)\]$", line)
if not m:
continue
- assignments = m.group(1).split (':')
+ assignments = m.group(1).split(':')
for a in assignments:
- vals = a.split ('=')
+ vals = a.split('=')
key = vals[0].strip()
- val = ('='.join (vals[1:])).strip()
- if val.endswith ('\"') and val.startswith('"'):
+ val = ('='.join(vals[1:])).strip()
+ if val.endswith('\"') and val.startswith('"'):
val = val[1:-1]
values[key] = val
@@ -842,41 +1007,49 @@ def extractSettingsGitLog(log):
values['depot-paths'] = paths.split(',')
return values
+
def gitBranchExists(branch):
proc = subprocess.Popen(["git", "rev-parse", branch],
- stderr=subprocess.PIPE, stdout=subprocess.PIPE);
- return proc.wait() == 0;
+ stderr=subprocess.PIPE, stdout=subprocess.PIPE)
+ return proc.wait() == 0
+
def gitUpdateRef(ref, newvalue):
subprocess.check_call(["git", "update-ref", ref, newvalue])
+
def gitDeleteRef(ref):
subprocess.check_call(["git", "update-ref", "-d", ref])
+
_gitConfig = {}
+
def gitConfig(key, typeSpecifier=None):
if key not in _gitConfig:
- cmd = [ "git", "config" ]
+ cmd = ["git", "config"]
if typeSpecifier:
- cmd += [ typeSpecifier ]
- cmd += [ key ]
+ cmd += [typeSpecifier]
+ cmd += [key]
s = read_pipe(cmd, ignore_error=True)
_gitConfig[key] = s.strip()
return _gitConfig[key]
+
def gitConfigBool(key):
"""Return a bool, using git config --bool. It is True only if the
variable is set to true, and False if set to false or not present
- in the config."""
+ in the config.
+ """
if key not in _gitConfig:
_gitConfig[key] = gitConfig(key, '--bool') == "true"
return _gitConfig[key]
+
def gitConfigInt(key):
if key not in _gitConfig:
- cmd = [ "git", "config", "--int", key ]
+ cmd = ["git", "config", "--int", key]
s = read_pipe(cmd, ignore_error=True)
v = s.strip()
try:
@@ -885,6 +1058,7 @@ def gitConfigInt(key):
_gitConfig[key] = None
return _gitConfig[key]
+
def gitConfigList(key):
if key not in _gitConfig:
s = read_pipe(["git", "config", "--get-all", key], ignore_error=True)
@@ -893,12 +1067,43 @@ def gitConfigList(key):
_gitConfig[key] = []
return _gitConfig[key]
+def fullP4Ref(incomingRef, importIntoRemotes=True):
+ """Standardize a given provided p4 ref value to a full git ref:
+ refs/foo/bar/branch -> use it exactly
+ p4/branch -> prepend refs/remotes/ or refs/heads/
+ branch -> prepend refs/remotes/p4/ or refs/heads/p4/"""
+ if incomingRef.startswith("refs/"):
+ return incomingRef
+ if importIntoRemotes:
+ prepend = "refs/remotes/"
+ else:
+ prepend = "refs/heads/"
+ if not incomingRef.startswith("p4/"):
+ prepend += "p4/"
+ return prepend + incomingRef
+
+def shortP4Ref(incomingRef, importIntoRemotes=True):
+ """Standardize to a "short ref" if possible:
+ refs/foo/bar/branch -> ignore
+ refs/remotes/p4/branch or refs/heads/p4/branch -> shorten
+ p4/branch -> shorten"""
+ if importIntoRemotes:
+ longprefix = "refs/remotes/p4/"
+ else:
+ longprefix = "refs/heads/p4/"
+ if incomingRef.startswith(longprefix):
+ return incomingRef[len(longprefix):]
+ if incomingRef.startswith("p4/"):
+ return incomingRef[3:]
+ return incomingRef
+
def p4BranchesInGit(branchesAreInRemotes=True):
"""Find all the branches whose names start with "p4/", looking
in remotes or heads as specified by the argument. Return
a dictionary of { branch: revision } for each one found.
The branch names are the short names, without any
- "p4/" prefix."""
+ "p4/" prefix.
+ """
branches = {}
@@ -925,10 +1130,11 @@ def p4BranchesInGit(branchesAreInRemotes=True):
return branches
+
def branch_exists(branch):
"""Make sure that the given ref name really exists."""
- cmd = [ "git", "rev-parse", "--symbolic", "--verify", branch ]
+ cmd = ["git", "rev-parse", "--symbolic", "--verify", branch]
p = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
out, _ = p.communicate()
out = decode_text_stream(out)
@@ -937,7 +1143,8 @@ def branch_exists(branch):
# expect exactly one line of output: the branch name
return out.rstrip() == branch
-def findUpstreamBranchPoint(head = "HEAD"):
+
+def findUpstreamBranchPoint(head="HEAD"):
branches = p4BranchesInGit()
# map from depot-path to branch name
branchByDepotPath = {}
@@ -946,8 +1153,12 @@ def findUpstreamBranchPoint(head = "HEAD"):
log = extractLogMessageFromGitCommit(tip)
settings = extractSettingsGitLog(log)
if "depot-paths" in settings:
+ git_branch = "remotes/p4/" + branch
paths = ",".join(settings["depot-paths"])
- branchByDepotPath[paths] = "remotes/p4/" + branch
+ branchByDepotPath[paths] = git_branch
+ if "change" in settings:
+ paths = paths + ";" + settings["change"]
+ branchByDepotPath[paths] = git_branch
settings = None
parent = 0
@@ -957,6 +1168,10 @@ def findUpstreamBranchPoint(head = "HEAD"):
settings = extractSettingsGitLog(log)
if "depot-paths" in settings:
paths = ",".join(settings["depot-paths"])
+ if "change" in settings:
+ expaths = paths + ";" + settings["change"]
+ if expaths in branchByDepotPath:
+ return [branchByDepotPath[expaths], settings]
if paths in branchByDepotPath:
return [branchByDepotPath[paths], settings]
@@ -964,7 +1179,8 @@ def findUpstreamBranchPoint(head = "HEAD"):
return ["", settings]
-def createOrUpdateBranchesFromOrigin(localRefPrefix = "refs/remotes/p4/", silent=True):
+
+def createOrUpdateBranchesFromOrigin(localRefPrefix="refs/remotes/p4/", silent=True):
if not silent:
print("Creating/updating branch(es) in %s based on origin branch(es)"
% localRefPrefix)
@@ -981,8 +1197,7 @@ def createOrUpdateBranchesFromOrigin(localRefPrefix = "refs/remotes/p4/", silent
originHead = line
original = extractSettingsGitLog(extractLogMessageFromGitCommit(originHead))
- if ('depot-paths' not in original
- or 'change' not in original):
+ if 'depot-paths' not in original or 'change' not in original:
continue
update = False
@@ -1011,8 +1226,9 @@ def createOrUpdateBranchesFromOrigin(localRefPrefix = "refs/remotes/p4/", silent
if update:
system(["git", "update-ref", remoteHead, originHead])
+
def originP4BranchesExist():
- return gitBranchExists("origin") or gitBranchExists("origin/p4") or gitBranchExists("origin/p4/master")
+ return gitBranchExists("origin") or gitBranchExists("origin/p4") or gitBranchExists("origin/p4/master")
def p4ParseNumericChangeRange(parts):
@@ -1024,12 +1240,14 @@ def p4ParseNumericChangeRange(parts):
return (changeStart, changeEnd)
+
def chooseBlockSize(blockSize):
if blockSize:
return blockSize
else:
return defaultBlockSize
+
def p4ChangesForPaths(depotPaths, changeRange, requestedBlockSize):
assert depotPaths
@@ -1047,7 +1265,7 @@ def p4ChangesForPaths(depotPaths, changeRange, requestedBlockSize):
parts = changeRange.split(',')
assert len(parts) == 2
try:
- (changeStart, changeEnd) = p4ParseNumericChangeRange(parts)
+ changeStart, changeEnd = p4ParseNumericChangeRange(parts)
block_size = chooseBlockSize(requestedBlockSize)
except ValueError:
changeStart = parts[0][1:]
@@ -1085,7 +1303,8 @@ def p4ChangesForPaths(depotPaths, changeRange, requestedBlockSize):
else:
block_size = max(2, block_size // 2)
- if verbose: print("block size error, retrying with block size {0}".format(block_size))
+ if verbose:
+ print("block size error, retrying with block size {0}".format(block_size))
continue
except P4Exception as e:
die('Error retrieving changes description ({0})'.format(e.p4ExitCode))
@@ -1107,21 +1326,25 @@ def p4ChangesForPaths(depotPaths, changeRange, requestedBlockSize):
changes = sorted(changes)
return changes
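The retry logic visible in fragments above halves the query window whenever the server rejects a request as too large, bottoming out at a block size of 2. As a generic sketch of that pattern (the helper and exception names here are placeholders, not git-p4 identifiers):

    block_size = chooseBlockSize(requestedBlockSize)
    while True:
        try:
            changes = run_p4_changes_block(block_size)   # placeholder for the real p4 query
            break
        except RequestTooLargeError:                     # placeholder for the size-limit error
            block_size = max(2, block_size // 2)         # shrink the window and retry
            if verbose:
                print("block size error, retrying with block size {0}".format(block_size))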
+
def p4PathStartsWith(path, prefix):
- # This method tries to remedy a potential mixed-case issue:
- #
- # If UserA adds //depot/DirA/file1
- # and UserB adds //depot/dira/file2
- #
- # we may or may not have a problem. If you have core.ignorecase=true,
- # we treat DirA and dira as the same directory
+ """This method tries to remedy a potential mixed-case issue:
+
+ If UserA adds //depot/DirA/file1
+ and UserB adds //depot/dira/file2
+
+ we may or may not have a problem. If you have core.ignorecase=true,
+ we treat DirA and dira as the same directory.
+ """
if gitConfigBool("core.ignorecase"):
return path.lower().startswith(prefix.lower())
return path.startswith(prefix)
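With core.ignorecase=true both of the depot paths from the docstring match the same prefix; without it only the exact-case one does:

    p4PathStartsWith("//depot/DirA/file1", "//depot/DirA/")   # True in either mode
    p4PathStartsWith("//depot/dira/file2", "//depot/DirA/")   # True only when core.ignorecase=true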
+
def getClientSpec():
"""Look at the p4 client spec, create a View() object that contains
- all the mappings, and return it."""
+ all the mappings, and return it.
+ """
specList = p4CmdList(["client", "-o"])
if len(specList) != 1:
@@ -1135,7 +1358,7 @@ def getClientSpec():
client_name = entry["Client"]
# just the keys that start with "View"
- view_keys = [ k for k in entry.keys() if k.startswith("View") ]
+ view_keys = [k for k in entry.keys() if k.startswith("View")]
# hold this new View
view = View(client_name)
@@ -1149,6 +1372,7 @@ def getClientSpec():
return view
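For context, the single dict returned by "p4 client -o" (via p4CmdList) carries the client mappings in numbered View keys, which the comprehension above selects; a trimmed, made-up example of such an entry:

    entry = {
        "Client": "my-client",
        "Root": "/home/me/work",
        "View0": "//depot/main/... //my-client/main/...",
        "View1": "//depot/proj/... //my-client/proj/...",
    }
    view_keys = [k for k in entry.keys() if k.startswith("View")]   # ["View0", "View1"]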
+
def getClientRoot():
"""Grab the client directory."""
@@ -1162,12 +1386,15 @@ def getClientRoot():
return entry["Root"]
-#
-# P4 wildcards are not allowed in filenames. P4 complains
-# if you simply add them, but you can force it with "-f", in
-# which case it translates them into %xx encoding internally.
-#
+
def wildcard_decode(path):
+ """Decode P4 wildcards into %xx encoding
+
+ P4 wildcards are not allowed in filenames. P4 complains if you simply
+ add them, but you can force it with "-f", in which case it translates
+ them into %xx encoding internally.
+ """
+
# Search for and fix just these four characters. Do % last so
# that fixing it does not inadvertently create new %-escapes.
# Cannot have * in a filename in windows; untested as to
@@ -1179,7 +1406,10 @@ def wildcard_decode(path):
.replace("%25", "%")
return path
+
def wildcard_encode(path):
+ """Encode %xx coded wildcards into P4 coding."""
+
# do % first to avoid double-encoding the %s introduced here
path = path.replace("%", "%25") \
.replace("*", "%2A") \
@@ -1187,10 +1417,12 @@ def wildcard_encode(path):
.replace("@", "%40")
return path
+
def wildcard_present(path):
m = re.search("[*#@%]", path)
return m is not None
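Taken together, the three helpers give a reversible mapping between the special characters and their %xx escapes, for example (a sketch assuming the functions above are in scope):

    path = "100%@done#1.txt"
    assert wildcard_present(path)              # "%", "@" and "#" are p4 wildcards
    encoded = wildcard_encode(path)            # "100%25%40done%231.txt"
    assert wildcard_decode(encoded) == path    # decodes back to the original name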
+
class LargeFileSystem(object):
"""Base class for large file system support."""
@@ -1199,13 +1431,15 @@ class LargeFileSystem(object):
self.writeToGitStream = writeToGitStream
def generatePointer(self, cloneDestination, contentFile):
- """Return the content of a pointer file that is stored in Git instead of
- the actual content."""
+ """Return the content of a pointer file that is stored in Git instead
+ of the actual content.
+ """
assert False, "Method 'generatePointer' required in " + self.__class__.__name__
def pushFile(self, localLargeFile):
"""Push the actual content which is not stored in the Git repository to
- a server."""
+ a server.
+ """
assert False, "Method 'pushFile' required in " + self.__class__.__name__
def hasLargeFileExtension(self, relPath):
@@ -1253,10 +1487,11 @@ class LargeFileSystem(object):
def processContent(self, git_mode, relPath, contents):
"""Processes the content of git fast import. This method decides if a
file is stored in the large file system and handles all necessary
- steps."""
+ steps.
+ """
if self.exceedsLargeFileThreshold(relPath, contents) or self.hasLargeFileExtension(relPath):
contentTempFile = self.generateTempFile(contents)
- (pointer_git_mode, contents, localLargeFile) = self.generatePointer(contentTempFile)
+ pointer_git_mode, contents, localLargeFile = self.generatePointer(contentTempFile)
if pointer_git_mode:
git_mode = pointer_git_mode
if localLargeFile:
@@ -1272,12 +1507,14 @@ class LargeFileSystem(object):
sys.stderr.write("%s moved to large file system (%s)\n" % (relPath, localLargeFile))
return (git_mode, contents)
+
class MockLFS(LargeFileSystem):
"""Mock large file system for testing."""
def generatePointer(self, contentFile):
"""The pointer content is the original content prefixed with "pointer-".
- The local filename of the large file storage is derived from the file content.
+ The local filename of the large file storage is derived from the
+ file content.
"""
with open(contentFile, 'r') as f:
content = next(f)
@@ -1287,17 +1524,19 @@ class MockLFS(LargeFileSystem):
return (gitMode, pointerContents, localLargeFile)
def pushFile(self, localLargeFile):
- """The remote filename of the large file storage is the same as the local
- one but in a different directory.
+ """The remote filename of the large file storage is the same as the
+ local one but in a different directory.
"""
remotePath = os.path.join(os.path.dirname(localLargeFile), '..', 'remote')
if not os.path.exists(remotePath):
os.makedirs(remotePath)
shutil.copyfile(localLargeFile, os.path.join(remotePath, os.path.basename(localLargeFile)))
+
class GitLFS(LargeFileSystem):
"""Git LFS as backend for the git-p4 large file system.
- See https://git-lfs.github.com/ for details."""
+ See https://git-lfs.github.com/ for details.
+ """
def __init__(self, *args):
LargeFileSystem.__init__(self, *args)
@@ -1383,9 +1622,10 @@ class GitLFS(LargeFileSystem):
else:
return LargeFileSystem.processContent(self, git_mode, relPath, contents)
+
class Command:
- delete_actions = ( "delete", "move/delete", "purge" )
- add_actions = ( "add", "branch", "move/add" )
+ delete_actions = ("delete", "move/delete", "purge")
+ add_actions = ("add", "branch", "move/add")
def __init__(self):
self.usage = "usage: %prog [options]"
@@ -1398,6 +1638,7 @@ class Command:
setattr(self, attr, value)
return getattr(self, attr)
+
class P4UserMap:
def __init__(self):
self.userMapFromPerforceServer = False
@@ -1415,7 +1656,7 @@ class P4UserMap:
die("Could not find your p4 user id")
def p4UserIsMe(self, p4User):
- # return True if the given p4 user is actually me
+ """Return True if the given p4 user is actually me."""
me = self.p4UserId()
if not p4User or p4User != me:
return False
@@ -1435,7 +1676,13 @@ class P4UserMap:
for output in p4CmdList(["users"]):
if "User" not in output:
continue
- self.users[output["User"]] = output["FullName"] + " <" + output["Email"] + ">"
+ # "FullName" is bytes. "Email" on the other hand might be bytes
+ # or unicode string depending on whether we are running under
+ # python2 or python3. To support
+ # git-p4.metadataDecodingStrategy=fallback, self.users dict values
+ # are always bytes, ready to be written to git.
+ emailbytes = metadata_stream_to_writable_bytes(output["Email"])
+ self.users[output["User"]] = output["FullName"] + b" <" + emailbytes + b">"
self.emails[output["Email"]] = output["User"]
mapUserConfigRegex = re.compile(r"^\s*(\S+)\s*=\s*(.+)\s*<(\S+)>\s*$", re.VERBOSE)
@@ -1445,29 +1692,32 @@ class P4UserMap:
user = mapUser[0][0]
fullname = mapUser[0][1]
email = mapUser[0][2]
- self.users[user] = fullname + " <" + email + ">"
+ fulluser = fullname + " <" + email + ">"
+ self.users[user] = metadata_stream_to_writable_bytes(fulluser)
self.emails[email] = user
- s = ''
+ s = b''
for (key, val) in self.users.items():
- s += "%s\t%s\n" % (key.expandtabs(1), val.expandtabs(1))
+ keybytes = metadata_stream_to_writable_bytes(key)
+ s += b"%s\t%s\n" % (keybytes.expandtabs(1), val.expandtabs(1))
- open(self.getUserCacheFilename(), 'w').write(s)
+ open(self.getUserCacheFilename(), 'wb').write(s)
self.userMapFromPerforceServer = True
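The helper metadata_stream_to_writable_bytes() is defined earlier in git-p4.py, outside this hunk; the property relied on here is simply that whatever it is handed comes back as bytes, so concatenation with the b" <" and b">" literals always stays bytes-to-bytes. A sketch of that invariant (sample values are made up):

    full_name = b"A Developer"                                       # "FullName" arrives as bytes
    email = metadata_stream_to_writable_bytes("a.dev@example.com")   # str or bytes in, bytes out
    entry = full_name + b" <" + email + b">"
    assert isinstance(entry, bytes)            # e.g. b"A Developer <a.dev@example.com>"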
def loadUserMapFromCache(self):
self.users = {}
self.userMapFromPerforceServer = False
try:
- cache = open(self.getUserCacheFilename(), 'r')
+ cache = open(self.getUserCacheFilename(), 'rb')
lines = cache.readlines()
cache.close()
for line in lines:
- entry = line.strip().split("\t")
- self.users[entry[0]] = entry[1]
+ entry = line.strip().split(b"\t")
+ self.users[entry[0].decode('utf_8')] = entry[1]
except IOError:
self.getUserMapFromPerforceServer()
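The cache round-trip above boils down to one tab-separated bytes line per user; only the key is decoded, so the "Full Name <email>" value stays ready to write to git as-is:

    line = b"alice\tAlice Example <alice@example.com>"
    entry = line.strip().split(b"\t")
    users = {entry[0].decode('utf_8'): entry[1]}
    # {"alice": b"Alice Example <alice@example.com>"}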
+
class P4Submit(Command, P4UserMap):
conflict_behavior_choices = ("ask", "skip", "quit")
@@ -1560,20 +1810,20 @@ class P4Submit(Command, P4UserMap):
die("You have files opened with perforce! Close them before starting the sync.")
def separate_jobs_from_description(self, message):
- """Extract and return a possible Jobs field in the commit
- message. It goes into a separate section in the p4 change
- specification.
+ """Extract and return a possible Jobs field in the commit message. It
+ goes into a separate section in the p4 change specification.
- A jobs line starts with "Jobs:" and looks like a new field
- in a form. Values are white-space separated on the same
- line or on following lines that start with a tab.
+ A jobs line starts with "Jobs:" and looks like a new field in a
+ form. Values are white-space separated on the same line or on
+ following lines that start with a tab.
- This does not parse and extract the full git commit message
- like a p4 form. It just sees the Jobs: line as a marker
- to pass everything from then on directly into the p4 form,
- but outside the description section.
+ This does not parse and extract the full git commit message like a
+ p4 form. It just sees the Jobs: line as a marker to pass everything
+ from then on directly into the p4 form, but outside the description
+ section.
- Return a tuple (stripped log message, jobs string)."""
+ Return a tuple (stripped log message, jobs string).
+ """
m = re.search(r'^Jobs:', message, re.MULTILINE)
if m is None:
@@ -1584,9 +1834,10 @@ class P4Submit(Command, P4UserMap):
return (stripped_message, jobtext)
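A small example of the split this docstring describes (the slicing itself sits in the part of the method elided by the hunk, so the exact whitespace handling is approximate; "submitter" stands in for a P4Submit instance):

    message = ("Fix the frobnicator\n"
               "\n"
               "Longer explanation of the change.\n"
               "\n"
               "Jobs: job000123 job000124\n")
    stripped_message, jobtext = submitter.separate_jobs_from_description(message)
    # stripped_message -> roughly everything before the "Jobs:" line
    # jobtext          -> "Jobs: job000123 job000124\n"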
def prepareLogMessage(self, template, message, jobs):
- """Edits the template returned from "p4 change -o" to insert
- the message in the Description field, and the jobs text in
- the Jobs field."""
+ """Edits the template returned from "p4 change -o" to insert the
+ message in the Description field, and the jobs text in the Jobs
+ field.
+ """
result = ""
inDescriptionSection = False
@@ -1616,8 +1867,10 @@ class P4Submit(Command, P4UserMap):
return result
def patchRCSKeywords(self, file, regexp):
- # Attempt to zap the RCS keywords in a p4 controlled file matching the given regex
- (handle, outFileName) = tempfile.mkstemp(dir='.')
+ """Attempt to zap the RCS keywords in a p4 controlled file matching the
+ given regex.
+ """
+ handle, outFileName = tempfile.mkstemp(dir='.')
try:
with os.fdopen(handle, "wb") as outFile, open(file, "rb") as inFile:
for line in inFile.readlines():
@@ -1633,21 +1886,23 @@ class P4Submit(Command, P4UserMap):
print("Patched up RCS keywords in %s" % file)
- def p4UserForCommit(self,id):
- # Return the tuple (perforce user,git email) for a given git commit id
+ def p4UserForCommit(self, id):
+ """Return the tuple (perforce user,git email) for a given git commit
+ id.
+ """
self.getUserMapFromPerforceServer()
gitEmail = read_pipe(["git", "log", "--max-count=1",
"--format=%ae", id])
gitEmail = gitEmail.strip()
if gitEmail not in self.emails:
- return (None,gitEmail)
+ return (None, gitEmail)
else:
- return (self.emails[gitEmail],gitEmail)
+ return (self.emails[gitEmail], gitEmail)
- def checkValidP4Users(self,commits):
- # check if any git authors cannot be mapped to p4 users
+ def checkValidP4Users(self, commits):
+ """Check if any git authors cannot be mapped to p4 users."""
for id in commits:
- (user,email) = self.p4UserForCommit(id)
+ user, email = self.p4UserForCommit(id)
if not user:
msg = "Cannot find p4 user for email %s in commit %s." % (email, id)
if gitConfigBool("git-p4.allowMissingP4Users"):
@@ -1656,10 +1911,12 @@ class P4Submit(Command, P4UserMap):
die("Error: %s\nSet git-p4.allowMissingP4Users to true to allow this." % msg)
def lastP4Changelist(self):
- # Get back the last changelist number submitted in this client spec. This
- # then gets used to patch up the username in the change. If the same
- # client spec is being used by multiple processes then this might go
- # wrong.
+ """Get back the last changelist number submitted in this client spec.
+
+ This then gets used to patch up the username in the change. If the
+ same client spec is being used by multiple processes then this might
+ go wrong.
+ """
results = p4CmdList(["client", "-o"]) # find the current client
client = None
for r in results:
@@ -1675,14 +1932,16 @@ class P4Submit(Command, P4UserMap):
die("Could not get changelist number for last submit - cannot patch up user details")
def modifyChangelistUser(self, changelist, newUser):
- # fixup the user field of a changelist after it has been submitted.
+ """Fixup the user field of a changelist after it has been submitted."""
changes = p4CmdList(["change", "-o", changelist])
if len(changes) != 1:
die("Bad output from p4 change modifying %s to user %s" %
(changelist, newUser))
c = changes[0]
- if c['User'] == newUser: return # nothing to do
+ if c['User'] == newUser:
+ # Nothing to do
+ return
c['User'] = newUser
# p4 does not understand format version 3 and above
input = marshal.dumps(c, 2)
@@ -1698,8 +1957,9 @@ class P4Submit(Command, P4UserMap):
die("Could not modify user field of changelist %s to %s" % (changelist, newUser))
def canChangeChangelists(self):
- # check to see if we have p4 admin or super-user permissions, either of
- # which are required to modify changelists.
+ """Check to see if we have p4 admin or super-user permissions, either
+ of which is required to modify changelists.
+ """
results = p4CmdList(["protects", self.depotPath])
for r in results:
if 'perm' in r:
@@ -1711,13 +1971,15 @@ class P4Submit(Command, P4UserMap):
def prepareSubmitTemplate(self, changelist=None):
"""Run "p4 change -o" to grab a change specification template.
+
This does not use "p4 -G", as it is nice to keep the submission
template in original order, since a human might edit it.
Remove lines in the Files section that show changes to files
- outside the depot path we're committing into."""
+ outside the depot path we're committing into.
+ """
- [upstream, settings] = findUpstreamBranchPoint()
+ upstream, settings = findUpstreamBranchPoint()
template = """\
# A Perforce Change Specification.
@@ -1778,8 +2040,10 @@ class P4Submit(Command, P4UserMap):
return template
def edit_template(self, template_file):
- """Invoke the editor to let the user change the submission
- message. Return true if okay to continue with the submit."""
+ """Invoke the editor to let the user change the submission message.
+
+ Return true if okay to continue with the submit.
+ """
# if configured to skip the editing part, just submit
if gitConfigBool("git-p4.skipSubmitEdit"):
@@ -1838,7 +2102,9 @@ class P4Submit(Command, P4UserMap):
for line in f.readlines():
newdiff += "+" + line
except UnicodeDecodeError:
- pass # Found non-text data and skip, since diff description should only include text
+ # Found non-text data, so skip it: the diff description
+ # should only include text.
+ pass
f.close()
return (diff + newdiff).replace('\r\n', '\n')
@@ -1849,7 +2115,7 @@ class P4Submit(Command, P4UserMap):
print("Applying", read_pipe(["git", "show", "-s",
"--format=format:%h %s", id]))
- (p4User, gitEmail) = self.p4UserForCommit(id)
+ p4User, gitEmail = self.p4UserForCommit(id)