-rw-r--r--  .gitignore | 1
-rw-r--r--  Documentation/.gitignore | 1
-rw-r--r--  Documentation/RelNotes/2.19.0.txt | 248
-rw-r--r--  Documentation/config.txt | 273
-rw-r--r--  Documentation/diff-options.txt | 36
-rwxr-xr-x  Documentation/doc-diff | 109
-rw-r--r--  Documentation/fetch-options.txt | 19
-rw-r--r--  Documentation/git-checkout.txt | 9
-rw-r--r--  Documentation/git-commit-graph.txt | 14
-rw-r--r--  Documentation/git-for-each-ref.txt | 2
-rw-r--r--  Documentation/git-fsck.txt | 3
-rw-r--r--  Documentation/git-gc.txt | 4
-rw-r--r--  Documentation/git-grep.txt | 7
-rw-r--r--  Documentation/git-interpret-trailers.txt | 9
-rw-r--r--  Documentation/git-merge.txt | 16
-rw-r--r--  Documentation/git-p4.txt | 8
-rw-r--r--  Documentation/git-rebase.txt | 7
-rw-r--r--  Documentation/git-tag.txt | 2
-rw-r--r--  Documentation/git-worktree.txt | 9
-rw-r--r--  Documentation/githooks.txt | 7
-rw-r--r--  Documentation/technical/commit-graph.txt | 22
-rw-r--r--  Documentation/technical/hash-function-transition.txt | 202
-rw-r--r--  Documentation/technical/http-protocol.txt | 4
-rw-r--r--  Documentation/technical/pack-protocol.txt | 3
-rw-r--r--  Makefile | 37
-rw-r--r--  advice.c | 2
-rw-r--r--  advice.h | 1
-rw-r--r--  apply.c | 24
-rw-r--r--  apply.h | 23
-rw-r--r--  archive-tar.c | 12
-rw-r--r--  archive-zip.c | 14
-rw-r--r--  archive.c | 2
-rw-r--r--  attr.h | 24
-rw-r--r--  banned.h | 30
-rw-r--r--  bisect.c | 2
-rw-r--r--  blame.c | 18
-rw-r--r--  blame.h | 28
-rw-r--r--  blob.c | 10
-rw-r--r--  blob.h | 2
-rw-r--r--  branch.c | 2
-rw-r--r--  builtin/add.c | 3
-rw-r--r--  builtin/am.c | 16
-rw-r--r--  builtin/blame.c | 6
-rw-r--r--  builtin/branch.c | 7
-rw-r--r--  builtin/checkout.c | 59
-rw-r--r--  builtin/clone.c | 27
-rw-r--r--  builtin/commit-graph.c | 101
-rw-r--r--  builtin/commit-tree.c | 4
-rw-r--r--  builtin/commit.c | 6
-rw-r--r--  builtin/config.c | 50
-rw-r--r--  builtin/describe.c | 13
-rw-r--r--  builtin/diff-tree.c | 9
-rw-r--r--  builtin/diff.c | 7
-rw-r--r--  builtin/difftool.c | 10
-rw-r--r--  builtin/fast-export.c | 56
-rw-r--r--  builtin/fetch.c | 82
-rw-r--r--  builtin/fmt-merge-msg.c | 28
-rw-r--r--  builtin/fsck.c | 44
-rw-r--r--  builtin/gc.c | 7
-rw-r--r--  builtin/grep.c | 21
-rw-r--r--  builtin/index-pack.c | 7
-rw-r--r--  builtin/init-db.c | 3
-rw-r--r--  builtin/log.c | 16
-rw-r--r--  builtin/merge-base.c | 7
-rw-r--r--  builtin/merge-recursive.c | 4
-rw-r--r--  builtin/merge-tree.c | 3
-rw-r--r--  builtin/merge.c | 45
-rw-r--r--  builtin/name-rev.c | 13
-rw-r--r--  builtin/notes.c | 3
-rw-r--r--  builtin/pack-objects.c | 140
-rw-r--r--  builtin/prune.c | 4
-rw-r--r--  builtin/pull.c | 21
-rw-r--r--  builtin/push.c | 4
-rw-r--r--  builtin/read-tree.c | 2
-rw-r--r--  builtin/receive-pack.c | 6
-rw-r--r--  builtin/reflog.c | 15
-rw-r--r--  builtin/remote.c | 5
-rw-r--r--  builtin/replace.c | 94
-rw-r--r--  builtin/reset.c | 6
-rw-r--r--  builtin/rev-list.c | 2
-rw-r--r--  builtin/rev-parse.c | 6
-rw-r--r--  builtin/rm.c | 2
-rw-r--r--  builtin/send-pack.c | 2
-rw-r--r--  builtin/shortlog.c | 5
-rw-r--r--  builtin/show-branch.c | 7
-rw-r--r--  builtin/tag.c | 2
-rw-r--r--  builtin/unpack-objects.c | 9
-rw-r--r--  builtin/update-index.c | 44
-rw-r--r--  builtin/upload-pack.c | 2
-rw-r--r--  builtin/verify-commit.c | 4
-rw-r--r--  builtin/worktree.c | 4
-rw-r--r--  builtin/write-tree.c | 5
-rw-r--r--  bundle.c | 10
-rw-r--r--  cache-tree.c | 3
-rw-r--r--  cache-tree.h | 2
-rw-r--r--  cache.h | 115
-rw-r--r--  checkout.c | 37
-rw-r--r--  checkout.h | 4
-rwxr-xr-x  ci/lib-travisci.sh | 2
-rwxr-xr-x  ci/print-test-failures.sh | 55
-rwxr-xr-x  ci/run-static-analysis.sh | 21
-rwxr-xr-x  ci/util/extract-trash-dirs.sh | 50
-rw-r--r--  color.c | 3
-rw-r--r--  commit-graph.c | 379
-rw-r--r--  commit-graph.h | 22
-rw-r--r--  commit-slab-impl.h | 3
-rw-r--r--  commit.c | 98
-rw-r--r--  commit.h | 19
-rw-r--r--  config.c | 116
-rw-r--r--  config.h | 21
-rw-r--r--  config.mak.dev | 5
-rw-r--r--  config.mak.uname | 2
-rw-r--r--  connect.c | 77
-rw-r--r--  contrib/coccinelle/commit.cocci | 2
-rwxr-xr-x  contrib/mw-to-git/t/t9360-mw-to-git-clone.sh | 2
-rwxr-xr-x  contrib/subtree/t/t7900-subtree.sh | 121
-rw-r--r--  contrib/vscode/.gitattributes | 1
-rw-r--r--  contrib/vscode/README.md | 14
-rwxr-xr-x  contrib/vscode/init.sh | 375
-rw-r--r--  convert.c | 48
-rw-r--r--  convert.h | 56
-rw-r--r--  diff-lib.c | 3
-rw-r--r--  diff.c | 274
-rw-r--r--  diff.h | 129
-rw-r--r--  diffcore.h | 50
-rw-r--r--  dir.c | 8
-rw-r--r--  environment.c | 9
-rw-r--r--  exec-cmd.c | 2
-rw-r--r--  fast-import.c | 6
-rw-r--r--  fetch-negotiator.c | 20
-rw-r--r--  fetch-negotiator.h | 58
-rw-r--r--  fetch-object.c | 2
-rw-r--r--  fetch-pack.c | 388
-rw-r--r--  fetch-pack.h | 7
-rw-r--r--  fsck.c | 18
-rw-r--r--  git-compat-util.h | 6
-rwxr-xr-x  git-p4.py | 16
-rwxr-xr-x  git-send-email.perl | 3
-rw-r--r--  git.c | 5
-rw-r--r--  gpg-interface.c | 108
-rw-r--r--  grep.c | 91
-rw-r--r--  grep.h | 1
-rw-r--r--  help.c | 1
-rw-r--r--  hex.c | 6
-rw-r--r--  http-backend.c | 104
-rw-r--r--  http-push.c | 16
-rw-r--r--  json-writer.c | 414
-rw-r--r--  json-writer.h | 105
-rw-r--r--  line-log.c | 6
-rw-r--r--  line-range.c | 2
-rw-r--r--  line-range.h | 12
-rw-r--r--  list-objects.c | 4
-rw-r--r--  log-tree.c | 17
-rw-r--r--  match-trees.c | 43
-rw-r--r--  mem-pool.c | 114
-rw-r--r--  mem-pool.h | 23
-rw-r--r--  merge-recursive.c | 51
-rw-r--r--  merge.c | 31
-rw-r--r--  negotiator/default.c | 176
-rw-r--r--  negotiator/default.h | 8
-rw-r--r--  negotiator/skipping.c | 250
-rw-r--r--  negotiator/skipping.h | 8
-rw-r--r--  notes-cache.c | 3
-rw-r--r--  notes-merge.c | 5
-rw-r--r--  notes-utils.c | 4
-rw-r--r--  object-store.h | 6
-rw-r--r--  object.c | 76
-rw-r--r--  object.h | 15
-rw-r--r--  packfile.c | 2
-rw-r--r--  packfile.h | 2
-rw-r--r--  parse-options-cb.c | 2
-rw-r--r--  parse-options.c | 3
-rw-r--r--  pkt-line.c | 26
-rw-r--r--  pretty.c | 10
-rw-r--r--  reachable.c | 8
-rw-r--r--  read-cache.c | 317
-rw-r--r--  ref-filter.c | 236
-rw-r--r--  reflog-walk.c | 7
-rw-r--r--  refs.c | 60
-rw-r--r--  refs/files-backend.c | 8
-rw-r--r--  refspec.c | 2
-rw-r--r--  remote.c | 32
-rw-r--r--  replace-object.c | 8
-rw-r--r--  replace-object.h | 2
-rw-r--r--  repository.h | 25
-rw-r--r--  rerere.h | 14
-rw-r--r--  resolve-undo.c | 4
-rw-r--r--  revision.c | 29
-rw-r--r--  revision.h | 69
-rw-r--r--  sequencer.c | 251
-rw-r--r--  server-info.c | 4
-rw-r--r--  sha1-file.c | 122
-rw-r--r--  sha1-name.c | 45
m---------  sha1collisiondetection | 0
-rw-r--r--  sha1dc/sha1.c | 12
-rw-r--r--  shallow.c | 30
-rw-r--r--  split-index.c | 58
-rw-r--r--  strbuf.c | 22
-rw-r--r--  submodule-config.c | 2
-rw-r--r--  submodule.c | 4
-rw-r--r--  submodule.h | 112
-rw-r--r--  t/.gitignore | 1
-rw-r--r--  t/Makefile | 25
-rw-r--r--  t/annotate-tests.sh | 4
-rw-r--r--  t/chainlint.sed | 346
-rw-r--r--  t/chainlint/arithmetic-expansion.expect | 9
-rw-r--r--  t/chainlint/arithmetic-expansion.test | 11
-rw-r--r--  t/chainlint/bash-array.expect | 10
-rw-r--r--  t/chainlint/bash-array.test | 12
-rw-r--r--  t/chainlint/blank-line.expect | 4
-rw-r--r--  t/chainlint/blank-line.test | 10
-rw-r--r--  t/chainlint/block.expect | 12
-rw-r--r--  t/chainlint/block.test | 15
-rw-r--r--  t/chainlint/broken-chain.expect | 6
-rw-r--r--  t/chainlint/broken-chain.test | 8
-rw-r--r--  t/chainlint/case.expect | 19
-rw-r--r--  t/chainlint/case.test | 23
-rw-r--r--  t/chainlint/close-nested-and-parent-together.expect | 4
-rw-r--r--  t/chainlint/close-nested-and-parent-together.test | 3
-rw-r--r--  t/chainlint/close-subshell.expect | 25
-rw-r--r--  t/chainlint/close-subshell.test | 27
-rw-r--r--  t/chainlint/command-substitution.expect | 9
-rw-r--r--  t/chainlint/command-substitution.test | 11
-rw-r--r--  t/chainlint/comment.expect | 4
-rw-r--r--  t/chainlint/comment.test | 11
-rw-r--r--  t/chainlint/complex-if-in-cuddled-loop.expect | 10
-rw-r--r--  t/chainlint/complex-if-in-cuddled-loop.test | 11
-rw-r--r--  t/chainlint/cuddled-if-then-else.expect | 7
-rw-r--r--  t/chainlint/cuddled-if-then-else.test | 7
-rw-r--r--  t/chainlint/cuddled-loop.expect | 5
-rw-r--r--  t/chainlint/cuddled-loop.test | 7
-rw-r--r--  t/chainlint/cuddled.expect | 21
-rw-r--r--  t/chainlint/cuddled.test | 23
-rw-r--r--  t/chainlint/exit-loop.expect | 24
-rw-r--r--  t/chainlint/exit-loop.test | 27
-rw-r--r--  t/chainlint/exit-subshell.expect | 5
-rw-r--r--  t/chainlint/exit-subshell.test | 6
-rw-r--r--  t/chainlint/for-loop.expect | 11
-rw-r--r--  t/chainlint/for-loop.test | 19
-rw-r--r--  t/chainlint/here-doc.expect | 3
-rw-r--r--  t/chainlint/here-doc.test | 16
-rw-r--r--  t/chainlint/if-in-loop.expect | 12
-rw-r--r--  t/chainlint/if-in-loop.test | 15
-rw-r--r--  t/chainlint/if-then-else.expect | 19
-rw-r--r--  t/chainlint/if-then-else.test | 28
-rw-r--r--  t/chainlint/incomplete-line.expect | 4
-rw-r--r--  t/chainlint/incomplete-line.test | 12
-rw-r--r--  t/chainlint/inline-comment.expect | 9
-rw-r--r--  t/chainlint/inline-comment.test | 12
-rw-r--r--  t/chainlint/loop-in-if.expect | 12
-rw-r--r--  t/chainlint/loop-in-if.test | 15
-rw-r--r--  t/chainlint/multi-line-nested-command-substitution.expect | 9
-rw-r--r--  t/chainlint/multi-line-nested-command-substitution.test | 9
-rw-r--r--  t/chainlint/multi-line-string.expect | 9
-rw-r--r--  t/chainlint/multi-line-string.test | 15
-rw-r--r--  t/chainlint/negated-one-liner.expect | 5
-rw-r--r--  t/chainlint/negated-one-liner.test | 7
-rw-r--r--  t/chainlint/nested-cuddled-subshell.expect | 19
-rw-r--r--  t/chainlint/nested-cuddled-subshell.test | 31
-rw-r--r--  t/chainlint/nested-here-doc.expect | 5
-rw-r--r--  t/chainlint/nested-here-doc.test | 23
-rw-r--r--  t/chainlint/nested-subshell-comment.expect | 11
-rw-r--r--  t/chainlint/nested-subshell-comment.test | 13
-rw-r--r--  t/chainlint/nested-subshell.expect | 12
-rw-r--r--  t/chainlint/nested-subshell.test | 14
-rw-r--r--  t/chainlint/one-liner.expect | 9
-rw-r--r--  t/chainlint/one-liner.test | 12
-rw-r--r--  t/chainlint/p4-filespec.expect | 4
-rw-r--r--  t/chainlint/p4-filespec.test | 5
-rw-r--r--  t/chainlint/pipe.expect | 8
-rw-r--r--  t/chainlint/pipe.test | 12
-rw-r--r--  t/chainlint/semicolon.expect | 20
-rw-r--r--  t/chainlint/semicolon.test | 25
-rw-r--r--  t/chainlint/subshell-here-doc.expect | 5
-rw-r--r--  t/chainlint/subshell-here-doc.test | 23
-rw-r--r--  t/chainlint/subshell-one-liner.expect | 14
-rw-r--r--  t/chainlint/subshell-one-liner.test | 24
-rw-r--r--  t/chainlint/while-loop.expect | 11
-rw-r--r--  t/chainlint/while-loop.test | 19
-rw-r--r--  t/helper/test-drop-caches.c | 4
-rw-r--r--  t/helper/test-json-writer.c | 565
-rw-r--r--  t/helper/test-repository.c | 82
-rw-r--r--  t/helper/test-tool.c | 2
-rw-r--r--  t/helper/test-tool.h | 2
-rwxr-xr-x  t/lib-gpg.sh | 28
-rw-r--r--  t/lib-gpg/gpgsm-gen-key.in | 8
-rw-r--r--  t/lib-gpg/gpgsm_cert.p12 | bin 0 -> 2652 bytes
-rw-r--r--  t/lib-httpd.sh | 21
-rwxr-xr-x  t/lib-submodule-update.sh | 5
-rwxr-xr-x  t/t0000-basic.sh | 2
-rwxr-xr-x  t/t0001-init.sh | 4
-rwxr-xr-x  t/t0003-attributes.sh | 24
-rwxr-xr-x  t/t0019-json-writer.sh | 331
-rw-r--r--  t/t0019/parse_json.perl | 52
-rwxr-xr-x  t/t0021-conversion.sh | 6
-rwxr-xr-x  t/t0090-cache-tree.sh | 2
-rwxr-xr-x  t/t1004-read-tree-m-u-wf.sh | 8
-rwxr-xr-x  t/t1005-read-tree-reset.sh | 10
-rwxr-xr-x  t/t1008-read-tree-overlay.sh | 2
-rwxr-xr-x  t/t1015-read-index-unmerged.sh | 123
-rwxr-xr-x  t/t1020-subdirectory.sh | 2
-rwxr-xr-x  t/t1050-large.sh | 6
-rwxr-xr-x  t/t1300-config.sh | 2
-rwxr-xr-x  t/t1305-config-include.sh | 2
-rwxr-xr-x  t/t1308-config-set.sh | 2
-rwxr-xr-x  t/t1400-update-ref.sh | 20
-rwxr-xr-x  t/t1404-update-ref-errors.sh | 4
-rwxr-xr-x  t/t1411-reflog-show.sh | 6
-rwxr-xr-x  t/t1450-fsck.sh | 2
-rwxr-xr-x  t/t1507-rev-parse-upstream.sh | 6
-rwxr-xr-x  t/t1512-rev-parse-disambiguation.sh | 6
-rwxr-xr-x  t/t1700-split-index.sh | 2
-rwxr-xr-x  t/t2016-checkout-patch.sh | 24
-rwxr-xr-x  t/t2024-checkout-dwim.sh | 58
-rwxr-xr-x  t/t2025-worktree-add.sh | 20
-rwxr-xr-x  t/t2103-update-index-ignore-missing.sh | 2
-rwxr-xr-x  t/t2202-add-addremove.sh | 14
-rwxr-xr-x  t/t3000-ls-files-others.sh | 2
-rwxr-xr-x  t/t3005-ls-files-relative.sh | 8
-rwxr-xr-x  t/t3006-ls-files-long.sh | 2
-rwxr-xr-x  t/t3008-ls-files-lazy-init-name-hash.sh | 8
-rwxr-xr-x  t/t3030-merge-recursive.sh | 340
-rwxr-xr-x  t/t3031-merge-criscross.sh | 2
-rwxr-xr-x  t/t3050-subprojects-fetch.sh | 8
-rwxr-xr-x  t/t3102-ls-tree-wildcards.sh | 2
-rwxr-xr-x  t/t3210-pack-refs.sh | 6
-rwxr-xr-x  t/t3301-notes.sh | 8
-rwxr-xr-x  t/t3310-notes-merge-manual-resolve.sh | 6
-rwxr-xr-x  t/t3400-rebase.sh | 8
-rwxr-xr-x  t/t3402-rebase-merge.sh | 4
-rwxr-xr-x  t/t3404-rebase-interactive.sh | 25
-rwxr-xr-x  t/t3418-rebase-continue.sh | 4
-rwxr-xr-x  t/t3430-rebase-merges.sh | 34
-rwxr-xr-x  t/t3507-cherry-pick-conflict.sh | 13
-rwxr-xr-x  t/t3700-add.sh | 8
-rwxr-xr-x  t/t3701-add-interactive.sh | 16
-rwxr-xr-x  t/t3904-stash-patch.sh | 8
-rwxr-xr-x  t/t4001-diff-rename.sh | 2
-rwxr-xr-x  t/t4010-diff-pathspec.sh | 4
-rwxr-xr-x  t/t4012-diff-binary.sh | 6
-rwxr-xr-x  t/t4015-diff-whitespace.sh | 243
-rwxr-xr-x  t/t4024-diff-optimize-common.sh | 16
-rwxr-xr-x  t/t4025-hunk-header.sh | 8
-rwxr-xr-x  t/t4041-diff-submodule-option.sh | 4
-rwxr-xr-x  t/t4060-diff-submodule-option-diff-format.sh | 2
-rwxr-xr-x  t/t4121-apply-diffs.sh | 2
-rwxr-xr-x  t/t4150-am.sh | 39
-rwxr-xr-x  t/t4202-log.sh | 37
-rwxr-xr-x  t/t4211-line-log.sh | 5
-rwxr-xr-x  t/t5300-pack-object.sh | 2
-rwxr-xr-x  t/t5302-pack-index.sh | 2
-rwxr-xr-x  t/t5318-commit-graph.sh | 240
-rwxr-xr-x  t/t5400-send-pack.sh | 4
-rwxr-xr-x  t/t5401-update-hooks.sh | 4
-rwxr-xr-x  t/t5405-send-pack-rewind.sh | 3
-rwxr-xr-x  t/t5406-remote-rejects.sh | 5
-rwxr-xr-x  t/t5500-fetch-pack.sh | 37
-rwxr-xr-x  t/t5504-fetch-receive-strict.sh | 126
-rwxr-xr-x  t/t5505-remote.sh | 12
-rwxr-xr-x  t/t5510-fetch.sh | 115
-rwxr-xr-x  t/t5512-ls-remote.sh | 4
-rwxr-xr-x  t/t5516-fetch-push.sh | 10
-rwxr-xr-x  t/t5517-push-mirror.sh | 10
-rwxr-xr-x  t/t5520-pull.sh | 24
-rwxr-xr-x  t/t5526-fetch-submodules.sh | 2
-rwxr-xr-x  t/t5531-deep-submodule-push.sh | 2
-rwxr-xr-x  t/t5534-push-signed.sh | 63
-rwxr-xr-x  t/t5541-http-push-smart.sh | 28
-rwxr-xr-x  t/t5543-atomic-push.sh | 2
-rwxr-xr-x  t/t5551-http-fetch-smart.sh | 26
-rwxr-xr-x  t/t5552-skipping-fetch-negotiator.sh | 202
-rwxr-xr-x  t/t5561-http-backend.sh | 8
-rwxr-xr-x  t/t5562-http-backend-content-length.sh | 156
-rwxr-xr-x  t/t5562/invoke-with-content-length.pl | 37
-rwxr-xr-x  t/t5570-git-daemon.sh | 6
-rwxr-xr-x  t/t5601-clone.sh | 2
-rwxr-xr-x  t/t5605-clone-local.sh | 2
-rwxr-xr-x  t/t5608-clone-2gb.sh | 2
-rwxr-xr-x  t/t5616-partial-clone.sh | 44
-rwxr-xr-x  t/t5702-protocol-v2.sh | 78
-rwxr-xr-x  t/t5801-remote-helpers.sh | 10
-rwxr-xr-x  t/t6010-merge-base.sh | 2
-rwxr-xr-x  t/t6020-merge-df.sh | 3
-rwxr-xr-x  t/t6029-merge-subtree.sh | 44
-rwxr-xr-x  t/t6036-recursive-corner-cases.sh | 713
-rwxr-xr-x  t/t6042-merge-rename-corner-cases.sh | 254
-rwxr-xr-x  t/t6043-merge-rename-directories.sh | 2
-rwxr-xr-x  t/t6044-merge-unrelated-index-changes.sh | 67
-rwxr-xr-x  t/t6050-replace.sh | 6
-rwxr-xr-x  t/t7001-mv.sh | 2
-rwxr-xr-x  t/t7004-tag.sh | 13
-rwxr-xr-x  t/t7030-verify-tag.sh | 34
-rwxr-xr-x  t/t7063-status-untracked-cache.sh | 2
-rwxr-xr-x  t/t7105-reset-patch.sh | 12
-rwxr-xr-x  t/t7201-co.sh | 41
-rwxr-xr-x  t/t7301-clean-interactive.sh | 41
-rwxr-xr-x  t/t7400-submodule-basic.sh | 15
-rwxr-xr-x  t/t7405-submodule-merge.sh | 173
-rwxr-xr-x  t/t7406-submodule-update.sh | 17
-rwxr-xr-x  t/t7408-submodule-reference.sh | 2
-rwxr-xr-x  t/t7415-submodule-names.sh | 15
-rwxr-xr-x  t/t7501-commit.sh | 56
-rwxr-xr-x  t/t7504-commit-msg-hook.sh | 4
-rwxr-xr-x  t/t7506-status-submodule.sh | 10
-rwxr-xr-x  t/t7510-signed-commit.sh | 7
-rwxr-xr-x  t/t7610-mergetool.sh | 8
-rwxr-xr-x  t/t7611-merge-abort.sh | 118
-rwxr-xr-x  t/t7810-grep.sh | 22
-rwxr-xr-x  t/t8003-blame-corner-cases.sh | 12
-rwxr-xr-x  t/t9001-send-email.sh | 10
-rwxr-xr-x  t/t9100-git-svn-basic.sh | 2
-rwxr-xr-x  t/t9101-git-svn-props.sh | 2
-rwxr-xr-x  t/t9119-git-svn-info.sh | 120
-rwxr-xr-x  t/t9122-git-svn-author.sh | 6
-rwxr-xr-x  t/t9129-git-svn-i18n-commitencoding.sh | 2
-rwxr-xr-x  t/t9130-git-svn-authors-file.sh | 4
-rwxr-xr-x  t/t9134-git-svn-ignore-paths.sh | 6
-rwxr-xr-x  t/t9137-git-svn-dcommit-clobber-series.sh | 2
-rwxr-xr-x  t/t9138-git-svn-authors-prog.sh | 6
-rwxr-xr-x  t/t9146-git-svn-empty-dirs.sh | 20
-rwxr-xr-x  t/t9147-git-svn-include-paths.sh | 6
-rwxr-xr-x  t/t9152-svn-empty-dirs-after-gc.sh | 2
-rwxr-xr-x  t/t9164-git-svn-dcommit-concurrent.sh | 2
-rwxr-xr-x  t/t9165-git-svn-fetch-merge-branch-of-branch.sh | 2
-rwxr-xr-x  t/t9200-git-cvsexportcommit.sh | 6
-rwxr-xr-x  t/t9300-fast-import.sh | 5
-rwxr-xr-x  t/t9302-fast-import-unpack-limit.sh | 2
-rwxr-xr-x  t/t9400-git-cvsserver-server.sh | 8
-rwxr-xr-x  t/t9600-cvsimport.sh | 2
-rwxr-xr-x  t/t9800-git-p4-basic.sh | 29
-rwxr-xr-x  t/t9806-git-p4-options.sh | 2
-rwxr-xr-x  t/t9810-git-p4-rcs.sh | 2
-rwxr-xr-x  t/t9811-git-p4-label-import.sh | 2
-rwxr-xr-x  t/t9814-git-p4-rename.sh | 18
-rwxr-xr-x  t/t9815-git-p4-submit-fail.sh | 2
-rwxr-xr-x  t/t9830-git-p4-symlink-dir.sh | 2
-rwxr-xr-x  t/t9831-git-p4-triggers.sh | 2
-rwxr-xr-x  t/t9902-completion.sh | 4
-rwxr-xr-x  t/t9903-bash-prompt.sh | 2
-rw-r--r--  t/test-lib.sh | 3
-rw-r--r--  tag.c | 39
-rw-r--r--  tag.h | 7
-rw-r--r--  transport-helper.c | 98
-rw-r--r--  transport-internal.h | 9
-rw-r--r--  transport.c | 53
-rw-r--r--  transport.h | 13
-rw-r--r--  tree-walk.c | 3
-rw-r--r--  tree.c | 23
-rw-r--r--  tree.h | 2
-rw-r--r--  unpack-trees.c | 40
-rw-r--r--  upload-pack.c | 17
-rw-r--r--  utf8.c | 10
-rw-r--r--  utf8.h | 10
-rw-r--r--  walker.c | 12
-rw-r--r--  wt-status.c | 12
-rw-r--r--  xdiff/xdiff.h | 8
-rw-r--r--  xdiff/xdiffi.c | 29
-rw-r--r--  xdiff/xhistogram.c | 133
458 files changed, 12217 insertions, 3397 deletions
diff --git a/.gitignore b/.gitignore
index 3284a1e..3524803 100644
--- a/.gitignore
+++ b/.gitignore
@@ -207,6 +207,7 @@
/config.mak.autogen
/config.mak.append
/configure
+/.vscode/
/tags
/TAGS
/cscope*
diff --git a/Documentation/.gitignore b/Documentation/.gitignore
index c7096f1..3ef54e0 100644
--- a/Documentation/.gitignore
+++ b/Documentation/.gitignore
@@ -12,3 +12,4 @@ cmds-*.txt
mergetools-*.txt
manpage-base-url.xsl
SubmittingPatches.txt
+tmp-doc-diff/
diff --git a/Documentation/RelNotes/2.19.0.txt b/Documentation/RelNotes/2.19.0.txt
index 7a5c5b8..8f8da9f 100644
--- a/Documentation/RelNotes/2.19.0.txt
+++ b/Documentation/RelNotes/2.19.0.txt
@@ -32,6 +32,47 @@ UI, Workflows & Features
automatically switch to quoted-printable when there is such a line
in the payload has been introduced and is made the default.
+ * "git checkout" and "git worktree add" learned to honor
+ checkout.defaultRemote when auto-vivifying a local branch out of a
+ remote tracking branch in a repository with multiple remotes that
+ have tracking branches that share the same names.
+ (merge 8d7b558bae ab/checkout-default-remote later to maint).
+
+ * "git grep" learned the "--only-matching" option.
+
+ * "git rebase --rebase-merges" mode now handles octopus merges as
+ well.
+
+ * Add a server-side knob to skip commits in exponential/Fibonacci
+ stride in an attempt to cover a wider swath of history with a smaller
+ number of iterations, potentially accepting a larger packfile
+ transfer, instead of going back one commit at a time during common
+ ancestor discovery during the "git fetch" transaction.
+ (merge 42cc7485a2 jt/fetch-negotiator-skipping later to maint).
+
+ * A new configuration variable core.usereplacerefs has been added,
+ primarily to help server installations that want to ignore the
+ replace mechanism altogether.
+
+ * Teach "git tag -s" etc. a few configuration variables (gpg.format
+ that can be set to "openpgp" or "x509", and gpg.<format>.program
+ that is used to specify what program to use to deal with the format)
+ to allow x.509 certs with CMS via "gpgsm" to be used instead of
+ openpgp via "gnupg".
+
+ * Many more strings are prepared for l10n.
+
+ * "git p4 submit" learns to ask its own pre-submit hook if it should
+ continue with submitting.
+
+ * The test performed at the receiving end of "git push" to prevent
+ bad objects from entering the repository can be customized via
+ receive.fsck.* configuration variables; we have now gained a
+ counterpart to do the same on the "git fetch" side, with
+ fetch.fsck.* configuration variables.
+
+ * "git pull --rebase=interactive" learned "i" as a short-hand for
+ "interactive".
Performance, Internal Implementation, Development Support etc.
@@ -94,6 +135,91 @@ Performance, Internal Implementation, Development Support etc.
* test-lint now looks for broken use of "VAR=VAL shell_func" in test
scripts.
+ * Conversion from uchar[40] to struct object_id continues.
+
+ * Recent "security fix" to pay attention to contents of ".gitmodules"
+ while accepting "git push" was a bit stricter than necessary,
+ which has been adjusted.
+
+ * "git fsck" learns to make sure the optional commit-graph file is in
+ a sane state.
+
+ * "git diff --color-moved" feature has further been tweaked.
+
+ * Code restructuring and a small fix to transport protocol v2 during
+ fetching.
+
+ * Parsing of the -L[<N>][,[<M>]] parameters that "git blame" and
+ "git log" take has been tweaked.
+
+ * lookup_commit_reference() and friends have been updated to find
+ in-core object for a specific in-core repository instance.
+
+ * Various glitches in the heuristics of merge-recursive strategy have
+ been documented in new tests.
+
+ * "git fetch" learned a new option "--negotiation-tip" to limit the
+ set of commits it tells the other end as "have", to reduce wasted
+ bandwidth and cycles, which would be helpful when the receiving
+ repository has a lot of refs that have little to do with the
+ history at the remote it is fetching from.
+
+ * For a large tree, the index needs to hold many cache entries
+ allocated on heap. These cache entries are now allocated out of a
+ dedicated memory pool to amortize malloc(3) overhead.
+
+ * Tests to cover various conflicting cases have been added for
+ merge-recursive.
+
+ * Tests to cover conflict cases that involve submodules have been
+ added for merge-recursive.
+
+ * Look for broken "&&" chains that are hidden in a subshell, many of
+ which have been found and corrected.
+
+ * The singleton commit-graph in-core instance is made per in-core
+ repository instance.
+
+ * "make DEVELOPER=1 DEVOPTS=pedantic" allows developers to compile
+ with -pedantic option, which may catch more problematic program
+ constructs and potential bugs.
+
+ * Preparatory code to later add json output for telemetry data has
+ been added.
+
+ * Update the way we use Coccinelle to find out-of-style code that
+ needs to be modernised.
+
+ * It is too easy to misuse system API functions such as strcat();
+ these selected functions are now forbidden in this codebase and
+ will cause a compilation failure.
+
+ * Add a script (in contrib/) to help users of VSCode work better with
+ our codebase.
+
+ * The Travis CI scripts were taught to ship back the test data from
+ failed tests.
+ (merge aea8879a6a sg/travis-retrieve-trash-upon-failure later to maint).
+
+ * The parse-options machinery learned to refrain from enclosing
+ the placeholder string inside a "<bra" and "ket>" pair automatically
+ without PARSE_OPT_LITERAL_ARGHELP. Existing help texts for option
+ arguments that were not formatted correctly have been identified and
+ fixed.
+ (merge 5f0df44cd7 rs/parse-opt-lithelp later to maint).
+
+ * Noiseword "extern" has been removed from function decls in the
+ header files.
+
+ * A few atoms like %(objecttype) and %(objectsize) in the format
+ specifier of "for-each-ref --format=<format>" can be filled without
+ getting the full contents of the object, but just with the object
+ header. These cases have been optimized by calling
+ oid_object_info() API (instead of reading and inspecting the data).
+
+ * The end result of a documentation update has been made easier to
+ inspect, to help developers.
+
Fixes since v2.18
-----------------
@@ -223,6 +349,116 @@ Fixes since v2.18
* core.commentchar is now honored when preparing the list of commits
to replay in "rebase -i".
+ * "git pull --rebase" on a corrupt HEAD caused a segfault. In
+ general we substitute an empty tree object when running the in-core
+ equivalent of the diff-index command, and the codepath has been
+ corrected to do so as well to fix this issue.
+ (merge 3506dc9445 jk/has-uncommitted-changes-fix later to maint).
+
+ * The httpd tests saw occasional breakage due to the way the server's
+ access log gets inspected by the tests; this has been updated to make
+ them less flaky.
+ (merge e8b3b2e275 sg/httpd-test-unflake later to maint).
+
+ * Tests to cover more D/F conflict cases have been added for
+ merge-recursive.
+
+ * "git gc --auto" opens file descriptors for the packfiles before
+ spawning "git repack/prune", which would upset Windows that does
+ not want a process to work on a file that is open by another
+ process. The issue has been worked around.
+ (merge 12e73a3ce4 kg/gc-auto-windows-workaround later to maint).
+
+ * The recursive merge strategy did not properly ensure there was no
+ change between HEAD and the index before performing its operation,
+ which has been corrected.
+ (merge 55f39cf755 en/dirty-merge-fixes later to maint).
+
+ * "git rebase" started exporting GIT_DIR environment variable and
+ exposing it to hook scripts when part of it got rewritten in C.
+ Instead of matching the old scripted Porcelains' behaviour,
+ compensate by also exporting GIT_WORK_TREE environment as well to
+ lessen the damage. This can harm existing hooks that want to
+ operate on a different repository, but the current behaviour is
+ already broken for them anyway.
+ (merge ab5e67d751 bc/sequencer-export-work-tree-as-well later to maint).
+
+ * "git send-email" when using in a batched mode that limits the
+ number of messages sent in a single SMTP session lost the contents
+ of the variable used to choose between tls/ssl, unable to send the
+ second and later batches, which has been fixed.
+ (merge 636f3d7ac5 jm/send-email-tls-auth-on-batch later to maint).
+
+ * The lazy clone support had a few places where missing but promised
+ objects were not correctly tolerated, which have been fixed.
+
+ * One of the "diff --color-moved" mode "dimmed_zebra" that was named
+ in an unusual way has been deprecated and replaced by
+ "dimmed-zebra".
+ (merge e3f2f5f9cd es/diff-color-moved-fix later to maint).
+
+ * The wire-protocol v2 relies on the client to send "ref prefixes" to
+ limit the bandwidth spent on the initial ref advertisement. "git
+ clone" when learned to speak v2 forgot to do so, which has been
+ corrected.
+ (merge 402c47d939 bw/clone-ref-prefixes later to maint).
+
+ * "git diff --histogram" had a bad memory usage pattern, which has
+ been rearranged to reduce the peak usage.
+ (merge 79cb2ebb92 sb/histogram-less-memory later to maint).
+
+ * Code clean-up to use size_t/ssize_t when they are the right type.
+ (merge 7726d360b5 jk/size-t later to maint).
+
+ * The wire-protocol v2 relies on the client to send "ref prefixes" to
+ limit the bandwidth spent on the initial ref advertisement. "git
+ fetch $remote branch:branch", which asks for tags that point into the
+ history leading to "branch" to be followed automatically, sent too
+ narrow a prefix and broke the tag following. This has been fixed.
+ (merge 2b554353a5 jt/tag-following-with-proto-v2-fix later to maint).
+
+ * When the sparse checkout feature is in use, "git cherry-pick" and
+ other mergy operations lost the skip_worktree bit when a path that
+ is excluded from checkout requires a content-level merge, which is
+ resolved as the same as the HEAD version, without materializing the
+ merge result in the working tree, which made the path appear as
+ deleted. This has been corrected by preserving the skip_worktree
+ bit (and not materializing the file in the working tree).
+ (merge 2b75fb601c en/merge-recursive-skip-fix later to maint).
+
+ * The "author-script" file "git rebase -i" creates got broken when
+ we started to move the command away from shell script, which is
+ getting fixed now.
+ (merge 5522bbac20 es/rebase-i-author-script-fix later to maint).
+
+ * The automatic tree-matching in "git merge -s subtree" was broken 5
+ years ago and nobody has noticed since then, which is now fixed.
+ (merge 2ec4150713 jk/merge-subtree-heuristics later to maint).
+
+ * "git fetch $there refs/heads/s" ought to fetch the tip of the
+ branch 's', but when "refs/heads/refs/heads/s", i.e. a branch whose
+ name is "refs/heads/s" exists at the same time, fetched that one
+ instead by mistake. This has been corrected to honor the usual
+ disambiguation rules for abbreviated refnames.
+ (merge 60650a48c0 jt/refspec-dwim-precedence-fix later to maint).
+
+ * Futureproofing a helper function that can easily be misused.
+ (merge 65bb21e77e es/want-color-fd-defensive later to maint).
+
+ * The http-backend (used for smart-http transport) used to slurp the
+ whole input until EOF, without paying attention to CONTENT_LENGTH
+ that is supplied in the environment and instead expecting the Web
+ server to close the input stream. This has been fixed.
+ (merge eebfe40962 mk/http-backend-content-length later to maint).
+
+ * "git merge --abort" etc. did not clean things up properly when
+ there were conflicted entries in the index in a certain order that
+ are involved in D/F conflicts. This has been corrected.
+ (merge ad3762042a en/abort-df-conflict-fixes later to maint).
+
+ * "git diff --indent-heuristic" had a bad corner case performance.
+ (merge 301ef85401 sb/indent-heuristic-optim later to maint).
+
* Code cleanup, docfix, build fix, etc.
(merge aee9be2ebe sg/update-ref-stdin-cleanup later to maint).
(merge 037714252f jc/clean-after-sanity-tests later to maint).
@@ -237,3 +473,15 @@ Fixes since v2.18
(merge 5cf8e06474 js/enhanced-version-info later to maint).
(merge 6aaded5509 tb/config-default later to maint).
(merge 022d2ac1f3 sb/blame-color later to maint).
+ (merge 5a06a20e0c bp/test-drop-caches-for-windows later to maint).
+ (merge dd61cc1c2e jk/ui-color-always-to-auto later to maint).
+ (merge 1e83b9bfdd sb/trailers-docfix later to maint).
+ (merge ab29f1b329 sg/fast-import-dump-refs-on-checkpoint-fix later to maint).
+ (merge 6a8ad880f0 jn/subtree-test-fixes later to maint).
+ (merge ffbd51cc60 nd/pack-objects-threading-doc later to maint).
+ (merge e9dac7be60 es/mw-to-git-chain-fix later to maint).
+ (merge fe583c6c7a rs/remote-mv-leakfix later to maint).
+ (merge 69885ab015 en/t3031-title-fix later to maint).
+ (merge 8578037bed nd/config-blame-sort later to maint).
+ (merge 8ad169c4ba hn/config-in-code-comment later to maint).
+ (merge b7446fcfdf ar/t4150-am-scissors-test-fix later to maint).
diff --git a/Documentation/config.txt b/Documentation/config.txt
index 43b2de7b..95ad715 100644
--- a/Documentation/config.txt
+++ b/Documentation/config.txt
@@ -344,6 +344,16 @@ advice.*::
Advice shown when you used linkgit:git-checkout[1] to
move to the detach HEAD state, to instruct how to create
a local branch after the fact.
+ checkoutAmbiguousRemoteBranchName::
+ Advice shown when the argument to
+ linkgit:git-checkout[1] ambiguously resolves to a
+ remote tracking branch on more than one remote in
+ situations where an unambiguous argument would have
+ otherwise caused a remote-tracking branch to be
+ checked out. See the `checkout.defaultRemote`
+ configuration variable for how to set a given remote
+ to be used by default in some situations where this
+ advice would be printed.
amWorkDir::
Advice that shows the location of the patch file when
linkgit:git-am[1] fails to apply it.
@@ -907,9 +917,17 @@ core.notesRef::
This setting defaults to "refs/notes/commits", and it can be overridden by
the `GIT_NOTES_REF` environment variable. See linkgit:git-notes[1].
-core.commitGraph::
- Enable git commit graph feature. Allows reading from the
- commit-graph file.
+gc.commitGraph::
+ If true, then gc will rewrite the commit-graph file when
+ linkgit:git-gc[1] is run. When using linkgit:git-gc[1]
+ '--auto' the commit-graph will be updated if housekeeping is
+ required. Default is false. See linkgit:git-commit-graph[1]
+ for details.
+
+core.useReplaceRefs::
+ If set to `false`, behave as if the `--no-replace-objects`
+ option was given on the command line. See linkgit:git[1] and
+ linkgit:git-replace[1] for more information.
core.sparseCheckout::
Enable "sparse checkout" feature. See section "Sparse checkout" in
@@ -977,23 +995,28 @@ apply.whitespace::
Tells 'git apply' how to handle whitespaces, in the same way
as the `--whitespace` option. See linkgit:git-apply[1].
-blame.showRoot::
- Do not treat root commits as boundaries in linkgit:git-blame[1].
- This option defaults to false.
-
blame.blankBoundary::
Show blank commit object name for boundary commits in
linkgit:git-blame[1]. This option defaults to false.
-blame.showEmail::
- Show the author email instead of author name in linkgit:git-blame[1].
- This option defaults to false.
+blame.coloring::
+ This determines the coloring scheme to be applied to blame
+ output. It can be 'repeatedLines', 'highlightRecent',
+ or 'none' which is the default.
blame.date::
Specifies the format used to output dates in linkgit:git-blame[1].
If unset the iso format is used. For supported values,
see the discussion of the `--date` option at linkgit:git-log[1].
+blame.showEmail::
+ Show the author email instead of author name in linkgit:git-blame[1].
+ This option defaults to false.
+
+blame.showRoot::
+ Do not treat root commits as boundaries in linkgit:git-blame[1].
+ This option defaults to false.
+
branch.autoSetupMerge::
Tells 'git branch' and 'git checkout' to set up new branches
so that linkgit:git-pull[1] will appropriately merge from the
@@ -1101,6 +1124,22 @@ browser.<tool>.path::
browse HTML help (see `-w` option in linkgit:git-help[1]) or a
working repository in gitweb (see linkgit:git-instaweb[1]).
+checkout.defaultRemote::
+ When you run 'git checkout <something>' and only have one
+ remote, it may implicitly fall back on checking out and
+ tracking e.g. 'origin/<something>'. This stops working as soon
+ as you have more than one remote with a '<something>'
+ reference. This setting allows for setting the name of a
+ preferred remote that should always win when it comes to
+ disambiguation. The typical use-case is to set this to
+ `origin`.
++
+Currently this is used by linkgit:git-checkout[1] when 'git checkout
+<something>' will check out the '<something>' branch on another remote,
+and by linkgit:git-worktree[1] when 'git worktree add' refers to a
+remote branch. This setting might be used for other checkout-like
+commands or functionality in the future.
+
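
As a quick illustration of the `checkout.defaultRemote` entry added above,
a minimal sketch (`origin` is the documented typical choice and `topic` is a
placeholder branch name):

------------
$ git config checkout.defaultRemote origin
$ git checkout topic    # resolves against origin/topic even if other remotes have 'topic'
------------
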
clean.requireForce::
A boolean to make git-clean do nothing unless given -f,
-i or -n. Defaults to true.
@@ -1115,6 +1154,28 @@ color.advice::
color.advice.hint::
Use customized color for hints.
+color.blame.highlightRecent::
+ This can be used to color the metadata of a blame line depending
+ on the age of the line.
++
+This setting should be set to a comma-separated list of color and date settings,
+starting and ending with a color; the dates should be set from oldest to newest.
+The metadata will be colored with the given colors if the line was introduced
+before the given timestamp, overwriting older timestamped colors.
++
+Instead of an absolute timestamp relative timestamps work as well, e.g.
+2.weeks.ago is valid to address anything older than 2 weeks.
++
+It defaults to 'blue,12 month ago,white,1 month ago,red', which colors
+everything older than one year blue, recent changes between one month and
+one year old are kept white, and lines introduced within the last month are
+colored red.
+
+color.blame.repeatedLines::
+ Use the customized color for the part of git-blame output that
+ is repeated meta information per line (such as commit id,
+ author name, date and timezone). Defaults to cyan.
+
color.branch::
A boolean to enable/disable color in the output of
linkgit:git-branch[1]. May be set to `always`,
@@ -1149,6 +1210,11 @@ diff.colorMoved::
true the default color mode will be used. When set to false,
moved lines are not colored.
+diff.colorMovedWS::
+ When moved lines are colored using e.g. the `diff.colorMoved` setting,
+ this option controls the `<mode>` how spaces are treated
+ for details of valid modes see '--color-moved-ws' in linkgit:git-diff[1].
+
color.diff.<slot>::
Use customized color for diff colorization. `<slot>` specifies
which part of the patch to use the specified color, and is one
@@ -1257,33 +1323,6 @@ color.status.<slot>::
status short-format), or
`unmerged` (files which have unmerged changes).
-color.blame.repeatedLines::
- Use the customized color for the part of git-blame output that
- is repeated meta information per line (such as commit id,
- author name, date and timezone). Defaults to cyan.
-
-color.blame.highlightRecent::
- This can be used to color the metadata of a blame line depending
- on age of the line.
-+
-This setting should be set to a comma-separated list of color and date settings,
-starting and ending with a color, the dates should be set from oldest to newest.
-The metadata will be colored given the colors if the the line was introduced
-before the given timestamp, overwriting older timestamped colors.
-+
-Instead of an absolute timestamp relative timestamps work as well, e.g.
-2.weeks.ago is valid to address anything older than 2 weeks.
-+
-It defaults to 'blue,12 month ago,white,1 month ago,red', which colors
-everything older than one year blue, recent changes between one month and
-one year old are kept white, and lines introduced within the last month are
-colored red.
-
-blame.coloring::
- This determines the coloring scheme to be applied to blame
- output. It can be 'repeatedLines', 'highlightRecent',
- or 'none' which is the default.
-
color.transport::
A boolean to enable/disable color when pushes are rejected. May be
set to `always`, `false` (or `never`) or `auto` (or `true`), in which
@@ -1463,10 +1502,19 @@ fetch.recurseSubmodules::
fetch.fsckObjects::
If it is set to true, git-fetch-pack will check all fetched
- objects. It will abort in the case of a malformed object or a
- broken link. The result of an abort are only dangling objects.
- Defaults to false. If not set, the value of `transfer.fsckObjects`
- is used instead.
+ objects. See `transfer.fsckObjects` for what's
+ checked. Defaults to false. If not set, the value of
+ `transfer.fsckObjects` is used instead.
+
+fetch.fsck.<msg-id>::
+ Acts like `fsck.<msg-id>`, but is used by
+ linkgit:git-fetch-pack[1] instead of linkgit:git-fsck[1]. See
+ the `fsck.<msg-id>` documentation for details.
+
+fetch.fsck.skipList::
+ Acts like `fsck.skipList`, but is used by
+ linkgit:git-fetch-pack[1] instead of linkgit:git-fsck[1]. See
+ the `fsck.skipList` documentation for details.
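
A hedged sketch of the `fetch.fsckObjects` and `fetch.fsck.*` entries above;
`missingEmail` is one of the msg-ids mentioned in this documentation, and the
skip-list path is a placeholder:

------------
$ git config fetch.fsckObjects true
$ git config fetch.fsck.missingEmail warn
$ git config fetch.fsck.skipList .git/fsck-skiplist
------------
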
fetch.unpackLimit::
If the number of objects fetched over the Git native
@@ -1497,6 +1545,18 @@ fetch.output::
`full` and `compact`. Default value is `full`. See section
OUTPUT in linkgit:git-fetch[1] for detail.
+fetch.negotiationAlgorithm::
+ Control how information about the commits in the local repository is
+ sent when negotiating the contents of the packfile to be sent by the
+ server. Set to "skipping" to use an algorithm that skips commits in an
+ effort to converge faster, but may result in a larger-than-necessary
+ packfile. The default is "default", which instructs Git to use the
+ default algorithm that never skips commits (unless the server has
+ acknowledged it or one of its descendants).
+ Unknown values will cause 'git fetch' to error out.
++
+See also the `--negotiation-tip` option for linkgit:git-fetch[1].
+
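
For example, a minimal sketch of the `fetch.negotiationAlgorithm` entry above
(`origin` is a placeholder remote):

------------
$ git config fetch.negotiationAlgorithm skipping
$ git fetch origin    # fewer negotiation rounds, possibly a larger packfile
------------
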
format.attach::
Enable multipart/mixed attachments as the default for
'format-patch'. The value can also be a double quoted string
@@ -1596,15 +1656,42 @@ filter.<driver>.smudge::
linkgit:gitattributes[5] for details.
fsck.<msg-id>::
- Allows overriding the message type (error, warn or ignore) of a
- specific message ID such as `missingEmail`.
-+
-For convenience, fsck prefixes the error/warning with the message ID,
-e.g. "missingEmail: invalid author/committer line - missing email" means
-that setting `fsck.missingEmail = ignore` will hide that issue.
-+
-This feature is intended to support working with legacy repositories
-which cannot be repaired without disruptive changes.
+ During fsck git may find issues with legacy data which
+ wouldn't be generated by current versions of git, and which
+ wouldn't be sent over the wire if `transfer.fsckObjects` was
+ set. This feature is intended to support working with legacy
+ repositories containing such data.
++
+Setting `fsck.<msg-id>` will be picked up by linkgit:git-fsck[1], but
+to accept pushes of such data set `receive.fsck.<msg-id>` instead, or
+to clone or fetch it set `fetch.fsck.<msg-id>`.
++
+The rest of the documentation discusses `fsck.*` for brevity, but the
+same applies for the corresponding `receive.fsck.*` and
+`fetch.fsck.*` variables.
++
+Unlike variables like `color.ui` and `core.editor` the
+`receive.fsck.<msg-id>` and `fetch.fsck.<msg-id>` variables will not
+fall back on the `fsck.<msg-id>` configuration if they aren't set. To
+uniformly configure the same fsck settings in different circumstances
+all three of them must be set to the same values.
++
+When `fsck.<msg-id>` is set, errors can be switched to warnings and
+vice versa by configuring the `fsck.<msg-id>` setting where the
+`<msg-id>` is the fsck message ID and the value is one of `error`,
+`warn` or `ignore`. For convenience, fsck prefixes the error/warning
+with the message ID, e.g. "missingEmail: invalid author/committer line
+- missing email" means that setting `fsck.missingEmail = ignore` will
+hide that issue.
++
+In general, it is better to enumerate existing objects with problems
+with `fsck.skipList`, instead of listing the kind of breakages these
+problematic objects share to be ignored, as doing the latter will
+allow new instances of the same breakages to go unnoticed.
++
+Setting an unknown `fsck.<msg-id>` value will cause fsck to die, but
+doing the same for `receive.fsck.<msg-id>` and `fetch.fsck.<msg-id>`
+will only cause git to warn.
fsck.skipList::
The path to a sorted list of object names (i.e. one SHA-1 per
@@ -1613,6 +1700,15 @@ fsck.skipList::
should be accepted despite early commits containing errors that
can be safely ignored such as invalid committer email addresses.
Note: corrupt objects cannot be skipped with this setting.
++
+Like `fsck.<msg-id>` this variable has corresponding
+`receive.fsck.skipList` and `fetch.fsck.skipList` variants.
++
+Unlike variables like `color.ui` and `core.editor` the
+`receive.fsck.skipList` and `fetch.fsck.skipList` variables will not
+fall back on the `fsck.skipList` configuration if they aren't set. To
+uniformly configure the same fsck settings in different circumstances
+all three of them must be set to the same values.
gc.aggressiveDepth::
The depth parameter used in the delta compression
@@ -1836,6 +1932,16 @@ gpg.program::
signed, and the program is expected to send the result to its
standard output.
+gpg.format::
+ Specifies which key format to use when signing with `--gpg-sign`.
+ Default is "openpgp" and another possible value is "x509".
+
+gpg.<format>.program::
+ Use this to customize the program used for the signing format you
+ chose (see `gpg.program` and `gpg.format`). `gpg.program` can still
+ be used as a legacy synonym for `gpg.openpgp.program`. The default
+ value for `gpg.x509.program` is "gpgsm".
+
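
A minimal sketch of the new `gpg.format` and `gpg.<format>.program` entries
above, assuming a working gpgsm/X.509 setup (the tag name is a placeholder):

------------
$ git config gpg.format x509
$ git config gpg.x509.program gpgsm    # "gpgsm" is already the default for x509
$ git tag -s v1.0 -m "tag signed with an X.509 certificate"
------------
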
gui.commitMsgWidth::
Defines how wide the commit message window is in the
linkgit:git-gui[1]. "75" is the default.
@@ -2889,32 +2995,21 @@ receive.certNonceSlop::
receive.fsckObjects::
If it is set to true, git-receive-pack will check all received
- objects. It will abort in the case of a malformed object or a
- broken link. The result of an abort are only dangling objects.
- Defaults to false. If not set, the value of `transfer.fsckObjects`
- is used instead.
+ objects. See `transfer.fsckObjects` for what's checked.
+ Defaults to false. If not set, the value of
+ `transfer.fsckObjects` is used instead.
receive.fsck.<msg-id>::
- When `receive.fsckObjects` is set to true, errors can be switched
- to warnings and vice versa by configuring the `receive.fsck.<msg-id>`
- setting where the `<msg-id>` is the fsck message ID and the value
- is one of `error`, `warn` or `ignore`. For convenience, fsck prefixes
- the error/warning with the message ID, e.g. "missingEmail: invalid
- author/committer line - missing email" means that setting
- `receive.fsck.missingEmail = ignore` will hide that issue.
-+
-This feature is intended to support working with legacy repositories
-which would not pass pushing when `receive.fsckObjects = true`, allowing
-the host to accept repositories with certain known issues but still catch
-other issues.
+ Acts like `fsck.<msg-id>`, but is used by
+ linkgit:git-receive-pack[1] instead of
+ linkgit:git-fsck[1]. See the `fsck.<msg-id>` documentation for
+ details.
receive.fsck.skipList::
- The path to a sorted list of object names (i.e. one SHA-1 per
- line) that are known to be broken in a non-fatal way and should
- be ignored. This feature is useful when an established project
- should be accepted despite early commits containing errors that
- can be safely ignored such as invalid committer email addresses.
- Note: corrupt objects cannot be skipped with this setting.
+ Acts like `fsck.skipList`, but is used by
+ linkgit:git-receive-pack[1] instead of
+ linkgit:git-fsck[1]. See the `fsck.skipList` documentation for
+ details.
receive.keepAlive::
After receiving the pack from the client, `receive-pack` may
@@ -3389,6 +3484,40 @@ transfer.fsckObjects::
When `fetch.fsckObjects` or `receive.fsckObjects` are
not set, the value of this variable is used instead.
Defaults to false.
++
+When set, the fetch or receive will abort in the case of a malformed
+object or a link to a nonexistent object. In addition, various other
+issues are checked for, including legacy issues (see `fsck.<msg-id>`),
+and potential security issues like the existence of a `.GIT` directory
+or a malicious `.gitmodules` file (see the release notes for v2.2.1
+and v2.17.1 for details). Other sanity and security checks may be
+added in future releases.
++
+On the receiving side, failing fsckObjects will make those objects
+unreachable, see "QUARANTINE ENVIRONMENT" in
+linkgit:git-receive-pack[1]. On the fetch side, malformed objects will
+instead be left unreferenced in the repository.
++
+Due to the non-quarantine nature of the `fetch.fsckObjects`
+implementation it cannot be relied upon to leave the object store
+clean like `receive.fsckObjects` can.
++
+As objects are unpacked they're written to the object store, so there
+can be cases where malicious objects get introduced even though the
+"fetch" failed, only to have a subsequent "fetch" succeed because only
+new incoming objects are checked, not those that have already been
+written to the object store. That difference in behavior should not be
+relied upon. In the future, such objects may be quarantined for
+"fetch" as well.
++
+For now, the paranoid need to find some way to emulate the quarantine
+environment if they'd like the same protection as "push". E.g. in the
+case of an internal mirror do the mirroring in two steps, one to fetch
+the untrusted objects, and then do a second "push" (which will use the
+quarantine) to another internal repo, and have internal clients
+consume this pushed-to repository, or embargo internal fetches and
+only allow them once a full "fsck" has run (and no new fetches have
+happened in the meantime).
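
The two-step mirroring workaround described above could look roughly like this
(a sketch only; the repository and remote names are placeholders, and the
internal mirror is assumed to have `receive.fsckObjects` enabled):

------------
$ git -C staging.git fetch untrusted-remote
$ git -C staging.git push --mirror internal-mirror
------------
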
transfer.hideRefs::
String(s) `receive-pack` and `upload-pack` use to decide which
diff --git a/Documentation/diff-options.txt b/Documentation/diff-options.txt
index 4106490..0378cd5 100644
--- a/Documentation/diff-options.txt
+++ b/Documentation/diff-options.txt
@@ -276,16 +276,46 @@ plain::
that are added somewhere else in the diff. This mode picks up any
moved line, but it is not very useful in a review to determine
if a block of code was moved without permutation.
-zebra::
+blocks::
Blocks of moved text of at least 20 alphanumeric characters
are detected greedily. The detected blocks are
- painted using either the 'color.diff.{old,new}Moved' color or
+ painted using the 'color.diff.{old,new}Moved' color.
+ Adjacent blocks cannot be told apart.
+zebra::
+ Blocks of moved text are detected as in 'blocks' mode. The blocks
+ are painted using either the 'color.diff.{old,new}Moved' color or
'color.diff.{old,new}MovedAlternative'. The change between
the two colors indicates that a new block was detected.
-dimmed_zebra::
+dimmed-zebra::
Similar to 'zebra', but additional dimming of uninteresting parts
of moved code is performed. The bordering lines of two adjacent
blocks are considered interesting, the rest is uninteresting.
+ `dimmed_zebra` is a deprecated synonym.
+--
+
+--color-moved-ws=<modes>::
+ This configures how whitespace is ignored when performing the
+ move detection for `--color-moved`.
+ifdef::git-diff[]
+ It can be set by the `diff.colorMovedWS` configuration setting.
+endif::git-diff[]
+ These modes can be given as a comma-separated list:
++
+--
+ignore-space-at-eol::
+ Ignore changes in whitespace at EOL.
+ignore-space-change::
+ Ignore changes in amount of whitespace. This ignores whitespace
+ at line end, and considers all other sequences of one or
+ more whitespace characters to be equivalent.
+ignore-all-space::
+ Ignore whitespace when comparing lines. This ignores differences
+ even if one line has whitespace where the other line has none.
+allow-indentation-change::
+ Initially ignore any whitespace in the move detection, then
+ group the moved code only into blocks if the change in
+ whitespace is the same per line. This is incompatible with the
+ other modes.
--
--word-diff[=<mode>]::
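
As an illustration of the modes documented in this hunk, a hedged example
combining the renamed `dimmed-zebra` mode with one of the whitespace modes:

------------
$ git diff --color-moved=dimmed-zebra --color-moved-ws=ignore-space-change
$ git config diff.colorMovedWS ignore-space-change    # persist the whitespace handling
------------
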
diff --git a/Documentation/doc-diff b/Documentation/doc-diff
new file mode 100755
index 0000000..f483fe4
--- /dev/null
+++ b/Documentation/doc-diff
@@ -0,0 +1,109 @@
+#!/bin/sh
+
+OPTIONS_SPEC="\
+doc-diff [options] <from> <to> [-- <diff-options>]
+--
+j=n parallel argument to pass to make
+f force rebuild; do not rely on cached results
+"
+SUBDIRECTORY_OK=1
+. "$(git --exec-path)/git-sh-setup"
+
+parallel=
+force=
+while test $# -gt 0
+do
+ case "$1" in
+ -j)
+ parallel=$2; shift ;;
+ -f)
+ force=t ;;
+ --)
+ shift; break ;;
+ *)
+ usage ;;
+ esac
+ shift
+done
+
+if test -z "$parallel"
+then
+ parallel=$(getconf _NPROCESSORS_ONLN 2>/dev/null)
+ if test $? != 0 || test -z "$parallel"
+ then
+ parallel=1
+ fi
+fi
+
+test $# -gt 1 || usage
+from=$1; shift
+to=$1; shift
+
+from_oid=$(git rev-parse --verify "$from") || exit 1
+to_oid=$(git rev-parse --verify "$to") || exit 1
+
+cd_to_toplevel
+tmp=Documentation/tmp-doc-diff
+
+if test -n "$force"
+then
+ rm -rf "$tmp"
+fi
+
+# We'll do both builds in a single worktree, which lets "make" reuse
+# results that don't differ between the two trees.
+if ! test -d "$tmp/worktree"
+then
+ git worktree add --detach "$tmp/worktree" "$from" &&
+ dots=$(echo "$tmp/worktree" | sed 's#[^/]*#..#g') &&
+ ln -s "$dots/config.mak" "$tmp/worktree/config.mak"
+fi
+
+# generate_render_makefile <srcdir> <dstdir>
+generate_render_makefile () {
+ find "$1" -type f |
+ while read src
+ do
+ dst=$2/${src#$1/}
+ printf 'all:: %s\n' "$dst"
+ printf '%s: %s\n' "$dst" "$src"
+ printf '\t@echo >&2 " RENDER $(notdir $@)" && \\\n'
+ printf '\tmkdir -p $(dir $@) && \\\n'
+ printf '\tMANWIDTH=80 man -l $< >$@+ && \\\n'
+ printf '\tmv $@+ $@\n'
+ done
+}
+
+# render_tree <dirname> <committish>
+render_tree () {
+ # Skip install-man entirely if we already have an installed directory.
+ # We can't rely on make here, since "install-man" unconditionally
+ # copies the files (spending effort, but also updating timestamps that
+ # we then can't rely on during the render step). We use "mv" to make
+ # sure we don't get confused by a previous run that failed partway
+ # through.
+ if ! test -d "$tmp/installed/$1"
+ then
+ git -C "$tmp/worktree" checkout "$2" &&
+ make -j$parallel -C "$tmp/worktree" \
+ GIT_VERSION=omitted \
+ SOURCE_DATE_EPOCH=0 \
+ DESTDIR="$PWD/$tmp/installed/$1+" \
+ install-man &&
+ mv "$tmp/installed/$1+" "$tmp/installed/$1"
+ fi &&
+
+ # As with "installed" above, we skip the render if it's already been
+ # done. So using make here is primarily just about running in
+ # parallel.
+ if ! test -d "$tmp/rendered/$1"
+ then
+ generate_render_makefile "$tmp/installed/$1" "$tmp/rendered/$1+" |
+ make -j$parallel -f - &&
+ mv "$tmp/rendered/$1+" "$tmp/rendered/$1"
+ fi
+}
+
+render_tree $from_oid "$from" &&
+render_tree $to_oid "$to" &&
+git -C $tmp/rendered diff --no-index "$@" $from_oid $to_oid
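
Going by its OPTIONS_SPEC, the new doc-diff helper above would be invoked
roughly like this (the revisions and extra diff options are placeholders):

------------
$ Documentation/doc-diff v2.18.0 HEAD              # diff rendered manpages of two trees
$ Documentation/doc-diff -f v2.18.0 HEAD -- --stat # force a rebuild, pass options to diff
------------
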
diff --git a/Documentation/fetch-options.txt b/Documentation/fetch-options.txt
index 97d3217..8bc36af 100644
--- a/Documentation/fetch-options.txt
+++ b/Documentation/fetch-options.txt
@@ -42,6 +42,25 @@ the current repository has the same history as the source repository.
.git/shallow. This option updates .git/shallow and accept such
refs.
+--negotiation-tip=<commit|glob>::
+ By default, Git will report, to the server, commits reachable
+ from all local refs to find common commits in an attempt to
+ reduce the size of the to-be-received packfile. If specified,
+ Git will only report commits reachable from the given tips.
+ This is useful to speed up fetches when the user knows which
+ local ref is likely to have commits in common with the
+ upstream ref being fetched.
++
+This option may be specified more than once; if so, Git will report
+commits reachable from any of the given commits.
++
+The argument to this option may be a glob on ref names, a ref, or the (possibly
+abbreviated) SHA-1 of a commit. Specifying a glob is equivalent to specifying
+this option multiple times, one for each matching ref name.
++
+See also the `fetch.negotiationAlgorithm` configuration variable
+documented in linkgit:git-config[1].
+
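
For instance (a sketch of the `--negotiation-tip` option above; the ref names
and the remote are placeholders):

------------
$ git fetch --negotiation-tip=refs/heads/master \
	--negotiation-tip='refs/tags/v2.*' origin
------------
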
ifndef::git-pull[]
--dry-run::
Show what would be done, without making any changes.
diff --git a/Documentation/git-checkout.txt b/Documentation/git-checkout.txt
index ca5fc9c..9db0292 100644
--- a/Documentation/git-checkout.txt
+++ b/Documentation/git-checkout.txt
@@ -38,6 +38,15 @@ equivalent to
$ git checkout -b <branch> --track <remote>/<branch>
------------
+
+If the branch exists in multiple remotes and one of them is named by
+the `checkout.defaultRemote` configuration variable, we'll use that
+one for the purposes of disambiguation, even if the `<branch>` isn't
+unique across all remotes. Set it to
+e.g. `checkout.defaultRemote=origin` to always check out remote
+branches from there if `<branch>` is ambiguous but exists on the
+'origin' remote. See also `checkout.defaultRemote` in
+linkgit:git-config[1].
++
You could omit <branch>, in which case the command degenerates to
"check out the current branch", which is a glorified no-op with
rather expensive side-effects to show only the tracking information,
diff --git a/Documentation/git-commit-graph.txt b/Documentation/git-commit-graph.txt
index 4c97b55..dececb7 100644
--- a/Documentation/git-commit-graph.txt
+++ b/Documentation/git-commit-graph.txt
@@ -10,6 +10,7 @@ SYNOPSIS
--------
[verse]
'git commit-graph read' [--object-dir <dir>]
+'git commit-graph verify' [--object-dir <dir>]
'git commit-graph write' <options> [--object-dir <dir>]
@@ -37,12 +38,16 @@ Write a commit graph file based on the commits found in packfiles.
+
With the `--stdin-packs` option, generate the new commit graph by
walking objects only in the specified pack-indexes. (Cannot be combined
-with --stdin-commits.)
+with `--stdin-commits` or `--reachable`.)
+
With the `--stdin-commits` option, generate the new commit graph by
walking commits starting at the commits specified in stdin as a list
of OIDs in hex, one OID per line. (Cannot be combined with
---stdin-packs.)
+`--stdin-packs` or `--reachable`.)
++
+With the `--reachable` option, generate the new commit graph by walking
+commits starting at all refs. (Cannot be combined with `--stdin-commits`
+or `--stdin-packs`.)
+
With the `--append` option, include all commits that are present in the
existing commit-graph file.
@@ -52,6 +57,11 @@ existing commit-graph file.
Read a graph file given by the commit-graph file and output basic
details about the graph file. Used for debugging purposes.
+'verify'::
+
+Read the commit-graph file and verify its contents against the object
+database. Used to check for corrupted data.
+
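
A short usage sketch of the 'write' and new 'verify' subcommands described
above:

------------
$ git commit-graph write --reachable    # build the file from all refs
$ git commit-graph verify               # check it against the object database
------------
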
EXAMPLES
--------
diff --git a/Documentation/git-for-each-ref.txt b/Documentation/git-for-each-ref.txt
index 085d177..901faef 100644
--- a/Documentation/git-for-each-ref.txt
+++ b/Documentation/git-for-each-ref.txt
@@ -57,7 +57,7 @@ OPTIONS
`xx`; for example `%00` interpolates to `\0` (NUL),
`%09` to `\t` (TAB) and `%0a` to `\n` (LF).
---color[=<when>]:
+--color[=<when>]::
Respect any colors specified in the `--format` option. The
`<when>` field must be one of `always`, `never`, or `auto` (if
`<when>` is absent, behave as if `always` was given).
diff --git a/Documentation/git-fsck.txt b/Documentation/git-fsck.txt
index b9f060e..ab9a93f 100644
--- a/Documentation/git-fsck.txt
+++ b/Documentation/git-fsck.txt
@@ -110,6 +110,9 @@ Any corrupt objects you will have to find in backups or other archives
(i.e., you can just remove them and do an 'rsync' with some other site in
the hopes that somebody else has the object you have corrupted).
+If core.commitGraph is true, the commit-graph file will also be inspected
+using 'git commit-graph verify'. See linkgit:git-commit-graph[1].
+
Extracted Diagnostics
---------------------
diff --git a/Documentation/git-gc.txt b/Documentation/git-gc.txt
index 24b2dd4..f5bc98c 100644
--- a/Documentation/git-gc.txt
+++ b/Documentation/git-gc.txt
@@ -136,6 +136,10 @@ The optional configuration variable `gc.packRefs` determines if
it within all non-bare repos or it can be set to a boolean value.
This defaults to true.
+The optional configuration variable `gc.commitGraph` determines if
+'git gc' should run 'git commit-graph write'. This can be set to a
+boolean value. This defaults to false.
+
The optional configuration variable `gc.aggressiveWindow` controls how
much time is spent optimizing the delta compression of the objects in
the repository when the --aggressive option is specified. The larger
diff --git a/Documentation/git-grep.txt b/Documentation/git-grep.txt
index 0de3493..a3049af 100644
--- a/Documentation/git-grep.txt
+++ b/Documentation/git-grep.txt
@@ -17,7 +17,7 @@ SYNOPSIS
[-l | --files-with-matches] [-L | --files-without-match]
[(-O | --open-files-in-pager) [<pager>]]
[-z | --null]
- [-c | --count] [--all-match] [-q | --quiet]
+ [ -o | --only-matching ] [-c | --count] [--all-match] [-q | --quiet]
[--max-depth <depth>]
[--color[=<when>] | --no-color]
[--break] [--heading] [-p | --show-function]
@@ -201,6 +201,11 @@ providing this option will cause it to die.
Output \0 instead of the character that normally follows a
file name.
+-o::
+--only-matching::
+ Print only the matched (non-empty) parts of a matching line, with each such
+ part on a separate output line.
+
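For instance, to list only the matched substrings rather than whole lines (the
pattern here is purely illustrative):

------------
$ git grep -o -e 'TODO[^ ]*' -- '*.c'
------------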
-c::
--count::
Instead of showing every matched line, show the number of
diff --git a/Documentation/git-interpret-trailers.txt b/Documentation/git-interpret-trailers.txt
index 9111c47..b8fafb1 100644
--- a/Documentation/git-interpret-trailers.txt
+++ b/Documentation/git-interpret-trailers.txt
@@ -88,7 +88,8 @@ OPTIONS
Specify where all new trailers will be added. A setting
provided with '--where' overrides all configuration variables
and applies to all '--trailer' options until the next occurrence of
- '--where' or '--no-where'.
+ '--where' or '--no-where'. Possible values are `after`, `before`,
+ `end` or `start`.
--if-exists <action>::
--no-if-exists::
@@ -96,7 +97,8 @@ OPTIONS
least one trailer with the same <token> in the message. A setting
provided with '--if-exists' overrides all configuration variables
and applies to all '--trailer' options until the next occurrence of
- '--if-exists' or '--no-if-exists'.
+ '--if-exists' or '--no-if-exists'. Possible actions are `addIfDifferent`,
+ `addIfDifferentNeighbor`, `add`, `replace` and `doNothing`.
--if-missing <action>::
--no-if-missing::
@@ -104,7 +106,8 @@ OPTIONS
trailer with the same <token> in the message. A setting
provided with '--if-missing' overrides all configuration variables
and applies to all '--trailer' options until the next occurrence of
- '--if-missing' or '--no-if-missing'.
+ '--if-missing' or '--no-if-missing'. Possible actions are `doNothing`
+ or `add`.
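A sketch combining the placement and action values listed above (the trailer
value and input file are made up for illustration):

------------
$ git interpret-trailers --where end --if-exists addIfDifferent \
	--trailer 'Reviewed-by: A U Thor <author@example.com>' <msg.txt
------------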
--only-trailers::
Output only the trailers, not any other parts of the input.
diff --git a/Documentation/git-merge.txt b/Documentation/git-merge.txt
index 6a5c00e..eb36837 100644
--- a/Documentation/git-merge.txt
+++ b/Documentation/git-merge.txt
@@ -12,7 +12,7 @@ SYNOPSIS
'git merge' [-n] [--stat] [--no-commit] [--squash] [--[no-]edit]
[-s <strategy>] [-X <strategy-option>] [-S[<keyid>]]
[--[no-]allow-unrelated-histories]
- [--[no-]rerere-autoupdate] [-m <msg>] [<commit>...]
+ [--[no-]rerere-autoupdate] [-m <msg>] [-F <file>] [<commit>...]
'git merge' --abort
'git merge' --continue
@@ -75,6 +75,14 @@ The 'git fmt-merge-msg' command can be
used to give a good default for automated 'git merge'
invocations. The automated message can include the branch description.
+-F <file>::
+--file=<file>::
+ Read the commit message to be used for the merge commit (in
+ case one is created).
++
+If `--log` is specified, a shortlog of the commits being merged
+will be appended to the specified message.
+
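For example (the message file and branch name are hypothetical):

------------
$ git merge --log -F merge-msg.txt topic
------------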
--[no-]rerere-autoupdate::
Allow the rerere mechanism to update the index with the
result of auto-conflict resolution if possible.
@@ -122,9 +130,9 @@ merge' may need to update.
To avoid recording unrelated changes in the merge commit,
'git pull' and 'git merge' will also abort if there are any changes
-registered in the index relative to the `HEAD` commit. (One
-exception is when the changed index entries are in the state that
-would result from the merge already.)
+registered in the index relative to the `HEAD` commit. (Special
+narrow exceptions to this rule may exist depending on which merge
+strategy is in use, but generally, the index must match HEAD.)
If all named commits are already ancestors of `HEAD`, 'git merge'
will exit early with the message "Already up to date."
diff --git a/Documentation/git-p4.txt b/Documentation/git-p4.txt
index f0de3b8..41780a5 100644
--- a/Documentation/git-p4.txt
+++ b/Documentation/git-p4.txt
@@ -374,6 +374,14 @@ These options can be used to modify 'git p4 submit' behavior.
been submitted. Implies --disable-rebase. Can also be set with
git-p4.disableP4Sync. Sync with origin/master still goes ahead if possible.
+Hook for submit
+~~~~~~~~~~~~~~~
+The `p4-pre-submit` hook is executed if it exists and is executable.
+The hook takes no parameters and reads nothing from standard input. Exiting with
+a non-zero status from this script prevents `git-p4 submit` from launching.
+
+One usage scenario is to run unit tests in the hook.
+
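A minimal sketch of such a hook; the test command is an assumption and will
vary by project, and the file must be executable:

------------
$ cat .git/hooks/p4-pre-submit
#!/bin/sh
# Fail the submit if the test suite does not pass.
make test
------------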
Rebase options
~~~~~~~~~~~~~~
These options can be used to modify 'git p4 rebase' behavior.
diff --git a/Documentation/git-rebase.txt b/Documentation/git-rebase.txt
index a785041..1fbc6eb 100644
--- a/Documentation/git-rebase.txt
+++ b/Documentation/git-rebase.txt
@@ -960,8 +960,8 @@ rescheduled immediately, with a helpful message how to edit the todo list
(this typically happens when a `reset` command was inserted into the todo
list manually and contains a typo).
-The `merge` command will merge the specified revision into whatever is
-HEAD at that time. With `-C <original-commit>`, the commit message of
+The `merge` command will merge the specified revision(s) into whatever
+is HEAD at that time. With `-C <original-commit>`, the commit message of
the specified merge commit will be used. When the `-C` is changed to
a lower-case `-c`, the message will be opened in an editor after a
successful merge so that the user can edit the message.
@@ -970,7 +970,8 @@ If a `merge` command fails for any reason other than merge conflicts (i.e.
when the merge operation did not even start), it is rescheduled immediately.
At this time, the `merge` command will *always* use the `recursive`
-merge strategy, with no way to choose a different one. To work around
+merge strategy for regular merges, and `octopus` for octopus merges,
+with no way to choose a different one. To work around
this, an `exec` command can be used to call `git merge` explicitly,
using the fact that the labels are worktree-local refs (the ref
`refs/rewritten/onto` would correspond to the label `onto`, for example).
diff --git a/Documentation/git-tag.txt b/Documentation/git-tag.txt
index 87c4288..92f9c12 100644
--- a/Documentation/git-tag.txt
+++ b/Documentation/git-tag.txt
@@ -115,7 +115,7 @@ options for details.
variable if it exists, or lexicographic order otherwise. See
linkgit:git-config[1].
---color[=<when>]:
+--color[=<when>]::
Respect any colors specified in the `--format` option. The
`<when>` field must be one of `always`, `never`, or `auto` (if
`<when>` is absent, behave as if `always` was given).
diff --git a/Documentation/git-worktree.txt b/Documentation/git-worktree.txt
index afc6576..9c26be4 100644
--- a/Documentation/git-worktree.txt
+++ b/Documentation/git-worktree.txt
@@ -60,6 +60,15 @@ with a matching name, treat as equivalent to:
$ git worktree add --track -b <branch> <path> <remote>/<branch>
------------
+
+If the branch exists in multiple remotes and one of them is named by
+the `checkout.defaultRemote` configuration variable, we'll use that
+one for the purposes of disambiguation, even if the `<branch>` isn't
+unique across all remotes. Set it to
+e.g. `checkout.defaultRemote=origin` to always check out remote
+branches from there if `<branch>` is ambiguous but exists on the
+'origin' remote. See also `checkout.defaultRemote` in
+linkgit:git-config[1].
++
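For example, assuming `topic` exists on both 'origin' and another remote:

------------
$ git config checkout.defaultRemote origin
$ git worktree add ../topic topic
------------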
If `<commit-ish>` is omitted and neither `-b` nor `-B` nor `--detach` used,
then, as a convenience, the new worktree is associated with a branch
(call it `<branch>`) named after `$(basename <path>)`. If `<branch>`
diff --git a/Documentation/githooks.txt b/Documentation/githooks.txt
index e3c283a..9590443 100644
--- a/Documentation/githooks.txt
+++ b/Documentation/githooks.txt
@@ -485,6 +485,13 @@ The exit status determines whether git will use the data from the
hook to limit its search. On error, it will fall back to verifying
all files and folders.
+p4-pre-submit
+~~~~~~~~~~~~~
+
+This hook is invoked by `git-p4 submit`. It takes no parameters and reads
+nothing from standard input. Exiting with a non-zero status from this script
+prevents `git-p4 submit` from launching. Run `git-p4 submit --help` for details.
+
GIT
---
Part of the linkgit:git[1] suite
diff --git a/Documentation/technical/commit-graph.txt b/Documentation/technical/commit-graph.txt
index e1a883e..c664acb 100644
--- a/Documentation/technical/commit-graph.txt
+++ b/Documentation/technical/commit-graph.txt
@@ -118,9 +118,6 @@ Future Work
- The commit graph feature currently does not honor commit grafts. This can
be remedied by duplicating or refactoring the current graft logic.
-- The 'commit-graph' subcommand does not have a "verify" mode that is
- necessary for integration with fsck.
-
- After computing and storing generation numbers, we must make graph
walks aware of generation numbers to gain the performance benefits they
enable. This will mostly be accomplished by swapping a commit-date-ordered
@@ -130,25 +127,6 @@ Future Work
- 'log --topo-order'
- 'tag --merged'
-- Currently, parse_commit_gently() requires filling in the root tree
- object for a commit. This passes through lookup_tree() and consequently
- lookup_object(). Also, it calls lookup_commit() when loading the parents.
- These method calls check the ODB for object existence, even if the
- consumer does not need the content. For example, we do not need the
- tree contents when computing merge bases. Now that commit parsing is
- removed from the computation time, these lookup operations are the
- slowest operations keeping graph walks from being fast. Consider
- loading these objects without verifying their existence in the ODB and
- only loading them fully when consumers need them. Consider a method
- such as "ensure_tree_loaded(commit)" that fully loads a tree before
- using commit->tree.
-
-- The current design uses the 'commit-graph' subcommand to generate the graph.
- When this feature stabilizes enough to recommend to most users, we should
- add automatic graph writes to common operations that create many commits.
- For example, one could compute a graph on 'clone', 'fetch', or 'repack'
- commands.
-
- A server could provide a commit graph file as part of the network protocol
to avoid extra calculations by clients. This feature is only of benefit if
the user is willing to trust the file, because verifying the file is correct
diff --git a/Documentation/technical/hash-function-transition.txt b/Documentation/technical/hash-function-transition.txt
index 4ab6cd1..bc2ace2 100644
--- a/Documentation/technical/hash-function-transition.txt
+++ b/Documentation/technical/hash-function-transition.txt
@@ -59,14 +59,11 @@ that are believed to be cryptographically secure.
Goals
-----
-Where NewHash is a strong 256-bit hash function to replace SHA-1 (see
-"Selection of a New Hash", below):
-
-1. The transition to NewHash can be done one local repository at a time.
+1. The transition to SHA-256 can be done one local repository at a time.
a. Requiring no action by any other party.
- b. A NewHash repository can communicate with SHA-1 Git servers
+ b. A SHA-256 repository can communicate with SHA-1 Git servers
(push/fetch).
- c. Users can use SHA-1 and NewHash identifiers for objects
+ c. Users can use SHA-1 and SHA-256 identifiers for objects
interchangeably (see "Object names on the command line", below).
d. New signed objects make use of a stronger hash function than
SHA-1 for their security guarantees.
@@ -79,7 +76,7 @@ Where NewHash is a strong 256-bit hash function to replace SHA-1 (see
Non-Goals
---------
-1. Add NewHash support to Git protocol. This is valuable and the
+1. Add SHA-256 support to Git protocol. This is valuable and the
logical next step but it is out of scope for this initial design.
2. Transparently improving the security of existing SHA-1 signed
objects.
@@ -87,26 +84,26 @@ Non-Goals
repository.
4. Taking the opportunity to fix other bugs in Git's formats and
protocols.
-5. Shallow clones and fetches into a NewHash repository. (This will
- change when we add NewHash support to Git protocol.)
-6. Skip fetching some submodules of a project into a NewHash
- repository. (This also depends on NewHash support in Git
+5. Shallow clones and fetches into a SHA-256 repository. (This will
+ change when we add SHA-256 support to Git protocol.)
+6. Skip fetching some submodules of a project into a SHA-256
+ repository. (This also depends on SHA-256 support in Git
protocol.)
Overview
--------
We introduce a new repository format extension. Repositories with this
-extension enabled use NewHash instead of SHA-1 to name their objects.
+extension enabled use SHA-256 instead of SHA-1 to name their objects.
This affects both object names and object content --- both the names
of objects and all references to other objects within an object are
switched to the new hash function.
-NewHash repositories cannot be read by older versions of Git.
+SHA-256 repositories cannot be read by older versions of Git.
-Alongside the packfile, a NewHash repository stores a bidirectional
-mapping between NewHash and SHA-1 object names. The mapping is generated
+Alongside the packfile, a SHA-256 repository stores a bidirectional
+mapping between SHA-256 and SHA-1 object names. The mapping is generated
locally and can be verified using "git fsck". Object lookups use this
-mapping to allow naming objects using either their SHA-1 and NewHash names
+mapping to allow naming objects using either their SHA-1 and SHA-256 names
interchangeably.
"git cat-file" and "git hash-object" gain options to display an object
@@ -116,7 +113,7 @@ object database so that they can be named using the appropriate name
(using the bidirectional hash mapping).
Fetches from a SHA-1 based server convert the fetched objects into
-NewHash form and record the mapping in the bidirectional mapping table
+SHA-256 form and record the mapping in the bidirectional mapping table
(see below for details). Pushes to a SHA-1 based server convert the
objects being pushed into sha1 form so the server does not have to be
aware of the hash function the client is using.
@@ -125,19 +122,19 @@ Detailed Design
---------------
Repository format extension
~~~~~~~~~~~~~~~~~~~~~~~~~~~
-A NewHash repository uses repository format version `1` (see
+A SHA-256 repository uses repository format version `1` (see
Documentation/technical/repository-version.txt) with extensions
`objectFormat` and `compatObjectFormat`:
[core]
repositoryFormatVersion = 1
[extensions]
- objectFormat = newhash
+ objectFormat = sha256
compatObjectFormat = sha1
The combination of setting `core.repositoryFormatVersion=1` and
populating `extensions.*` ensures that all versions of Git later than
-`v0.99.9l` will die instead of trying to operate on the NewHash
+`v0.99.9l` will die instead of trying to operate on the SHA-256
repository, instead producing an error message.
# Between v0.99.9l and v2.7.0
@@ -155,36 +152,36 @@ repository extensions.
Object names
~~~~~~~~~~~~
Objects can be named by their 40 hexadecimal digit sha1-name or 64
-hexadecimal digit newhash-name, plus names derived from those (see
+hexadecimal digit sha256-name, plus names derived from those (see
gitrevisions(7)).
The sha1-name of an object is the SHA-1 of the concatenation of its
type, length, a nul byte, and the object's sha1-content. This is the
traditional <sha1> used in Git to name objects.
-The newhash-name of an object is the NewHash of the concatenation of its
-type, length, a nul byte, and the object's newhash-content.
+The sha256-name of an object is the SHA-256 of the concatenation of its
+type, length, a nul byte, and the object's sha256-content.
Object format
~~~~~~~~~~~~~
The content as a byte sequence of a tag, commit, or tree object named
-by sha1 and newhash differ because an object named by newhash-name refers to
-other objects by their newhash-names and an object named by sha1-name
+by sha1 and sha256 differ because an object named by sha256-name refers to
+other objects by their sha256-names and an object named by sha1-name
refers to other objects by their sha1-names.
-The newhash-content of an object is the same as its sha1-content, except
-that objects referenced by the object are named using their newhash-names
+The sha256-content of an object is the same as its sha1-content, except
+that objects referenced by the object are named using their sha256-names
instead of sha1-names. Because a blob object does not refer to any
-other object, its sha1-content and newhash-content are the same.
+other object, its sha1-content and sha256-content are the same.
-The format allows round-trip conversion between newhash-content and
+The format allows round-trip conversion between sha256-content and
sha1-content.
Object storage
~~~~~~~~~~~~~~
Loose objects use zlib compression and packed objects use the packed
format described in Documentation/technical/pack-format.txt, just like
-today. The content that is compressed and stored uses newhash-content
+today. The content that is compressed and stored uses sha256-content
instead of sha1-content.
Pack index
@@ -255,10 +252,10 @@ network byte order):
up to and not including the table of CRC32 values.
- Zero or more NUL bytes.
- The trailer consists of the following:
- - A copy of the 20-byte NewHash checksum at the end of the
+ - A copy of the 32-byte SHA-256 checksum at the end of the
corresponding packfile.
- - 20-byte NewHash checksum of all of the above.
+ - 32-byte SHA-256 checksum of all of the above.
Loose object index
~~~~~~~~~~~~~~~~~~
@@ -266,7 +263,7 @@ A new file $GIT_OBJECT_DIR/loose-object-idx contains information about
all loose objects. Its format is
# loose-object-idx
- (newhash-name SP sha1-name LF)*
+ (sha256-name SP sha1-name LF)*
where the object names are in hexadecimal format. The file is not
sorted.
@@ -292,8 +289,8 @@ To remove entries (e.g. in "git pack-refs" or "git-prune"):
Translation table
~~~~~~~~~~~~~~~~~
The index files support a bidirectional mapping between sha1-names
-and newhash-names. The lookup proceeds similarly to ordinary object
-lookups. For example, to convert a sha1-name to a newhash-name:
+and sha256-names. The lookup proceeds similarly to ordinary object
+lookups. For example, to convert a sha1-name to a sha256-name:
1. Look for the object in idx files. If a match is present in the
idx's sorted list of truncated sha1-names, then:
@@ -301,8 +298,8 @@ lookups. For example, to convert a sha1-name to a newhash-name:
name order mapping.
b. Read the corresponding entry in the full sha1-name table to
verify we found the right object. If it is, then
- c. Read the corresponding entry in the full newhash-name table.
- That is the object's newhash-name.
+ c. Read the corresponding entry in the full sha256-name table.
+ That is the object's sha256-name.
2. Check for a loose object. Read lines from loose-object-idx until
we find a match.
@@ -318,25 +315,25 @@ for all objects in the object store.
Reading an object's sha1-content
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-The sha1-content of an object can be read by converting all newhash-names
-its newhash-content references to sha1-names using the translation table.
+The sha1-content of an object can be read by converting all sha256-names
+its sha256-content references to sha1-names using the translation table.
Fetch
~~~~~
Fetching from a SHA-1 based server requires translating between SHA-1
-and NewHash based representations on the fly.
+and SHA-256 based representations on the fly.
SHA-1s named in the ref advertisement that are present on the client
-can be translated to NewHash and looked up as local objects using the
+can be translated to SHA-256 and looked up as local objects using the
translation table.
Negotiation proceeds as today. Any "have"s generated locally are
converted to SHA-1 before being sent to the server, and SHA-1s
-mentioned by the server are converted to NewHash when looking them up
+mentioned by the server are converted to SHA-256 when looking them up
locally.
After negotiation, the server sends a packfile containing the
-requested objects. We convert the packfile to NewHash format using
+requested objects. We convert the packfile to SHA-256 format using
the following steps:
1. index-pack: inflate each object in the packfile and compute its
@@ -351,12 +348,12 @@ the following steps:
(This list only contains objects reachable from the "wants". If the
pack from the server contained additional extraneous objects, then
they will be discarded.)
-3. convert to newhash: open a new (newhash) packfile. Read the topologically
+3. convert to sha256: open a new (sha256) packfile. Read the topologically
sorted list just generated. For each object, inflate its
- sha1-content, convert to newhash-content, and write it to the newhash
- pack. Record the new sha1<->newhash mapping entry for use in the idx.
+ sha1-content, convert to sha256-content, and write it to the sha256
+ pack. Record the new sha1<->sha256 mapping entry for use in the idx.
4. sort: reorder entries in the new pack to match the order of objects
- in the pack the server generated and include blobs. Write a newhash idx
+ in the pack the server generated and include blobs. Write a sha256 idx
file
5. clean up: remove the SHA-1 based pack file, index, and
topologically sorted list obtained from the server in steps 1
@@ -388,16 +385,16 @@ send-pack.
Signed Commits
~~~~~~~~~~~~~~
-We add a new field "gpgsig-newhash" to the commit object format to allow
+We add a new field "gpgsig-sha256" to the commit object format to allow
signing commits without relying on SHA-1. It is similar to the
-existing "gpgsig" field. Its signed payload is the newhash-content of the
-commit object with any "gpgsig" and "gpgsig-newhash" fields removed.
+existing "gpgsig" field. Its signed payload is the sha256-content of the
+commit object with any "gpgsig" and "gpgsig-sha256" fields removed.
This means commits can be signed
1. using SHA-1 only, as in existing signed commit objects
-2. using both SHA-1 and NewHash, by using both gpgsig-newhash and gpgsig
+2. using both SHA-1 and SHA-256, by using both gpgsig-sha256 and gpgsig
fields.
-3. using only NewHash, by only using the gpgsig-newhash field.
+3. using only SHA-256, by only using the gpgsig-sha256 field.
Old versions of "git verify-commit" can verify the gpgsig signature in
cases (1) and (2) without modifications and view case (3) as an
@@ -405,24 +402,24 @@ ordinary unsigned commit.
Signed Tags
~~~~~~~~~~~
-We add a new field "gpgsig-newhash" to the tag object format to allow
+We add a new field "gpgsig-sha256" to the tag object format to allow
signing tags without relying on SHA-1. Its signed payload is the
-newhash-content of the tag with its gpgsig-newhash field and "-----BEGIN PGP
+sha256-content of the tag with its gpgsig-sha256 field and "-----BEGIN PGP
SIGNATURE-----" delimited in-body signature removed.
This means tags can be signed
1. using SHA-1 only, as in existing signed tag objects
-2. using both SHA-1 and NewHash, by using gpgsig-newhash and an in-body
+2. using both SHA-1 and SHA-256, by using gpgsig-sha256 and an in-body
signature.
-3. using only NewHash, by only using the gpgsig-newhash field.
+3. using only SHA-256, by only using the gpgsig-sha256 field.
Mergetag embedding
~~~~~~~~~~~~~~~~~~
The mergetag field in the sha1-content of a commit contains the
sha1-content of a tag that was merged by that commit.
-The mergetag field in the newhash-content of the same commit contains the
-newhash-content of the same tag.
+The mergetag field in the sha256-content of the same commit contains the
+sha256-content of the same tag.
Submodules
~~~~~~~~~~
@@ -497,7 +494,7 @@ Caveats
-------
Invalid objects
~~~~~~~~~~~~~~~
-The conversion from sha1-content to newhash-content retains any
+The conversion from sha1-content to sha256-content retains any
brokenness in the original object (e.g., tree entry modes encoded with
leading 0, tree objects whose paths are not sorted correctly, and
commit objects without an author or committer). This is a deliberate
@@ -516,7 +513,7 @@ allow lifting this restriction.
Alternates
~~~~~~~~~~
-For the same reason, a newhash repository cannot borrow objects from a
+For the same reason, a sha256 repository cannot borrow objects from a
sha1 repository using objects/info/alternates or
$GIT_ALTERNATE_OBJECT_REPOSITORIES.
@@ -524,20 +521,20 @@ git notes
~~~~~~~~~
The "git notes" tool annotates objects using their sha1-name as key.
This design does not describe a way to migrate notes trees to use
-newhash-names. That migration is expected to happen separately (for
+sha256-names. That migration is expected to happen separately (for
example using a file at the root of the notes tree to describe which
hash it uses).
Server-side cost
~~~~~~~~~~~~~~~~
-Until Git protocol gains NewHash support, using NewHash based storage
+Until Git protocol gains SHA-256 support, using SHA-256 based storage
on public-facing Git servers is strongly discouraged. Once Git
-protocol gains NewHash support, NewHash based servers are likely not
+protocol gains SHA-256 support, SHA-256 based servers are likely not
to support SHA-1 compatibility, to avoid what may be a very expensive
hash reencode during clone and to encourage peers to modernize.
The design described here allows fetches by SHA-1 clients of a
-personal NewHash repository because it's not much more difficult than
+personal SHA-256 repository because it's not much more difficult than
allowing pushes from that repository. This support needs to be guarded
by a configuration option --- servers like git.kernel.org that serve a
large number of clients would not be expected to bear that cost.
@@ -547,7 +544,7 @@ Meaning of signatures
The signed payload for signed commits and tags does not explicitly
name the hash used to identify objects. If some day Git adopts a new
hash function with the same length as the current SHA-1 (40
-hexadecimal digit) or NewHash (64 hexadecimal digit) objects then the
+hexadecimal digit) or SHA-256 (64 hexadecimal digit) objects then the
intent behind the PGP signed payload in an object signature is
unclear:
@@ -562,7 +559,7 @@ Does this mean Git v2.12.0 is the commit with sha1-name
e7e07d5a4fcc2a203d9873968ad3e6bd4d7419d7 or the commit with
new-40-digit-hash-name e7e07d5a4fcc2a203d9873968ad3e6bd4d7419d7?
-Fortunately NewHash and SHA-1 have different lengths. If Git starts
+Fortunately SHA-256 and SHA-1 have different lengths. If Git starts
using another hash with the same length to name objects, then it will
need to change the format of signed payloads using that hash to
address this issue.
@@ -574,24 +571,24 @@ supports four different modes of operation:
1. ("dark launch") Treat object names input by the user as SHA-1 and
convert any object names written to output to SHA-1, but store
- objects using NewHash. This allows users to test the code with no
+ objects using SHA-256. This allows users to test the code with no
visible behavior change except for performance. This allows
running even tests that assume the SHA-1 hash function, to
sanity-check the behavior of the new mode.
- 2. ("early transition") Allow both SHA-1 and NewHash object names in
+ 2. ("early transition") Allow both SHA-1 and SHA-256 object names in
input. Any object names written to output use SHA-1. This allows
users to continue to make use of SHA-1 to communicate with peers
(e.g. by email) that have not migrated yet and prepares for mode 3.
- 3. ("late transition") Allow both SHA-1 and NewHash object names in
- input. Any object names written to output use NewHash. In this
+ 3. ("late transition") Allow both SHA-1 and SHA-256 object names in
+ input. Any object names written to output use SHA-256. In this
mode, users are using a more secure object naming method by
default. The disruption is minimal as long as most of their peers
are in mode 2 or mode 3.
4. ("post-transition") Treat object names input by the user as
- NewHash and write output using NewHash. This is safer than mode 3
+ SHA-256 and write output using SHA-256. This is safer than mode 3
because there is less risk that input is incorrectly interpreted
using the wrong hash function.
@@ -601,27 +598,31 @@ The user can also explicitly specify which format to use for a
particular revision specifier and for output, overriding the mode. For
example:
-git --output-format=sha1 log abac87a^{sha1}..f787cac^{newhash}
+git --output-format=sha1 log abac87a^{sha1}..f787cac^{sha256}
-Selection of a New Hash
------------------------
+Choice of Hash
+--------------
In early 2005, around the time that Git was written, Xiaoyun Wang,
Yiqun Lisa Yin, and Hongbo Yu announced an attack finding SHA-1
collisions in 2^69 operations. In August they published details.
Luckily, no practical demonstrations of a collision in full SHA-1 were
published until 10 years later, in 2017.
-The hash function NewHash to replace SHA-1 should be stronger than
-SHA-1 was: we would like it to be trustworthy and useful in practice
-for at least 10 years.
+Git v2.13.0 and later subsequently moved to a hardened SHA-1
+implementation by default that mitigates the SHAttered attack, but
+SHA-1 is still believed to be weak.
+
+The hash to replace this hardened SHA-1 should be stronger than SHA-1
+was: we would like it to be trustworthy and useful in practice for at
+least 10 years.
Some other relevant properties:
1. A 256-bit hash (long enough to match common security practice; not
excessively long to hurt performance and disk usage).
-2. High quality implementations should be widely available (e.g. in
- OpenSSL).
+2. High quality implementations should be widely available (e.g., in
+ OpenSSL and Apple CommonCrypto).
3. The hash function's properties should match Git's needs (e.g. Git
requires collision and 2nd preimage resistance and does not require
@@ -630,14 +631,13 @@ Some other relevant properties:
4. As a tiebreaker, the hash should be fast to compute (fortunately
many contenders are faster than SHA-1).
-Some hashes under consideration are SHA-256, SHA-512/256, SHA-256x16,
-K12, and BLAKE2bp-256.
+We choose SHA-256.
Transition plan
---------------
Some initial steps can be implemented independently of one another:
- adding a hash function API (vtable)
-- teaching fsck to tolerate the gpgsig-newhash field
+- teaching fsck to tolerate the gpgsig-sha256 field
- excluding gpgsig-* from the fields copied by "git commit --amend"
- annotating tests that depend on SHA-1 values with a SHA1 test
prerequisite
@@ -664,7 +664,7 @@ Next comes introduction of compatObjectFormat:
- adding appropriate index entries when adding a new object to the
object store
- --output-format option
-- ^{sha1} and ^{newhash} revision notation
+- ^{sha1} and ^{sha256} revision notation
- configuration to specify default input and output format (see
"Object names on the command line" above)
@@ -672,7 +672,7 @@ The next step is supporting fetches and pushes to SHA-1 repositories:
- allow pushes to a repository using the compat format
- generate a topologically sorted list of the SHA-1 names of fetched
objects
-- convert the fetched packfile to newhash format and generate an idx
+- convert the fetched packfile to sha256 format and generate an idx
file
- re-sort to match the order of objects in the fetched packfile
@@ -680,30 +680,30 @@ The infrastructure supporting fetch also allows converting an existing
repository. In converted repositories and new clones, end users can
gain support for the new hash function without any visible change in
behavior (see "dark launch" in the "Object names on the command line"
-section). In particular this allows users to verify NewHash signatures
+section). In particular this allows users to verify SHA-256 signatures
on objects in the repository, and it should ensure the transition code
is stable in production in preparation for using it more widely.
Over time projects would encourage their users to adopt the "early
transition" and then "late transition" modes to take advantage of the
-new, more futureproof NewHash object names.
+new, more futureproof SHA-256 object names.
When objectFormat and compatObjectFormat are both set, commands
-generating signatures would generate both SHA-1 and NewHash signatures
+generating signatures would generate both SHA-1 and SHA-256 signatures
by default to support both new and old users.
-In projects using NewHash heavily, users could be encouraged to adopt
+In projects using SHA-256 heavily, users could be encouraged to adopt
the "post-transition" mode to avoid accidentally making implicit use
of SHA-1 object names.
Once a critical mass of users have upgraded to a version of Git that
-can verify NewHash signatures and have converted their existing
+can verify SHA-256 signatures and have converted their existing
repositories to support verifying them, we can add support for a
-setting to generate only NewHash signatures. This is expected to be at
+setting to generate only SHA-256 signatures. This is expected to be at
least a year later.
That is also a good moment to advertise the ability to convert
-repositories to use NewHash only, stripping out all SHA-1 related
+repositories to use SHA-256 only, stripping out all SHA-1 related
metadata. This improves performance by eliminating translation
overhead and security by avoiding the possibility of accidentally
relying on the safety of SHA-1.
@@ -742,16 +742,16 @@ using the old hash function.
Signed objects with multiple hashes
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-Instead of introducing the gpgsig-newhash field in commit and tag objects
-for newhash-content based signatures, an earlier version of this design
-added "hash newhash <newhash-name>" fields to strengthen the existing
+Instead of introducing the gpgsig-sha256 field in commit and tag objects
+for sha256-content based signatures, an earlier version of this design
+added "hash sha256 <sha256-name>" fields to strengthen the existing
sha1-content based signatures.
In other words, a single signature was used to attest to the object
content using both hash functions. This had some advantages:
* Using one signature instead of two speeds up the signing process.
* Having one signed payload with both hashes allows the signer to
- attest to the sha1-name and newhash-name referring to the same object.
+ attest to the sha1-name and sha256-name referring to the same object.
* All users consume the same signature. Broken signatures are likely
to be detected quickly using current versions of git.
@@ -760,11 +760,11 @@ However, it also came with disadvantages:
objects it references, even after the transition is complete and
translation table is no longer needed for anything else. To support
this, the design added fields such as "hash sha1 tree <sha1-name>"
- and "hash sha1 parent <sha1-name>" to the newhash-content of a signed
+ and "hash sha1 parent <sha1-name>" to the sha256-content of a signed
commit, complicating the conversion process.
* Allowing signed objects without a sha1 (for after the transition is
complete) complicated the design further, requiring a "nohash sha1"
- field to suppress including "hash sha1" fields in the newhash-content
+ field to suppress including "hash sha1" fields in the sha256-content
and signed payload.
Lazily populated translation table
@@ -772,7 +772,7 @@ Lazily populated translation table
Some of the work of building the translation table could be deferred to
push time, but that would significantly complicate and slow down pushes.
Calculating the sha1-name at object creation time at the same time it is
-being streamed to disk and having its newhash-name calculated should be
+being streamed to disk and having its sha256-name calculated should be
an acceptable cost.
Document History
@@ -814,6 +814,12 @@ Incorporated suggestions from jonathantanmy and sbeller:
* avoid loose object overhead by packing more aggressively in
"git gc --auto"
+Later history:
+
+ See the history of this file in git.git for subsequent edits. This
+ document history is no longer being maintained, as it would now be
+ superfluous to the commit log.
+
[1] http://public-inbox.org/git/CA+55aFzJtejiCjV0e43+9oR3QuJK2PiFiLQemytoLpyJWe6P9w@mail.gmail.com/
[2] http://public-inbox.org/git/CA+55aFz+gkAsDZ24zmePQuEs1XPS9BP_s8O7Q4wQ7LV7X5-oDA@mail.gmail.com/
[3] http://public-inbox.org/git/20170306084353.nrns455dvkdsfgo5@sigill.intra.peff.net/
diff --git a/Documentation/technical/http-protocol.txt b/Documentation/technical/http-protocol.txt
index 64f49d0..9c5b6f0 100644
--- a/Documentation/technical/http-protocol.txt
+++ b/Documentation/technical/http-protocol.txt
@@ -338,11 +338,11 @@ server advertises capability `allow-tip-sha1-in-want` or
request_end
request_end = "0000" / "done"
- want_list = PKT-LINE(want NUL cap_list LF)
+ want_list = PKT-LINE(want SP cap_list LF)
*(want_pkt)
want_pkt = PKT-LINE(want LF)
want = "want" SP id
- cap_list = *(SP capability) SP
+ cap_list = capability *(SP capability)
have_list = *PKT-LINE("have" SP id LF)
diff --git a/Documentation/technical/pack-protocol.txt b/Documentation/technical/pack-protocol.txt
index 508a344..6ac774d 100644
--- a/Documentation/technical/pack-protocol.txt
+++ b/Documentation/technical/pack-protocol.txt
@@ -50,7 +50,8 @@ Each Extra Parameter takes the form of `<key>=<value>` or `<key>`.
Servers that receive any such Extra Parameters MUST ignore all
unrecognized keys. Currently, the only Extra Parameter recognized is
-"version=1".
+"version" with a value of '1' or '2'. See protocol-v2.txt for more
+information on protocol version 2.
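As a hedged aside, a client typically ends up sending "version=2" by setting
the `protocol.version` configuration, for example:

------------
$ git -c protocol.version=2 ls-remote origin
------------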
Git Transport
-------------
diff --git a/Makefile b/Makefile
index 08e5c54..73aa943 100644
--- a/Makefile
+++ b/Makefile
@@ -484,6 +484,11 @@ all::
# The DEVELOPER mode enables -Wextra with a few exceptions. By
# setting this flag the exceptions are removed, and all of
# -Wextra is used.
+#
+# pedantic:
+#
+# Enable -pedantic compilation. This also disables
+# USE_PARENS_AROUND_GETTEXT_N to produce only relevant warnings.
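A sketch of enabling this, assuming the knob is selected through the DEVOPTS
variable like the other developer options:

------------
$ make DEVELOPER=1 DEVOPTS=pedantic
------------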
GIT-VERSION-FILE: FORCE
@$(SHELL_PATH) ./GIT-VERSION-GEN
@@ -564,7 +569,7 @@ SPATCH = spatch
export TCL_PATH TCLTK_PATH
SPARSE_FLAGS =
-SPATCH_FLAGS = --all-includes
+SPATCH_FLAGS = --all-includes --patch .
@@ -709,6 +714,7 @@ TEST_BUILTINS_OBJS += test-example-decorate.o
TEST_BUILTINS_OBJS += test-genrandom.o
TEST_BUILTINS_OBJS += test-hashmap.o
TEST_BUILTINS_OBJS += test-index-version.o
+TEST_BUILTINS_OBJS += test-json-writer.o
TEST_BUILTINS_OBJS += test-lazy-init-name-hash.o
TEST_BUILTINS_OBJS += test-match-trees.o
TEST_BUILTINS_OBJS += test-mergesort.o
@@ -719,6 +725,7 @@ TEST_BUILTINS_OBJS += test-prio-queue.o
TEST_BUILTINS_OBJS += test-read-cache.o
TEST_BUILTINS_OBJS += test-ref-store.o
TEST_BUILTINS_OBJS += test-regex.o
+TEST_BUILTINS_OBJS += test-repository.o
TEST_BUILTINS_OBJS += test-revision-walking.o
TEST_BUILTINS_OBJS += test-run-command.o
TEST_BUILTINS_OBJS += test-scrap-cache-tree.o
@@ -859,6 +866,7 @@ LIB_OBJS += ewah/ewah_bitmap.o
LIB_OBJS += ewah/ewah_io.o
LIB_OBJS += ewah/ewah_rlw.o
LIB_OBJS += exec-cmd.o
+LIB_OBJS += fetch-negotiator.o
LIB_OBJS += fetch-object.o
LIB_OBJS += fetch-pack.o
LIB_OBJS += fsck.o
@@ -871,6 +879,7 @@ LIB_OBJS += hashmap.o
LIB_OBJS += help.o
LIB_OBJS += hex.o
LIB_OBJS += ident.o
+LIB_OBJS += json-writer.o
LIB_OBJS += kwset.o
LIB_OBJS += levenshtein.o
LIB_OBJS += line-log.o
@@ -891,6 +900,8 @@ LIB_OBJS += merge-blobs.o
LIB_OBJS += merge-recursive.o
LIB_OBJS += mergesort.o
LIB_OBJS += name-hash.o
+LIB_OBJS += negotiator/default.o
+LIB_OBJS += negotiator/skipping.o
LIB_OBJS += notes.o
LIB_OBJS += notes-cache.o
LIB_OBJS += notes-merge.o
@@ -2033,7 +2044,7 @@ $(BUILT_INS): git$X
command-list.h: generate-cmdlist.sh command-list.txt
-command-list.h: $(wildcard Documentation/git*.txt)
+command-list.h: $(wildcard Documentation/git*.txt) Documentation/config.txt
$(QUIET_GEN)$(SHELL_PATH) ./generate-cmdlist.sh command-list.txt >$@+ && mv $@+ $@
SCRIPT_DEFINES = $(SHELL_PATH_SQ):$(DIFF_SQ):$(GIT_VERSION):\
@@ -2669,10 +2680,16 @@ check: command-list.h
fi
C_SOURCES = $(patsubst %.o,%.c,$(C_OBJ))
-%.cocci.patch: %.cocci $(C_SOURCES)
+ifdef DC_SHA1_SUBMODULE
+COCCI_SOURCES = $(filter-out sha1collisiondetection/%,$(C_SOURCES))
+else
+COCCI_SOURCES = $(filter-out sha1dc/%,$(C_SOURCES))
+endif
+
+%.cocci.patch: %.cocci $(COCCI_SOURCES)
@echo ' ' SPATCH $<; \
ret=0; \
- for f in $(C_SOURCES); do \
+ for f in $(COCCI_SOURCES); do \
$(SPATCH) --sp-file $< $$f $(SPATCH_FLAGS) || \
{ ret=$$?; break; }; \
done >$@+ 2>$@.log; \
@@ -2686,7 +2703,9 @@ C_SOURCES = $(patsubst %.o,%.c,$(C_OBJ))
then \
echo ' ' SPATCH result: $@; \
fi
-coccicheck: $(patsubst %.cocci,%.cocci.patch,$(wildcard contrib/coccinelle/*.cocci))
+coccicheck: $(addsuffix .patch,$(wildcard contrib/coccinelle/*.cocci))
+
+.PHONY: coccicheck
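For context, the static analysis touched above is typically driven like this
(a sketch; the generated patches land next to the .cocci files):

------------
$ make coccicheck
$ cat contrib/coccinelle/*.cocci.patch
$ make cocciclean
------------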
### Installation rules
@@ -2898,7 +2917,10 @@ profile-clean:
$(RM) $(addsuffix *.gcda,$(addprefix $(PROFILE_DIR)/, $(object_dirs)))
$(RM) $(addsuffix *.gcno,$(addprefix $(PROFILE_DIR)/, $(object_dirs)))
-clean: profile-clean coverage-clean
+cocciclean:
+ $(RM) contrib/coccinelle/*.cocci.patch*
+
+clean: profile-clean coverage-clean cocciclean
$(RM) *.res
$(RM) $(OBJECTS)
$(RM) $(LIB_FILE) $(XDIFF_LIB) $(VCSSVN_LIB)
@@ -2910,7 +2932,6 @@ clean: profile-clean coverage-clean
$(RM) -r $(GIT_TARNAME) .doc-tmp-dir
$(RM) $(GIT_TARNAME).tar.gz git-core_$(GIT_VERSION)-*.tar.gz
$(RM) $(htmldocs).tar.gz $(manpages).tar.gz
- $(RM) contrib/coccinelle/*.cocci.patch*
$(MAKE) -C Documentation/ clean
ifndef NO_PERL
$(MAKE) -C gitweb clean
@@ -2926,7 +2947,7 @@ endif
$(RM) GIT-USER-AGENT GIT-PREFIX
$(RM) GIT-SCRIPT-DEFINES GIT-PERL-DEFINES GIT-PERL-HEADER GIT-PYTHON-VARS
-.PHONY: all install profile-clean clean strip
+.PHONY: all install profile-clean cocciclean clean strip
.PHONY: shell_compatibility_test please_set_SHELL_PATH_to_a_more_modern_shell
.PHONY: FORCE cscope
diff --git a/advice.c b/advice.c
index 52aa85b..3561cd6 100644
--- a/advice.c
+++ b/advice.c
@@ -23,6 +23,7 @@ int advice_add_embedded_repo = 1;
int advice_ignored_hook = 1;
int advice_waiting_for_editor = 1;
int advice_graft_file_deprecated = 1;
+int advice_checkout_ambiguous_remote_branch_name = 1;
static int advice_use_color = -1;
static char advice_colors[][COLOR_MAXLEN] = {
@@ -75,6 +76,7 @@ static struct {
{ "ignoredHook", &advice_ignored_hook },
{ "waitingForEditor", &advice_waiting_for_editor },
{ "graftFileDeprecated", &advice_graft_file_deprecated },
+ { "checkoutAmbiguousRemoteBranchName", &advice_checkout_ambiguous_remote_branch_name },
/* make this an alias for backward compatibility */
{ "pushNonFastForward", &advice_push_update_rejected }
diff --git a/advice.h b/advice.h
index 7e93778..ab24df0 100644
--- a/advice.h
+++ b/advice.h
@@ -23,6 +23,7 @@ extern int advice_add_embedded_repo;
extern int advice_ignored_hook;
extern int advice_waiting_for_editor;
extern int advice_graft_file_deprecated;
+extern int advice_checkout_ambiguous_remote_branch_name;
int git_default_advice_config(const char *var, const char *value);
__attribute__((format (printf, 1, 2)))
diff --git a/apply.c b/apply.c
index 49752cd..2594927 100644
--- a/apply.c
+++ b/apply.c
@@ -4093,12 +4093,12 @@ static int build_fake_ancestor(struct apply_state *state, struct patch *list)
return error(_("sha1 information is lacking or useless "
"(%s)."), name);
- ce = make_cache_entry(patch->old_mode, oid.hash, name, 0, 0);
+ ce = make_cache_entry(&result, patch->old_mode, &oid, name, 0, 0);
if (!ce)
return error(_("make_cache_entry failed for path '%s'"),
name);
if (add_index_entry(&result, ce, ADD_CACHE_OK_TO_ADD)) {
- free(ce);
+ discard_cache_entry(ce);
return error(_("could not add %s to temporary index"),
name);
}
@@ -4266,9 +4266,8 @@ static int add_index_file(struct apply_state *state,
struct stat st;
struct cache_entry *ce;
int namelen = strlen(path);
- unsigned ce_size = cache_entry_size(namelen);
- ce = xcalloc(1, ce_size);
+ ce = make_empty_cache_entry(&the_index, namelen);
memcpy(ce->name, path, namelen);
ce->ce_mode = create_ce_mode(mode);
ce->ce_flags = create_ce_flags(0);
@@ -4281,13 +4280,13 @@ static int add_index_file(struct apply_state *state,
if (!skip_prefix(buf, "Subproject commit ", &s) ||
get_oid_hex(s, &ce->oid)) {
- free(ce);
- return error(_("corrupt patch for submodule %s"), path);
+ discard_cache_entry(ce);
+ return error(_("corrupt patch for submodule %s"), path);
}
} else {
if (!state->cached) {
if (lstat(path, &st) < 0) {
- free(ce);
+ discard_cache_entry(ce);
return error_errno(_("unable to stat newly "
"created file '%s'"),
path);
@@ -4295,13 +4294,13 @@ static int add_index_file(struct apply_state *state,
fill_stat_cache_info(ce, &st);
}
if (write_object_file(buf, size, blob_type, &ce->oid) < 0) {
- free(ce);
+ discard_cache_entry(ce);
return error(_("unable to create backing store "
"for newly created file %s"), path);
}
}
if (add_cache_entry(ce, ADD_CACHE_OK_TO_ADD) < 0) {
- free(ce);
+ discard_cache_entry(ce);
return error(_("unable to add cache entry for %s"), path);
}
@@ -4425,27 +4424,26 @@ static int add_conflicted_stages_file(struct apply_state *state,
struct patch *patch)
{
int stage, namelen;
- unsigned ce_size, mode;
+ unsigned mode;
struct cache_entry *ce;
if (!state->update_index)
return 0;
namelen = strlen(patch->new_name);
- ce_size = cache_entry_size(namelen);
mode = patch->new_mode ? patch->new_mode : (S_IFREG | 0644);
remove_file_from_cache(patch->new_name);
for (stage = 1; stage < 4; stage++) {
if (is_null_oid(&patch->threeway_stage[stage - 1]))
continue;
- ce = xcalloc(1, ce_size);
+ ce = make_empty_cache_entry(&the_index, namelen);
memcpy(ce->name, patch->new_name, namelen);
ce->ce_mode = create_ce_mode(mode);
ce->ce_flags = create_ce_flags(stage);
ce->ce_namelen = namelen;
oidcpy(&ce->oid, &patch->threeway_stage[stage - 1]);
if (add_cache_entry(ce, ADD_CACHE_OK_TO_ADD) < 0) {
- free(ce);
+ discard_cache_entry(ce);
return error(_("unable to add cache entry for %s"),
patch->new_name);
}
diff --git a/apply.h b/apply.h
index b80d8ba..01963b5 100644
--- a/apply.h
+++ b/apply.h
@@ -111,14 +111,14 @@ struct apply_state {
int applied_after_fixing_ws;
};
-extern int apply_parse_options(int argc, const char **argv,
- struct apply_state *state,
- int *force_apply, int *options,
- const char * const *apply_usage);
-extern int init_apply_state(struct apply_state *state,
- const char *prefix);
-extern void clear_apply_state(struct apply_state *state);
-extern int check_apply_state(struct apply_state *state, int force_apply);
+int apply_parse_options(int argc, const char **argv,
+ struct apply_state *state,
+ int *force_apply, int *options,
+ const char * const *apply_usage);
+int init_apply_state(struct apply_state *state,
+ const char *prefix);
+void clear_apply_state(struct apply_state *state);
+int check_apply_state(struct apply_state *state, int force_apply);
/*
* Some aspects of the apply behavior are controlled by the following
@@ -127,9 +127,8 @@ extern int check_apply_state(struct apply_state *state, int force_apply);
#define APPLY_OPT_INACCURATE_EOF (1<<0) /* accept inaccurate eof */
#define APPLY_OPT_RECOUNT (1<<1) /* accept inaccurate line count */
-extern int apply_all_patches(struct apply_state *state,
- int argc,
- const char **argv,
- int options);
+int apply_all_patches(struct apply_state *state,
+ int argc, const char **argv,
+ int options);
#endif
diff --git a/archive-tar.c b/archive-tar.c
index 7df8565..0bc50f6 100644
--- a/archive-tar.c
+++ b/archive-tar.c
@@ -122,7 +122,7 @@ static int stream_blocked(const struct object_id *oid)
st = open_istream(oid, &type, &sz, NULL);
if (!st)
- return error("cannot stream blob %s", oid_to_hex(oid));
+ return error(_("cannot stream blob %s"), oid_to_hex(oid));
for (;;) {
readlen = read_istream(st, buf, sizeof(buf));
if (readlen <= 0)
@@ -257,7 +257,7 @@ static int write_tar_entry(struct archiver_args *args,
*header.typeflag = TYPEFLAG_REG;
mode = (mode | ((mode & 0100) ? 0777 : 0666)) & ~tar_umask;
} else {
- return error("unsupported file mode: 0%o (SHA1: %s)",
+ return error(_("unsupported file mode: 0%o (SHA1: %s)"),
mode, oid_to_hex(oid));
}
if (pathlen > sizeof(header.name)) {
@@ -284,7 +284,7 @@ static int write_tar_entry(struct archiver_args *args,
enum object_type type;
buffer = object_file_to_archive(args, path, oid, old_mode, &type, &size);
if (!buffer)
- return error("cannot read %s", oid_to_hex(oid));
+ return error(_("cannot read %s"), oid_to_hex(oid));
} else {
buffer = NULL;
size = 0;
@@ -455,17 +455,17 @@ static int write_tar_filter_archive(const struct archiver *ar,
filter.in = -1;
if (start_command(&filter) < 0)
- die_errno("unable to start '%s' filter", argv[0]);
+ die_errno(_("unable to start '%s' filter"), argv[0]);
close(1);
if (dup2(filter.in, 1) < 0)
- die_errno("unable to redirect descriptor");
+ die_errno(_("unable to redirect descriptor"));
close(filter.in);
r = write_tar_archive(ar, args);
close(1);
if (finish_command(&filter) != 0)
- die("'%s' filter reported error", argv[0]);
+ die(_("'%s' filter reported error"), argv[0]);
strbuf_release(&cmd);
return r;
diff --git a/archive-zip.c b/archive-zip.c
index abc556e..024267f 100644
--- a/archive-zip.c
+++ b/archive-zip.c
@@ -310,11 +310,11 @@ static int write_zip_entry(struct archiver_args *args,
if (is_utf8(path))
flags |= ZIP_UTF8;
else
- warning("Path is not valid UTF-8: %s", path);
+ warning(_("path is not valid UTF-8: %s"), path);
}
if (pathlen > 0xffff) {
- return error("path too long (%d chars, SHA1: %s): %s",
+ return error(_("path too long (%d chars, SHA1: %s): %s"),
(int)pathlen, oid_to_hex(oid), path);
}
@@ -341,7 +341,7 @@ static int write_zip_entry(struct archiver_args *args,
size > big_file_threshold) {
stream = open_istream(oid, &type, &size, NULL);
if (!stream)
- return error("cannot stream blob %s",
+ return error(_("cannot stream blob %s"),
oid_to_hex(oid));
flags |= ZIP_STREAM;
out = buffer = NULL;
@@ -349,7 +349,7 @@ static int write_zip_entry(struct archiver_args *args,
buffer = object_file_to_archive(args, path, oid, mode,
&type, &size);
if (!buffer)
- return error("cannot read %s",
+ return error(_("cannot read %s"),
oid_to_hex(oid));
crc = crc32(crc, buffer, size);
is_binary = entry_is_binary(path_without_prefix,
@@ -358,7 +358,7 @@ static int write_zip_entry(struct archiver_args *args,
}
compressed_size = (method == 0) ? size : 0;
} else {
- return error("unsupported file mode: 0%o (SHA1: %s)", mode,
+ return error(_("unsupported file mode: 0%o (SHA1: %s)"), mode,
oid_to_hex(oid));
}
@@ -467,7 +467,7 @@ static int write_zip_entry(struct archiver_args *args,
zstream.avail_in = readlen;
result = git_deflate(&zstream, 0);
if (result != Z_OK)
- die("deflate error (%d)", result);
+ die(_("deflate error (%d)"), result);
out_len = zstream.next_out - compressed;
if (out_len > 0) {
@@ -602,7 +602,7 @@ static void dos_time(timestamp_t *timestamp, int *dos_date, int *dos_time)
struct tm *t;
if (date_overflows(*timestamp))
- die("timestamp too large for this system: %"PRItime,
+ die(_("timestamp too large for this system: %"PRItime),
*timestamp);
time = (time_t)*timestamp;
t = localtime(&time);
diff --git a/archive.c b/archive.c
index 875dab6..78b0a39 100644
--- a/archive.c
+++ b/archive.c
@@ -380,7 +380,7 @@ static void parse_treeish_arg(const char **argv,
if (get_oid(name, &oid))
die("Not a valid object name");
- commit = lookup_commit_reference_gently(&oid, 1);
+ commit = lookup_commit_reference_gently(the_repository, &oid, 1);
if (commit) {
commit_sha1 = commit->object.oid.hash;
archive_time = commit->date;
diff --git a/attr.h b/attr.h
index 442d464..4634001 100644
--- a/attr.h
+++ b/attr.h
@@ -42,31 +42,31 @@ struct attr_check {
struct attr_stack *stack;
};
-extern struct attr_check *attr_check_alloc(void);
-extern struct attr_check *attr_check_initl(const char *, ...);
-extern struct attr_check *attr_check_dup(const struct attr_check *check);
+struct attr_check *attr_check_alloc(void);
+struct attr_check *attr_check_initl(const char *, ...);
+struct attr_check *attr_check_dup(const struct attr_check *check);
-extern struct attr_check_item *attr_check_append(struct attr_check *check,
- const struct git_attr *attr);
+struct attr_check_item *attr_check_append(struct attr_check *check,
+ const struct git_attr *attr);
-extern void attr_check_reset(struct attr_check *check);
-extern void attr_check_clear(struct attr_check *check);
-extern void attr_check_free(struct attr_check *check);
+void attr_check_reset(struct attr_check *check);
+void attr_check_clear(struct attr_check *check);
+void attr_check_free(struct attr_check *check);
/*
* Return the name of the attribute represented by the argument. The
* return value is a pointer to a null-delimited string that is part
* of the internal data structure; it should not be modified or freed.
*/
-extern const char *git_attr_name(const struct git_attr *);
+const char *git_attr_name(const struct git_attr *);
-extern int git_check_attr(const char *path, struct attr_check *check);
+int git_check_attr(const char *path, struct attr_check *check);
/*
* Retrieve all attributes that apply to the specified path.
* check holds the attributes and their values.
*/
-extern void git_all_attrs(const char *path, struct attr_check *check);
+void git_all_attrs(const char *path, struct attr_check *check);
enum git_attr_direction {
GIT_ATTR_CHECKIN,
@@ -76,6 +76,6 @@ enum git_attr_direction {
void git_attr_set_direction(enum git_attr_direction new_direction,
struct index_state *istate);
-extern void attr_start(void);
+void attr_start(void);
#endif /* ATTR_H */
diff --git a/banned.h b/banned.h
new file mode 100644
index 0000000..28f5937
--- /dev/null
+++ b/banned.h
@@ -0,0 +1,30 @@
+#ifndef BANNED_H
+#define BANNED_H
+
+/*
+ * This header lists functions that have been banned from our code base,
+ * because they're too easy to misuse (and even if used correctly,
+ * complicate audits). Including this header turns them into compile-time
+ * errors.
+ */
+
+#define BANNED(func) sorry_##func##_is_a_banned_function
+
+#undef strcpy
+#define strcpy(x,y) BANNED(strcpy)
+#undef strcat
+#define strcat(x,y) BANNED(strcat)
+#undef strncpy
+#define strncpy(x,y,n) BANNED(strncpy)
+
+#undef sprintf
+#undef vsprintf
+#ifdef HAVE_VARIADIC_MACROS
+#define sprintf(...) BANNED(sprintf)
+#define vsprintf(...) BANNED(vsprintf)
+#else
+#define sprintf(buf,fmt,arg) BANNED(sprintf)
+#define vsprintf(buf,fmt,arg) BANNED(vsprintf)
+#endif
+
+#endif /* BANNED_H */
diff --git a/bisect.c b/bisect.c
index 6de1abd..e1275ba 100644
--- a/bisect.c
+++ b/bisect.c
@@ -724,7 +724,7 @@ static int bisect_checkout(const struct object_id *bisect_rev, int no_checkout)
static struct commit *get_commit_reference(const struct object_id *oid)
{
- struct commit *r = lookup_commit_reference(oid);
+ struct commit *r = lookup_commit_reference(the_repository, oid);
if (!r)
die(_("Not a valid commit name %s"), oid_to_hex(oid));
return r;
diff --git a/blame.c b/blame.c
index 0c4490a..58a7036 100644
--- a/blame.c
+++ b/blame.c
@@ -119,7 +119,7 @@ static struct commit_list **append_parent(struct commit_list **tail, const struc
{
struct commit *parent;
- parent = lookup_commit_reference(oid);
+ parent = lookup_commit_reference(the_repository, oid);
if (!parent)
die("no such commit %s", oid_to_hex(oid));
return &commit_list_insert(parent, tail)->next;
@@ -158,7 +158,7 @@ static void set_commit_buffer_from_strbuf(struct commit *c, struct strbuf *sb)
{
size_t len;
void *buf = strbuf_detach(sb, &len);
- set_commit_buffer(c, buf, len);
+ set_commit_buffer(the_repository, c, buf, len);
}
/*
@@ -176,7 +176,7 @@ static struct commit *fake_working_tree_commit(struct diff_options *opt,
struct strbuf buf = STRBUF_INIT;
const char *ident;
time_t now;
- int size, len;
+ int len;
struct cache_entry *ce;
unsigned mode;
struct strbuf msg = STRBUF_INIT;
@@ -274,8 +274,7 @@ static struct commit *fake_working_tree_commit(struct diff_options *opt,
/* Let's not bother reading from HEAD tree */
mode = S_IFREG | 0644;
}
- size = cache_entry_size(len);
- ce = xcalloc(1, size);
+ ce = make_empty_cache_entry(&the_index, len);
oidcpy(&ce->oid, &origin->blob_oid);
memcpy(ce->name, path, len);
ce->ce_flags = create_ce_flags(0);
@@ -1674,7 +1673,7 @@ static struct commit *find_single_final(struct rev_info *revs,
struct object *obj = revs->pending.objects[i].item;
if (obj->flags & UNINTERESTING)
continue;
- obj = deref_tag(obj, NULL, 0);
+ obj = deref_tag(the_repository, obj, NULL, 0);
if (obj->type != OBJ_COMMIT)
die("Non commit %s?", revs->pending.objects[i].name);
if (found)
@@ -1705,14 +1704,15 @@ static struct commit *dwim_reverse_initial(struct rev_info *revs,
/* Is that sole rev a committish? */
obj = revs->pending.objects[0].item;
- obj = deref_tag(obj, NULL, 0);
+ obj = deref_tag(the_repository, obj, NULL, 0);
if (obj->type != OBJ_COMMIT)
return NULL;
/* Do we have HEAD? */
if (!resolve_ref_unsafe("HEAD", RESOLVE_REF_READING, &head_oid, NULL))
return NULL;
- head_commit = lookup_commit_reference_gently(&head_oid, 1);
+ head_commit = lookup_commit_reference_gently(the_repository,
+ &head_oid, 1);
if (!head_commit)
return NULL;
@@ -1740,7 +1740,7 @@ static struct commit *find_single_initial(struct rev_info *revs,
struct object *obj = revs->pending.objects[i].item;
if (!(obj->flags & UNINTERESTING))
continue;
- obj = deref_tag(obj, NULL, 0);
+ obj = deref_tag(the_repository, obj, NULL, 0);
if (obj->type != OBJ_COMMIT)
die("Non commit %s?", revs->pending.objects[i].name);
if (found)
diff --git a/blame.h b/blame.h
index 2092f95..9b5240f 100644
--- a/blame.h
+++ b/blame.h
@@ -159,18 +159,22 @@ static inline struct blame_origin *blame_origin_incref(struct blame_origin *o)
o->refcnt++;
return o;
}
-extern void blame_origin_decref(struct blame_origin *o);
-
-extern void blame_coalesce(struct blame_scoreboard *sb);
-extern void blame_sort_final(struct blame_scoreboard *sb);
-extern unsigned blame_entry_score(struct blame_scoreboard *sb, struct blame_entry *e);
-extern void assign_blame(struct blame_scoreboard *sb, int opt);
-extern const char *blame_nth_line(struct blame_scoreboard *sb, long lno);
-
-extern void init_scoreboard(struct blame_scoreboard *sb);
-extern void setup_scoreboard(struct blame_scoreboard *sb, const char *path, struct blame_origin **orig);
-
-extern struct blame_entry *blame_entry_prepend(struct blame_entry *head, long start, long end, struct blame_origin *o);
+void blame_origin_decref(struct blame_origin *o);
+
+void blame_coalesce(struct blame_scoreboard *sb);
+void blame_sort_final(struct blame_scoreboard *sb);
+unsigned blame_entry_score(struct blame_scoreboard *sb, struct blame_entry *e);
+void assign_blame(struct blame_scoreboard *sb, int opt);
+const char *blame_nth_line(struct blame_scoreboard *sb, long lno);
+
+void init_scoreboard(struct blame_scoreboard *sb);
+void setup_scoreboard(struct blame_scoreboard *sb,
+ const char *path,
+ struct blame_origin **orig);
+
+struct blame_entry *blame_entry_prepend(struct blame_entry *head,
+ long start, long end,
+ struct blame_origin *o);
extern struct blame_origin *get_blame_suspects(struct commit *commit);
diff --git a/blob.c b/blob.c
index 458dafa..342bdbb 100644
--- a/blob.c
+++ b/blob.c
@@ -5,13 +5,13 @@
const char *blob_type = "blob";
-struct blob *lookup_blob(const struct object_id *oid)
+struct blob *lookup_blob(struct repository *r, const struct object_id *oid)
{
- struct object *obj = lookup_object(oid->hash);
+ struct object *obj = lookup_object(r, oid->hash);
if (!obj)
- return create_object(the_repository, oid->hash,
- alloc_blob_node(the_repository));
- return object_as_type(obj, OBJ_BLOB, 0);
+ return create_object(r, oid->hash,
+ alloc_blob_node(r));
+ return object_as_type(r, obj, OBJ_BLOB, 0);
}
int parse_blob_buffer(struct blob *item, void *buffer, unsigned long size)
diff --git a/blob.h b/blob.h
index 4460616..1664872 100644
--- a/blob.h
+++ b/blob.h
@@ -9,7 +9,7 @@ struct blob {
struct object object;
};
-struct blob *lookup_blob(const struct object_id *oid);
+struct blob *lookup_blob(struct repository *r, const struct object_id *oid);
int parse_blob_buffer(struct blob *item, void *buffer, unsigned long size);
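The new signature above is one instance of a pattern repeated throughout this series: lookup_object(), lookup_blob(), lookup_commit*(), lookup_tree(), lookup_tag(), deref_tag() and parse_object() now take an explicit struct repository. A hedged sketch of an updated caller (the wrapper name is made up and it only compiles against git's internal headers):

#include "cache.h"
#include "repository.h"
#include "blob.h"

/* hypothetical helper showing the new calling convention */
static struct blob *must_lookup_blob(const struct object_id *oid)
{
	struct blob *blob = lookup_blob(the_repository, oid);

	if (!blob)
		die("not a valid blob: %s", oid_to_hex(oid));
	return blob;
}

Call sites that previously relied on the implicit global now spell out the_repository, which is what most of the mechanical hunks in this patch do.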
diff --git a/branch.c b/branch.c
index 6a35dd3..ecd710d 100644
--- a/branch.c
+++ b/branch.c
@@ -302,7 +302,7 @@ void create_branch(const char *name, const char *start_name,
break;
}
- if ((commit = lookup_commit_reference(&oid)) == NULL)
+ if ((commit = lookup_commit_reference(the_repository, &oid)) == NULL)
die(_("Not a valid branch point: '%s'."), start_name);
oidcpy(&oid, &commit->object.oid);
diff --git a/builtin/add.c b/builtin/add.c
index 8a155dd..ba1ff56 100644
--- a/builtin/add.c
+++ b/builtin/add.c
@@ -304,7 +304,8 @@ static struct option builtin_add_options[] = {
OPT_BOOL( 0 , "refresh", &refresh_only, N_("don't add, only refresh the index")),
OPT_BOOL( 0 , "ignore-errors", &ignore_add_errors, N_("just skip files which cannot be added because of errors")),
OPT_BOOL( 0 , "ignore-missing", &ignore_missing, N_("check if - even missing - files are ignored in dry run")),
- OPT_STRING( 0 , "chmod", &chmod_arg, N_("(+/-)x"), N_("override the executable bit of the listed files")),
+ OPT_STRING(0, "chmod", &chmod_arg, "(+|-)x",
+ N_("override the executable bit of the listed files")),
OPT_HIDDEN_BOOL(0, "warn-embedded-repo", &warn_on_embedded_repo,
N_("warn when adding an embedded repository")),
OPT_END(),
diff --git a/builtin/am.c b/builtin/am.c
index 6273ea5..2c19e69 100644
--- a/builtin/am.c
+++ b/builtin/am.c
@@ -32,6 +32,7 @@
#include "apply.h"
#include "string-list.h"
#include "packfile.h"
+#include "repository.h"
/**
* Returns 1 if the file is empty or does not exist, 0 otherwise.
@@ -1400,9 +1401,10 @@ static void write_index_patch(const struct am_state *state)
FILE *fp;
if (!get_oid_tree("HEAD", &head))
- tree = lookup_tree(&head);
+ tree = lookup_tree(the_repository, &head);
else
- tree = lookup_tree(the_hash_algo->empty_tree);
+ tree = lookup_tree(the_repository,
+ the_repository->hash_algo->empty_tree);
fp = xfopen(am_path(state, "patch"), "w");
init_revisions(&rev_info, NULL);
@@ -1631,7 +1633,8 @@ static void do_commit(const struct am_state *state)
if (!get_oid_commit("HEAD", &parent)) {
old_oid = &parent;
- commit_list_insert(lookup_commit(&parent), &parents);
+ commit_list_insert(lookup_commit(the_repository, &parent),
+ &parents);
} else {
old_oid = NULL;
say(state, stderr, _("applying to an empty history"));
@@ -1763,7 +1766,7 @@ static void am_run(struct am_state *state, int resume)
refresh_and_write_cache();
- if (index_has_changes(&sb)) {
+ if (index_has_changes(&the_index, NULL, &sb)) {
write_state_bool(state, "dirtyindex", 1);
die(_("Dirty index: cannot apply patches (dirty: %s)"), sb.buf);
}
@@ -1820,7 +1823,8 @@ static void am_run(struct am_state *state, int resume)
* Applying the patch to an earlier tree and merging
* the result may have produced the same tree as ours.
*/
- if (!apply_status && !index_has_changes(NULL)) {
+ if (!apply_status &&
+ !index_has_changes(&the_index, NULL, NULL)) {
say(state, stdout, _("No changes -- Patch already applied."));
goto next;
}
@@ -1874,7 +1878,7 @@ static void am_resolve(struct am_state *state)
say(state, stdout, _("Applying: %.*s"), linelen(state->msg), state->msg);
- if (!index_has_changes(NULL)) {
+ if (!index_has_changes(&the_index, NULL, NULL)) {
printf_ln(_("No changes - did you forget to use 'git add'?\n"
"If there is nothing left to stage, chances are that something else\n"
"already introduced the same changes; you might want to skip this patch."));
diff --git a/builtin/blame.c b/builtin/blame.c
index 468b17c..97f6eca 100644
--- a/builtin/blame.c
+++ b/builtin/blame.c
@@ -410,7 +410,7 @@ static void parse_color_fields(const char *s)
}
if (next == EXPECT_COLOR)
- die (_("must end with a color"));
+ die(_("must end with a color"));
colorfield[colorfield_nr].hop = TIME_MAX;
string_list_clear(&l, 0);
@@ -1002,13 +1002,13 @@ parse_done:
nth_line_cb, &sb, lno, anchor,
&bottom, &top, sb.path))
usage(blame_usage);
- if (lno < top || ((lno || bottom) && lno < bottom))
+ if ((!lno && (top || bottom)) || lno < bottom)
die(Q_("file %s has only %lu line",
"file %s has only %lu lines",
lno), path, lno);
if (bottom < 1)
bottom = 1;
- if (top < 1)
+ if (top < 1 || lno < top)
top = lno;
bottom--;
range_set_append_unsafe(&ranges, bottom, top);
diff --git a/builtin/branch.c b/builtin/branch.c
index 0192d4a..4fc55c3 100644
--- a/builtin/branch.c
+++ b/builtin/branch.c
@@ -122,7 +122,8 @@ static int branch_merged(int kind, const char *name,
(reference_name = reference_name_to_free =
resolve_refdup(upstream, RESOLVE_REF_READING,
&oid, NULL)) != NULL)
- reference_rev = lookup_commit_reference(&oid);
+ reference_rev = lookup_commit_reference(the_repository,
+ &oid);
}
if (!reference_rev)
reference_rev = head_rev;
@@ -155,7 +156,7 @@ static int check_branch_commit(const char *branchname, const char *refname,
const struct object_id *oid, struct commit *head_rev,
int kinds, int force)
{
- struct commit *rev = lookup_commit_reference(oid);
+ struct commit *rev = lookup_commit_reference(the_repository, oid);
if (!rev) {
error(_("Couldn't look up commit object for '%s'"), refname);
return -1;
@@ -209,7 +210,7 @@ static int delete_branches(int argc, const char **argv, int force, int kinds,
}
if (!force) {
- head_rev = lookup_commit_reference(&head_oid);
+ head_rev = lookup_commit_reference(the_repository, &head_oid);
if (!head_rev)
die(_("Couldn't look up commit object for HEAD"));
}
diff --git a/builtin/checkout.c b/builtin/checkout.c
index 2862765..cb6bb76 100644
--- a/builtin/checkout.c
+++ b/builtin/checkout.c
@@ -23,6 +23,7 @@
#include "resolve-undo.h"
#include "submodule-config.h"
#include "submodule.h"
+#include "advice.h"
static const char * const checkout_usage[] = {
N_("git checkout [<options>] <branch>"),
@@ -78,7 +79,7 @@ static int update_some(const struct object_id *oid, struct strbuf *base,
return READ_TREE_RECURSIVE;
len = base->len + strlen(pathname);
- ce = xcalloc(1, cache_entry_size(len));
+ ce = make_empty_cache_entry(&the_index, len);
oidcpy(&ce->oid, oid);
memcpy(ce->name, base->buf, base->len);
memcpy(ce->name + base->len, pathname, len - base->len);
@@ -97,7 +98,7 @@ static int update_some(const struct object_id *oid, struct strbuf *base,
if (ce->ce_mode == old->ce_mode &&
!oidcmp(&ce->oid, &old->oid)) {
old->ce_flags |= CE_UPDATE;
- free(ce);
+ discard_cache_entry(ce);
return 0;
}
}
@@ -231,11 +232,11 @@ static int checkout_merged(int pos, const struct checkout *state)
if (write_object_file(result_buf.ptr, result_buf.size, blob_type, &oid))
die(_("Unable to add merge result for '%s'"), path);
free(result_buf.ptr);
- ce = make_cache_entry(mode, oid.hash, path, 2, 0);
+ ce = make_transient_cache_entry(mode, &oid, path, 2);
if (!ce)
die(_("make_cache_entry failed for path '%s'"), path);
status = checkout_entry(ce, state, NULL);
- free(ce);
+ discard_cache_entry(ce);
return status;
}
@@ -379,7 +380,7 @@ static int checkout_paths(const struct checkout_opts *opts,
die(_("unable to write new index file"));
read_ref_full("HEAD", 0, &rev, NULL);
- head = lookup_commit_reference_gently(&rev, 1);
+ head = lookup_commit_reference_gently(the_repository, &rev, 1);
errs |= post_checkout_hook(head, head, 0);
return errs;
@@ -830,7 +831,7 @@ static int switch_branches(const struct checkout_opts *opts,
memset(&old_branch_info, 0, sizeof(old_branch_info));
old_branch_info.path = path_to_free = resolve_refdup("HEAD", 0, &rev, &flag);
if (old_branch_info.path)
- old_branch_info.commit = lookup_commit_reference_gently(&rev, 1);
+ old_branch_info.commit = lookup_commit_reference_gently(the_repository, &rev, 1);
if (!(flag & REF_ISSYMREF))
old_branch_info.path = NULL;
@@ -879,7 +880,8 @@ static int parse_branchname_arg(int argc, const char **argv,
int dwim_new_local_branch_ok,
struct branch_info *new_branch_info,
struct checkout_opts *opts,
- struct object_id *rev)
+ struct object_id *rev,
+ int *dwim_remotes_matched)
{
struct tree **source_tree = &opts->source_tree;
const char **new_branch = &opts->new_branch;
@@ -911,8 +913,10 @@ static int parse_branchname_arg(int argc, const char **argv,
* (b) If <something> is _not_ a commit, either "--" is present
* or <something> is not a path, no -t or -b was given, and
* and there is a tracking branch whose name is <something>
- * in one and only one remote, then this is a short-hand to
- * fork local <something> from that remote-tracking branch.
+ * in one and only one remote (or if the branch exists on the
+ * remote named in checkout.defaultRemote), then this is a
+ * short-hand to fork local <something> from that
+ * remote-tracking branch.
*
* (c) Otherwise, if "--" is present, treat it like case (1).
*
@@ -973,7 +977,8 @@ static int parse_branchname_arg(int argc, const char **argv,
recover_with_dwim = 0;
if (recover_with_dwim) {
- const char *remote = unique_tracking_name(arg, rev);
+ const char *remote = unique_tracking_name(arg, rev,
+ dwim_remotes_matched);
if (remote) {
*new_branch = arg;
arg = remote;
@@ -1004,7 +1009,7 @@ static int parse_branchname_arg(int argc, const char **argv,
else
new_branch_info->path = NULL; /* not an existing branch */
- new_branch_info->commit = lookup_commit_reference_gently(rev, 1);
+ new_branch_info->commit = lookup_commit_reference_gently(the_repository, rev, 1);
if (!new_branch_info->commit) {
/* not a commit */
*source_tree = parse_tree_indirect(rev);
@@ -1110,6 +1115,7 @@ int cmd_checkout(int argc, const char **argv, const char *prefix)
struct branch_info new_branch_info;
char *conflict_style = NULL;
int dwim_new_local_branch = 1;
+ int dwim_remotes_matched = 0;
struct option options[] = {
OPT__QUIET(&opts.quiet, N_("suppress progress reporting")),
OPT_STRING('b', NULL, &opts.new_branch, N_("branch"),
@@ -1192,12 +1198,12 @@ int cmd_checkout(int argc, const char **argv, const char *prefix)
if (opts.track != BRANCH_TRACK_UNSPECIFIED && !opts.new_branch) {
const char *argv0 = argv[0];
if (!argc || !strcmp(argv0, "--"))
- die (_("--track needs a branch name"));
+ die(_("--track needs a branch name"));
skip_prefix(argv0, "refs/", &argv0);
skip_prefix(argv0, "remotes/", &argv0);
argv0 = strchr(argv0, '/');
if (!argv0 || !argv0[1])
- die (_("Missing branch name; try -b"));
+ die(_("missing branch name; try -b"));
opts.new_branch = argv0 + 1;
}
@@ -1222,7 +1228,8 @@ int cmd_checkout(int argc, const char **argv, const char *prefix)
opts.track == BRANCH_TRACK_UNSPECIFIED &&
!opts.new_branch;
int n = parse_branchname_arg(argc, argv, dwim_ok,
- &new_branch_info, &opts, &rev);
+ &new_branch_info, &opts, &rev,
+ &dwim_remotes_matched);
argv += n;
argc -= n;
}
@@ -1264,8 +1271,26 @@ int cmd_checkout(int argc, const char **argv, const char *prefix)
}
UNLEAK(opts);
- if (opts.patch_mode || opts.pathspec.nr)
- return checkout_paths(&opts, new_branch_info.name);
- else
+ if (opts.patch_mode || opts.pathspec.nr) {
+ int ret = checkout_paths(&opts, new_branch_info.name);
+ if (ret && dwim_remotes_matched > 1 &&
+ advice_checkout_ambiguous_remote_branch_name)
+ advise(_("'%s' matched more than one remote tracking branch.\n"
+ "We found %d remotes with a reference that matched. So we fell back\n"
+ "on trying to resolve the argument as a path, but failed there too!\n"
+ "\n"
+ "If you meant to check out a remote tracking branch on, e.g. 'origin',\n"
+ "you can do so by fully qualifying the name with the --track option:\n"
+ "\n"
+ " git checkout --track origin/<name>\n"
+ "\n"
+ "If you'd like to always have checkouts of an ambiguous <name> prefer\n"
+ "one remote, e.g. the 'origin' remote, consider setting\n"
+ "checkout.defaultRemote=origin in your config."),
+ argv[0],
+ dwim_remotes_matched);
+ return ret;
+ } else {
return checkout_branch(&opts, &new_branch_info);
+ }
}
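A rough sketch of how the extended unique_tracking_name() is meant to be consumed (the wrapper and the advice wording are made up; the three-argument signature and the advice flag come from this series):

#include "cache.h"
#include "checkout.h"
#include "advice.h"

/* hypothetical: DWIM a branch name against remote-tracking refs */
static const char *dwim_remote_branch(const char *name, struct object_id *oid)
{
	int matched = 0;
	const char *ref = unique_tracking_name(name, oid, &matched);

	if (!ref && matched > 1 && advice_checkout_ambiguous_remote_branch_name)
		advise(_("'%s' matches branches on %d remotes; consider setting "
			 "checkout.defaultRemote"), name, matched);
	return ref;
}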
diff --git a/builtin/clone.c b/builtin/clone.c
index 5c439f1..fd2c3ef 100644
--- a/builtin/clone.c
+++ b/builtin/clone.c
@@ -696,7 +696,8 @@ static void update_head(const struct ref *our, const struct ref *remote,
install_branch_config(0, head, option_origin, our->name);
}
} else if (our) {
- struct commit *c = lookup_commit_reference(&our->old_oid);
+ struct commit *c = lookup_commit_reference(the_repository,
+ &our->old_oid);
/* --branch specifies a non-branch (i.e. tags), detach HEAD */
update_ref(msg, "HEAD", &c->object.oid, NULL, REF_NO_DEREF,
UPDATE_REFS_DIE_ON_ERR);
@@ -896,7 +897,8 @@ int cmd_clone(int argc, const char **argv, const char *prefix)
int err = 0, complete_refs_before_fetch = 1;
int submodule_progress;
- struct refspec_item refspec;
+ struct refspec rs = REFSPEC_INIT_FETCH;
+ struct argv_array ref_prefixes = ARGV_ARRAY_INIT;
fetch_if_missing = 0;
@@ -1078,7 +1080,7 @@ int cmd_clone(int argc, const char **argv, const char *prefix)
if (option_required_reference.nr || option_optional_reference.nr)
setup_reference();
- refspec_item_init_or_die(&refspec, value.buf, REFSPEC_FETCH);
+ refspec_append(&rs, value.buf);
strbuf_reset(&value);
@@ -1135,10 +1137,18 @@ int cmd_clone(int argc, const char **argv, const char *prefix)
if (transport->smart_options && !deepen && !filter_options.choice)
transport->smart_options->check_self_contained_and_connected = 1;
- refs = transport_get_remote_refs(transport, NULL);
+
+ argv_array_push(&ref_prefixes, "HEAD");
+ refspec_ref_prefixes(&rs, &ref_prefixes);
+ if (option_branch)
+ expand_ref_prefix(&ref_prefixes, option_branch);
+ if (!option_no_tags)
+ argv_array_push(&ref_prefixes, "refs/tags/");
+
+ refs = transport_get_remote_refs(transport, &ref_prefixes);
if (refs) {
- mapped_refs = wanted_peer_refs(refs, &refspec);
+ mapped_refs = wanted_peer_refs(refs, &rs.items[0]);
/*
* transport_get_remote_refs() may return refs with null sha-1
* in mapped_refs (see struct transport->get_refs_list
@@ -1156,7 +1166,7 @@ int cmd_clone(int argc, const char **argv, const char *prefix)
}
if (!is_local && !complete_refs_before_fetch)
- transport_fetch_refs(transport, mapped_refs, NULL);
+ transport_fetch_refs(transport, mapped_refs);
remote_head = find_ref_by_name(refs, "HEAD");
remote_head_points_at =
@@ -1198,7 +1208,7 @@ int cmd_clone(int argc, const char **argv, const char *prefix)
if (is_local)
clone_local(path, git_dir);
else if (refs && complete_refs_before_fetch)
- transport_fetch_refs(transport, mapped_refs, NULL);
+ transport_fetch_refs(transport, mapped_refs);
update_remote_refs(refs, mapped_refs, remote_head_points_at,
branch_top.buf, reflog_msg.buf, transport,
@@ -1232,6 +1242,7 @@ int cmd_clone(int argc, const char **argv, const char *prefix)
strbuf_release(&value);
junk_mode = JUNK_LEAVE_ALL;
- refspec_item_clear(&refspec);
+ refspec_clear(&rs);
+ argv_array_clear(&ref_prefixes);
return err;
}
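The clone changes above follow the same shape as the fetch code later in this patch: build a full struct refspec, derive the ref prefixes to advertise, and hand them to transport_get_remote_refs(). A condensed sketch under those assumptions (the function name and example refspec are made up):

#include "refspec.h"
#include "argv-array.h"

/* hypothetical: compute the prefixes advertised for one fetch refspec */
static void show_prefixes_for(const char *spec)
{
	struct refspec rs = REFSPEC_INIT_FETCH;
	struct argv_array prefixes = ARGV_ARRAY_INIT;

	refspec_append(&rs, spec);		/* e.g. "+refs/heads/*:refs/remotes/origin/*" */
	argv_array_push(&prefixes, "HEAD");	/* clone always wants HEAD */
	refspec_ref_prefixes(&rs, &prefixes);

	/* ... pass &prefixes to transport_get_remote_refs() ... */

	refspec_clear(&rs);
	argv_array_clear(&prefixes);
}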
diff --git a/builtin/commit-graph.c b/builtin/commit-graph.c
index 37420ae..0bf0c48 100644
--- a/builtin/commit-graph.c
+++ b/builtin/commit-graph.c
@@ -3,12 +3,19 @@
#include "dir.h"
#include "lockfile.h"
#include "parse-options.h"
+#include "repository.h"
#include "commit-graph.h"
static char const * const builtin_commit_graph_usage[] = {
N_("git commit-graph [--object-dir <objdir>]"),
N_("git commit-graph read [--object-dir <objdir>]"),
- N_("git commit-graph write [--object-dir <objdir>] [--append] [--stdin-packs|--stdin-commits]"),
+ N_("git commit-graph verify [--object-dir <objdir>]"),
+ N_("git commit-graph write [--object-dir <objdir>] [--append] [--reachable|--stdin-packs|--stdin-commits]"),
+ NULL
+};
+
+static const char * const builtin_commit_graph_verify_usage[] = {
+ N_("git commit-graph verify [--object-dir <objdir>]"),
NULL
};
@@ -18,17 +25,48 @@ static const char * const builtin_commit_graph_read_usage[] = {
};
static const char * const builtin_commit_graph_write_usage[] = {
- N_("git commit-graph write [--object-dir <objdir>] [--append] [--stdin-packs|--stdin-commits]"),
+ N_("git commit-graph write [--object-dir <objdir>] [--append] [--reachable|--stdin-packs|--stdin-commits]"),
NULL
};
static struct opts_commit_graph {
const char *obj_dir;
+ int reachable;
int stdin_packs;
int stdin_commits;
int append;
} opts;
+
+static int graph_verify(int argc, const char **argv)
+{
+ struct commit_graph *graph = NULL;
+ char *graph_name;
+
+ static struct option builtin_commit_graph_verify_options[] = {
+ OPT_STRING(0, "object-dir", &opts.obj_dir,
+ N_("dir"),
+ N_("The object directory to store the graph")),
+ OPT_END(),
+ };
+
+ argc = parse_options(argc, argv, NULL,
+ builtin_commit_graph_verify_options,
+ builtin_commit_graph_verify_usage, 0);
+
+ if (!opts.obj_dir)
+ opts.obj_dir = get_object_directory();
+
+ graph_name = get_commit_graph_filename(opts.obj_dir);
+ graph = load_commit_graph_one(graph_name);
+ FREE_AND_NULL(graph_name);
+
+ if (!graph)
+ return 0;
+
+ return verify_commit_graph(the_repository, graph);
+}
+
static int graph_read(int argc, const char **argv)
{
struct commit_graph *graph = NULL;
@@ -51,8 +89,11 @@ static int graph_read(int argc, const char **argv)
graph_name = get_commit_graph_filename(opts.obj_dir);
graph = load_commit_graph_one(graph_name);
- if (!graph)
+ if (!graph) {
+ UNLEAK(graph_name);
die("graph file %s does not exist", graph_name);
+ }
+
FREE_AND_NULL(graph_name);
printf("header: %08x %d %d %d %d\n",
@@ -74,23 +115,23 @@ static int graph_read(int argc, const char **argv)
printf(" large_edges");
printf("\n");
+ free_commit_graph(graph);
+
return 0;
}
static int graph_write(int argc, const char **argv)
{
- const char **pack_indexes = NULL;
- int packs_nr = 0;
- const char **commit_hex = NULL;
- int commits_nr = 0;
- const char **lines = NULL;
- int lines_nr = 0;
- int lines_alloc = 0;
+ struct string_list *pack_indexes = NULL;
+ struct string_list *commit_hex = NULL;
+ struct string_list lines;
static struct option builtin_commit_graph_write_options[] = {
OPT_STRING(0, "object-dir", &opts.obj_dir,
N_("dir"),
N_("The object directory to store the graph")),
+ OPT_BOOL(0, "reachable", &opts.reachable,
+ N_("start walk at all refs")),
OPT_BOOL(0, "stdin-packs", &opts.stdin_packs,
N_("scan pack-indexes listed by stdin for commits")),
OPT_BOOL(0, "stdin-commits", &opts.stdin_commits,
@@ -104,39 +145,35 @@ static int graph_write(int argc, const char **argv)
builtin_commit_graph_write_options,
builtin_commit_graph_write_usage, 0);
- if (opts.stdin_packs && opts.stdin_commits)
- die(_("cannot use both --stdin-commits and --stdin-packs"));
+ if (opts.reachable + opts.stdin_packs + opts.stdin_commits > 1)
+ die(_("use at most one of --reachable, --stdin-commits, or --stdin-packs"));
if (!opts.obj_dir)
opts.obj_dir = get_object_directory();
+ if (opts.reachable) {
+ write_commit_graph_reachable(opts.obj_dir, opts.append);
+ return 0;
+ }
+
+ string_list_init(&lines, 0);
if (opts.stdin_packs || opts.stdin_commits) {
struct strbuf buf = STRBUF_INIT;
- lines_nr = 0;
- lines_alloc = 128;
- ALLOC_ARRAY(lines, lines_alloc);
-
- while (strbuf_getline(&buf, stdin) != EOF) {
- ALLOC_GROW(lines, lines_nr + 1, lines_alloc);
- lines[lines_nr++] = strbuf_detach(&buf, NULL);
- }
-
- if (opts.stdin_packs) {
- pack_indexes = lines;
- packs_nr = lines_nr;
- }
- if (opts.stdin_commits) {
- commit_hex = lines;
- commits_nr = lines_nr;
- }
+
+ while (strbuf_getline(&buf, stdin) != EOF)
+ string_list_append(&lines, strbuf_detach(&buf, NULL));
+
+ if (opts.stdin_packs)
+ pack_indexes = &lines;
+ if (opts.stdin_commits)
+ commit_hex = &lines;
}
write_commit_graph(opts.obj_dir,
pack_indexes,
- packs_nr,
commit_hex,
- commits_nr,
opts.append);
+ string_list_clear(&lines, 0);
return 0;
}
@@ -162,6 +199,8 @@ int cmd_commit_graph(int argc, const char **argv, const char *prefix)
if (argc > 0) {
if (!strcmp(argv[0], "read"))
return graph_read(argc, argv);
+ if (!strcmp(argv[0], "verify"))
+ return graph_verify(argc, argv);
if (!strcmp(argv[0], "write"))
return graph_write(argc, argv);
}
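With the rework above, write_commit_graph() takes string_lists instead of bare arrays plus counts. A small sketch of the new call shape (the pack-index names are made up):

#include "string-list.h"
#include "commit-graph.h"

/* hypothetical: build a graph from two pack-indexes */
static void write_graph_for_packs(const char *obj_dir)
{
	struct string_list packs = STRING_LIST_INIT_DUP;

	string_list_append(&packs, "pack-0123abcd.idx");
	string_list_append(&packs, "pack-4567ef89.idx");
	write_commit_graph(obj_dir, &packs, NULL /* commit_hex */, 0 /* append */);
	string_list_clear(&packs, 0);
}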
diff --git a/builtin/commit-tree.c b/builtin/commit-tree.c
index 9fbd352..9ec36a8 100644
--- a/builtin/commit-tree.c
+++ b/builtin/commit-tree.c
@@ -6,6 +6,7 @@
#include "cache.h"
#include "config.h"
#include "object-store.h"
+#include "repository.h"
#include "commit.h"
#include "tree.h"
#include "builtin.h"
@@ -60,7 +61,8 @@ int cmd_commit_tree(int argc, const char **argv, const char *prefix)
if (get_oid_commit(argv[i], &oid))
die("Not a valid object name %s", argv[i]);
assert_oid_type(&oid, OBJ_COMMIT);
- new_parent(lookup_commit(&oid), &parents);
+ new_parent(lookup_commit(the_repository, &oid),
+ &parents);
continue;
}
diff --git a/builtin/commit.c b/builtin/commit.c
index 158e3f8..213fca2 100644
--- a/builtin/commit.c
+++ b/builtin/commit.c
@@ -1647,9 +1647,9 @@ int cmd_commit(int argc, const char **argv, const char *prefix)
unlink(git_path_squash_msg(the_repository));
if (commit_index_files())
- die (_("Repository has been updated, but unable to write\n"
- "new_index file. Check that disk is not full and quota is\n"
- "not exceeded, and then \"git reset HEAD\" to recover."));
+ die(_("repository has been updated, but unable to write\n"
+ "new_index file. Check that disk is not full and quota is\n"
+ "not exceeded, and then \"git reset HEAD\" to recover."));
rerere(0);
run_command_v_opt(argv_gc_auto, RUN_GIT_CMD);
diff --git a/builtin/config.c b/builtin/config.c
index 2c93a28..97b58c4 100644
--- a/builtin/config.c
+++ b/builtin/config.c
@@ -110,7 +110,7 @@ static int option_parse_type(const struct option *opt, const char *arg,
* --int' and '--type=bool
* --type=int'.
*/
- error("only one type at a time.");
+ error(_("only one type at a time"));
usage_builtin_config();
}
*to_type = new_type;
@@ -164,7 +164,11 @@ static NORETURN void usage_builtin_config(void)
static void check_argc(int argc, int min, int max) {
if (argc >= min && argc <= max)
return;
- error("wrong number of arguments");
+ if (min == max)
+ error(_("wrong number of arguments, should be %d"), min);
+ else
+ error(_("wrong number of arguments, should be from %d to %d"),
+ min, max);
usage_builtin_config();
}
@@ -297,7 +301,7 @@ static int get_value(const char *key_, const char *regex_)
key_regexp = (regex_t*)xmalloc(sizeof(regex_t));
if (regcomp(key_regexp, key, REG_EXTENDED)) {
- error("invalid key pattern: %s", key_);
+ error(_("invalid key pattern: %s"), key_);
FREE_AND_NULL(key_regexp);
ret = CONFIG_INVALID_PATTERN;
goto free_strings;
@@ -317,7 +321,7 @@ static int get_value(const char *key_, const char *regex_)
regexp = (regex_t*)xmalloc(sizeof(regex_t));
if (regcomp(regexp, regex_, REG_EXTENDED)) {
- error("invalid pattern: %s", regex_);
+ error(_("invalid pattern: %s"), regex_);
FREE_AND_NULL(regexp);
ret = CONFIG_INVALID_PATTERN;
goto free_strings;
@@ -390,7 +394,7 @@ static char *normalize_value(const char *key, const char *value)
if (type == TYPE_COLOR) {
char v[COLOR_MAXLEN];
if (git_config_color(v, key, value))
- die("cannot parse color '%s'", value);
+ die(_("cannot parse color '%s'"), value);
/*
* The contents of `v` now contain an ANSI escape
@@ -485,13 +489,13 @@ static int get_colorbool(const char *var, int print)
static void check_write(void)
{
if (!given_config_source.file && !startup_info->have_repository)
- die("not in a git directory");
+ die(_("not in a git directory"));
if (given_config_source.use_stdin)
- die("writing to stdin is not supported");
+ die(_("writing to stdin is not supported"));
if (given_config_source.blob)
- die("writing config blobs is not supported");
+ die(_("writing config blobs is not supported"));
}
struct urlmatch_current_candidate_value {
@@ -599,7 +603,7 @@ int cmd_config(int argc, const char **argv, const char *prefix)
if (use_global_config + use_system_config + use_local_config +
!!given_config_source.file + !!given_config_source.blob > 1) {
- error("only one config file at a time.");
+ error(_("only one config file at a time"));
usage_builtin_config();
}
@@ -626,7 +630,7 @@ int cmd_config(int argc, const char **argv, const char *prefix)
* location; error out even if XDG_CONFIG_HOME
* is set and points at a sane location.
*/
- die("$HOME not set");
+ die(_("$HOME not set"));
if (access_or_warn(user_config, R_OK, 0) &&
xdg_config && !access_or_warn(xdg_config, R_OK, 0)) {
@@ -663,12 +667,12 @@ int cmd_config(int argc, const char **argv, const char *prefix)
}
if ((actions & (ACTION_GET_COLOR|ACTION_GET_COLORBOOL)) && type) {
- error("--get-color and variable type are incoherent");
+ error(_("--get-color and variable type are incoherent"));
usage_builtin_config();
}
if (HAS_MULTI_BITS(actions)) {
- error("only one action at a time.");
+ error(_("only one action at a time"));
usage_builtin_config();
}
if (actions == 0)
@@ -681,19 +685,19 @@ int cmd_config(int argc, const char **argv, const char *prefix)
}
if (omit_values &&
!(actions == ACTION_LIST || actions == ACTION_GET_REGEXP)) {
- error("--name-only is only applicable to --list or --get-regexp");
+ error(_("--name-only is only applicable to --list or --get-regexp"));
usage_builtin_config();
}
if (show_origin && !(actions &
(ACTION_GET|ACTION_GET_ALL|ACTION_GET_REGEXP|ACTION_LIST))) {
- error("--show-origin is only applicable to --get, --get-all, "
- "--get-regexp, and --list.");
+ error(_("--show-origin is only applicable to --get, --get-all, "
+ "--get-regexp, and --list"));
usage_builtin_config();
}
if (default_value && !(actions & ACTION_GET)) {
- error("--default is only applicable to --get");
+ error(_("--default is only applicable to --get"));
usage_builtin_config();
}
@@ -706,10 +710,10 @@ int cmd_config(int argc, const char **argv, const char *prefix)
&given_config_source,
&config_options) < 0) {
if (given_config_source.file)
- die_errno("unable to read config file '%s'",
+ die_errno(_("unable to read config file '%s'"),
given_config_source.file);
else
- die("error processing config file(s)");
+ die(_("error processing config file(s)"));
}
}
else if (actions == ACTION_EDIT) {
@@ -717,11 +721,11 @@ int cmd_config(int argc, const char **argv, const char *prefix)
check_argc(argc, 0, 0);
if (!given_config_source.file && nongit)
- die("not in a git directory");
+ die(_("not in a git directory"));
if (given_config_source.use_stdin)
- die("editing stdin is not supported");
+ die(_("editing stdin is not supported"));
if (given_config_source.blob)
- die("editing blobs is not supported");
+ die(_("editing blobs is not supported"));
git_config(git_default_config, NULL);
config_file = given_config_source.file ?
xstrdup(given_config_source.file) :
@@ -822,7 +826,7 @@ int cmd_config(int argc, const char **argv, const char *prefix)
if (ret < 0)
return ret;
if (ret == 0)
- die("No such section!");
+ die(_("no such section: %s"), argv[0]);
}
else if (actions == ACTION_REMOVE_SECTION) {
int ret;
@@ -833,7 +837,7 @@ int cmd_config(int argc, const char **argv, const char *prefix)
if (ret < 0)
return ret;
if (ret == 0)
- die("No such section!");
+ die(_("no such section: %s"), argv[0]);
}
else if (actions == ACTION_GET_COLOR) {
check_argc(argc, 1, 2);
diff --git a/builtin/describe.c b/builtin/describe.c
index 1e87f68..41606c8 100644
--- a/builtin/describe.c
+++ b/builtin/describe.c
@@ -93,13 +93,13 @@ static int replace_name(struct commit_name *e,
struct tag *t;
if (!e->tag) {
- t = lookup_tag(&e->oid);
+ t = lookup_tag(the_repository, &e->oid);
if (!t || parse_tag(t))
return 1;
e->tag = t;
}
- t = lookup_tag(oid);
+ t = lookup_tag(the_repository, oid);
if (!t || parse_tag(t))
return 0;
*tag = t;
@@ -267,7 +267,7 @@ static unsigned long finish_depth_computation(
static void append_name(struct commit_name *n, struct strbuf *dst)
{
if (n->prio == 2 && !n->tag) {
- n->tag = lookup_tag(&n->oid);
+ n->tag = lookup_tag(the_repository, &n->oid);
if (!n->tag || parse_tag(n->tag))
die(_("annotated tag %s not available"), n->path);
}
@@ -303,7 +303,7 @@ static void describe_commit(struct object_id *oid, struct strbuf *dst)
unsigned long seen_commits = 0;
unsigned int unannotated_cnt = 0;
- cmit = lookup_commit_reference(oid);
+ cmit = lookup_commit_reference(the_repository, oid);
n = find_commit_name(&cmit->object.oid);
if (n && (tags || all || n->prio == 2)) {
@@ -331,7 +331,8 @@ static void describe_commit(struct object_id *oid, struct strbuf *dst)
init_commit_names(&commit_names);
n = hashmap_iter_first(&names, &iter);
for (; n; n = hashmap_iter_next(&iter)) {
- c = lookup_commit_reference_gently(&n->peeled, 1);
+ c = lookup_commit_reference_gently(the_repository,
+ &n->peeled, 1);
if (c)
*commit_names_at(&commit_names, c) = n;
}
@@ -509,7 +510,7 @@ static void describe(const char *arg, int last_one)
if (get_oid(arg, &oid))
die(_("Not a valid object name %s"), arg);
- cmit = lookup_commit_reference_gently(&oid, 1);
+ cmit = lookup_commit_reference_gently(the_repository, &oid, 1);
if (cmit)
describe_commit(&oid, &sb);
diff --git a/builtin/diff-tree.c b/builtin/diff-tree.c
index 4736151..91ba670 100644
--- a/builtin/diff-tree.c
+++ b/builtin/diff-tree.c
@@ -5,12 +5,13 @@
#include "log-tree.h"
#include "builtin.h"
#include "submodule.h"
+#include "repository.h"
static struct rev_info log_tree_opt;
static int diff_tree_commit_oid(const struct object_id *oid)
{
- struct commit *commit = lookup_commit_reference(oid);
+ struct commit *commit = lookup_commit_reference(the_repository, oid);
if (!commit)
return -1;
return log_tree_commit(&log_tree_opt, commit);
@@ -24,7 +25,7 @@ static int stdin_diff_commit(struct commit *commit, const char *p)
/* Graft the fake parents locally to the commit */
while (isspace(*p++) && !parse_oid_hex(p, &oid, &p)) {
- struct commit *parent = lookup_commit(&oid);
+ struct commit *parent = lookup_commit(the_repository, &oid);
if (!pptr) {
/* Free the real parent list */
free_commit_list(commit->parents);
@@ -45,7 +46,7 @@ static int stdin_diff_trees(struct tree *tree1, const char *p)
struct tree *tree2;
if (!isspace(*p++) || parse_oid_hex(p, &oid, &p) || *p)
return error("Need exactly two trees, separated by a space");
- tree2 = lookup_tree(&oid);
+ tree2 = lookup_tree(the_repository, &oid);
if (!tree2 || parse_tree(tree2))
return -1;
printf("%s %s\n", oid_to_hex(&tree1->object.oid),
@@ -68,7 +69,7 @@ static int diff_tree_stdin(char *line)
line[len-1] = 0;
if (parse_oid_hex(line, &oid, &p))
return -1;
- obj = parse_object(&oid);
+ obj = parse_object(the_repository, &oid);
if (!obj)
return -1;
if (obj->type == OBJ_COMMIT)
diff --git a/builtin/diff.c b/builtin/diff.c
index b709b6e..361a3c3 100644
--- a/builtin/diff.c
+++ b/builtin/diff.c
@@ -386,7 +386,8 @@ int cmd_diff(int argc, const char **argv, const char *prefix)
add_head_to_pending(&rev);
if (!rev.pending.nr) {
struct tree *tree;
- tree = lookup_tree(the_hash_algo->empty_tree);
+ tree = lookup_tree(the_repository,
+ the_repository->hash_algo->empty_tree);
add_pending_object(&rev, &tree->object, "HEAD");
}
break;
@@ -400,8 +401,8 @@ int cmd_diff(int argc, const char **argv, const char *prefix)
const char *name = entry->name;
int flags = (obj->flags & UNINTERESTING);
if (!obj->parsed)
- obj = parse_object(&obj->oid);
- obj = deref_tag(obj, NULL, 0);
+ obj = parse_object(the_repository, &obj->oid);
+ obj = deref_tag(the_repository, obj, NULL, 0);
if (!obj)
die(_("invalid object '%s' given."), name);
if (obj->type == OBJ_COMMIT)
diff --git a/builtin/difftool.c b/builtin/difftool.c
index 51f6c9c..cdd585c 100644
--- a/builtin/difftool.c
+++ b/builtin/difftool.c
@@ -322,10 +322,10 @@ static int checkout_path(unsigned mode, struct object_id *oid,
struct cache_entry *ce;
int ret;
- ce = make_cache_entry(mode, oid->hash, path, 0, 0);
+ ce = make_transient_cache_entry(mode, oid, path, 0);
ret = checkout_entry(ce, state, NULL);
- free(ce);
+ discard_cache_entry(ce);
return ret;
}
@@ -489,7 +489,7 @@ static int run_dir_diff(const char *extcmd, int symlinks, const char *prefix,
* index.
*/
struct cache_entry *ce2 =
- make_cache_entry(rmode, roid.hash,
+ make_cache_entry(&wtindex, rmode, &roid,
dst_path, 0, 0);
add_index_entry(&wtindex, ce2,
@@ -703,7 +703,7 @@ int cmd_difftool(int argc, const char **argv, const char *prefix)
1, PARSE_OPT_NONEG | PARSE_OPT_HIDDEN),
OPT_BOOL(0, "symlinks", &symlinks,
N_("use symlinks in dir-diff mode")),
- OPT_STRING('t', "tool", &difftool_cmd, N_("<tool>"),
+ OPT_STRING('t', "tool", &difftool_cmd, N_("tool"),
N_("use the specified diff tool")),
OPT_BOOL(0, "tool-help", &tool_help,
N_("print a list of diff tools that may be used with "
@@ -711,7 +711,7 @@ int cmd_difftool(int argc, const char **argv, const char *prefix)
OPT_BOOL(0, "trust-exit-code", &trust_exit_code,
N_("make 'git-difftool' exit when an invoked diff "
"tool returns a non - zero exit code")),
- OPT_STRING('x', "extcmd", &extcmd, N_("<command>"),
+ OPT_STRING('x', "extcmd", &extcmd, N_("command"),
N_("specify a custom command for viewing diffs")),
OPT_END()
};
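The two hunks above (like the earlier ones in builtin/checkout.c) move from xcalloc()/free() on struct cache_entry to the new allocation API: short-lived entries are created with make_transient_cache_entry() and released with discard_cache_entry(), while entries destined for an index come from make_cache_entry()/make_empty_cache_entry() bound to that index. A sketch of the transient lifecycle (the wrapper name is made up):

#include "cache.h"

/* hypothetical: check out one blob without touching any index */
static int checkout_one_blob(unsigned mode, const struct object_id *oid,
			     const char *path, const struct checkout *state)
{
	struct cache_entry *ce = make_transient_cache_entry(mode, oid, path, 0);
	int ret = checkout_entry(ce, state, NULL);

	discard_cache_entry(ce);	/* pairs with make_transient_cache_entry() */
	return ret;
}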
diff --git a/builtin/fast-export.c b/builtin/fast-export.c
index 9ee6a4d..9bd8a14 100644
--- a/builtin/fast-export.c
+++ b/builtin/fast-export.c
@@ -230,21 +230,22 @@ static void export_blob(const struct object_id *oid)
if (is_null_oid(oid))
return;
- object = lookup_object(oid->hash);
+ object = lookup_object(the_repository, oid->hash);
if (object && object->flags & SHOWN)
return;
if (anonymize) {
buf = anonymize_blob(&size);
- object = (struct object *)lookup_blob(oid);
+ object = (struct object *)lookup_blob(the_repository, oid);
eaten = 0;
} else {
buf = read_object_file(oid, &type, &size);
if (!buf)
- die ("Could not read blob %s", oid_to_hex(oid));
+ die("could not read blob %s", oid_to_hex(oid));
if (check_object_signature(oid, buf, size, type_name(type)) < 0)
die("sha1 mismatch in blob %s", oid_to_hex(oid));
- object = parse_object_buffer(oid, type, size, buf, &eaten);
+ object = parse_object_buffer(the_repository, oid, type,
+ size, buf, &eaten);
}
if (!object)
@@ -254,7 +255,7 @@ static void export_blob(const struct object_id *oid)
printf("blob\nmark :%"PRIu32"\ndata %lu\n", last_idnum, size);
if (size && fwrite(buf, size, 1, stdout) != 1)
- die_errno ("Could not write blob '%s'", oid_to_hex(oid));
+ die_errno("could not write blob '%s'", oid_to_hex(oid));
printf("\n");
show_progress();
@@ -402,7 +403,8 @@ static void show_filemodify(struct diff_queue_struct *q,
anonymize_sha1(&spec->oid) :
spec->oid.hash));
else {
- struct object *object = lookup_object(spec->oid.hash);
+ struct object *object = lookup_object(the_repository,
+ spec->oid.hash);
printf("M %06o :%d ", spec->mode,
get_object_mark(object));
}
@@ -561,14 +563,14 @@ static void handle_commit(struct commit *commit, struct rev_info *rev,
commit_buffer = get_commit_buffer(commit, NULL);
author = strstr(commit_buffer, "\nauthor ");
if (!author)
- die ("Could not find author in commit %s",
- oid_to_hex(&commit->object.oid));
+ die("could not find author in commit %s",
+ oid_to_hex(&commit->object.oid));
author++;
author_end = strchrnul(author, '\n');
committer = strstr(author_end, "\ncommitter ");
if (!committer)
- die ("Could not find committer in commit %s",
- oid_to_hex(&commit->object.oid));
+ die("could not find committer in commit %s",
+ oid_to_hex(&commit->object.oid));
committer++;
committer_end = strchrnul(committer, '\n');
message = strstr(committer_end, "\n\n");
@@ -689,7 +691,7 @@ static void handle_tag(const char *name, struct tag *tag)
buf = read_object_file(&tag->object.oid, &type, &size);
if (!buf)
- die ("Could not read tag %s", oid_to_hex(&tag->object.oid));
+ die("could not read tag %s", oid_to_hex(&tag->object.oid));
message = memmem(buf, size, "\n\n", 2);
if (message) {
message += 2;
@@ -726,18 +728,18 @@ static void handle_tag(const char *name, struct tag *tag)
if (signature)
switch(signed_tag_mode) {
case ABORT:
- die ("Encountered signed tag %s; use "
- "--signed-tags=<mode> to handle it.",
- oid_to_hex(&tag->object.oid));
+ die("encountered signed tag %s; use "
+ "--signed-tags=<mode> to handle it",
+ oid_to_hex(&tag->object.oid));
case WARN:
- warning ("Exporting signed tag %s",
- oid_to_hex(&tag->object.oid));
+ warning("exporting signed tag %s",
+ oid_to_hex(&tag->object.oid));
/* fallthru */
case VERBATIM:
break;
case WARN_STRIP:
- warning ("Stripping signature from tag %s",
- oid_to_hex(&tag->object.oid));
+ warning("stripping signature from tag %s",
+ oid_to_hex(&tag->object.oid));
/* fallthru */
case STRIP:
message_size = signature + 1 - message;
@@ -751,18 +753,18 @@ static void handle_tag(const char *name, struct tag *tag)
if (!tagged_mark) {
switch(tag_of_filtered_mode) {
case ABORT:
- die ("Tag %s tags unexported object; use "
- "--tag-of-filtered-object=<mode> to handle it.",
- oid_to_hex(&tag->object.oid));
+ die("tag %s tags unexported object; use "
+ "--tag-of-filtered-object=<mode> to handle it",
+ oid_to_hex(&tag->object.oid));
case DROP:
/* Ignore this tag altogether */
free(buf);
return;
case REWRITE:
if (tagged->type != OBJ_COMMIT) {
- die ("Tag %s tags unexported %s!",
- oid_to_hex(&tag->object.oid),
- type_name(tagged->type));
+ die("tag %s tags unexported %s!",
+ oid_to_hex(&tag->object.oid),
+ type_name(tagged->type));
}
p = (struct commit *)tagged;
for (;;) {
@@ -773,7 +775,7 @@ static void handle_tag(const char *name, struct tag *tag)
if (!(p->object.flags & TREESAME))
break;
if (!p->parents)
- die ("Can't find replacement commit for tag %s\n",
+ die("can't find replacement commit for tag %s",
oid_to_hex(&tag->object.oid));
p = p->parents->item;
}
@@ -801,7 +803,7 @@ static struct commit *get_commit(struct rev_cmdline_entry *e, char *full_name)
/* handle nested tags */
while (tag && tag->object.type == OBJ_TAG) {
- parse_object(&tag->object.oid);
+ parse_object(the_repository, &tag->object.oid);
string_list_append(&extra_refs, full_name)->util = tag;
tag = (struct tag *)tag->tagged;
}
@@ -961,7 +963,7 @@ static void import_marks(char *input_file)
/* only commits */
continue;
- commit = lookup_commit(&oid);
+ commit = lookup_commit(the_repository, &oid);
if (!commit)
die("not a commit? can't happen: %s", oid_to_hex(&oid));
diff --git a/builtin/fetch.c b/builtin/fetch.c
index ac06f6a..61bec5d 100644
--- a/builtin/fetch.c
+++ b/builtin/fetch.c
@@ -64,6 +64,7 @@ static int shown_url = 0;
static struct refspec refmap = REFSPEC_INIT_FETCH;
static struct list_objects_filter_options filter_options;
static struct string_list server_options = STRING_LIST_INIT_DUP;
+static struct string_list negotiation_tip = STRING_LIST_INIT_NODUP;
static int git_fetch_config(const char *k, const char *v, void *cb)
{
@@ -162,6 +163,8 @@ static struct option builtin_fetch_options[] = {
TRANSPORT_FAMILY_IPV4),
OPT_SET_INT('6', "ipv6", &family, N_("use IPv6 addresses only"),
TRANSPORT_FAMILY_IPV6),
+ OPT_STRING_LIST(0, "negotiation-tip", &negotiation_tip, N_("revision"),
+ N_("report that we have only objects reachable from this object")),
OPT_PARSE_LIST_OBJECTS_FILTER(&filter_options),
OPT_END()
};
@@ -672,8 +675,10 @@ static int update_local_ref(struct ref *ref,
return r;
}
- current = lookup_commit_reference_gently(&ref->old_oid, 1);
- updated = lookup_commit_reference_gently(&ref->new_oid, 1);
+ current = lookup_commit_reference_gently(the_repository,
+ &ref->old_oid, 1);
+ updated = lookup_commit_reference_gently(the_repository,
+ &ref->new_oid, 1);
if (!current || !updated) {
const char *msg;
const char *what;
@@ -808,7 +813,8 @@ static int store_updated_refs(const char *raw_url, const char *remote_name,
continue;
}
- commit = lookup_commit_reference_gently(&rm->old_oid,
+ commit = lookup_commit_reference_gently(the_repository,
+ &rm->old_oid,
1);
if (!commit)
rm->fetch_head_status = FETCH_HEAD_NOT_FOR_MERGE;
@@ -936,13 +942,11 @@ static int quickfetch(struct ref *ref_map)
return check_connected(iterate_ref_map, &rm, &opt);
}
-static int fetch_refs(struct transport *transport, struct ref *ref_map,
- struct ref **updated_remote_refs)
+static int fetch_refs(struct transport *transport, struct ref *ref_map)
{
int ret = quickfetch(ref_map);
if (ret)
- ret = transport_fetch_refs(transport, ref_map,
- updated_remote_refs);
+ ret = transport_fetch_refs(transport, ref_map);
if (!ret)
/*
* Keep the new pack's ".keep" file around to allow the caller
@@ -1057,6 +1061,40 @@ static void set_option(struct transport *transport, const char *name, const char
name, transport->url);
}
+
+static int add_oid(const char *refname, const struct object_id *oid, int flags,
+ void *cb_data)
+{
+ struct oid_array *oids = cb_data;
+
+ oid_array_append(oids, oid);
+ return 0;
+}
+
+static void add_negotiation_tips(struct git_transport_options *smart_options)
+{
+ struct oid_array *oids = xcalloc(1, sizeof(*oids));
+ int i;
+
+ for (i = 0; i < negotiation_tip.nr; i++) {
+ const char *s = negotiation_tip.items[i].string;
+ int old_nr;
+ if (!has_glob_specials(s)) {
+ struct object_id oid;
+ if (get_oid(s, &oid))
+ die("%s is not a valid object", s);
+ oid_array_append(oids, &oid);
+ continue;
+ }
+ old_nr = oids->nr;
+ for_each_glob_ref(add_oid, s, oids);
+ if (old_nr == oids->nr)
+ warning("Ignoring --negotiation-tip=%s because it does not match any refs",
+ s);
+ }
+ smart_options->negotiation_tips = oids;
+}
+
static struct transport *prepare_transport(struct remote *remote, int deepen)
{
struct transport *transport;
@@ -1083,6 +1121,12 @@ static struct transport *prepare_transport(struct remote *remote, int deepen)
filter_options.filter_spec);
set_option(transport, TRANS_OPT_FROM_PROMISOR, "1");
}
+ if (negotiation_tip.nr) {
+ if (transport->smart_options)
+ add_negotiation_tips(transport->smart_options);
+ else
+ warning("Ignoring --negotiation-tip because the protocol does not support it.");
+ }
return transport;
}
@@ -1107,7 +1151,7 @@ static void backfill_tags(struct transport *transport, struct ref *ref_map)
transport_set_option(transport, TRANS_OPT_FOLLOWTAGS, NULL);
transport_set_option(transport, TRANS_OPT_DEPTH, "0");
transport_set_option(transport, TRANS_OPT_DEEPEN_RELATIVE, NULL);
- if (!fetch_refs(transport, ref_map, NULL))
+ if (!fetch_refs(transport, ref_map))
consume_refs(transport, ref_map);
if (gsecondary) {
@@ -1123,7 +1167,6 @@ static int do_fetch(struct transport *transport,
int autotags = (transport->remote->fetch_tags == 1);
int retcode = 0;
const struct ref *remote_refs;
- struct ref *updated_remote_refs = NULL;
struct argv_array ref_prefixes = ARGV_ARRAY_INIT;
if (tags == TAGS_DEFAULT) {
@@ -1146,7 +1189,7 @@ static int do_fetch(struct transport *transport,
refspec_ref_prefixes(&transport->remote->fetch, &ref_prefixes);
if (ref_prefixes.argc &&
- (tags == TAGS_SET || (tags == TAGS_DEFAULT && !rs->nr))) {
+ (tags == TAGS_SET || (tags == TAGS_DEFAULT))) {
argv_array_push(&ref_prefixes, "refs/tags/");
}
@@ -1174,24 +1217,7 @@ static int do_fetch(struct transport *transport,
transport->url);
}
}
-
- if (fetch_refs(transport, ref_map, &updated_remote_refs)) {
- free_refs(ref_map);
- retcode = 1;
- goto cleanup;
- }
- if (updated_remote_refs) {
- /*
- * Regenerate ref_map using the updated remote refs. This is
- * to account for additional information which may be provided
- * by the transport (e.g. shallow info).
- */
- free_refs(ref_map);
- ref_map = get_ref_map(transport->remote, updated_remote_refs, rs,
- tags, &autotags);
- free_refs(updated_remote_refs);
- }
- if (consume_refs(transport, ref_map)) {
+ if (fetch_refs(transport, ref_map) || consume_refs(transport, ref_map)) {
free_refs(ref_map);
retcode = 1;
goto cleanup;
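For the new --negotiation-tip option above, globs are resolved through for_each_glob_ref() and collected into an oid_array on the transport's smart options. A reduced sketch of that resolution step (the function names below are made up; the pattern string is only an example):

#include "cache.h"
#include "refs.h"
#include "sha1-array.h"

static int collect_tip(const char *refname, const struct object_id *oid,
		       int flags, void *cb_data)
{
	oid_array_append(cb_data, oid);
	return 0;
}

/* hypothetical: expand a glob such as "refs/heads/maint-*" into tip oids */
static void tips_from_glob(struct oid_array *tips, const char *glob)
{
	int before = tips->nr;

	for_each_glob_ref(collect_tip, glob, tips);
	if (before == tips->nr)
		warning("no refs matched '%s'", glob);
}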
diff --git a/builtin/fmt-merge-msg.c b/builtin/fmt-merge-msg.c
index 1b526ad..f35ff16 100644
--- a/builtin/fmt-merge-msg.c
+++ b/builtin/fmt-merge-msg.c
@@ -11,6 +11,7 @@
#include "branch.h"
#include "fmt-merge-msg.h"
#include "gpg-interface.h"
+#include "repository.h"
static const char * const fmt_merge_msg_usage[] = {
N_("git fmt-merge-msg [-m <message>] [--log[=<n>] | --no-log] [--file <file>]"),
@@ -109,14 +110,15 @@ static int handle_line(char *line, struct merge_parents *merge_parents)
struct string_list_item *item;
int pulling_head = 0;
struct object_id oid;
+ const unsigned hexsz = the_hash_algo->hexsz;
- if (len < GIT_SHA1_HEXSZ + 3 || line[GIT_SHA1_HEXSZ] != '\t')
+ if (len < hexsz + 3 || line[hexsz] != '\t')
return 1;
- if (starts_with(line + GIT_SHA1_HEXSZ + 1, "not-for-merge"))
+ if (starts_with(line + hexsz + 1, "not-for-merge"))
return 0;
- if (line[GIT_SHA1_HEXSZ + 1] != '\t')
+ if (line[hexsz + 1] != '\t')
return 2;
i = get_oid_hex(line, &oid);
@@ -131,7 +133,7 @@ static int handle_line(char *line, struct merge_parents *merge_parents)
if (line[len - 1] == '\n')
line[len - 1] = 0;
- line += GIT_SHA1_HEXSZ + 2;
+ line += hexsz + 2;
/*
* At this point, line points at the beginning of comment e.g.
@@ -343,7 +345,9 @@ static void shortlog(const char *name,
const struct object_id *oid = &origin_data->oid;
int limit = opts->shortlog_len;
- branch = deref_tag(parse_object(oid), oid_to_hex(oid), GIT_SHA1_HEXSZ);
+ branch = deref_tag(the_repository, parse_object(the_repository, oid),
+ oid_to_hex(oid),
+ the_hash_algo->hexsz);
if (!branch || branch->type != OBJ_COMMIT)
return;
@@ -546,6 +550,7 @@ static void find_merge_parents(struct merge_parents *result,
int len;
char *p = in->buf + pos;
char *newline = strchr(p, '\n');
+ const char *q;
struct object_id oid;
struct commit *parent;
struct object *obj;
@@ -553,24 +558,23 @@ static void find_merge_parents(struct merge_parents *result,
len = newline ? newline - p : strlen(p);
pos += len + !!newline;
- if (len < GIT_SHA1_HEXSZ + 3 ||
- get_oid_hex(p, &oid) ||
- p[GIT_SHA1_HEXSZ] != '\t' ||
- p[GIT_SHA1_HEXSZ + 1] != '\t')
+ if (parse_oid_hex(p, &oid, &q) ||
+ q[0] != '\t' ||
+ q[1] != '\t')
continue; /* skip not-for-merge */
/*
* Do not use get_merge_parent() here; we do not have
* "name" here and we do not want to contaminate its
* util field yet.
*/
- obj = parse_object(&oid);
+ obj = parse_object(the_repository, &oid);
parent = (struct commit *)peel_to_type(NULL, 0, obj, OBJ_COMMIT);
if (!parent)
continue;
commit_list_insert(parent, &parents);
add_merge_parent(result, &obj->oid, &parent->object.oid);
}
- head_commit = lookup_commit(head);
+ head_commit = lookup_commit(the_repository, head);
if (head_commit)
commit_list_insert(head_commit, &parents);
reduce_heads_replace(&parents);
@@ -624,7 +628,7 @@ int fmt_merge_msg(struct strbuf *in, struct strbuf *out,
i++;
p[len] = 0;
if (handle_line(p, &merge_parents))
- die ("Error in line %d: %.*s", i, len, p);
+ die("error in line %d: %.*s", i, len, p);
}
if (opts->add_title && srcs.nr)
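The fmt-merge-msg hunks above drop hard-coded GIT_SHA1_HEXSZ arithmetic in favour of the_hash_algo->hexsz and parse_oid_hex(), so the parsing no longer assumes SHA-1's length. A compact sketch of the same idea (the helper name is made up):

#include "cache.h"

/* hypothetical: recognize a "<oid>\t\t<message>" line regardless of hash size */
static int is_for_merge_line(const char *line, struct object_id *oid)
{
	const char *rest;

	if (parse_oid_hex(line, oid, &rest))
		return 0;	/* not a full hex object id */
	if (rest[0] != '\t' || rest[1] != '\t')
		return 0;	/* e.g. a "not-for-merge" entry */
	return 1;
}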
diff --git a/builtin/fsck.c b/builtin/fsck.c
index 3ad4f16..250f5af 100644
--- a/builtin/fsck.c
+++ b/builtin/fsck.c
@@ -18,6 +18,7 @@
#include "decorate.h"
#include "packfile.h"
#include "object-store.h"
+#include "run-command.h"
#define REACHABLE 0x0001
#define SEEN 0x0002
@@ -47,6 +48,7 @@ static int name_objects;
#define ERROR_REACHABLE 02
#define ERROR_PACK 04
#define ERROR_REFS 010
+#define ERROR_COMMIT_GRAPH 020
static const char *describe_object(struct object *obj)
{
@@ -70,7 +72,7 @@ static const char *printable_type(struct object *obj)
enum object_type type = oid_object_info(the_repository,
&obj->oid, NULL);
if (type > 0)
- object_as_type(obj, type, 0);
+ object_as_type(the_repository, obj, type, 0);
}
ret = type_name(obj->type);
@@ -392,7 +394,8 @@ static int fsck_obj_buffer(const struct object_id *oid, enum object_type type,
* verify_packfile(), data_valid variable for details.
*/
struct object *obj;
- obj = parse_object_buffer(oid, type, size, buffer, eaten);
+ obj = parse_object_buffer(the_repository, oid, type, size, buffer,
+ eaten);
if (!obj) {
errors_found |= ERROR_OBJECT;
return error("%s: object corrupt or missing", oid_to_hex(oid));
@@ -410,7 +413,7 @@ static void fsck_handle_reflog_oid(const char *refname, struct object_id *oid,
struct object *obj;
if (!is_null_oid(oid)) {
- obj = lookup_object(oid->hash);
+ obj = lookup_object(the_repository, oid->hash);
if (obj && (obj->flags & HAS_OBJ)) {
if (timestamp && name_objects)
add_decoration(fsck_walk_options.object_names,
@@ -452,7 +455,7 @@ static int fsck_handle_ref(const char *refname, const struct object_id *oid,
{
struct object *obj;
- obj = parse_object(oid);
+ obj = parse_object(the_repository, oid);
if (!obj) {
if (is_promisor_object(oid)) {
/*
@@ -525,7 +528,9 @@ static int fsck_loose(const struct object_id *oid, const char *path, void *data)
if (!contents && type != OBJ_BLOB)
BUG("read_loose_object streamed a non-blob");
- obj = parse_object_buffer(oid, type, size, contents, &eaten);
+ obj = parse_object_buffer(the_repository, oid, type, size,
+ contents, &eaten);
+
if (!obj) {
errors_found |= ERROR_OBJECT;
error("%s: object could not be parsed: %s",
@@ -614,7 +619,7 @@ static int fsck_cache_tree(struct cache_tree *it)
fprintf(stderr, "Checking cache tree\n");
if (0 <= it->entry_count) {
- struct object *obj = parse_object(&it->oid);
+ struct object *obj = parse_object(the_repository, &it->oid);
if (!obj) {
error("%s: invalid sha1 pointer in cache-tree",
oid_to_hex(&it->oid));
@@ -689,7 +694,7 @@ int cmd_fsck(int argc, const char **argv, const char *prefix)
fetch_if_missing = 0;
errors_found = 0;
- check_replace_refs = 0;
+ read_replace_refs = 0;
argc = parse_options(argc, argv, prefix, fsck_opts, fsck_usage, 0);
@@ -763,7 +768,8 @@ int cmd_fsck(int argc, const char **argv, const char *prefix)
const char *arg = argv[i];
struct object_id oid;
if (!get_oid(arg, &oid)) {
- struct object *obj = lookup_object(oid.hash);
+ struct object *obj = lookup_object(the_repository,
+ oid.hash);
if (!obj || !(obj->flags & HAS_OBJ)) {
if (is_promisor_object(&oid))
@@ -806,7 +812,8 @@ int cmd_fsck(int argc, const char **argv, const char *prefix)
mode = active_cache[i]->ce_mode;
if (S_ISGITLINK(mode))
continue;
- blob = lookup_blob(&active_cache[i]->oid);
+ blob = lookup_blob(the_repository,
+ &active_cache[i]->oid);
if (!blob)
continue;
obj = &blob->object;
@@ -822,5 +829,24 @@ int cmd_fsck(int argc, const char **argv, const char *prefix)
}
check_connectivity();
+
+ if (!git_config_get_bool("core.commitgraph", &i) && i) {
+ struct child_process commit_graph_verify = CHILD_PROCESS_INIT;
+ const char *verify_argv[] = { "commit-graph", "verify", NULL, NULL, NULL };
+
+ commit_graph_verify.argv = verify_argv;
+ commit_graph_verify.git_cmd = 1;
+ if (run_command(&commit_graph_verify))
+ errors_found |= ERROR_COMMIT_GRAPH;
+
+ prepare_alt_odb(the_repository);
+ for (alt = the_repository->objects->alt_odb_list; alt; alt = alt->next) {
+ verify_argv[2] = "--object-dir";
+ verify_argv[3] = alt->path;
+ if (run_command(&commit_graph_verify))
+ errors_found |= ERROR_COMMIT_GRAPH;
+ }
+ }
+
return errors_found;
}
diff --git a/builtin/gc.c b/builtin/gc.c
index ccfb1ce..5706944 100644
--- a/builtin/gc.c
+++ b/builtin/gc.c
@@ -20,6 +20,7 @@
#include "sigchain.h"
#include "argv-array.h"
#include "commit.h"
+#include "commit-graph.h"
#include "packfile.h"
#include "object-store.h"
#include "pack.h"
@@ -40,6 +41,7 @@ static int aggressive_depth = 50;
static int aggressive_window = 250;
static int gc_auto_threshold = 6700;
static int gc_auto_pack_limit = 50;
+static int gc_write_commit_graph;
static int detach_auto = 1;
static timestamp_t gc_log_expire_time;
static const char *gc_log_expire = "1.day.ago";
@@ -129,6 +131,7 @@ static void gc_config(void)
git_config_get_int("gc.aggressivedepth", &aggressive_depth);
git_config_get_int("gc.auto", &gc_auto_threshold);
git_config_get_int("gc.autopacklimit", &gc_auto_pack_limit);
+ git_config_get_bool("gc.writecommitgraph", &gc_write_commit_graph);
git_config_get_bool("gc.autodetach", &detach_auto);
git_config_get_expiry("gc.pruneexpire", &prune_expire);
git_config_get_expiry("gc.worktreepruneexpire", &prune_worktrees_expire);
@@ -612,6 +615,7 @@ int cmd_gc(int argc, const char **argv, const char *prefix)
return -1;
if (!repository_format_precious_objects) {
+ close_all_packs(the_repository->objects);
if (run_command_v_opt(repack.argv, RUN_GIT_CMD))
return error(FAILED_RUN, repack.argv[0]);
@@ -641,6 +645,9 @@ int cmd_gc(int argc, const char **argv, const char *prefix)
if (pack_garbage.nr > 0)
clean_pack_garbage();
+ if (gc_write_commit_graph)
+ write_commit_graph_reachable(get_object_directory(), 0);
+
if (auto_gc && too_many_loose_objects())
warning(_("There are too many unreachable loose objects; "
"run 'git prune' to remove them."));
diff --git a/builtin/grep.c b/builtin/grep.c
index 61bcaf6..ee5a1bd 100644
--- a/builtin/grep.c
+++ b/builtin/grep.c
@@ -489,7 +489,7 @@ static int grep_cache(struct grep_opt *opt, struct repository *repo,
}
if (repo_read_index(repo) < 0)
- die("index file corrupt");
+ die(_("index file corrupt"));
for (nr = 0; nr < repo->index->cache_nr; nr++) {
const struct cache_entry *ce = repo->index->cache[nr];
@@ -647,7 +647,8 @@ static int grep_objects(struct grep_opt *opt, const struct pathspec *pathspec,
for (i = 0; i < nr; i++) {
struct object *real_obj;
- real_obj = deref_tag(list->objects[i].item, NULL, 0);
+ real_obj = deref_tag(the_repository, list->objects[i].item,
+ NULL, 0);
/* load the gitmodules file for this rev */
if (recurse_submodules) {
@@ -843,6 +844,8 @@ int cmd_grep(int argc, const char **argv, const char *prefix)
OPT_BOOL_F('z', "null", &opt.null_following_name,
N_("print NUL after filenames"),
PARSE_OPT_NOCOMPLETE),
+ OPT_BOOL('o', "only-matching", &opt.only_matching,
+ N_("show only matching parts of a line")),
OPT_BOOL('c', "count", &opt.count,
N_("show the number of matches instead of matching lines")),
OPT__COLOR(&opt.color, N_("highlight matches")),
@@ -960,7 +963,11 @@ int cmd_grep(int argc, const char **argv, const char *prefix)
}
if (!opt.pattern_list)
- die(_("no pattern given."));
+ die(_("no pattern given"));
+
+ /* --only-matching has no effect with --invert. */
+ if (opt.invert)
+ opt.only_matching = 0;
/*
* We have to find "--" in a separate pass, because its presence
@@ -1086,19 +1093,19 @@ int cmd_grep(int argc, const char **argv, const char *prefix)
}
if (recurse_submodules && (!use_index || untracked))
- die(_("option not supported with --recurse-submodules."));
+ die(_("option not supported with --recurse-submodules"));
if (!show_in_pager && !opt.status_only)
setup_pager();
if (!use_index && (untracked || cached))
- die(_("--cached or --untracked cannot be used with --no-index."));
+ die(_("--cached or --untracked cannot be used with --no-index"));
if (!use_index || untracked) {
int use_exclude = (opt_exclude < 0) ? use_index : !!opt_exclude;
hit = grep_directory(&opt, &pathspec, use_exclude, use_index);
} else if (0 <= opt_exclude) {
- die(_("--[no-]exclude-standard cannot be used for tracked contents."));
+ die(_("--[no-]exclude-standard cannot be used for tracked contents"));
} else if (!list.nr) {
if (!cached)
setup_work_tree();
@@ -1106,7 +1113,7 @@ int cmd_grep(int argc, const char **argv, const char *prefix)
hit = grep_cache(&opt, the_repository, &pathspec, cached);
} else {
if (cached)
- die(_("both --cached and trees are given."));
+ die(_("both --cached and trees are given"));
hit = grep_objects(&opt, &pathspec, &list);
}
diff --git a/builtin/index-pack.c b/builtin/index-pack.c
index 74fe297..9582ead 100644
--- a/builtin/index-pack.c
+++ b/builtin/index-pack.c
@@ -832,7 +832,7 @@ static void sha1_object(const void *data, struct object_entry *obj_entry,
if (strict || do_fsck_object) {
read_lock();
if (type == OBJ_BLOB) {
- struct blob *blob = lookup_blob(oid);
+ struct blob *blob = lookup_blob(the_repository, oid);
if (blob)
blob->object.flags |= FLAG_CHECKED;
else
@@ -851,7 +851,8 @@ static void sha1_object(const void *data, struct object_entry *obj_entry,
* we do not need to free the memory here, as the
* buf is deleted by the caller.
*/
- obj = parse_object_buffer(oid, type, size, buf,
+ obj = parse_object_buffer(the_repository, oid, type,
+ size, buf,
&eaten);
if (!obj)
die(_("invalid %s"), type_name(type));
@@ -1679,7 +1680,7 @@ int cmd_index_pack(int argc, const char **argv, const char *prefix)
if (argc == 2 && !strcmp(argv[1], "-h"))
usage(index_pack_usage);
- check_replace_refs = 0;
+ read_replace_refs = 0;
fsck_options.walk = mark_link;
reset_pack_idx_option(&opts);
diff --git a/builtin/init-db.c b/builtin/init-db.c
index 4ecf909..12ddda7 100644
--- a/builtin/init-db.c
+++ b/builtin/init-db.c
@@ -73,7 +73,8 @@ static void copy_templates_1(struct strbuf *path, struct strbuf *template_path,
continue;
else if (S_ISLNK(st_template.st_mode)) {
struct strbuf lnk = STRBUF_INIT;
- if (strbuf_readlink(&lnk, template_path->buf, 0) < 0)
+ if (strbuf_readlink(&lnk, template_path->buf,
+ st_template.st_size) < 0)
die_errno(_("cannot readlink '%s'"), template_path->buf);
if (symlink(lnk.buf, path->buf))
die_errno(_("cannot symlink '%s' '%s'"),
diff --git a/builtin/log.c b/builtin/log.c
index 805f89d..e094560 100644
--- a/builtin/log.c
+++ b/builtin/log.c
@@ -30,6 +30,7 @@
#include "gpg-interface.h"
#include "progress.h"
#include "commit-slab.h"
+#include "repository.h"
#define MAIL_DEFAULT_WRAP 72
@@ -619,7 +620,7 @@ int cmd_show(int argc, const char **argv, const char *prefix)
rev.shown_one = 1;
if (ret)
break;
- o = parse_object(&t->tagged->oid);
+ o = parse_object(the_repository, &t->tagged->oid);
if (!o)
ret = error(_("Could not read object %s"),
oid_to_hex(&t->tagged->oid));
@@ -906,8 +907,8 @@ static void get_patch_ids(struct rev_info *rev, struct patch_ids *ids)
o2 = rev->pending.objects[1].item;
flags1 = o1->flags;
flags2 = o2->flags;
- c1 = lookup_commit_reference(&o1->oid);
- c2 = lookup_commit_reference(&o2->oid);
+ c1 = lookup_commit_reference(the_repository, &o1->oid);
+ c2 = lookup_commit_reference(the_repository, &o2->oid);
if ((flags1 & UNINTERESTING) == (flags2 & UNINTERESTING))
die(_("Not a range."));
@@ -1607,14 +1608,14 @@ int cmd_format_patch(int argc, const char **argv, const char *prefix)
numbered = 0;
if (numbered && keep_subject)
- die (_("-n and -k are mutually exclusive."));
+ die(_("-n and -k are mutually exclusive"));
if (keep_subject && subject_prefix)
- die (_("--subject-prefix/--rfc and -k are mutually exclusive."));
+ die(_("--subject-prefix/--rfc and -k are mutually exclusive"));
rev.preserve_subject = keep_subject;
argc = setup_revisions(argc, argv, &rev, &s_r_opt);
if (argc > 1)
- die (_("unrecognized argument: %s"), argv[1]);
+ die(_("unrecognized argument: %s"), argv[1]);
if (rev.diffopt.output_format & DIFF_FORMAT_NAME)
die(_("--name-only does not make sense"));
@@ -1864,7 +1865,8 @@ static int add_pending_commit(const char *arg, struct rev_info *revs, int flags)
{
struct object_id oid;
if (get_oid(arg, &oid) == 0) {
- struct commit *commit = lookup_commit_reference(&oid);
+ struct commit *commit = lookup_commit_reference(the_repository,
+ &oid);
if (commit) {
commit->object.flags |= flags;
add_pending_object(revs, &commit->object, arg);
diff --git a/builtin/merge-base.c b/builtin/merge-base.c
index 3b76001..08d91b1 100644
--- a/builtin/merge-base.c
+++ b/builtin/merge-base.c
@@ -6,6 +6,7 @@
#include "diff.h"
#include "revision.h"
#include "parse-options.h"
+#include "repository.h"
static int show_merge_base(struct commit **rev, int rev_nr, int show_all)
{
@@ -42,7 +43,7 @@ static struct commit *get_commit_reference(const char *arg)
if (get_oid(arg, &revkey))
die("Not a valid object name %s", arg);
- r = lookup_commit_reference(&revkey);
+ r = lookup_commit_reference(the_repository, &revkey);
if (!r)
die("Not a valid commit name %s", arg);
@@ -123,7 +124,7 @@ static void add_one_commit(struct object_id *oid, struct rev_collect *revs)
if (is_null_oid(oid))
return;
- commit = lookup_commit(oid);
+ commit = lookup_commit(the_repository, oid);
if (!commit ||
(commit->object.flags & TMP_MARK) ||
parse_commit(commit))
@@ -171,7 +172,7 @@ static int handle_fork_point(int argc, const char **argv)
if (get_oid(commitname, &oid))
die("Not a valid object name: '%s'", commitname);
- derived = lookup_commit_reference(&oid);
+ derived = lookup_commit_reference(the_repository, &oid);
memset(&revs, 0, sizeof(revs));
revs.initial = 1;
for_each_reflog_ent(refname, collect_one_reflog_ent, &revs);
diff --git a/builtin/merge-recursive.c b/builtin/merge-recursive.c
index 0dd9021..9b2f707 100644
--- a/builtin/merge-recursive.c
+++ b/builtin/merge-recursive.c
@@ -9,10 +9,10 @@ static const char builtin_merge_recursive_usage[] =
static const char *better_branch_name(const char *branch)
{
- static char githead_env[8 + GIT_SHA1_HEXSZ + 1];
+ static char githead_env[8 + GIT_MAX_HEXSZ + 1];
char *name;
- if (strlen(branch) != GIT_SHA1_HEXSZ)
+ if (strlen(branch) != the_hash_algo->hexsz)
return branch;
xsnprintf(githead_env, sizeof(githead_env), "GITHEAD_%s", branch);
name = getenv(githead_env);
diff --git a/builtin/merge-tree.c b/builtin/merge-tree.c
index 8a8d579..f8023ba 100644
--- a/builtin/merge-tree.c
+++ b/builtin/merge-tree.c
@@ -2,6 +2,7 @@
#include "tree-walk.h"
#include "xdiff-interface.h"
#include "object-store.h"
+#include "repository.h"
#include "blob.h"
#include "exec-cmd.h"
#include "merge-blobs.h"
@@ -170,7 +171,7 @@ static struct merge_list *create_entry(unsigned stage, unsigned mode, const stru
res->stage = stage;
res->path = path;
res->mode = mode;
- res->blob = lookup_blob(oid);
+ res->blob = lookup_blob(the_repository, oid);
return res;
}
diff --git a/builtin/merge.c b/builtin/merge.c
index d1b547d..8f4a506 100644
--- a/builtin/merge.c
+++ b/builtin/merge.c
@@ -111,6 +111,35 @@ static int option_parse_message(const struct option *opt,
return 0;
}
+static int option_read_message(struct parse_opt_ctx_t *ctx,
+ const struct option *opt, int unset)
+{
+ struct strbuf *buf = opt->value;
+ const char *arg;
+
+ if (unset)
+ BUG("-F cannot be negated");
+
+ if (ctx->opt) {
+ arg = ctx->opt;
+ ctx->opt = NULL;
+ } else if (ctx->argc > 1) {
+ ctx->argc--;
+ arg = *++ctx->argv;
+ } else
+ return opterror(opt, "requires a value", 0);
+
+ if (buf->len)
+ strbuf_addch(buf, '\n');
+ if (ctx->prefix && !is_absolute_path(arg))
+ arg = prefix_filename(ctx->prefix, arg);
+ if (strbuf_read_file(buf, arg, 0) < 0)
+ return error(_("could not read file '%s'"), arg);
+ have_message = 1;
+
+ return 0;
+}
+
static struct strategy *get_strategy(const char *name)
{
int i;
@@ -228,6 +257,9 @@ static struct option builtin_merge_options[] = {
OPT_CALLBACK('m', "message", &merge_msg, N_("message"),
N_("merge commit message (for a non-fast-forward merge)"),
option_parse_message),
+ { OPTION_LOWLEVEL_CALLBACK, 'F', "file", &merge_msg, N_("path"),
+ N_("read message from file"), PARSE_OPT_NONEG,
+ (parse_opt_cb *) option_read_message },
OPT__VERBOSITY(&verbosity),
OPT_BOOL(0, "abort", &abort_current_merge,
N_("abort the current in-progress merge")),
@@ -693,7 +725,7 @@ static int try_merge_strategy(const char *strategy, struct commit_list *common,
exit(128);
if (write_locked_index(&the_index, &lock,
COMMIT_LOCK | SKIP_IF_UNCHANGED))
- die (_("unable to write %s"), get_index_file());
+ die(_("unable to write %s"), get_index_file());
return clean ? 0 : 1;
} else {
return try_merge_command(strategy, xopts_nr, xopts,
@@ -1035,6 +1067,7 @@ static void handle_fetch_head(struct commit_list **remotes, struct strbuf *merge
const char *filename;
int fd, pos, npos;
struct strbuf fetch_head_file = STRBUF_INIT;
+ const unsigned hexsz = the_hash_algo->hexsz;
if (!merge_names)
merge_names = &fetch_head_file;
@@ -1060,16 +1093,16 @@ static void handle_fetch_head(struct commit_list **remotes, struct strbuf *merge
else
npos = merge_names->len;
- if (npos - pos < GIT_SHA1_HEXSZ + 2 ||
+ if (npos - pos < hexsz + 2 ||
get_oid_hex(merge_names->buf + pos, &oid))
commit = NULL; /* bad */
- else if (memcmp(merge_names->buf + pos + GIT_SHA1_HEXSZ, "\t\t", 2))
+ else if (memcmp(merge_names->buf + pos + hexsz, "\t\t", 2))
continue; /* not-for-merge */
else {
- char saved = merge_names->buf[pos + GIT_SHA1_HEXSZ];
- merge_names->buf[pos + GIT_SHA1_HEXSZ] = '\0';
+ char saved = merge_names->buf[pos + hexsz];
+ merge_names->buf[pos + hexsz] = '\0';
commit = get_merge_parent(merge_names->buf + pos);
- merge_names->buf[pos + GIT_SHA1_HEXSZ] = saved;
+ merge_names->buf[pos + hexsz] = saved;
}
if (!commit) {
if (ptr)
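
Besides the hexsz conversions, the merge.c section above teaches `git merge` an `-F <file>` option via a low-level parse-options callback: each occurrence reads one file (prefix-relative paths are resolved first) and appends its contents to the merge message, with a newline between files. Roughly what two `-F` options amount to, assuming git's strbuf API and hypothetical file names (sketch only):

    struct strbuf merge_msg = STRBUF_INIT;

    if (strbuf_read_file(&merge_msg, "part1.txt", 0) < 0)   /* first -F */
            die(_("could not read file '%s'"), "part1.txt");
    strbuf_addch(&merge_msg, '\n');   /* separator added by option_read_message() */
    if (strbuf_read_file(&merge_msg, "part2.txt", 0) < 0)   /* second -F */
            die(_("could not read file '%s'"), "part2.txt");
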
diff --git a/builtin/name-rev.c b/builtin/name-rev.c
index 0eb4403..f1cb45c 100644
--- a/builtin/name-rev.c
+++ b/builtin/name-rev.c
@@ -1,5 +1,6 @@
#include "builtin.h"
#include "cache.h"
+#include "repository.h"
#include "config.h"
#include "commit.h"
#include "tag.h"
@@ -203,7 +204,7 @@ static int tipcmp(const void *a_, const void *b_)
static int name_ref(const char *path, const struct object_id *oid, int flags, void *cb_data)
{
- struct object *o = parse_object(oid);
+ struct object *o = parse_object(the_repository, oid);
struct name_ref_data *data = cb_data;
int can_abbreviate_output = data->tags_only && data->name_only;
int deref = 0;
@@ -261,7 +262,7 @@ static int name_ref(const char *path, const struct object_id *oid, int flags, vo
struct tag *t = (struct tag *) o;
if (!t->tagged)
break; /* broken repository */
- o = parse_object(&t->tagged->oid);
+ o = parse_object(the_repository, &t->tagged->oid);
deref = 1;
taggerdate = t->date;
}
@@ -378,7 +379,8 @@ static void name_rev_line(char *p, struct name_ref_data *data)
*(p+1) = 0;
if (!get_oid(p - (GIT_SHA1_HEXSZ - 1), &oid)) {
struct object *o =
- lookup_object(oid.hash);
+ lookup_object(the_repository,
+ oid.hash);
if (o)
name = get_rev_name(o, &buf);
}
@@ -451,9 +453,10 @@ int cmd_name_rev(int argc, const char **argv, const char *prefix)
}
commit = NULL;
- object = parse_object(&oid);
+ object = parse_object(the_repository, &oid);
if (object) {
- struct object *peeled = deref_tag(object, *argv, 0);
+ struct object *peeled = deref_tag(the_repository,
+ object, *argv, 0);
if (peeled && peeled->type == OBJ_COMMIT)
commit = (struct commit *)peeled;
}
diff --git a/builtin/notes.c b/builtin/notes.c
index a0a1840..c05cd00 100644
--- a/builtin/notes.c
+++ b/builtin/notes.c
@@ -12,6 +12,7 @@
#include "builtin.h"
#include "notes.h"
#include "object-store.h"
+#include "repository.h"
#include "blob.h"
#include "pretty.h"
#include "refs.h"
@@ -711,7 +712,7 @@ static int merge_commit(struct notes_merge_options *o)
if (get_oid("NOTES_MERGE_PARTIAL", &oid))
die(_("failed to read ref NOTES_MERGE_PARTIAL"));
- else if (!(partial = lookup_commit_reference(&oid)))
+ else if (!(partial = lookup_commit_reference(the_repository, &oid)))
die(_("could not find commit from NOTES_MERGE_PARTIAL."));
else if (parse_commit(partial))
die(_("could not parse commit from NOTES_MERGE_PARTIAL."));
diff --git a/builtin/pack-objects.c b/builtin/pack-objects.c
index ebc8cef..c0741ba 100644
--- a/builtin/pack-objects.c
+++ b/builtin/pack-objects.c
@@ -140,7 +140,7 @@ static void *get_delta(struct object_entry *entry)
buf = read_object_file(&entry->idx.oid, &type, &size);
if (!buf)
- die("unable to read %s", oid_to_hex(&entry->idx.oid));
+ die(_("unable to read %s"), oid_to_hex(&entry->idx.oid));
base_buf = read_object_file(&DELTA(entry)->idx.oid, &type,
&base_size);
if (!base_buf)
@@ -148,8 +148,13 @@ static void *get_delta(struct object_entry *entry)
oid_to_hex(&DELTA(entry)->idx.oid));
delta_buf = diff_delta(base_buf, base_size,
buf, size, &delta_size, 0);
+ /*
+ * We successfully computed this delta once but dropped it for
+ * memory reasons. Something is very wrong if this time we
+ * recompute and create a different delta.
+ */
if (!delta_buf || delta_size != DELTA_SIZE(entry))
- die("delta size changed");
+ BUG("delta size changed");
free(buf);
free(base_buf);
return delta_buf;
@@ -406,7 +411,7 @@ static off_t write_reuse_object(struct hashfile *f, struct object_entry *entry,
datalen = revidx[1].offset - offset;
if (!pack_to_stdout && p->index_version > 1 &&
check_pack_crc(p, &w_curs, offset, datalen, revidx->nr)) {
- error("bad packed object CRC for %s",
+ error(_("bad packed object CRC for %s"),
oid_to_hex(&entry->idx.oid));
unuse_pack(&w_curs);
return write_no_reuse_object(f, entry, limit, usable_delta);
@@ -417,7 +422,7 @@ static off_t write_reuse_object(struct hashfile *f, struct object_entry *entry,
if (!pack_to_stdout && p->index_version == 1 &&
check_pack_inflate(p, &w_curs, offset, datalen, entry_size)) {
- error("corrupt packed object for %s",
+ error(_("corrupt packed object for %s"),
oid_to_hex(&entry->idx.oid));
unuse_pack(&w_curs);
return write_no_reuse_object(f, entry, limit, usable_delta);
@@ -548,7 +553,7 @@ static enum write_one_status write_one(struct hashfile *f,
*/
recursing = (e->idx.offset == 1);
if (recursing) {
- warning("recursive delta detected for object %s",
+ warning(_("recursive delta detected for object %s"),
oid_to_hex(&e->idx.oid));
return WRITE_ONE_RECURSIVE;
} else if (e->idx.offset || e->preferred_base) {
@@ -582,7 +587,7 @@ static enum write_one_status write_one(struct hashfile *f,
/* make sure off_t is sufficiently large not to wrap */
if (signed_add_overflows(*offset, size))
- die("pack too large for current definition of off_t");
+ die(_("pack too large for current definition of off_t"));
*offset += size;
return WRITE_ONE_WRITTEN;
}
@@ -748,7 +753,8 @@ static struct object_entry **compute_write_order(void)
}
if (wo_end != to_pack.nr_objects)
- die("ordered %u objects, expected %"PRIu32, wo_end, to_pack.nr_objects);
+ die(_("ordered %u objects, expected %"PRIu32),
+ wo_end, to_pack.nr_objects);
return wo;
}
@@ -760,15 +766,15 @@ static off_t write_reused_pack(struct hashfile *f)
int fd;
if (!is_pack_valid(reuse_packfile))
- die("packfile is invalid: %s", reuse_packfile->pack_name);
+ die(_("packfile is invalid: %s"), reuse_packfile->pack_name);
fd = git_open(reuse_packfile->pack_name);
if (fd < 0)
- die_errno("unable to open packfile for reuse: %s",
+ die_errno(_("unable to open packfile for reuse: %s"),
reuse_packfile->pack_name);
if (lseek(fd, sizeof(struct pack_header), SEEK_SET) == -1)
- die_errno("unable to seek in reused packfile");
+ die_errno(_("unable to seek in reused packfile"));
if (reuse_packfile_offset < 0)
reuse_packfile_offset = reuse_packfile->pack_size - the_hash_algo->rawsz;
@@ -779,7 +785,7 @@ static off_t write_reused_pack(struct hashfile *f)
int read_pack = xread(fd, buffer, sizeof(buffer));
if (read_pack <= 0)
- die_errno("unable to read from reused packfile");
+ die_errno(_("unable to read from reused packfile"));
if (read_pack > to_write)
read_pack = to_write;
@@ -882,7 +888,7 @@ static void write_pack_file(void)
* to preserve this property.
*/
if (stat(pack_tmp_name, &st) < 0) {
- warning_errno("failed to stat %s", pack_tmp_name);
+ warning_errno(_("failed to stat %s"), pack_tmp_name);
} else if (!last_mtime) {
last_mtime = st.st_mtime;
} else {
@@ -890,7 +896,7 @@ static void write_pack_file(void)
utb.actime = st.st_atime;
utb.modtime = --last_mtime;
if (utime(pack_tmp_name, &utb) < 0)
- warning_errno("failed utime() on %s", pack_tmp_name);
+ warning_errno(_("failed utime() on %s"), pack_tmp_name);
}
strbuf_addf(&tmpname, "%s-", base_name);
@@ -935,8 +941,8 @@ static void write_pack_file(void)
free(write_order);
stop_progress(&progress_state);
if (written != nr_result)
- die("wrote %"PRIu32" objects while expecting %"PRIu32,
- written, nr_result);
+ die(_("wrote %"PRIu32" objects while expecting %"PRIu32),
+ written, nr_result);
}
static int no_try_delta(const char *path)
@@ -1480,7 +1486,7 @@ static void check_object(struct object_entry *entry)
while (c & 128) {
ofs += 1;
if (!ofs || MSB(ofs, 7)) {
- error("delta base offset overflow in pack for %s",
+ error(_("delta base offset overflow in pack for %s"),
oid_to_hex(&entry->idx.oid));
goto give_up;
}
@@ -1489,7 +1495,7 @@ static void check_object(struct object_entry *entry)
}
ofs = entry->in_pack_offset - ofs;
if (ofs <= 0 || ofs >= entry->in_pack_offset) {
- error("delta base offset out of bound for %s",
+ error(_("delta base offset out of bound for %s"),
oid_to_hex(&entry->idx.oid));
goto give_up;
}
@@ -1852,18 +1858,30 @@ static int delta_cacheable(unsigned long src_size, unsigned long trg_size,
#ifndef NO_PTHREADS
+/* Protect access to object database */
static pthread_mutex_t read_mutex;
#define read_lock() pthread_mutex_lock(&read_mutex)
#define read_unlock() pthread_mutex_unlock(&read_mutex)
+/* Protect delta_cache_size */
static pthread_mutex_t cache_mutex;
#define cache_lock() pthread_mutex_lock(&cache_mutex)
#define cache_unlock() pthread_mutex_unlock(&cache_mutex)
+/*
+ * Protect object list partitioning (e.g. struct thread_param) and
+ * progress_state
+ */
static pthread_mutex_t progress_mutex;
#define progress_lock() pthread_mutex_lock(&progress_mutex)
#define progress_unlock() pthread_mutex_unlock(&progress_mutex)
+/*
+ * Access to struct object_entry is unprotected since each thread owns
+ * a portion of the main object list. Just don't access object entries
+ * ahead in the list because they can be stolen and would need
+ * progress_mutex for protection.
+ */
#else
#define read_lock() (void)0
@@ -1974,10 +1992,10 @@ static int try_delta(struct unpacked *trg, struct unpacked *src,
trg->data = read_object_file(&trg_entry->idx.oid, &type, &sz);
read_unlock();
if (!trg->data)
- die("object %s cannot be read",
+ die(_("object %s cannot be read"),
oid_to_hex(&trg_entry->idx.oid));
if (sz != trg_size)
- die("object %s inconsistent object length (%lu vs %lu)",
+ die(_("object %s inconsistent object length (%lu vs %lu)"),
oid_to_hex(&trg_entry->idx.oid), sz,
trg_size);
*mem_usage += sz;
@@ -1990,7 +2008,7 @@ static int try_delta(struct unpacked *trg, struct unpacked *src,
if (src_entry->preferred_base) {
static int warned = 0;
if (!warned++)
- warning("object %s cannot be read",
+ warning(_("object %s cannot be read"),
oid_to_hex(&src_entry->idx.oid));
/*
* Those objects are not included in the
@@ -2000,11 +2018,11 @@ static int try_delta(struct unpacked *trg, struct unpacked *src,
*/
return 0;
}
- die("object %s cannot be read",
+ die(_("object %s cannot be read"),
oid_to_hex(&src_entry->idx.oid));
}
if (sz != src_size)
- die("object %s inconsistent object length (%lu vs %lu)",
+ die(_("object %s inconsistent object length (%lu vs %lu)"),
oid_to_hex(&src_entry->idx.oid), sz,
src_size);
*mem_usage += sz;
@@ -2014,7 +2032,7 @@ static int try_delta(struct unpacked *trg, struct unpacked *src,
if (!src->index) {
static int warned = 0;
if (!warned++)
- warning("suboptimal pack - out of memory");
+ warning(_("suboptimal pack - out of memory"));
return 0;
}
*mem_usage += sizeof_delta_index(src->index);
@@ -2245,12 +2263,19 @@ static void try_to_free_from_threads(size_t size)
static try_to_free_t old_try_to_free_routine;
/*
+ * The main object list is split into smaller lists, each is handed to
+ * one worker.
+ *
* The main thread waits on the condition that (at least) one of the workers
* has stopped working (which is indicated in the .working member of
* struct thread_params).
+ *
* When a work thread has completed its work, it sets .working to 0 and
* signals the main thread and waits on the condition that .data_ready
* becomes 1.
+ *
+ * The main thread steals half of the work from the worker that has
+ * most work left to hand it to the idle worker.
*/
struct thread_params {
@@ -2341,8 +2366,8 @@ static void ll_find_deltas(struct object_entry **list, unsigned list_size,
return;
}
if (progress > pack_to_stdout)
- fprintf(stderr, "Delta compression using up to %d threads.\n",
- delta_search_threads);
+ fprintf_ln(stderr, _("Delta compression using up to %d threads"),
+ delta_search_threads);
p = xcalloc(delta_search_threads, sizeof(*p));
/* Partition the work amongst work threads. */
@@ -2382,7 +2407,7 @@ static void ll_find_deltas(struct object_entry **list, unsigned list_size,
ret = pthread_create(&p[i].thread, NULL,
threaded_find_deltas, &p[i]);
if (ret)
- die("unable to create thread: %s", strerror(ret));
+ die(_("unable to create thread: %s"), strerror(ret));
active_threads++;
}
@@ -2474,10 +2499,10 @@ static void add_tag_chain(const struct object_id *oid)
if (packlist_find(&to_pack, oid->hash, NULL))
return;
- tag = lookup_tag(oid);
+ tag = lookup_tag(the_repository, oid);
while (1) {
if (!tag || parse_tag(tag) || !tag->tagged)
- die("unable to pack objects reachable from tag %s",
+ die(_("unable to pack objects reachable from tag %s"),
oid_to_hex(oid));
add_object_entry(&tag->object.oid, OBJ_TAG, NULL, 0);
@@ -2543,7 +2568,7 @@ static void prepare_pack(int window, int depth)
if (!entry->preferred_base) {
nr_deltas++;
if (oe_type(entry) < 0)
- die("unable to get type of object %s",
+ die(_("unable to get type of object %s"),
oid_to_hex(&entry->idx.oid));
} else {
if (oe_type(entry) < 0) {
@@ -2567,7 +2592,7 @@ static void prepare_pack(int window, int depth)
ll_find_deltas(delta_list, n, window+1, depth, &nr_done);
stop_progress(&progress_state);
if (nr_done != nr_deltas)
- die("inconsistency with delta count");
+ die(_("inconsistency with delta count"));
}
free(delta_list);
}
@@ -2607,11 +2632,11 @@ static int git_pack_config(const char *k, const char *v, void *cb)
if (!strcmp(k, "pack.threads")) {
delta_search_threads = git_config_int(k, v);
if (delta_search_threads < 0)
- die("invalid number of threads specified (%d)",
+ die(_("invalid number of threads specified (%d)"),
delta_search_threads);
#ifdef NO_PTHREADS
if (delta_search_threads != 1) {
- warning("no threads support, ignoring %s", k);
+ warning(_("no threads support, ignoring %s"), k);
delta_search_threads = 0;
}
#endif
@@ -2620,7 +2645,7 @@ static int git_pack_config(const char *k, const char *v, void *cb)
if (!strcmp(k, "pack.indexversion")) {
pack_idx_opts.version = git_config_int(k, v);
if (pack_idx_opts.version > 2)
- die("bad pack.indexversion=%"PRIu32,
+ die(_("bad pack.indexversion=%"PRIu32),
pack_idx_opts.version);
return 0;
}
@@ -2638,7 +2663,7 @@ static void read_object_list_from_stdin(void)
if (feof(stdin))
break;
if (!ferror(stdin))
- die("fgets returned NULL, not EOF, not error!");
+ die("BUG: fgets returned NULL, not EOF, not error!");
if (errno != EINTR)
die_errno("fgets");
clearerr(stdin);
@@ -2646,13 +2671,13 @@ static void read_object_list_from_stdin(void)
}
if (line[0] == '-') {
if (get_oid_hex(line+1, &oid))
- die("expected edge object ID, got garbage:\n %s",
+ die(_("expected edge object ID, got garbage:\n %s"),
line);
add_preferred_base(&oid);
continue;
}
if (parse_oid_hex(line, &oid, &p))
- die("expected object ID, got garbage:\n %s", line);
+ die(_("expected object ID, got garbage:\n %s"), line);
add_preferred_base_object(p + 1);
add_object_entry(&oid, OBJ_NONE, p + 1, 0);
@@ -2791,7 +2816,7 @@ static void add_objects_in_unpacked_packs(struct rev_info *revs)
if (!p->pack_local || p->pack_keep || p->pack_keep_in_core)
continue;
if (open_pack_index(p))
- die("cannot open pack index");
+ die(_("cannot open pack index"));
ALLOC_GROW(in_pack.array,
in_pack.nr + p->num_objects,
@@ -2822,7 +2847,7 @@ static int add_loose_object(const struct object_id *oid, const char *path,
enum object_type type = oid_object_info(the_repository, oid, NULL);
if (type < 0) {
- warning("loose object at %s could not be examined", path);
+ warning(_("loose object at %s could not be examined"), path);
return 0;
}
@@ -2899,7 +2924,7 @@ static void loosen_unused_packed_objects(struct rev_info *revs)
continue;
if (open_pack_index(p))
- die("cannot open pack index");
+ die(_("cannot open pack index"));
for (i = 0; i < p->num_objects; i++) {
nth_packed_object_oid(&oid, p, i);
@@ -2907,7 +2932,7 @@ static void loosen_unused_packed_objects(struct rev_info *revs)
!has_sha1_pack_kept_or_nonlocal(&oid) &&
!loosened_object_can_be_discarded(&oid, p->mtime))
if (force_object_loose(&oid, p->mtime))
- die("unable to force loose object");
+ die(_("unable to force loose object"));
}
}
}
@@ -2994,17 +3019,17 @@ static void get_object_list(int ac, const char **av)
use_bitmap_index = 0;
continue;
}
- die("not a rev '%s'", line);
+ die(_("not a rev '%s'"), line);
}
if (handle_revision_arg(line, &revs, flags, REVARG_CANNOT_BE_FILENAME))
- die("bad revision '%s'", line);
+ die(_("bad revision '%s'"), line);
}
if (use_bitmap_index && !get_object_list_from_bitmap(&revs))
return;
if (prepare_revision_walk(&revs))
- die("revision walk setup failed");
+ die(_("revision walk setup failed"));
mark_edges_uninteresting(&revs, show_edge);
if (!fn_show_object)
@@ -3017,9 +3042,9 @@ static void get_object_list(int ac, const char **av)
revs.ignore_missing_links = 1;
if (add_unseen_recent_objects_to_traversal(&revs,
unpack_unreachable_expiration))
- die("unable to add recent objects");
+ die(_("unable to add recent objects"));
if (prepare_revision_walk(&revs))
- die("revision walk setup failed");
+ die(_("revision walk setup failed"));
traverse_commit_list(&revs, record_recent_commit,
record_recent_object, NULL);
}
@@ -3110,7 +3135,7 @@ int cmd_pack_objects(int argc, const char **argv, const char *prefix)
OPT_BOOL(0, "all-progress-implied",
&all_progress_implied,
N_("similar to --all-progress when progress meter is shown")),
- { OPTION_CALLBACK, 0, "index-version", NULL, N_("version[,offset]"),
+ { OPTION_CALLBACK, 0, "index-version", NULL, N_("<version>[,<offset>]"),
N_("write the pack index file in the specified idx format version"),
0, option_parse_index_version },
OPT_MAGNITUDE(0, "max-pack-size", &pack_size_limit,
@@ -3188,7 +3213,7 @@ int cmd_pack_objects(int argc, const char **argv, const char *prefix)
if (DFS_NUM_STATES > (1 << OE_DFS_STATE_BITS))
BUG("too many dfs states, increase OE_DFS_STATE_BITS");
- check_replace_refs = 0;
+ read_replace_refs = 0;
reset_pack_idx_option(&pack_idx_opts);
git_config(git_pack_config, NULL);
@@ -3254,35 +3279,35 @@ int cmd_pack_objects(int argc, const char **argv, const char *prefix)
if (pack_compression_level == -1)
pack_compression_level = Z_DEFAULT_COMPRESSION;
else if (pack_compression_level < 0 || pack_compression_level > Z_BEST_COMPRESSION)
- die("bad pack compression level %d", pack_compression_level);
+ die(_("bad pack compression level %d"), pack_compression_level);
if (!delta_search_threads) /* --threads=0 means autodetect */
delta_search_threads = online_cpus();
#ifdef NO_PTHREADS
if (delta_search_threads != 1)
- warning("no threads support, ignoring --threads");
+ warning(_("no threads support, ignoring --threads"));
#endif
if (!pack_to_stdout && !pack_size_limit)
pack_size_limit = pack_size_limit_cfg;
if (pack_to_stdout && pack_size_limit)
- die("--max-pack-size cannot be used to build a pack for transfer.");
+ die(_("--max-pack-size cannot be used to build a pack for transfer"));
if (pack_size_limit && pack_size_limit < 1024*1024) {
- warning("minimum pack size limit is 1 MiB");
+ warning(_("minimum pack size limit is 1 MiB"));
pack_size_limit = 1024*1024;
}
if (!pack_to_stdout && thin)
- die("--thin cannot be used to build an indexable pack.");
+ die(_("--thin cannot be used to build an indexable pack"));
if (keep_unreachable && unpack_unreachable)
- die("--keep-unreachable and --unpack-unreachable are incompatible.");
+ die(_("--keep-unreachable and --unpack-unreachable are incompatible"));
if (!rev_list_all || !rev_list_reflog || !rev_list_index)
unpack_unreachable_expiration = 0;
if (filter_options.choice) {
if (!pack_to_stdout)
- die("cannot use --filter without --stdout.");
+ die(_("cannot use --filter without --stdout"));
use_bitmap_index = 0;
}
@@ -3356,8 +3381,9 @@ int cmd_pack_objects(int argc, const char **argv, const char *prefix)
prepare_pack(window, depth);
write_pack_file();
if (progress)
- fprintf(stderr, "Total %"PRIu32" (delta %"PRIu32"),"
- " reused %"PRIu32" (delta %"PRIu32")\n",
- written, written_delta, reused, reused_delta);
+ fprintf_ln(stderr,
+ _("Total %"PRIu32" (delta %"PRIu32"),"
+ " reused %"PRIu32" (delta %"PRIu32")"),
+ written, written_delta, reused, reused_delta);
return 0;
}
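
The synchronization comments added in the pack-objects.c section spell out the locking model for threaded delta search: read_mutex serializes object-database access, cache_mutex guards delta_cache_size, and progress_mutex covers re-partitioning of work between threads as well as progress_state. The discipline they describe, mirroring the try_delta() hunks above and assuming the surrounding pack-objects context (entry, delta_size), looks like this sketch:

    enum object_type type;
    unsigned long size;
    void *buf;

    read_lock();    /* the object database is not thread-safe */
    buf = read_object_file(&entry->idx.oid, &type, &size);
    read_unlock();
    if (!buf)
            die(_("object %s cannot be read"), oid_to_hex(&entry->idx.oid));

    cache_lock();   /* delta_cache_size is shared between workers */
    delta_cache_size += delta_size;
    cache_unlock();
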
diff --git a/builtin/prune.c b/builtin/prune.c
index 70ec35a..4916a4d 100644
--- a/builtin/prune.c
+++ b/builtin/prune.c
@@ -40,7 +40,7 @@ static int prune_object(const struct object_id *oid, const char *fullpath,
* Do we know about this object?
* It must have been reachable
*/
- if (lookup_object(oid->hash))
+ if (lookup_object(the_repository, oid->hash))
return 0;
if (lstat(fullpath, &st)) {
@@ -118,7 +118,7 @@ int cmd_prune(int argc, const char **argv, const char *prefix)
expire = TIME_MAX;
save_commit_buffer = 0;
- check_replace_refs = 0;
+ read_replace_refs = 0;
ref_paranoia = 1;
init_revisions(&revs, prefix);
diff --git a/builtin/pull.c b/builtin/pull.c
index 7197b22..53bc5fa 100644
--- a/builtin/pull.c
+++ b/builtin/pull.c
@@ -48,11 +48,11 @@ static enum rebase_type parse_config_rebase(const char *key, const char *value,
return REBASE_FALSE;
else if (v > 0)
return REBASE_TRUE;
- else if (!strcmp(value, "preserve"))
+ else if (!strcmp(value, "preserve") || !strcmp(value, "p"))
return REBASE_PRESERVE;
- else if (!strcmp(value, "merges"))
+ else if (!strcmp(value, "merges") || !strcmp(value, "m"))
return REBASE_MERGES;
- else if (!strcmp(value, "interactive"))
+ else if (!strcmp(value, "interactive") || !strcmp(value, "i"))
return REBASE_INTERACTIVE;
if (fatal)
@@ -765,10 +765,13 @@ static int get_octopus_merge_base(struct object_id *merge_base,
{
struct commit_list *revs = NULL, *result;
- commit_list_insert(lookup_commit_reference(curr_head), &revs);
- commit_list_insert(lookup_commit_reference(merge_head), &revs);
+ commit_list_insert(lookup_commit_reference(the_repository, curr_head),
+ &revs);
+ commit_list_insert(lookup_commit_reference(the_repository, merge_head),
+ &revs);
if (!is_null_oid(fork_point))
- commit_list_insert(lookup_commit_reference(fork_point), &revs);
+ commit_list_insert(lookup_commit_reference(the_repository, fork_point),
+ &revs);
result = get_octopus_merge_bases(revs);
free_commit_list(revs);
@@ -944,9 +947,11 @@ int cmd_pull(int argc, const char **argv, const char *prefix)
struct commit_list *list = NULL;
struct commit *merge_head, *head;
- head = lookup_commit_reference(&orig_head);
+ head = lookup_commit_reference(the_repository,
+ &orig_head);
commit_list_insert(head, &list);
- merge_head = lookup_commit_reference(&merge_heads.oid[0]);
+ merge_head = lookup_commit_reference(the_repository,
+ &merge_heads.oid[0]);
if (is_descendant_of(merge_head, list)) {
/* we can fast-forward this without invoking rebase */
opt_ff = "--ff-only";
diff --git a/builtin/push.c b/builtin/push.c
index 9cd8e8c..ef4c188 100644
--- a/builtin/push.c
+++ b/builtin/push.c
@@ -558,9 +558,9 @@ int cmd_push(int argc, const char **argv, const char *prefix)
OPT_BIT( 0, "porcelain", &flags, N_("machine-readable output"), TRANSPORT_PUSH_PORCELAIN),
OPT_BIT('f', "force", &flags, N_("force updates"), TRANSPORT_PUSH_FORCE),
{ OPTION_CALLBACK,
- 0, CAS_OPT_NAME, &cas, N_("refname>:<expect"),
+ 0, CAS_OPT_NAME, &cas, N_("<refname>:<expect>"),
N_("require old value of ref to be at this value"),
- PARSE_OPT_OPTARG, parseopt_push_cas_option },
+ PARSE_OPT_OPTARG | PARSE_OPT_LITERAL_ARGHELP, parseopt_push_cas_option },
{ OPTION_CALLBACK, 0, "recurse-submodules", &recurse_submodules, "check|on-demand|no",
N_("control recursive pushing of submodules"),
PARSE_OPT_OPTARG, option_parse_recurse_submodules },
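
The push.c hunk also fixes how the --force-with-lease argument help is rendered: the old code passed the bare string refname>:<expect and relied on parse-options adding one pair of angle brackets around it, while the new code spells out <refname>:<expect> and sets PARSE_OPT_LITERAL_ARGHELP so the string is printed verbatim rather than wrapped again. A hypothetical options[] entry showing the flag's effect (sketch only, not from the patch):

    /* help shows "--demo <key>=<value>"; without PARSE_OPT_LITERAL_ARGHELP
       the same argh would be rendered as "--demo <<key>=<value>>" */
    { OPTION_STRING, 0, "demo", &demo_value, N_("<key>=<value>"),
      N_("hypothetical option, for illustration only"),
      PARSE_OPT_LITERAL_ARGHELP },
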
diff --git a/builtin/read-tree.c b/builtin/read-tree.c
index ebc43eb..fbbc98e 100644
--- a/builtin/read-tree.c
+++ b/builtin/read-tree.c
@@ -133,7 +133,7 @@ int cmd_read_tree(int argc, const char **argv, const char *unused_prefix)
N_("same as -m, but discard unmerged entries")),
{ OPTION_STRING, 0, "prefix", &opts.prefix, N_("<subdirectory>/"),
N_("read the tree into the index under <subdirectory>/"),
- PARSE_OPT_NONEG | PARSE_OPT_LITERAL_ARGHELP },
+ PARSE_OPT_NONEG },
OPT_BOOL('u', NULL, &opts.update,
N_("update working tree with merge result")),
{ OPTION_CALLBACK, 0, "exclude-per-directory", &opts,
diff --git a/builtin/receive-pack.c b/builtin/receive-pack.c
index f1c5d07..c17ce94 100644
--- a/builtin/receive-pack.c
+++ b/builtin/receive-pack.c
@@ -1095,8 +1095,8 @@ static const char *update(struct command *cmd, struct shallow_info *si)
struct object *old_object, *new_object;
struct commit *old_commit, *new_commit;
- old_object = parse_object(old_oid);
- new_object = parse_object(new_oid);
+ old_object = parse_object(the_repository, old_oid);
+ new_object = parse_object(the_repository, new_oid);
if (!old_object || !new_object ||
old_object->type != OBJ_COMMIT ||
@@ -1119,7 +1119,7 @@ static const char *update(struct command *cmd, struct shallow_info *si)
if (is_null_oid(new_oid)) {
struct strbuf err = STRBUF_INIT;
- if (!parse_object(old_oid)) {
+ if (!parse_object(the_repository, old_oid)) {
old_oid = NULL;
if (ref_exists(name)) {
rp_warning("Allowing deletion of corrupt ref.");
diff --git a/builtin/reflog.c b/builtin/reflog.c
index 00911929..3acef5a 100644
--- a/builtin/reflog.c
+++ b/builtin/reflog.c
@@ -2,6 +2,7 @@
#include "config.h"
#include "lockfile.h"
#include "object-store.h"
+#include "repository.h"
#include "commit.h"
#include "refs.h"
#include "dir.h"
@@ -65,7 +66,7 @@ static int tree_is_complete(const struct object_id *oid)
int complete;
struct tree *tree;
- tree = lookup_tree(oid);
+ tree = lookup_tree(the_repository, oid);
if (!tree)
return 0;
if (tree->object.flags & SEEN)
@@ -129,7 +130,7 @@ static int commit_is_complete(struct commit *commit)
struct commit_list *parent;
c = (struct commit *)object_array_pop(&study);
- if (!c->object.parsed && !parse_object(&c->object.oid))
+ if (!c->object.parsed && !parse_object(the_repository, &c->object.oid))
c->object.flags |= INCOMPLETE;
if (c->object.flags & INCOMPLETE) {
@@ -195,7 +196,7 @@ static int keep_entry(struct commit **it, struct object_id *oid)
if (is_null_oid(oid))
return 1;
- commit = lookup_commit_reference_gently(oid, 1);
+ commit = lookup_commit_reference_gently(the_repository, oid, 1);
if (!commit)
return 0;
@@ -264,7 +265,8 @@ static int unreachable(struct expire_reflog_policy_cb *cb, struct commit *commit
if (is_null_oid(oid))
return 0;
- commit = lookup_commit_reference_gently(oid, 1);
+ commit = lookup_commit_reference_gently(the_repository, oid,
+ 1);
/* Not a commit -- keep it */
if (!commit)
@@ -321,7 +323,7 @@ static int push_tip_to_list(const char *refname, const struct object_id *oid,
struct commit *tip_commit;
if (flags & REF_ISSYMREF)
return 0;
- tip_commit = lookup_commit_reference_gently(oid, 1);
+ tip_commit = lookup_commit_reference_gently(the_repository, oid, 1);
if (!tip_commit)
return 0;
commit_list_insert(tip_commit, list);
@@ -338,7 +340,8 @@ static void reflog_expiry_prepare(const char *refname,
cb->tip_commit = NULL;
cb->unreachable_expire_kind = UE_HEAD;
} else {
- cb->tip_commit = lookup_commit_reference_gently(oid, 1);
+ cb->tip_commit = lookup_commit_reference_gently(the_repository,
+ oid, 1);
if (!cb->tip_commit)
cb->unreachable_expire_kind = UE_ALWAYS;
else
diff --git a/builtin/remote.c b/builtin/remote.c
index c74ee88..07bd51f 100644
--- a/builtin/remote.c
+++ b/builtin/remote.c
@@ -566,7 +566,7 @@ static int read_remote_branches(const char *refname,
strbuf_addf(&buf, "refs/remotes/%s/", rename->old_name);
if (starts_with(refname, buf.buf)) {
- item = string_list_append(rename->remote_branches, xstrdup(refname));
+ item = string_list_append(rename->remote_branches, refname);
symref = resolve_ref_unsafe(refname, RESOLVE_REF_READING,
NULL, &flag);
if (symref && (flag & REF_ISSYMREF))
@@ -612,7 +612,7 @@ static int mv(int argc, const char **argv)
struct remote *oldremote, *newremote;
struct strbuf buf = STRBUF_INIT, buf2 = STRBUF_INIT, buf3 = STRBUF_INIT,
old_remote_context = STRBUF_INIT;
- struct string_list remote_branches = STRING_LIST_INIT_NODUP;
+ struct string_list remote_branches = STRING_LIST_INIT_DUP;
struct rename_info rename;
int i, refspec_updated = 0;
@@ -734,6 +734,7 @@ static int mv(int argc, const char **argv)
if (create_symref(buf.buf, buf2.buf, buf3.buf))
die(_("creating '%s' failed"), buf.buf);
}
+ string_list_clear(&remote_branches, 1);
return 0;
}
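
In the remote.c section, `git remote rename` stops leaking the collected branch names: the list is now declared with STRING_LIST_INIT_DUP so string_list_append() stores its own copy (the explicit xstrdup() goes away), and string_list_clear(&remote_branches, 1) releases the strings together with their util data before mv() returns. The ownership pattern in isolation, assuming git's string-list API and a hypothetical refname variable (sketch only):

    struct string_list list = STRING_LIST_INIT_DUP;

    string_list_append(&list, refname);   /* DUP list keeps a private copy */
    /* ... use the list ... */
    string_list_clear(&list, 1);          /* 1: also free each item's util */
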
diff --git a/builtin/replace.c b/builtin/replace.c
index deabda2..4f05791 100644
--- a/builtin/replace.c
+++ b/builtin/replace.c
@@ -54,7 +54,7 @@ static int show_reference(const char *refname, const struct object_id *oid,
enum object_type obj_type, repl_type;
if (get_oid(refname, &object))
- return error("Failed to resolve '%s' as a valid ref.", refname);
+ return error(_("failed to resolve '%s' as a valid ref"), refname);
obj_type = oid_object_info(the_repository, &object,
NULL);
@@ -83,8 +83,8 @@ static int list_replace_refs(const char *pattern, const char *format)
else if (!strcmp(format, "long"))
data.format = REPLACE_FORMAT_LONG;
else
- return error("invalid replace format '%s'\n"
- "valid formats are 'short', 'medium' and 'long'\n",
+ return error(_("invalid replace format '%s'\n"
+ "valid formats are 'short', 'medium' and 'long'"),
format);
for_each_replace_ref(the_repository, show_reference, (void *)&data);
@@ -108,7 +108,7 @@ static int for_each_replace_name(const char **argv, each_replace_name_fn fn)
for (p = argv; *p; p++) {
if (get_oid(*p, &oid)) {
- error("Failed to resolve '%s' as a valid ref.", *p);
+ error("failed to resolve '%s' as a valid ref", *p);
had_error = 1;
continue;
}
@@ -118,7 +118,7 @@ static int for_each_replace_name(const char **argv, each_replace_name_fn fn)
full_hex = ref.buf + base_len;
if (read_ref(ref.buf, &oid)) {
- error("replace ref '%s' not found.", full_hex);
+ error(_("replace ref '%s' not found"), full_hex);
had_error = 1;
continue;
}
@@ -134,7 +134,7 @@ static int delete_replace_ref(const char *name, const char *ref,
{
if (delete_ref(NULL, ref, oid, 0))
return 1;
- printf("Deleted replace ref '%s'\n", name);
+ printf_ln(_("Deleted replace ref '%s'"), name);
return 0;
}
@@ -146,12 +146,12 @@ static int check_ref_valid(struct object_id *object,
strbuf_reset(ref);
strbuf_addf(ref, "%s%s", git_replace_ref_base, oid_to_hex(object));
if (check_refname_format(ref->buf, 0))
- return error("'%s' is not a valid ref name.", ref->buf);
+ return error(_("'%s' is not a valid ref name"), ref->buf);
if (read_ref(ref->buf, prev))
oidclr(prev);
else if (!force)
- return error("replace ref '%s' already exists", ref->buf);
+ return error(_("replace ref '%s' already exists"), ref->buf);
return 0;
}
@@ -171,10 +171,10 @@ static int replace_object_oid(const char *object_ref,
obj_type = oid_object_info(the_repository, object, NULL);
repl_type = oid_object_info(the_repository, repl, NULL);
if (!force && obj_type != repl_type)
- return error("Objects must be of the same type.\n"
- "'%s' points to a replaced object of type '%s'\n"
- "while '%s' points to a replacement object of "
- "type '%s'.",
+ return error(_("Objects must be of the same type.\n"
+ "'%s' points to a replaced object of type '%s'\n"
+ "while '%s' points to a replacement object of "
+ "type '%s'."),
object_ref, type_name(obj_type),
replace_ref, type_name(repl_type));
@@ -200,10 +200,10 @@ static int replace_object(const char *object_ref, const char *replace_ref, int f
struct object_id object, repl;
if (get_oid(object_ref, &object))
- return error("Failed to resolve '%s' as a valid ref.",
+ return error(_("failed to resolve '%s' as a valid ref"),
object_ref);
if (get_oid(replace_ref, &repl))
- return error("Failed to resolve '%s' as a valid ref.",
+ return error(_("failed to resolve '%s' as a valid ref"),
replace_ref);
return replace_object_oid(object_ref, &object, replace_ref, &repl, force);
@@ -222,7 +222,7 @@ static int export_object(const struct object_id *oid, enum object_type type,
fd = open(filename, O_WRONLY | O_CREAT | O_TRUNC, 0666);
if (fd < 0)
- return error_errno("unable to open %s for writing", filename);
+ return error_errno(_("unable to open %s for writing"), filename);
argv_array_push(&cmd.args, "--no-replace-objects");
argv_array_push(&cmd.args, "cat-file");
@@ -235,7 +235,7 @@ static int export_object(const struct object_id *oid, enum object_type type,
cmd.out = fd;
if (run_command(&cmd))
- return error("cat-file reported failure");
+ return error(_("cat-file reported failure"));
return 0;
}
@@ -251,7 +251,7 @@ static int import_object(struct object_id *oid, enum object_type type,
fd = open(filename, O_RDONLY);
if (fd < 0)
- return error_errno("unable to open %s for reading", filename);
+ return error_errno(_("unable to open %s for reading"), filename);
if (!raw && type == OBJ_TREE) {
const char *argv[] = { "mktree", NULL };
@@ -265,11 +265,11 @@ static int import_object(struct object_id *oid, enum object_type type,
if (start_command(&cmd)) {
close(fd);
- return error("unable to spawn mktree");
+ return error(_("unable to spawn mktree"));
}
if (strbuf_read(&result, cmd.out, 41) < 0) {
- error_errno("unable to read from mktree");
+ error_errno(_("unable to read from mktree"));
close(fd);
close(cmd.out);
return -1;
@@ -278,11 +278,11 @@ static int import_object(struct object_id *oid, enum object_type type,
if (finish_command(&cmd)) {
strbuf_release(&result);
- return error("mktree reported failure");
+ return error(_("mktree reported failure"));
}
if (get_oid_hex(result.buf, oid) < 0) {
strbuf_release(&result);
- return error("mktree did not return an object name");
+ return error(_("mktree did not return an object name"));
}
strbuf_release(&result);
@@ -291,12 +291,12 @@ static int import_object(struct object_id *oid, enum object_type type,
int flags = HASH_FORMAT_CHECK | HASH_WRITE_OBJECT;
if (fstat(fd, &st) < 0) {
- error_errno("unable to fstat %s", filename);
+ error_errno(_("unable to fstat %s"), filename);
close(fd);
return -1;
}
if (index_fd(oid, fd, &st, type, NULL, flags) < 0)
- return error("unable to write object to database");
+ return error(_("unable to write object to database"));
/* index_fd close()s fd for us */
}
@@ -315,11 +315,11 @@ static int edit_and_replace(const char *object_ref, int force, int raw)
struct strbuf ref = STRBUF_INIT;
if (get_oid(object_ref, &old_oid) < 0)
- return error("Not a valid object name: '%s'", object_ref);
+ return error(_("not a valid object name: '%s'"), object_ref);
type = oid_object_info(the_repository, &old_oid, NULL);
if (type < 0)
- return error("unable to get object type for %s",
+ return error(_("unable to get object type for %s"),
oid_to_hex(&old_oid));
if (check_ref_valid(&old_oid, &prev, &ref, force)) {
@@ -335,7 +335,7 @@ static int edit_and_replace(const char *object_ref, int force, int raw)
}
if (launch_editor(tmpfile, NULL, NULL) < 0) {
free(tmpfile);
- return error("editing object file failed");
+ return error(_("editing object file failed"));
}
if (import_object(&new_oid, type, raw, tmpfile)) {
free(tmpfile);
@@ -344,7 +344,7 @@ static int edit_and_replace(const char *object_ref, int force, int raw)
free(tmpfile);
if (!oidcmp(&old_oid, &new_oid))
- return error("new object is the same as the old one: '%s'", oid_to_hex(&old_oid));
+ return error(_("new object is the same as the old one: '%s'"), oid_to_hex(&old_oid));
return replace_object_oid(object_ref, &old_oid, "replacement", &new_oid, force);
}
@@ -368,10 +368,10 @@ static int replace_parents(struct strbuf *buf, int argc, const char **argv)
struct object_id oid;
if (get_oid(argv[i], &oid) < 0) {
strbuf_release(&new_parents);
- return error(_("Not a valid object name: '%s'"),
+ return error(_("not a valid object name: '%s'"),
argv[i]);
}
- if (!lookup_commit_reference(&oid)) {
+ if (!lookup_commit_reference(the_repository, &oid)) {
strbuf_release(&new_parents);
return error(_("could not parse %s"), argv[i]);
}
@@ -402,17 +402,17 @@ static int check_one_mergetag(struct commit *commit,
int i;
hash_object_file(extra->value, extra->len, type_name(OBJ_TAG), &tag_oid);
- tag = lookup_tag(&tag_oid);
+ tag = lookup_tag(the_repository, &tag_oid);
if (!tag)
return error(_("bad mergetag in commit '%s'"), ref);
- if (parse_tag_buffer(tag, extra->value, extra->len))
+ if (parse_tag_buffer(the_repository, tag, extra->value, extra->len))
return error(_("malformed mergetag in commit '%s'"), ref);
/* iterate over new parents */
for (i = 1; i < mergetag_data->argc; i++) {
struct object_id oid;
if (get_oid(mergetag_data->argv[i], &oid) < 0)
- return error(_("Not a valid object name: '%s'"),
+ return error(_("not a valid object name: '%s'"),
mergetag_data->argv[i]);
if (!oidcmp(&tag->tagged->oid, &oid))
return 0; /* found */
@@ -442,8 +442,8 @@ static int create_graft(int argc, const char **argv, int force, int gentle)
unsigned long size;
if (get_oid(old_ref, &old_oid) < 0)
- return error(_("Not a valid object name: '%s'"), old_ref);
- commit = lookup_commit_reference(&old_oid);
+ return error(_("not a valid object name: '%s'"), old_ref);
+ commit = lookup_commit_reference(the_repository, &old_oid);
if (!commit)
return error(_("could not parse %s"), old_ref);
@@ -457,7 +457,7 @@ static int create_graft(int argc, const char **argv, int force, int gentle)
}
if (remove_signature(&buf)) {
- warning(_("the original commit '%s' has a gpg signature."), old_ref);
+ warning(_("the original commit '%s' has a gpg signature"), old_ref);
warning(_("the signature will be removed in the replacement commit!"));
}
@@ -476,10 +476,10 @@ static int create_graft(int argc, const char **argv, int force, int gentle)
if (!oidcmp(&old_oid, &new_oid)) {
if (gentle) {
- warning("graft for '%s' unnecessary", oid_to_hex(&old_oid));
+ warning(_("graft for '%s' unnecessary"), oid_to_hex(&old_oid));
return 0;
}
- return error("new commit is the same as the old one: '%s'", oid_to_hex(&old_oid));
+ return error(_("new commit is the same as the old one: '%s'"), oid_to_hex(&old_oid));
}
return replace_object_oid(old_ref, &old_oid, "replacement", &new_oid, force);
@@ -544,7 +544,7 @@ int cmd_replace(int argc, const char **argv, const char *prefix)
OPT_END()
};
- check_replace_refs = 0;
+ read_replace_refs = 0;
git_config(git_default_config, NULL);
argc = parse_options(argc, argv, prefix, options, git_replace_usage, 0);
@@ -553,7 +553,7 @@ int cmd_replace(int argc, const char **argv, const char *prefix)
cmdmode = argc ? MODE_REPLACE : MODE_LIST;
if (format && cmdmode != MODE_LIST)
- usage_msg_opt("--format cannot be used when not listing",
+ usage_msg_opt(_("--format cannot be used when not listing"),
git_replace_usage, options);
if (force &&
@@ -561,47 +561,47 @@ int cmd_replace(int argc, const char **argv, const char *prefix)
cmdmode != MODE_EDIT &&
cmdmode != MODE_GRAFT &&
cmdmode != MODE_CONVERT_GRAFT_FILE)
- usage_msg_opt("-f only makes sense when writing a replacement",
+ usage_msg_opt(_("-f only makes sense when writing a replacement"),
git_replace_usage, options);
if (raw && cmdmode != MODE_EDIT)
- usage_msg_opt("--raw only makes sense with --edit",
+ usage_msg_opt(_("--raw only makes sense with --edit"),
git_replace_usage, options);
switch (cmdmode) {
case MODE_DELETE:
if (argc < 1)
- usage_msg_opt("-d needs at least one argument",
+ usage_msg_opt(_("-d needs at least one argument"),
git_replace_usage, options);
return for_each_replace_name(argv, delete_replace_ref);
case MODE_REPLACE:
if (argc != 2)
- usage_msg_opt("bad number of arguments",
+ usage_msg_opt(_("bad number of arguments"),
git_replace_usage, options);
return replace_object(argv[0], argv[1], force);
case MODE_EDIT:
if (argc != 1)
- usage_msg_opt("-e needs exactly one argument",
+ usage_msg_opt(_("-e needs exactly one argument"),
git_replace_usage, options);
return edit_and_replace(argv[0], force, raw);
case MODE_GRAFT:
if (argc < 1)
- usage_msg_opt("-g needs at least one argument",
+ usage_msg_opt(_("-g needs at least one argument"),
git_replace_usage, options);
return create_graft(argc, argv, force, 0);
case MODE_CONVERT_GRAFT_FILE:
if (argc != 0)
- usage_msg_opt("--convert-graft-file takes no argument",
+ usage_msg_opt(_("--convert-graft-file takes no argument"),
git_replace_usage, options);
return !!convert_graft_file(force);
case MODE_LIST:
if (argc > 1)
- usage_msg_opt("only one pattern can be given with -l",
+ usage_msg_opt(_("only one pattern can be given with -l"),
git_replace_usage, options);
return list_replace_refs(argv[0], format);
diff --git a/builtin/reset.c b/builtin/reset.c
index ffe41c9..11cd0dc 100644
--- a/builtin/reset.c
+++ b/builtin/reset.c
@@ -134,7 +134,7 @@ static void update_index_from_diff(struct diff_queue_struct *q,
continue;
}
- ce = make_cache_entry(one->mode, one->oid.hash, one->path,
+ ce = make_cache_entry(&the_index, one->mode, &one->oid, one->path,
0, 0);
if (!ce)
die(_("make_cache_entry failed for path '%s'"),
@@ -319,7 +319,7 @@ int cmd_reset(int argc, const char **argv, const char *prefix)
struct commit *commit;
if (get_oid_committish(rev, &oid))
die(_("Failed to resolve '%s' as a valid revision."), rev);
- commit = lookup_commit_reference(&oid);
+ commit = lookup_commit_reference(the_repository, &oid);
if (!commit)
die(_("Could not parse object '%s'."), rev);
oidcpy(&oid, &commit->object.oid);
@@ -396,7 +396,7 @@ int cmd_reset(int argc, const char **argv, const char *prefix)
update_ref_status = reset_refs(rev, &oid);
if (reset_type == HARD && !update_ref_status && !quiet)
- print_new_head_line(lookup_commit_reference(&oid));
+ print_new_head_line(lookup_commit_reference(the_repository, &oid));
}
if (!pathspec.nr)
remove_branch_state();
diff --git a/builtin/rev-list.c b/builtin/rev-list.c
index 6fcb0ff..5b07f3f 100644
--- a/builtin/rev-list.c
+++ b/builtin/rev-list.c
@@ -240,7 +240,7 @@ static int finish_object(struct object *obj, const char *name, void *cb_data)
return 1;
}
if (info->revs->verify_objects && !obj->parsed && obj->type != OBJ_COMMIT)
- parse_object(&obj->oid);
+ parse_object(the_repository, &obj->oid);
return 0;
}
diff --git a/builtin/rev-parse.c b/builtin/rev-parse.c
index 2a6cb29..0f09bbb 100644
--- a/builtin/rev-parse.c
+++ b/builtin/rev-parse.c
@@ -280,8 +280,8 @@ static int try_difference(const char *arg)
if (symmetric) {
struct commit_list *exclude;
struct commit *a, *b;
- a = lookup_commit_reference(&start_oid);
- b = lookup_commit_reference(&end_oid);
+ a = lookup_commit_reference(the_repository, &start_oid);
+ b = lookup_commit_reference(the_repository, &end_oid);
if (!a || !b) {
*dotdot = '.';
return 0;
@@ -333,7 +333,7 @@ static int try_parent_shorthands(const char *arg)
*dotdot = 0;
if (get_oid_committish(arg, &oid) ||
- !(commit = lookup_commit_reference(&oid))) {
+ !(commit = lookup_commit_reference(the_repository, &oid))) {
*dotdot = '^';
return 0;
}
diff --git a/builtin/rm.c b/builtin/rm.c
index 65b448e..f4d3f00 100644
--- a/builtin/rm.c
+++ b/builtin/rm.c
@@ -285,7 +285,7 @@ int cmd_rm(int argc, const char **argv, const char *prefix)
list.entry[list.nr].is_submodule = S_ISGITLINK(ce->ce_mode);
if (list.entry[list.nr++].is_submodule &&
!is_staging_gitmodules_ok(&the_index))
- die (_("Please stage your changes to .gitmodules or stash them to proceed"));
+ die(_("please stage your changes to .gitmodules or stash them to proceed"));
}
if (pathspec.nr) {
diff --git a/builtin/send-pack.c b/builtin/send-pack.c
index 42fd8d1..724b484 100644
--- a/builtin/send-pack.c
+++ b/builtin/send-pack.c
@@ -178,7 +178,7 @@ int cmd_send_pack(int argc, const char **argv, const char *prefix)
OPT_BOOL(0, "stdin", &from_stdin, N_("read refs from stdin")),
OPT_BOOL(0, "helper-status", &helper_status, N_("print status from remote helper")),
{ OPTION_CALLBACK,
- 0, CAS_OPT_NAME, &cas, N_("refname>:<expect"),
+ 0, CAS_OPT_NAME, &cas, N_("<refname>:<expect>"),
N_("require old value of ref to be at this value"),
PARSE_OPT_OPTARG, parseopt_push_cas_option },
OPT_END()
diff --git a/builtin/shortlog.c b/builtin/shortlog.c
index 608d6ba..3898a2c 100644
--- a/builtin/shortlog.c
+++ b/builtin/shortlog.c
@@ -268,8 +268,9 @@ int cmd_shortlog(int argc, const char **argv, const char *prefix)
N_("Suppress commit descriptions, only provides commit count")),
OPT_BOOL('e', "email", &log.email,
N_("Show the email address of each author")),
- { OPTION_CALLBACK, 'w', NULL, &log, N_("w[,i1[,i2]]"),
- N_("Linewrap output"), PARSE_OPT_OPTARG, &parse_wrap_args },
+ { OPTION_CALLBACK, 'w', NULL, &log, N_("<w>[,<i1>[,<i2>]]"),
+ N_("Linewrap output"), PARSE_OPT_OPTARG,
+ &parse_wrap_args },
OPT_END(),
};
diff --git a/builtin/show-branch.c b/builtin/show-branch.c
index f2e985c..363cf85 100644
--- a/builtin/show-branch.c
+++ b/builtin/show-branch.c
@@ -378,7 +378,8 @@ static void sort_ref_range(int bottom, int top)
static int append_ref(const char *refname, const struct object_id *oid,
int allow_dups)
{
- struct commit *commit = lookup_commit_reference_gently(oid, 1);
+ struct commit *commit = lookup_commit_reference_gently(the_repository,
+ oid, 1);
int i;
if (!commit)
@@ -673,7 +674,7 @@ int cmd_show_branch(int ac, const char **av, const char *prefix)
{ OPTION_CALLBACK, 'g', "reflog", &reflog_base, N_("<n>[,<base>]"),
N_("show <n> most recent ref-log entries starting at "
"base"),
- PARSE_OPT_OPTARG | PARSE_OPT_LITERAL_ARGHELP,
+ PARSE_OPT_OPTARG,
parse_reflog_param },
OPT_END()
};
@@ -830,7 +831,7 @@ int cmd_show_branch(int ac, const char **av, const char *prefix)
MAX_REVS), MAX_REVS);
if (get_oid(ref_name[num_rev], &revkey))
die(_("'%s' is not a valid ref."), ref_name[num_rev]);
- commit = lookup_commit_reference(&revkey);
+ commit = lookup_commit_reference(the_repository, &revkey);
if (!commit)
die(_("cannot find commit %s (%s)"),
ref_name[num_rev], oid_to_hex(&revkey));
diff --git a/builtin/tag.c b/builtin/tag.c
index 9919b03..9a19ffb 100644
--- a/builtin/tag.c
+++ b/builtin/tag.c
@@ -313,7 +313,7 @@ static void create_reflog_msg(const struct object_id *oid, struct strbuf *sb)
}
free(buf);
- if ((c = lookup_commit_reference(oid)) != NULL)
+ if ((c = lookup_commit_reference(the_repository, oid)) != NULL)
strbuf_addf(sb, ", %s", show_date(c->date, 0, DATE_MODE(SHORT)));
break;
case OBJ_TREE:
diff --git a/builtin/unpack-objects.c b/builtin/unpack-objects.c
index cf585fc..30d9413 100644
--- a/builtin/unpack-objects.c
+++ b/builtin/unpack-objects.c
@@ -254,7 +254,7 @@ static void write_object(unsigned nr, enum object_type type,
added_object(nr, type, buf, size);
free(buf);
- blob = lookup_blob(&obj_list[nr].oid);
+ blob = lookup_blob(the_repository, &obj_list[nr].oid);
if (blob)
blob->object.flags |= FLAG_WRITTEN;
else
@@ -265,7 +265,8 @@ static void write_object(unsigned nr, enum object_type type,
int eaten;
hash_object_file(buf, size, type_name(type), &obj_list[nr].oid);
added_object(nr, type, buf, size);
- obj = parse_object_buffer(&obj_list[nr].oid, type, size, buf,
+ obj = parse_object_buffer(the_repository, &obj_list[nr].oid,
+ type, size, buf,
&eaten);
if (!obj)
die("invalid %s", type_name(type));
@@ -331,7 +332,7 @@ static int resolve_against_held(unsigned nr, const struct object_id *base,
{
struct object *obj;
struct obj_buffer *obj_buffer;
- obj = lookup_object(base->hash);
+ obj = lookup_object(the_repository, base->hash);
if (!obj)
return 0;
obj_buffer = lookup_object_buffer(obj);
@@ -513,7 +514,7 @@ int cmd_unpack_objects(int argc, const char **argv, const char *prefix)
int i;
struct object_id oid;
- check_replace_refs = 0;
+ read_replace_refs = 0;
git_config(git_default_config, NULL);
diff --git a/builtin/update-index.c b/builtin/update-index.c
index a8709a2..5aee2ea 100644
--- a/builtin/update-index.c
+++ b/builtin/update-index.c
@@ -268,15 +268,14 @@ static int process_lstat_error(const char *path, int err)
static int add_one_path(const struct cache_entry *old, const char *path, int len, struct stat *st)
{
- int option, size;
+ int option;
struct cache_entry *ce;
/* Was the old index entry already up-to-date? */
if (old && !ce_stage(old) && !ce_match_stat(old, st, 0))
return 0;
- size = cache_entry_size(len);
- ce = xcalloc(1, size);
+ ce = make_empty_cache_entry(&the_index, len);
memcpy(ce->name, path, len);
ce->ce_flags = create_ce_flags(0);
ce->ce_namelen = len;
@@ -285,13 +284,13 @@ static int add_one_path(const struct cache_entry *old, const char *path, int len
if (index_path(&ce->oid, path, st,
info_only ? 0 : HASH_WRITE_OBJECT)) {
- free(ce);
+ discard_cache_entry(ce);
return -1;
}
option = allow_add ? ADD_CACHE_OK_TO_ADD : 0;
option |= allow_replace ? ADD_CACHE_OK_TO_REPLACE : 0;
if (add_cache_entry(ce, option)) {
- free(ce);
+ discard_cache_entry(ce);
return error("%s: cannot add to the index - missing --add option?", path);
}
return 0;
@@ -402,15 +401,14 @@ static int process_path(const char *path, struct stat *st, int stat_errno)
static int add_cacheinfo(unsigned int mode, const struct object_id *oid,
const char *path, int stage)
{
- int size, len, option;
+ int len, option;
struct cache_entry *ce;
if (!verify_path(path, mode))
return error("Invalid path '%s'", path);
len = strlen(path);
- size = cache_entry_size(len);
- ce = xcalloc(1, size);
+ ce = make_empty_cache_entry(&the_index, len);
oidcpy(&ce->oid, oid);
memcpy(ce->name, path, len);
@@ -492,6 +490,7 @@ static void update_one(const char *path)
static void read_index_info(int nul_term_line)
{
+ const int hexsz = the_hash_algo->hexsz;
struct strbuf buf = STRBUF_INIT;
struct strbuf uq = STRBUF_INIT;
strbuf_getline_fn getline_fn;
@@ -529,7 +528,7 @@ static void read_index_info(int nul_term_line)
mode = ul;
tab = strchr(ptr, '\t');
- if (!tab || tab - ptr < GIT_SHA1_HEXSZ + 1)
+ if (!tab || tab - ptr < hexsz + 1)
goto bad_line;
if (tab[-2] == ' ' && '0' <= tab[-1] && tab[-1] <= '3') {
@@ -542,8 +541,8 @@ static void read_index_info(int nul_term_line)
ptr = tab + 1; /* point at the head of path */
}
- if (get_oid_hex(tab - GIT_SHA1_HEXSZ, &oid) ||
- tab[-(GIT_SHA1_HEXSZ + 1)] != ' ')
+ if (get_oid_hex(tab - hexsz, &oid) ||
+ tab[-(hexsz + 1)] != ' ')
goto bad_line;
path_name = ptr;
@@ -571,7 +570,7 @@ static void read_index_info(int nul_term_line)
* ptr[-1] points at tab,
* ptr[-41] is at the beginning of sha1
*/
- ptr[-(GIT_SHA1_HEXSZ + 2)] = ptr[-1] = 0;
+ ptr[-(hexsz + 2)] = ptr[-1] = 0;
if (add_cacheinfo(mode, &oid, path_name, stage))
die("git update-index: unable to update %s",
path_name);
@@ -599,7 +598,6 @@ static struct cache_entry *read_one_ent(const char *which,
{
unsigned mode;
struct object_id oid;
- int size;
struct cache_entry *ce;
if (get_tree_entry(ent, path, &oid, &mode)) {
@@ -612,8 +610,7 @@ static struct cache_entry *read_one_ent(const char *which,
error("%s: not a blob in %s branch.", path, which);
return NULL;
}
- size = cache_entry_size(namelen);
- ce = xcalloc(1, size);
+ ce = make_empty_cache_entry(&the_index, namelen);
oidcpy(&ce->oid, &oid);
memcpy(ce->name, path, namelen);
@@ -690,8 +687,8 @@ static int unresolve_one(const char *path)
error("%s: cannot add their version to the index.", path);
ret = -1;
free_return:
- free(ce_2);
- free(ce_3);
+ discard_cache_entry(ce_2);
+ discard_cache_entry(ce_3);
return ret;
}
@@ -758,7 +755,7 @@ static int do_reupdate(int ac, const char **av,
ce->name, ce_namelen(ce), 0);
if (old && ce->ce_mode == old->ce_mode &&
!oidcmp(&ce->oid, &old->oid)) {
- free(old);
+ discard_cache_entry(old);
continue; /* unchanged */
}
/* Be careful. The working tree may not have the
@@ -769,7 +766,7 @@ static int do_reupdate(int ac, const char **av,
path = xstrdup(ce->name);
update_one(path);
free(path);
- free(old);
+ discard_cache_entry(old);
if (save_nr != active_nr)
goto redo;
}
@@ -826,6 +823,7 @@ static int parse_new_style_cacheinfo(const char *arg,
{
unsigned long ul;
char *endp;
+ const char *p;
if (!arg)
return -1;
@@ -836,9 +834,9 @@ static int parse_new_style_cacheinfo(const char *arg,
return -1; /* not a new-style cacheinfo */
*mode = ul;
endp++;
- if (get_oid_hex(endp, oid) || endp[GIT_SHA1_HEXSZ] != ',')
+ if (parse_oid_hex(endp, oid, &p) || *p != ',')
return -1;
- *path = endp + GIT_SHA1_HEXSZ + 1;
+ *path = p + 1;
return 0;
}
@@ -971,9 +969,9 @@ int cmd_update_index(int argc, const char **argv, const char *prefix)
PARSE_OPT_NOARG | /* disallow --cacheinfo=<mode> form */
PARSE_OPT_NONEG | PARSE_OPT_LITERAL_ARGHELP,
(parse_opt_cb *) cacheinfo_callback},
- {OPTION_CALLBACK, 0, "chmod", &set_executable_bit, N_("(+/-)x"),
+ {OPTION_CALLBACK, 0, "chmod", &set_executable_bit, "(+|-)x",
N_("override the executable bit of the listed files"),
- PARSE_OPT_NONEG | PARSE_OPT_LITERAL_ARGHELP,
+ PARSE_OPT_NONEG,
chmod_callback},
{OPTION_SET_INT, 0, "assume-unchanged", &mark_valid_only, NULL,
N_("mark files as \"not changing\""),
diff --git a/builtin/upload-pack.c b/builtin/upload-pack.c
index decde5a..42dc4da 100644
--- a/builtin/upload-pack.c
+++ b/builtin/upload-pack.c
@@ -31,7 +31,7 @@ int cmd_upload_pack(int argc, const char **argv, const char *prefix)
};
packet_trace_identity("upload-pack");
- check_replace_refs = 0;
+ read_replace_refs = 0;
argc = parse_options(argc, argv, NULL, options, upload_pack_usage, 0);
diff --git a/builtin/verify-commit.c b/builtin/verify-commit.c
index f6922da..7772c07 100644
--- a/builtin/verify-commit.c
+++ b/builtin/verify-commit.c
@@ -9,6 +9,7 @@
#include "config.h"
#include "builtin.h"
#include "object-store.h"
+#include "repository.h"
#include "commit.h"
#include "run-command.h"
#include <signal.h>
@@ -27,7 +28,8 @@ static int run_gpg_verify(const struct object_id *oid, const char *buf, unsigned
memset(&signature_check, 0, sizeof(signature_check));
- ret = check_commit_signature(lookup_commit(oid), &signature_check);
+ ret = check_commit_signature(lookup_commit(the_repository, oid),
+ &signature_check);
print_signature_buffer(&signature_check, flags);
signature_check_clear(&signature_check);
diff --git a/builtin/worktree.c b/builtin/worktree.c
index 5c7d2bb..a763dbd 100644
--- a/builtin/worktree.c
+++ b/builtin/worktree.c
@@ -412,7 +412,7 @@ static const char *dwim_branch(const char *path, const char **new_branch)
if (guess_remote) {
struct object_id oid;
const char *remote =
- unique_tracking_name(*new_branch, &oid);
+ unique_tracking_name(*new_branch, &oid, NULL);
return remote;
}
return NULL;
@@ -484,7 +484,7 @@ static int add(int ac, const char **av, const char *prefix)
commit = lookup_commit_reference_by_name(branch);
if (!commit) {
- remote = unique_tracking_name(branch, &oid);
+ remote = unique_tracking_name(branch, &oid, NULL);
if (remote) {
new_branch = branch;
branch = remote;
diff --git a/builtin/write-tree.c b/builtin/write-tree.c
index c9d3c54..cdcbf82 100644
--- a/builtin/write-tree.c
+++ b/builtin/write-tree.c
@@ -24,9 +24,8 @@ int cmd_write_tree(int argc, const char **argv, const char *unused_prefix)
struct option write_tree_options[] = {
OPT_BIT(0, "missing-ok", &flags, N_("allow missing objects"),
WRITE_TREE_MISSING_OK),
- { OPTION_STRING, 0, "prefix", &prefix, N_("<prefix>/"),
- N_("write tree object for a subdirectory <prefix>") ,
- PARSE_OPT_LITERAL_ARGHELP },
+ OPT_STRING(0, "prefix", &prefix, N_("<prefix>/"),
+ N_("write tree object for a subdirectory <prefix>")),
{ OPTION_BIT, 0, "ignore-cache-tree", &flags, NULL,
N_("only useful for debugging"),
PARSE_OPT_HIDDEN | PARSE_OPT_NOARG, NULL,
diff --git a/bundle.c b/bundle.c
index ba18e25..24cbe40 100644
--- a/bundle.c
+++ b/bundle.c
@@ -2,6 +2,7 @@
#include "lockfile.h"
#include "bundle.h"
#include "object-store.h"
+#include "repository.h"
#include "object.h"
#include "commit.h"
#include "diff.h"
@@ -142,7 +143,7 @@ int verify_bundle(struct bundle_header *header, int verbose)
init_revisions(&revs, NULL);
for (i = 0; i < p->nr; i++) {
struct ref_list_entry *e = p->list + i;
- struct object *o = parse_object(&e->oid);
+ struct object *o = parse_object(the_repository, &e->oid);
if (o) {
o->flags |= PREREQ_MARK;
add_pending_object(&revs, o, e->name);
@@ -167,7 +168,7 @@ int verify_bundle(struct bundle_header *header, int verbose)
for (i = 0; i < p->nr; i++) {
struct ref_list_entry *e = p->list + i;
- struct object *o = parse_object(&e->oid);
+ struct object *o = parse_object(the_repository, &e->oid);
assert(o); /* otherwise we'd have returned early */
if (o->flags & SHOWN)
continue;
@@ -179,7 +180,7 @@ int verify_bundle(struct bundle_header *header, int verbose)
/* Clean up objects used, as they will be reused. */
for (i = 0; i < p->nr; i++) {
struct ref_list_entry *e = p->list + i;
- commit = lookup_commit_reference_gently(&e->oid, 1);
+ commit = lookup_commit_reference_gently(the_repository, &e->oid, 1);
if (commit)
clear_commit_marks(commit, ALL_REV_FLAGS);
}
@@ -374,7 +375,8 @@ static int write_bundle_refs(int bundle_fd, struct rev_info *revs)
* in terms of a tag (e.g. v2.0 from the range
* "v1.0..v2.0")?
*/
- struct commit *one = lookup_commit_reference(&oid);
+ struct commit *one = lookup_commit_reference(the_repository,
+ &oid);
struct object *obj;
if (e->item == &(one->object)) {
diff --git a/cache-tree.c b/cache-tree.c
index 6b46711..181d591 100644
--- a/cache-tree.c
+++ b/cache-tree.c
@@ -671,7 +671,8 @@ static void prime_cache_tree_rec(struct cache_tree *it, struct tree *tree)
cnt++;
else {
struct cache_tree_sub *sub;
- struct tree *subtree = lookup_tree(entry.oid);
+ struct tree *subtree = lookup_tree(the_repository,
+ entry.oid);
if (!subtree->object.parsed)
parse_tree(subtree);
sub = cache_tree_sub(it, entry.path);
diff --git a/cache-tree.h b/cache-tree.h
index cfd5328..9799e89 100644
--- a/cache-tree.h
+++ b/cache-tree.h
@@ -51,6 +51,6 @@ int write_index_as_tree(struct object_id *oid, struct index_state *index_state,
int write_cache_as_tree(struct object_id *oid, int flags, const char *prefix);
void prime_cache_tree(struct index_state *, struct tree *);
-extern int cache_tree_matches_traversal(struct cache_tree *, struct name_entry *ent, struct traverse_info *info);
+int cache_tree_matches_traversal(struct cache_tree *, struct name_entry *ent, struct traverse_info *info);
#endif
diff --git a/cache.h b/cache.h
index 8b44765..1398b2a 100644
--- a/cache.h
+++ b/cache.h
@@ -15,6 +15,7 @@
#include "path.h"
#include "sha1-array.h"
#include "repository.h"
+#include "mem-pool.h"
#include <zlib.h>
typedef struct git_zstream {
@@ -156,6 +157,7 @@ struct cache_entry {
struct stat_data ce_stat_data;
unsigned int ce_mode;
unsigned int ce_flags;
+ unsigned int mem_pool_allocated;
unsigned int ce_namelen;
unsigned int index; /* for link extension */
struct object_id oid;
@@ -218,6 +220,7 @@ struct cache_entry {
/* Forward structure decls */
struct pathspec;
struct child_process;
+struct tree;
/*
* Copy the sha1 and stat state of a cache entry from one to
@@ -227,6 +230,7 @@ static inline void copy_cache_entry(struct cache_entry *dst,
const struct cache_entry *src)
{
unsigned int state = dst->ce_flags & CE_HASHED;
+ int mem_pool_allocated = dst->mem_pool_allocated;
/* Don't copy hash chain and name */
memcpy(&dst->ce_stat_data, &src->ce_stat_data,
@@ -235,6 +239,9 @@ static inline void copy_cache_entry(struct cache_entry *dst,
/* Restore the hash state */
dst->ce_flags = (dst->ce_flags & ~CE_HASHED) | state;
+
+ /* Restore the mem_pool_allocated flag */
+ dst->mem_pool_allocated = mem_pool_allocated;
}
static inline unsigned create_ce_flags(unsigned stage)
@@ -328,6 +335,7 @@ struct index_state {
struct untracked_cache *untracked;
uint64_t fsmonitor_last_update;
struct ewah_bitmap *fsmonitor_dirty;
+ struct mem_pool *ce_mem_pool;
};
extern struct index_state the_index;
@@ -339,6 +347,60 @@ extern void remove_name_hash(struct index_state *istate, struct cache_entry *ce)
extern void free_name_hash(struct index_state *istate);
+/* Cache entry creation and cleanup */
+
+/*
+ * Create cache_entry intended for use in the specified index. Caller
+ * is responsible for discarding the cache_entry with
+ * `discard_cache_entry`.
+ */
+struct cache_entry *make_cache_entry(struct index_state *istate,
+ unsigned int mode,
+ const struct object_id *oid,
+ const char *path,
+ int stage,
+ unsigned int refresh_options);
+
+struct cache_entry *make_empty_cache_entry(struct index_state *istate,
+ size_t name_len);
+
+/*
+ * Create a cache_entry that is not intended to be added to an index.
+ * Caller is responsible for discarding the cache_entry
+ * with `discard_cache_entry`.
+ */
+struct cache_entry *make_transient_cache_entry(unsigned int mode,
+ const struct object_id *oid,
+ const char *path,
+ int stage);
+
+struct cache_entry *make_empty_transient_cache_entry(size_t name_len);
+
+/*
+ * Discard cache entry.
+ */
+void discard_cache_entry(struct cache_entry *ce);
+
+/*
+ * Check configuration if we should perform extra validation on cache
+ * entries.
+ */
+int should_validate_cache_entries(void);
+
+/*
+ * Duplicate a cache_entry. Allocate memory for the new entry from a
+ * memory_pool. Takes into account cache_entry fields that are meant
+ * for managing the underlying memory allocation of the cache_entry.
+ */
+struct cache_entry *dup_cache_entry(const struct cache_entry *ce, struct index_state *istate);
+
+/*
+ * Validate the cache entries in the index. This is an internal
+ * consistency check that the cache_entry structs are allocated from
+ * the expected memory pool.
+ */
+void validate_cache_entries(const struct index_state *istate);
+
#ifndef NO_THE_INDEX_COMPATIBILITY_MACROS
#define active_cache (the_index.cache)
#define active_nr (the_index.cache_nr)
@@ -635,12 +697,15 @@ extern void move_index_extensions(struct index_state *dst, struct index_state *s
extern int unmerged_index(const struct index_state *);
/**
- * Returns 1 if the index differs from HEAD, 0 otherwise. When on an unborn
- * branch, returns 1 if there are entries in the index, 0 otherwise. If an
- * strbuf is provided, the space-separated list of files that differ will be
- * appended to it.
+ * Returns 1 if istate differs from tree, 0 otherwise. If tree is NULL,
+ * compares istate to HEAD. If tree is NULL and on an unborn branch,
+ * returns 1 if there are entries in istate, 0 otherwise. If an strbuf is
+ * provided, the space-separated list of files that differ will be appended
+ * to it.
*/
-extern int index_has_changes(struct strbuf *sb);
+extern int index_has_changes(const struct index_state *istate,
+ struct tree *tree,
+ struct strbuf *sb);
extern int verify_path(const char *path, unsigned mode);
extern int strcmp_offset(const char *s1, const char *s2, size_t *first_change);
@@ -698,7 +763,6 @@ extern int remove_file_from_index(struct index_state *, const char *path);
extern int add_to_index(struct index_state *, const char *path, struct stat *, int flags);
extern int add_file_to_index(struct index_state *, const char *path, int flags);
-extern struct cache_entry *make_cache_entry(unsigned int mode, const unsigned char *sha1, const char *path, int stage, unsigned int refresh_options);
extern int chmod_index_entry(struct index_state *, struct cache_entry *ce, char flip);
extern int ce_same_name(const struct cache_entry *a, const struct cache_entry *b);
extern void set_object_name_for_intent_to_add_entry(struct cache_entry *ce);
@@ -751,7 +815,7 @@ extern void fill_stat_cache_info(struct cache_entry *ce, struct stat *st);
#define REFRESH_IGNORE_SUBMODULES 0x0010 /* ignore submodules */
#define REFRESH_IN_PORCELAIN 0x0020 /* user friendly output, not "needs update" */
extern int refresh_index(struct index_state *, unsigned int flags, const struct pathspec *pathspec, char *seen, const char *header_msg);
-extern struct cache_entry *refresh_cache_entry(struct cache_entry *, unsigned int);
+extern struct cache_entry *refresh_cache_entry(struct index_state *, struct cache_entry *, unsigned int);
/*
* Opportunistically update the index but do not complain if we can't.
@@ -804,16 +868,13 @@ void reset_shared_repository(void);
* Do replace refs need to be checked this run? This variable is
* initialized to true unless --no-replace-object is used or
* $GIT_NO_REPLACE_OBJECTS is set, but is set to false by some
- * commands that do not want replace references to be active. As an
- * optimization it is also set to false if replace references have
- * been sought but there were none.
+ * commands that do not want replace references to be active.
*/
-extern int check_replace_refs;
+extern int read_replace_refs;
extern char *git_replace_ref_base;
extern int fsync_object_files;
extern int core_preload_index;
-extern int core_commit_graph;
extern int core_apply_sparse_checkout;
extern int precomposed_unicode;
extern int protect_hfs;
@@ -972,7 +1033,7 @@ extern const struct object_id null_oid;
static inline int hashcmp(const unsigned char *sha1, const unsigned char *sha2)
{
- return memcmp(sha1, sha2, GIT_SHA1_RAWSZ);
+ return memcmp(sha1, sha2, the_hash_algo->rawsz);
}
static inline int oidcmp(const struct object_id *oid1, const struct object_id *oid2)
@@ -992,7 +1053,7 @@ static inline int is_null_oid(const struct object_id *oid)
static inline void hashcpy(unsigned char *sha_dst, const unsigned char *sha_src)
{
- memcpy(sha_dst, sha_src, GIT_SHA1_RAWSZ);
+ memcpy(sha_dst, sha_src, the_hash_algo->rawsz);
}
static inline void oidcpy(struct object_id *dst, const struct object_id *src)
@@ -1009,7 +1070,7 @@ static inline struct object_id *oiddup(const struct object_id *src)
static inline void hashclr(unsigned char *hash)
{
- memset(hash, 0, GIT_SHA1_RAWSZ);
+ memset(hash, 0, the_hash_algo->rawsz);
}
static inline void oidclr(struct object_id *oid)
@@ -1362,18 +1423,20 @@ extern void *read_object_with_reference(const struct object_id *oid,
extern struct object *peel_to_type(const char *name, int namelen,
struct object *o, enum object_type);
+enum date_mode_type {
+ DATE_NORMAL = 0,
+ DATE_RELATIVE,
+ DATE_SHORT,
+ DATE_ISO8601,
+ DATE_ISO8601_STRICT,
+ DATE_RFC2822,
+ DATE_STRFTIME,
+ DATE_RAW,
+ DATE_UNIX
+};
+
struct date_mode {
- enum date_mode_type {
- DATE_NORMAL = 0,
- DATE_RELATIVE,
- DATE_SHORT,
- DATE_ISO8601,
- DATE_ISO8601_STRICT,
- DATE_RFC2822,
- DATE_STRFTIME,
- DATE_RAW,
- DATE_UNIX
- } type;
+ enum date_mode_type type;
const char *strftime_fmt;
int local;
};
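
The allocation helpers declared in the cache.h hunks above pair make_empty_cache_entry() with discard_cache_entry() instead of xcalloc()/free(). A minimal sketch of a caller, modelled on the builtin/update-index.c hunks earlier in this patch (the helper name add_path_sketch() and its arguments are illustrative, not part of the patch):

#include "cache.h"

/*
 * Illustrative only: allocate an index-owned entry for "path",
 * fill it in, and release it with discard_cache_entry() rather
 * than free() if it never makes it into the index.
 */
static int add_path_sketch(const char *path, const struct object_id *oid,
			   unsigned int mode)
{
	int len = strlen(path);
	struct cache_entry *ce = make_empty_cache_entry(&the_index, len);

	memcpy(ce->name, path, len);
	ce->ce_namelen = len;
	ce->ce_flags = create_ce_flags(0);
	ce->ce_mode = create_ce_mode(mode);
	oidcpy(&ce->oid, oid);

	if (add_cache_entry(ce, ADD_CACHE_OK_TO_ADD)) {
		discard_cache_entry(ce);	/* not free() */
		return -1;
	}
	return 0;	/* on success the index owns ce */
}
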
diff --git a/checkout.c b/checkout.c
index bdefc88..c72e9f9 100644
--- a/checkout.c
+++ b/checkout.c
@@ -2,14 +2,20 @@
#include "remote.h"
#include "refspec.h"
#include "checkout.h"
+#include "config.h"
struct tracking_name_data {
/* const */ char *src_ref;
char *dst_ref;
struct object_id *dst_oid;
- int unique;
+ int num_matches;
+ const char *default_remote;
+ char *default_dst_ref;
+ struct object_id *default_dst_oid;
};
+#define TRACKING_NAME_DATA_INIT { NULL, NULL, NULL, 0, NULL, NULL, NULL }
+
static int check_tracking_name(struct remote *remote, void *cb_data)
{
struct tracking_name_data *cb = cb_data;
@@ -21,24 +27,45 @@ static int check_tracking_name(struct remote *remote, void *cb_data)
free(query.dst);
return 0;
}
+ cb->num_matches++;
+ if (cb->default_remote && !strcmp(remote->name, cb->default_remote)) {
+ struct object_id *dst = xmalloc(sizeof(*cb->default_dst_oid));
+ cb->default_dst_ref = xstrdup(query.dst);
+ oidcpy(dst, cb->dst_oid);
+ cb->default_dst_oid = dst;
+ }
if (cb->dst_ref) {
free(query.dst);
- cb->unique = 0;
return 0;
}
cb->dst_ref = query.dst;
return 0;
}
-const char *unique_tracking_name(const char *name, struct object_id *oid)
+const char *unique_tracking_name(const char *name, struct object_id *oid,
+ int *dwim_remotes_matched)
{
- struct tracking_name_data cb_data = { NULL, NULL, NULL, 1 };
+ struct tracking_name_data cb_data = TRACKING_NAME_DATA_INIT;
+ const char *default_remote = NULL;
+ if (!git_config_get_string_const("checkout.defaultremote", &default_remote))
+ cb_data.default_remote = default_remote;
cb_data.src_ref = xstrfmt("refs/heads/%s", name);
cb_data.dst_oid = oid;
for_each_remote(check_tracking_name, &cb_data);
+ if (dwim_remotes_matched)
+ *dwim_remotes_matched = cb_data.num_matches;
free(cb_data.src_ref);
- if (cb_data.unique)
+ free((char *)default_remote);
+ if (cb_data.num_matches == 1) {
+ free(cb_data.default_dst_ref);
+ free(cb_data.default_dst_oid);
return cb_data.dst_ref;
+ }
free(cb_data.dst_ref);
+ if (cb_data.default_dst_ref) {
+ oidcpy(oid, cb_data.default_dst_oid);
+ free(cb_data.default_dst_oid);
+ return cb_data.default_dst_ref;
+ }
return NULL;
}
diff --git a/checkout.h b/checkout.h
index 9980711..6b20733 100644
--- a/checkout.h
+++ b/checkout.h
@@ -8,6 +8,8 @@
* tracking branch. Return the name of the remote if such a branch
* exists, NULL otherwise.
*/
-extern const char *unique_tracking_name(const char *name, struct object_id *oid);
+extern const char *unique_tracking_name(const char *name,
+ struct object_id *oid,
+ int *dwim_remotes_matched);
#endif /* CHECKOUT_H */
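
A caller of the widened unique_tracking_name() can now learn how many remotes matched and report ambiguity itself; callers that do not care pass NULL for the third argument, as the builtin/worktree.c hunks above do. A hedged sketch (dwim_sketch() and its message wording are hypothetical):

#include "cache.h"
#include "checkout.h"

/* Illustrative only: surface an ambiguous DWIM to the user. */
static const char *dwim_sketch(const char *name, struct object_id *oid)
{
	int matches = 0;
	const char *remote = unique_tracking_name(name, oid, &matches);

	if (!remote && matches > 1)
		error("'%s' matched multiple (%d) remote-tracking branches",
		      name, matches);
	return remote;
}
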
diff --git a/ci/lib-travisci.sh b/ci/lib-travisci.sh
index ceecc88..06970f7 100755
--- a/ci/lib-travisci.sh
+++ b/ci/lib-travisci.sh
@@ -97,7 +97,7 @@ fi
export DEVELOPER=1
export DEFAULT_TEST_TARGET=prove
export GIT_PROVE_OPTS="--timer --jobs 3 --state=failed,slow,save"
-export GIT_TEST_OPTS="--verbose-log -x"
+export GIT_TEST_OPTS="--verbose-log -x --immediate"
export GIT_TEST_CLONE_2GB=YesPlease
if [ "$jobname" = linux-gcc ]; then
export CC=gcc-8
diff --git a/ci/print-test-failures.sh b/ci/print-test-failures.sh
index 4f261dd..d55460a 100755
--- a/ci/print-test-failures.sh
+++ b/ci/print-test-failures.sh
@@ -8,13 +8,24 @@
# Tracing executed commands would produce too much noise in the loop below.
set +x
-if ! ls t/test-results/*.exit >/dev/null 2>/dev/null
+cd t/
+
+if ! ls test-results/*.exit >/dev/null 2>/dev/null
then
echo "Build job failed before the tests could have been run"
exit
fi
-for TEST_EXIT in t/test-results/*.exit
+case "$jobname" in
+osx-clang|osx-gcc)
+ # base64 in OSX doesn't wrap its output at 76 columns by
+ # default, but prints a single, very long line.
+ base64_opts="-b 76"
+ ;;
+esac
+
+combined_trash_size=0
+for TEST_EXIT in test-results/*.exit
do
if [ "$(cat "$TEST_EXIT")" != "0" ]
then
@@ -23,5 +34,45 @@ do
echo "$(tput setaf 1)${TEST_OUT}...$(tput sgr0)"
echo "------------------------------------------------------------------------"
cat "${TEST_OUT}"
+
+ test_name="${TEST_EXIT%.exit}"
+ test_name="${test_name##*/}"
+ trash_dir="trash directory.$test_name"
+ trash_tgz_b64="trash.$test_name.base64"
+ if [ -d "$trash_dir" ]
+ then
+ tar czp "$trash_dir" |base64 $base64_opts >"$trash_tgz_b64"
+
+ trash_size=$(wc -c <"$trash_tgz_b64")
+ if [ $trash_size -gt 1048576 ]
+ then
+ # larger than 1MB
+ echo "$(tput setaf 1)Didn't include the trash directory of '$test_name' in the trace log, it's too big$(tput sgr0)"
+ continue
+ fi
+
+ new_combined_trash_size=$(($combined_trash_size + $trash_size))
+ if [ $new_combined_trash_size -gt 1048576 ]
+ then
+ echo "$(tput setaf 1)Didn't include the trash directory of '$test_name' in the trace log, there is plenty of trash in there already.$(tput sgr0)"
+ continue
+ fi
+ combined_trash_size=$new_combined_trash_size
+
+ # DO NOT modify these two 'echo'-ed strings below
+ # without updating 'ci/util/extract-trash-dirs.sh'
+ # as well.
+ echo "$(tput setaf 1)Start of trash directory of '$test_name':$(tput sgr0)"
+ cat "$trash_tgz_b64"
+ echo "$(tput setaf 1)End of trash directory of '$test_name'$(tput sgr0)"
+ fi
fi
done
+
+if [ $combined_trash_size -gt 0 ]
+then
+ echo "------------------------------------------------------------------------"
+ echo "Trash directories embedded in this log can be extracted by running:"
+ echo
+ echo " curl https://api.travis-ci.org/v3/job/$TRAVIS_JOB_ID/log.txt |./ci/util/extract-trash-dirs.sh"
+fi
diff --git a/ci/run-static-analysis.sh b/ci/run-static-analysis.sh
index fe4ee4e..5688f26 100755
--- a/ci/run-static-analysis.sh
+++ b/ci/run-static-analysis.sh
@@ -5,6 +5,25 @@
. ${0%/*}/lib-travisci.sh
-make coccicheck
+make --jobs=2 coccicheck
+
+set +x
+
+fail=
+for cocci_patch in contrib/coccinelle/*.patch
+do
+ if test -s "$cocci_patch"
+ then
+ echo "$(tput setaf 1)Coccinelle suggests the following changes in '$cocci_patch':$(tput sgr0)"
+ cat "$cocci_patch"
+ fail=UnfortunatelyYes
+ fi
+done
+
+if test -n "$fail"
+then
+ echo "$(tput setaf 1)error: Coccinelle suggested some changes$(tput sgr0)"
+ exit 1
+fi
save_good_tree
diff --git a/ci/util/extract-trash-dirs.sh b/ci/util/extract-trash-dirs.sh
new file mode 100755
index 0000000..8e67bec
--- /dev/null
+++ b/ci/util/extract-trash-dirs.sh
@@ -0,0 +1,50 @@
+#!/bin/sh
+
+error () {
+ echo >&2 "error: $@"
+ exit 1
+}
+
+find_embedded_trash () {
+ while read -r line
+ do
+ case "$line" in
+ *Start\ of\ trash\ directory\ of\ \'t[0-9][0-9][0-9][0-9]-*\':*)
+ test_name="${line#*\'}"
+ test_name="${test_name%\'*}"
+
+ return 0
+ esac
+ done
+
+ return 1
+}
+
+extract_embedded_trash () {
+ while read -r line
+ do
+ case "$line" in
+ *End\ of\ trash\ directory\ of\ \'$test_name\'*)
+ return
+ ;;
+ *)
+ printf '%s\n' "$line"
+ ;;
+ esac
+ done
+
+ error "unexpected end of input"
+}
+
+# Raw logs from Linux build jobs have CRLF line endings, while OSX
+# build jobs mostly have CRCRLF, except an odd line every now and
+# then that has CRCRCRLF. 'base64 -d' from 'coreutils' doesn't like
+# CRs and complains about "invalid input", so remove all CRs at the
+# end of lines.
+sed -e 's/\r*$//' | \
+while find_embedded_trash
+do
+ echo "Extracting trash directory of '$test_name'"
+
+ extract_embedded_trash |base64 -d |tar xzp
+done
diff --git a/color.c b/color.c
index b1c24c6..ebb222e 100644
--- a/color.c
+++ b/color.c
@@ -343,6 +343,9 @@ int want_color_fd(int fd, int var)
static int want_auto[3] = { -1, -1, -1 };
+ if (fd < 1 || fd >= ARRAY_SIZE(want_auto))
+ BUG("file descriptor out of range: %d", fd);
+
if (var < 0)
var = git_use_color_default;
diff --git a/commit-graph.c b/commit-graph.c
index b63a1fc..0034740 100644
--- a/commit-graph.c
+++ b/commit-graph.c
@@ -7,10 +7,12 @@
#include "packfile.h"
#include "commit.h"
#include "object.h"
+#include "refs.h"
#include "revision.h"
#include "sha1-lookup.h"
#include "commit-graph.h"
#include "object-store.h"
+#include "alloc.h"
#define GRAPH_SIGNATURE 0x43475048 /* "CGPH" */
#define GRAPH_CHUNKID_OIDFANOUT 0x4f494446 /* "OIDF" */
@@ -35,10 +37,11 @@
#define GRAPH_LAST_EDGE 0x80000000
+#define GRAPH_HEADER_SIZE 8
#define GRAPH_FANOUT_SIZE (4 * 256)
#define GRAPH_CHUNKLOOKUP_WIDTH 12
-#define GRAPH_MIN_SIZE (5 * GRAPH_CHUNKLOOKUP_WIDTH + GRAPH_FANOUT_SIZE + \
- GRAPH_OID_LEN + 8)
+#define GRAPH_MIN_SIZE (GRAPH_HEADER_SIZE + 4 * GRAPH_CHUNKLOOKUP_WIDTH \
+ + GRAPH_FANOUT_SIZE + GRAPH_OID_LEN)
char *get_commit_graph_filename(const char *obj_dir)
{
@@ -77,28 +80,28 @@ struct commit_graph *load_commit_graph_one(const char *graph_file)
if (graph_size < GRAPH_MIN_SIZE) {
close(fd);
- die("graph file %s is too small", graph_file);
+ die(_("graph file %s is too small"), graph_file);
}
graph_map = xmmap(NULL, graph_size, PROT_READ, MAP_PRIVATE, fd, 0);
data = (const unsigned char *)graph_map;
graph_signature = get_be32(data);
if (graph_signature != GRAPH_SIGNATURE) {
- error("graph signature %X does not match signature %X",
+ error(_("graph signature %X does not match signature %X"),
graph_signature, GRAPH_SIGNATURE);
goto cleanup_fail;
}
graph_version = *(unsigned char*)(data + 4);
if (graph_version != GRAPH_VERSION) {
- error("graph version %X does not match version %X",
+ error(_("graph version %X does not match version %X"),
graph_version, GRAPH_VERSION);
goto cleanup_fail;
}
hash_version = *(unsigned char*)(data + 5);
if (hash_version != GRAPH_OID_VERSION) {
- error("hash version %X does not match version %X",
+ error(_("hash version %X does not match version %X"),
hash_version, GRAPH_OID_VERSION);
goto cleanup_fail;
}
@@ -122,7 +125,7 @@ struct commit_graph *load_commit_graph_one(const char *graph_file)
chunk_lookup += GRAPH_CHUNKLOOKUP_WIDTH;
if (chunk_offset > graph_size - GIT_MAX_RAWSZ) {
- error("improper chunk offset %08x%08x", (uint32_t)(chunk_offset >> 32),
+ error(_("improper chunk offset %08x%08x"), (uint32_t)(chunk_offset >> 32),
(uint32_t)chunk_offset);
goto cleanup_fail;
}
@@ -158,7 +161,7 @@ struct commit_graph *load_commit_graph_one(const char *graph_file)
}
if (chunk_repeated) {
- error("chunk id %08x appears multiple times", chunk_id);
+ error(_("chunk id %08x appears multiple times"), chunk_id);
goto cleanup_fail;
}
@@ -180,53 +183,60 @@ cleanup_fail:
exit(1);
}
-/* global storage */
-static struct commit_graph *commit_graph = NULL;
-
-static void prepare_commit_graph_one(const char *obj_dir)
+static void prepare_commit_graph_one(struct repository *r, const char *obj_dir)
{
char *graph_name;
- if (commit_graph)
+ if (r->objects->commit_graph)
return;
graph_name = get_commit_graph_filename(obj_dir);
- commit_graph = load_commit_graph_one(graph_name);
+ r->objects->commit_graph =
+ load_commit_graph_one(graph_name);
FREE_AND_NULL(graph_name);
}
-static int prepare_commit_graph_run_once = 0;
-static void prepare_commit_graph(void)
+/*
+ * Return 1 if commit_graph is non-NULL, and 0 otherwise.
+ *
+ * On the first invocation, this function attempts to load the commit
+ * graph if the_repository is configured to have one.
+ */
+static int prepare_commit_graph(struct repository *r)
{
struct alternate_object_database *alt;
char *obj_dir;
+ int config_value;
+
+ if (r->objects->commit_graph_attempted)
+ return !!r->objects->commit_graph;
+ r->objects->commit_graph_attempted = 1;
+
+ if (repo_config_get_bool(r, "core.commitgraph", &config_value) ||
+ !config_value)
+ /*
+ * This repository is not configured to use commit graphs, so
+ * do not load one. (But report commit_graph_attempted anyway
+ * so that commit graph loading is not attempted again for this
+ * repository.)
+ */
+ return 0;
- if (prepare_commit_graph_run_once)
- return;
- prepare_commit_graph_run_once = 1;
-
- obj_dir = get_object_directory();
- prepare_commit_graph_one(obj_dir);
- prepare_alt_odb(the_repository);
- for (alt = the_repository->objects->alt_odb_list;
- !commit_graph && alt;
+ obj_dir = r->objects->objectdir;
+ prepare_commit_graph_one(r, obj_dir);
+ prepare_alt_odb(r);
+ for (alt = r->objects->alt_odb_list;
+ !r->objects->commit_graph && alt;
alt = alt->next)
- prepare_commit_graph_one(alt->path);
+ prepare_commit_graph_one(r, alt->path);
+ return !!r->objects->commit_graph;
}
static void close_commit_graph(void)
{
- if (!commit_graph)
- return;
-
- if (commit_graph->graph_fd >= 0) {
- munmap((void *)commit_graph->data, commit_graph->data_len);
- commit_graph->data = NULL;
- close(commit_graph->graph_fd);
- }
-
- FREE_AND_NULL(commit_graph);
+ free_commit_graph(the_repository->objects->commit_graph);
+ the_repository->objects->commit_graph = NULL;
}
static int bsearch_graph(struct commit_graph *g, struct object_id *oid, uint32_t *pos)
@@ -241,10 +251,14 @@ static struct commit_list **insert_parent_or_die(struct commit_graph *g,
{
struct commit *c;
struct object_id oid;
+
+ if (pos >= g->num_commits)
+ die("invalid parent position %"PRIu64, pos);
+
hashcpy(oid.hash, g->chunk_oid_lookup + g->hash_len * pos);
- c = lookup_commit(&oid);
+ c = lookup_commit(the_repository, &oid);
if (!c)
- die("could not find commit %s", oid_to_hex(&oid));
+ die(_("could not find commit %s"), oid_to_hex(&oid));
c->graph_pos = pos;
return &commit_list_insert(c, pptr)->next;
}
@@ -313,28 +327,33 @@ static int find_commit_in_graph(struct commit *item, struct commit_graph *g, uin
}
}
-int parse_commit_in_graph(struct commit *item)
+static int parse_commit_in_graph_one(struct commit_graph *g, struct commit *item)
{
uint32_t pos;
- if (!core_commit_graph)
- return 0;
if (item->object.parsed)
return 1;
- prepare_commit_graph();
- if (commit_graph && find_commit_in_graph(item, commit_graph, &pos))
- return fill_commit_in_graph(item, commit_graph, pos);
+
+ if (find_commit_in_graph(item, g, &pos))
+ return fill_commit_in_graph(item, g, pos);
+
return 0;
}
-void load_commit_graph_info(struct commit *item)
+int parse_commit_in_graph(struct repository *r, struct commit *item)
+{
+ if (!prepare_commit_graph(r))
+ return 0;
+ return parse_commit_in_graph_one(r->objects->commit_graph, item);
+}
+
+void load_commit_graph_info(struct repository *r, struct commit *item)
{
uint32_t pos;
- if (!core_commit_graph)
+ if (!prepare_commit_graph(r))
return;
- prepare_commit_graph();
- if (commit_graph && find_commit_in_graph(item, commit_graph, &pos))
- fill_commit_graph_info(item, commit_graph, pos);
+ if (find_commit_in_graph(item, r->objects->commit_graph, &pos))
+ fill_commit_graph_info(item, r->objects->commit_graph, pos);
}
static struct tree *load_tree_for_commit(struct commit_graph *g, struct commit *c)
@@ -344,19 +363,25 @@ static struct tree *load_tree_for_commit(struct commit_graph *g, struct commit *
GRAPH_DATA_WIDTH * (c->graph_pos);
hashcpy(oid.hash, commit_data);
- c->maybe_tree = lookup_tree(&oid);
+ c->maybe_tree = lookup_tree(the_repository, &oid);
return c->maybe_tree;
}
-struct tree *get_commit_tree_in_graph(const struct commit *c)
+static struct tree *get_commit_tree_in_graph_one(struct commit_graph *g,
+ const struct commit *c)
{
if (c->maybe_tree)
return c->maybe_tree;
if (c->graph_pos == COMMIT_NOT_FROM_GRAPH)
- BUG("get_commit_tree_in_graph called from non-commit-graph commit");
+ BUG("get_commit_tree_in_graph_one called from non-commit-graph commit");
- return load_tree_for_commit(commit_graph, (struct commit *)c);
+ return load_tree_for_commit(g, (struct commit *)c);
+}
+
+struct tree *get_commit_tree_in_graph(struct repository *r, const struct commit *c)
+{
+ return get_commit_tree_in_graph_one(r->objects->commit_graph, c);
}
static void write_graph_chunk_fanout(struct hashfile *f,
@@ -537,7 +562,7 @@ static int add_packed_commits(const struct object_id *oid,
oi.typep = &type;
if (packed_object_info(the_repository, pack, offset, &oi) < 0)
- die("unable to get type of object %s", oid_to_hex(oid));
+ die(_("unable to get type of object %s"), oid_to_hex(oid));
if (type != OBJ_COMMIT)
return 0;
@@ -568,7 +593,7 @@ static void close_reachable(struct packed_oid_list *oids)
struct commit *commit;
for (i = 0; i < oids->nr; i++) {
- commit = lookup_commit(&oids->list[i]);
+ commit = lookup_commit(the_repository, &oids->list[i]);
if (commit)
commit->object.flags |= UNINTERESTING;
}
@@ -579,14 +604,14 @@ static void close_reachable(struct packed_oid_list *oids)
* closure.
*/
for (i = 0; i < oids->nr; i++) {
- commit = lookup_commit(&oids->list[i]);
+ commit = lookup_commit(the_repository, &oids->list[i]);
if (commit && !parse_commit(commit))
add_missing_parents(oids, commit);
}
for (i = 0; i < oids->nr; i++) {
- commit = lookup_commit(&oids->list[i]);
+ commit = lookup_commit(the_repository, &oids->list[i]);
if (commit)
commit->object.flags &= ~UNINTERESTING;
@@ -632,11 +657,28 @@ static void compute_generation_numbers(struct packed_commit_list* commits)
}
}
+static int add_ref_to_list(const char *refname,
+ const struct object_id *oid,
+ int flags, void *cb_data)
+{
+ struct string_list *list = (struct string_list *)cb_data;
+
+ string_list_append(list, oid_to_hex(oid));
+ return 0;
+}
+
+void write_commit_graph_reachable(const char *obj_dir, int append)
+{
+ struct string_list list;
+
+ string_list_init(&list, 1);
+ for_each_ref(add_ref_to_list, &list);
+ write_commit_graph(obj_dir, NULL, &list, append);
+}
+
void write_commit_graph(const char *obj_dir,
- const char **pack_indexes,
- int nr_packs,
- const char **commit_hex,
- int nr_commits,
+ struct string_list *pack_indexes,
+ struct string_list *commit_hex,
int append)
{
struct packed_oid_list oids;
@@ -655,16 +697,18 @@ void write_commit_graph(const char *obj_dir,
oids.alloc = approximate_object_count() / 4;
if (append) {
- prepare_commit_graph_one(obj_dir);
- if (commit_graph)
- oids.alloc += commit_graph->num_commits;
+ prepare_commit_graph_one(the_repository, obj_dir);
+ if (the_repository->objects->commit_graph)
+ oids.alloc += the_repository->objects->commit_graph->num_commits;
}
if (oids.alloc < 1024)
oids.alloc = 1024;
ALLOC_ARRAY(oids.list, oids.alloc);
- if (append && commit_graph) {
+ if (append && the_repository->objects->commit_graph) {
+ struct commit_graph *commit_graph =
+ the_repository->objects->commit_graph;
for (i = 0; i < commit_graph->num_commits; i++) {
const unsigned char *hash = commit_graph->chunk_oid_lookup +
commit_graph->hash_len * i;
@@ -677,15 +721,15 @@ void write_commit_graph(const char *obj_dir,
int dirlen;
strbuf_addf(&packname, "%s/pack/", obj_dir);
dirlen = packname.len;
- for (i = 0; i < nr_packs; i++) {
+ for (i = 0; i < pack_indexes->nr; i++) {
struct packed_git *p;
strbuf_setlen(&packname, dirlen);
- strbuf_addstr(&packname, pack_indexes[i]);
+ strbuf_addstr(&packname, pack_indexes->items[i].string);
p = add_packed_git(packname.buf, packname.len, 1);
if (!p)
- die("error adding pack %s", packname.buf);
+ die(_("error adding pack %s"), packname.buf);
if (open_pack_index(p))
- die("error opening index for %s", packname.buf);
+ die(_("error opening index for %s"), packname.buf);
for_each_object_in_pack(p, add_packed_commits, &oids);
close_pack(p);
}
@@ -693,15 +737,16 @@ void write_commit_graph(const char *obj_dir,
}
if (commit_hex) {
- for (i = 0; i < nr_commits; i++) {
+ for (i = 0; i < commit_hex->nr; i++) {
const char *end;
struct object_id oid;
struct commit *result;
- if (commit_hex[i] && parse_oid_hex(commit_hex[i], &oid, &end))
+ if (commit_hex->items[i].string &&
+ parse_oid_hex(commit_hex->items[i].string, &oid, &end))
continue;
- result = lookup_commit_reference_gently(&oid, 1);
+ result = lookup_commit_reference_gently(the_repository, &oid, 1);
if (result) {
ALLOC_GROW(oids.list, oids.nr + 1, oids.alloc);
@@ -737,7 +782,7 @@ void write_commit_graph(const char *obj_dir,
if (i > 0 && !oidcmp(&oids.list[i-1], &oids.list[i]))
continue;
- commits.list[commits.nr] = lookup_commit(&oids.list[i]);
+ commits.list[commits.nr] = lookup_commit(the_repository, &oids.list[i]);
parse_commit(commits.list[commits.nr]);
for (parent = commits.list[commits.nr]->parents;
@@ -808,3 +853,191 @@ void write_commit_graph(const char *obj_dir,
oids.alloc = 0;
oids.nr = 0;
}
+
+#define VERIFY_COMMIT_GRAPH_ERROR_HASH 2
+static int verify_commit_graph_error;
+
+static void graph_report(const char *fmt, ...)
+{
+ va_list ap;
+
+ verify_commit_graph_error = 1;
+ va_start(ap, fmt);
+ vfprintf(stderr, fmt, ap);
+ fprintf(stderr, "\n");
+ va_end(ap);
+}
+
+#define GENERATION_ZERO_EXISTS 1
+#define GENERATION_NUMBER_EXISTS 2
+
+int verify_commit_graph(struct repository *r, struct commit_graph *g)
+{
+ uint32_t i, cur_fanout_pos = 0;
+ struct object_id prev_oid, cur_oid, checksum;
+ int generation_zero = 0;
+ struct hashfile *f;
+ int devnull;
+
+ if (!g) {
+ graph_report("no commit-graph file loaded");
+ return 1;
+ }
+
+ verify_commit_graph_error = 0;
+
+ if (!g->chunk_oid_fanout)
+ graph_report("commit-graph is missing the OID Fanout chunk");
+ if (!g->chunk_oid_lookup)
+ graph_report("commit-graph is missing the OID Lookup chunk");
+ if (!g->chunk_commit_data)
+ graph_report("commit-graph is missing the Commit Data chunk");
+
+ if (verify_commit_graph_error)
+ return verify_commit_graph_error;
+
+ devnull = open("/dev/null", O_WRONLY);
+ f = hashfd(devnull, NULL);
+ hashwrite(f, g->data, g->data_len - g->hash_len);
+ finalize_hashfile(f, checksum.hash, CSUM_CLOSE);
+ if (hashcmp(checksum.hash, g->data + g->data_len - g->hash_len)) {
+ graph_report(_("the commit-graph file has incorrect checksum and is likely corrupt"));
+ verify_commit_graph_error = VERIFY_COMMIT_GRAPH_ERROR_HASH;
+ }
+
+ for (i = 0; i < g->num_commits; i++) {
+ struct commit *graph_commit;
+
+ hashcpy(cur_oid.hash, g->chunk_oid_lookup + g->hash_len * i);
+
+ if (i && oidcmp(&prev_oid, &cur_oid) >= 0)
+ graph_report("commit-graph has incorrect OID order: %s then %s",
+ oid_to_hex(&prev_oid),
+ oid_to_hex(&cur_oid));
+
+ oidcpy(&prev_oid, &cur_oid);
+
+ while (cur_oid.hash[0] > cur_fanout_pos) {
+ uint32_t fanout_value = get_be32(g->chunk_oid_fanout + cur_fanout_pos);
+
+ if (i != fanout_value)
+ graph_report("commit-graph has incorrect fanout value: fanout[%d] = %u != %u",
+ cur_fanout_pos, fanout_value, i);
+ cur_fanout_pos++;
+ }
+
+ graph_commit = lookup_commit(r, &cur_oid);
+ if (!parse_commit_in_graph_one(g, graph_commit))
+ graph_report("failed to parse %s from commit-graph",
+ oid_to_hex(&cur_oid));
+ }
+
+ while (cur_fanout_pos < 256) {
+ uint32_t fanout_value = get_be32(g->chunk_oid_fanout + cur_fanout_pos);
+
+ if (g->num_commits != fanout_value)
+ graph_report("commit-graph has incorrect fanout value: fanout[%d] = %u != %u",
+ cur_fanout_pos, fanout_value, i);
+
+ cur_fanout_pos++;
+ }
+
+ if (verify_commit_graph_error & ~VERIFY_COMMIT_GRAPH_ERROR_HASH)
+ return verify_commit_graph_error;
+
+ for (i = 0; i < g->num_commits; i++) {
+ struct commit *graph_commit, *odb_commit;
+ struct commit_list *graph_parents, *odb_parents;
+ uint32_t max_generation = 0;
+
+ hashcpy(cur_oid.hash, g->chunk_oid_lookup + g->hash_len * i);
+
+ graph_commit = lookup_commit(r, &cur_oid);
+ odb_commit = (struct commit *)create_object(r, cur_oid.hash, alloc_commit_node(r));
+ if (parse_commit_internal(odb_commit, 0, 0)) {
+ graph_report("failed to parse %s from object database",
+ oid_to_hex(&cur_oid));
+ continue;
+ }
+
+ if (oidcmp(&get_commit_tree_in_graph_one(g, graph_commit)->object.oid,
+ get_commit_tree_oid(odb_commit)))
+ graph_report("root tree OID for commit %s in commit-graph is %s != %s",
+ oid_to_hex(&cur_oid),
+ oid_to_hex(get_commit_tree_oid(graph_commit)),
+ oid_to_hex(get_commit_tree_oid(odb_commit)));
+
+ graph_parents = graph_commit->parents;
+ odb_parents = odb_commit->parents;
+
+ while (graph_parents) {
+ if (odb_parents == NULL) {
+ graph_report("commit-graph parent list for commit %s is too long",
+ oid_to_hex(&cur_oid));
+ break;
+ }
+
+ if (oidcmp(&graph_parents->item->object.oid, &odb_parents->item->object.oid))
+ graph_report("commit-graph parent for %s is %s != %s",
+ oid_to_hex(&cur_oid),
+ oid_to_hex(&graph_parents->item->object.oid),
+ oid_to_hex(&odb_parents->item->object.oid));
+
+ if (graph_parents->item->generation > max_generation)
+ max_generation = graph_parents->item->generation;
+
+ graph_parents = graph_parents->next;
+ odb_parents = odb_parents->next;
+ }
+
+ if (odb_parents != NULL)
+ graph_report("commit-graph parent list for commit %s terminates early",
+ oid_to_hex(&cur_oid));
+
+ if (!graph_commit->generation) {
+ if (generation_zero == GENERATION_NUMBER_EXISTS)
+ graph_report("commit-graph has generation number zero for commit %s, but non-zero elsewhere",
+ oid_to_hex(&cur_oid));
+ generation_zero = GENERATION_ZERO_EXISTS;
+ } else if (generation_zero == GENERATION_ZERO_EXISTS)
+ graph_report("commit-graph has non-zero generation number for commit %s, but zero elsewhere",
+ oid_to_hex(&cur_oid));
+
+ if (generation_zero == GENERATION_ZERO_EXISTS)
+ continue;
+
+ /*
+ * If one of our parents has generation GENERATION_NUMBER_MAX, then
+ * our generation is also GENERATION_NUMBER_MAX. Decrement to avoid
+ * extra logic in the following condition.
+ */
+ if (max_generation == GENERATION_NUMBER_MAX)
+ max_generation--;
+
+ if (graph_commit->generation != max_generation + 1)
+ graph_report("commit-graph generation for commit %s is %u != %u",
+ oid_to_hex(&cur_oid),
+ graph_commit->generation,
+ max_generation + 1);
+
+ if (graph_commit->date != odb_commit->date)
+ graph_report("commit date for commit %s in commit-graph is %"PRItime" != %"PRItime,
+ oid_to_hex(&cur_oid),
+ graph_commit->date,
+ odb_commit->date);
+ }
+
+ return verify_commit_graph_error;
+}
+
+void free_commit_graph(struct commit_graph *g)
+{
+ if (!g)
+ return;
+ if (g->graph_fd >= 0) {
+ munmap((void *)g->data, g->data_len);
+ g->data = NULL;
+ close(g->graph_fd);
+ }
+ free(g);
+}
diff --git a/commit-graph.h b/commit-graph.h
index 96cccb1..76e0989 100644
--- a/commit-graph.h
+++ b/commit-graph.h
@@ -2,6 +2,10 @@
#define COMMIT_GRAPH_H
#include "git-compat-util.h"
+#include "repository.h"
+#include "string-list.h"
+
+struct commit;
char *get_commit_graph_filename(const char *obj_dir);
@@ -15,7 +19,7 @@ char *get_commit_graph_filename(const char *obj_dir);
*
* See parse_commit_buffer() for the fallback after this call.
*/
-int parse_commit_in_graph(struct commit *item);
+int parse_commit_in_graph(struct repository *r, struct commit *item);
/*
* It is possible that we loaded commit contents from the commit buffer,
@@ -23,9 +27,10 @@ int parse_commit_in_graph(struct commit *item);
* checked and filled. Fill the graph_pos and generation members of
* the given commit.
*/
-void load_commit_graph_info(struct commit *item);
+void load_commit_graph_info(struct repository *r, struct commit *item);
-struct tree *get_commit_tree_in_graph(const struct commit *c);
+struct tree *get_commit_tree_in_graph(struct repository *r,
+ const struct commit *c);
struct commit_graph {
int graph_fd;
@@ -46,11 +51,14 @@ struct commit_graph {
struct commit_graph *load_commit_graph_one(const char *graph_file);
+void write_commit_graph_reachable(const char *obj_dir, int append);
void write_commit_graph(const char *obj_dir,
- const char **pack_indexes,
- int nr_packs,
- const char **commit_hex,
- int nr_commits,
+ struct string_list *pack_indexes,
+ struct string_list *commit_hex,
int append);
+int verify_commit_graph(struct repository *r, struct commit_graph *g);
+
+void free_commit_graph(struct commit_graph *);
+
#endif
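
With the array-plus-count parameters replaced by string_lists, a caller of the rewritten write_commit_graph() API might look like the following sketch (write_graph_sketch() and the pack name are hypothetical):

#include "cache.h"
#include "commit-graph.h"

static void write_graph_sketch(const char *obj_dir)
{
	struct string_list packs = STRING_LIST_INIT_DUP;

	/* index only the commits found in one pack (hypothetical .idx name) */
	string_list_append(&packs, "pack-example.idx");
	write_commit_graph(obj_dir, &packs, NULL, 0);
	string_list_clear(&packs, 0);

	/* or walk every ref and index everything reachable from them */
	write_commit_graph_reachable(obj_dir, 0);
}
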
diff --git a/commit-slab-impl.h b/commit-slab-impl.h
index 87a9cad..ac1e6d4 100644
--- a/commit-slab-impl.h
+++ b/commit-slab-impl.h
@@ -11,8 +11,6 @@
#define implement_commit_slab(slabname, elemtype, scope) \
\
-static int stat_ ##slabname## realloc; \
- \
scope void init_ ##slabname## _with_stride(struct slabname *s, \
unsigned stride) \
{ \
@@ -54,7 +52,6 @@ scope elemtype *slabname## _at_peek(struct slabname *s, \
if (!add_if_missing) \
return NULL; \
REALLOC_ARRAY(s->slab, nth_slab + 1); \
- stat_ ##slabname## realloc++; \
for (i = s->slab_count; i <= nth_slab; i++) \
s->slab[i] = NULL; \
s->slab_count = nth_slab + 1; \
diff --git a/commit.c b/commit.c
index a7c0b5f..30d1af2 100644
--- a/commit.c
+++ b/commit.c
@@ -24,24 +24,26 @@ int save_commit_buffer = 1;
const char *commit_type = "commit";
-struct commit *lookup_commit_reference_gently(const struct object_id *oid,
- int quiet)
+struct commit *lookup_commit_reference_gently(struct repository *r,
+ const struct object_id *oid, int quiet)
{
- struct object *obj = deref_tag(parse_object(oid), NULL, 0);
+ struct object *obj = deref_tag(r,
+ parse_object(r, oid),
+ NULL, 0);
if (!obj)
return NULL;
- return object_as_type(obj, OBJ_COMMIT, quiet);
+ return object_as_type(r, obj, OBJ_COMMIT, quiet);
}
-struct commit *lookup_commit_reference(const struct object_id *oid)
+struct commit *lookup_commit_reference(struct repository *r, const struct object_id *oid)
{
- return lookup_commit_reference_gently(oid, 0);
+ return lookup_commit_reference_gently(r, oid, 0);
}
struct commit *lookup_commit_or_die(const struct object_id *oid, const char *ref_name)
{
- struct commit *c = lookup_commit_reference(oid);
+ struct commit *c = lookup_commit_reference(the_repository, oid);
if (!c)
die(_("could not parse %s"), ref_name);
if (oidcmp(oid, &c->object.oid)) {
@@ -51,13 +53,13 @@ struct commit *lookup_commit_or_die(const struct object_id *oid, const char *ref
return c;
}
-struct commit *lookup_commit(const struct object_id *oid)
+struct commit *lookup_commit(struct repository *r, const struct object_id *oid)
{
- struct object *obj = lookup_object(oid->hash);
+ struct object *obj = lookup_object(r, oid->hash);
if (!obj)
- return create_object(the_repository, oid->hash,
- alloc_commit_node(the_repository));
- return object_as_type(obj, OBJ_COMMIT, 0);
+ return create_object(r, oid->hash,
+ alloc_commit_node(r));
+ return object_as_type(r, obj, OBJ_COMMIT, 0);
}
struct commit *lookup_commit_reference_by_name(const char *name)
@@ -67,7 +69,7 @@ struct commit *lookup_commit_reference_by_name(const char *name)
if (get_oid_committish(name, &oid))
return NULL;
- commit = lookup_commit_reference(&oid);
+ commit = lookup_commit_reference(the_repository, &oid);
if (parse_commit(commit))
return NULL;
return commit;
@@ -259,18 +261,32 @@ struct commit_buffer {
unsigned long size;
};
define_commit_slab(buffer_slab, struct commit_buffer);
-static struct buffer_slab buffer_slab = COMMIT_SLAB_INIT(1, buffer_slab);
-void set_commit_buffer(struct commit *commit, void *buffer, unsigned long size)
+struct buffer_slab *allocate_commit_buffer_slab(void)
{
- struct commit_buffer *v = buffer_slab_at(&buffer_slab, commit);
+ struct buffer_slab *bs = xmalloc(sizeof(*bs));
+ init_buffer_slab(bs);
+ return bs;
+}
+
+void free_commit_buffer_slab(struct buffer_slab *bs)
+{
+ clear_buffer_slab(bs);
+ free(bs);
+}
+
+void set_commit_buffer(struct repository *r, struct commit *commit, void *buffer, unsigned long size)
+{
+ struct commit_buffer *v = buffer_slab_at(
+ r->parsed_objects->buffer_slab, commit);
v->buffer = buffer;
v->size = size;
}
-const void *get_cached_commit_buffer(const struct commit *commit, unsigned long *sizep)
+const void *get_cached_commit_buffer(struct repository *r, const struct commit *commit, unsigned long *sizep)
{
- struct commit_buffer *v = buffer_slab_peek(&buffer_slab, commit);
+ struct commit_buffer *v = buffer_slab_peek(
+ r->parsed_objects->buffer_slab, commit);
if (!v) {
if (sizep)
*sizep = 0;
@@ -283,7 +299,7 @@ const void *get_cached_commit_buffer(const struct commit *commit, unsigned long
const void *get_commit_buffer(const struct commit *commit, unsigned long *sizep)
{
- const void *ret = get_cached_commit_buffer(commit, sizep);
+ const void *ret = get_cached_commit_buffer(the_repository, commit, sizep);
if (!ret) {
enum object_type type;
unsigned long size;
@@ -302,14 +318,16 @@ const void *get_commit_buffer(const struct commit *commit, unsigned long *sizep)
void unuse_commit_buffer(const struct commit *commit, const void *buffer)
{
- struct commit_buffer *v = buffer_slab_peek(&buffer_slab, commit);
+ struct commit_buffer *v = buffer_slab_peek(
+ the_repository->parsed_objects->buffer_slab, commit);
if (!(v && v->buffer == buffer))
free((void *)buffer);
}
void free_commit_buffer(struct commit *commit)
{
- struct commit_buffer *v = buffer_slab_peek(&buffer_slab, commit);
+ struct commit_buffer *v = buffer_slab_peek(
+ the_repository->parsed_objects->buffer_slab, commit);
if (v) {
FREE_AND_NULL(v->buffer);
v->size = 0;
@@ -324,7 +342,7 @@ struct tree *get_commit_tree(const struct commit *commit)
if (commit->graph_pos == COMMIT_NOT_FROM_GRAPH)
BUG("commit has NULL tree, but was not loaded from commit-graph");
- return get_commit_tree_in_graph(commit);
+ return get_commit_tree_in_graph(the_repository, commit);
}
struct object_id *get_commit_tree_oid(const struct commit *commit)
@@ -345,7 +363,8 @@ void release_commit_memory(struct commit *c)
const void *detach_commit_buffer(struct commit *commit, unsigned long *sizep)
{
- struct commit_buffer *v = buffer_slab_peek(&buffer_slab, commit);
+ struct commit_buffer *v = buffer_slab_peek(
+ the_repository->parsed_objects->buffer_slab, commit);
void *ret;
if (!v) {
@@ -362,15 +381,15 @@ const void *detach_commit_buffer(struct commit *commit, unsigned long *sizep)
return ret;
}
-int parse_commit_buffer(struct commit *item, const void *buffer, unsigned long size, int check_graph)
+int parse_commit_buffer(struct repository *r, struct commit *item, const void *buffer, unsigned long size, int check_graph)
{
const char *tail = buffer;
const char *bufptr = buffer;
struct object_id parent;
struct commit_list **pptr;
struct commit_graft *graft;
- const int tree_entry_len = GIT_SHA1_HEXSZ + 5;
- const int parent_entry_len = GIT_SHA1_HEXSZ + 7;
+ const int tree_entry_len = the_hash_algo->hexsz + 5;
+ const int parent_entry_len = the_hash_algo->hexsz + 7;
if (item->object.parsed)
return 0;
@@ -382,11 +401,11 @@ int parse_commit_buffer(struct commit *item, const void *buffer, unsigned long s
if (get_oid_hex(bufptr + 5, &parent) < 0)
return error("bad tree pointer in commit %s",
oid_to_hex(&item->object.oid));
- item->maybe_tree = lookup_tree(&parent);
+ item->maybe_tree = lookup_tree(r, &parent);
bufptr += tree_entry_len + 1; /* "tree " + "hex sha1" + "\n" */
pptr = &item->parents;
- graft = lookup_commit_graft(the_repository, &item->object.oid);
+ graft = lookup_commit_graft(r, &item->object.oid);
while (bufptr + parent_entry_len < tail && !memcmp(bufptr, "parent ", 7)) {
struct commit *new_parent;
@@ -401,7 +420,7 @@ int parse_commit_buffer(struct commit *item, const void *buffer, unsigned long s
*/
if (graft && (graft->nr_parent < 0 || grafts_replace_parents))
continue;
- new_parent = lookup_commit(&parent);
+ new_parent = lookup_commit(r, &parent);
if (new_parent)
pptr = &commit_list_insert(new_parent, pptr)->next;
}
@@ -409,7 +428,8 @@ int parse_commit_buffer(struct commit *item, const void *buffer, unsigned long s
int i;
struct commit *new_parent;
for (i = 0; i < graft->nr_parent; i++) {
- new_parent = lookup_commit(&graft->parent[i]);
+ new_parent = lookup_commit(r,
+ &graft->parent[i]);
if (!new_parent)
continue;
pptr = &commit_list_insert(new_parent, pptr)->next;
@@ -418,12 +438,12 @@ int parse_commit_buffer(struct commit *item, const void *buffer, unsigned long s
item->date = parse_commit_date(bufptr, tail);
if (check_graph)
- load_commit_graph_info(item);
+ load_commit_graph_info(the_repository, item);
return 0;
}
-int parse_commit_gently(struct commit *item, int quiet_on_missing)
+int parse_commit_internal(struct commit *item, int quiet_on_missing, int use_commit_graph)
{
enum object_type type;
void *buffer;
@@ -434,7 +454,7 @@ int parse_commit_gently(struct commit *item, int quiet_on_missing)
return -1;
if (item->object.parsed)
return 0;
- if (parse_commit_in_graph(item))
+ if (use_commit_graph && parse_commit_in_graph(the_repository, item))
return 0;
buffer = read_object_file(&item->object.oid, &type, &size);
if (!buffer)
@@ -446,15 +466,21 @@ int parse_commit_gently(struct commit *item, int quiet_on_missing)
return error("Object %s not a commit",
oid_to_hex(&item->object.oid));
}
- ret = parse_commit_buffer(item, buffer, size, 0);
+
+ ret = parse_commit_buffer(the_repository, item, buffer, size, 0);
if (save_commit_buffer && !ret) {
- set_commit_buffer(item, buffer, size);
+ set_commit_buffer(the_repository, item, buffer, size);
return 0;
}
free(buffer);
return ret;
}
+int parse_commit_gently(struct commit *item, int quiet_on_missing)
+{
+ return parse_commit_internal(item, quiet_on_missing, 1);
+}
+
void parse_commit_or_die(struct commit *item)
{
if (parse_commit(item))
@@ -1692,7 +1718,7 @@ struct commit *get_merge_parent(const char *name)
struct object_id oid;
if (get_oid(name, &oid))
return NULL;
- obj = parse_object(&oid);
+ obj = parse_object(the_repository, &oid);
commit = (struct commit *)peel_to_type(name, 0, obj, OBJ_COMMIT);
if (commit && !merge_remote_util(commit))
set_merge_remote_desc(commit, name, obj);
diff --git a/commit.h b/commit.h
index 01b8b1d..da0db36 100644
--- a/commit.h
+++ b/commit.h
@@ -63,9 +63,11 @@ enum decoration_type {
void add_name_decoration(enum decoration_type type, const char *name, struct object *obj);
const struct name_decoration *get_name_decoration(const struct object *obj);
-struct commit *lookup_commit(const struct object_id *oid);
-struct commit *lookup_commit_reference(const struct object_id *oid);
-struct commit *lookup_commit_reference_gently(const struct object_id *oid,
+struct commit *lookup_commit(struct repository *r, const struct object_id *oid);
+struct commit *lookup_commit_reference(struct repository *r,
+ const struct object_id *oid);
+struct commit *lookup_commit_reference_gently(struct repository *r,
+ const struct object_id *oid,
int quiet);
struct commit *lookup_commit_reference_by_name(const char *name);
@@ -76,7 +78,8 @@ struct commit *lookup_commit_reference_by_name(const char *name);
*/
struct commit *lookup_commit_or_die(const struct object_id *oid, const char *ref_name);
-int parse_commit_buffer(struct commit *item, const void *buffer, unsigned long size, int check_graph);
+int parse_commit_buffer(struct repository *r, struct commit *item, const void *buffer, unsigned long size, int check_graph);
+int parse_commit_internal(struct commit *item, int quiet_on_missing, int use_commit_graph);
int parse_commit_gently(struct commit *item, int quiet_on_missing);
static inline int parse_commit(struct commit *item)
{
@@ -84,17 +87,21 @@ static inline int parse_commit(struct commit *item)
}
void parse_commit_or_die(struct commit *item);
+struct buffer_slab;
+struct buffer_slab *allocate_commit_buffer_slab(void);
+void free_commit_buffer_slab(struct buffer_slab *bs);
+
/*
* Associate an object buffer with the commit. The ownership of the
* memory is handed over to the commit, and must be free()-able.
*/
-void set_commit_buffer(struct commit *, void *buffer, unsigned long size);
+void set_commit_buffer(struct repository *r, struct commit *, void *buffer, unsigned long size);
/*
* Get any cached object buffer associated with the commit. Returns NULL
* if none. The resulting memory should not be freed.
*/
-const void *get_cached_commit_buffer(const struct commit *, unsigned long *size);
+const void *get_cached_commit_buffer(struct repository *, const struct commit *, unsigned long *size);
/*
* Get the commit's object contents, either from cache or by reading the object
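
As the commit.h hunks above show, lookup_commit(), lookup_commit_reference(), lookup_commit_reference_gently(), parse_commit_buffer(), set_commit_buffer() and get_cached_commit_buffer() now take an explicit struct repository instead of implicitly operating on the main one. A short caller-side sketch (illustration only, not part of this patch; the helper name is invented), passing the_repository the way the converted call sites do:

	#include "cache.h"
	#include "commit.h"
	#include "repository.h"

	static struct commit *lookup_and_parse(const struct object_id *oid)
	{
		struct commit *c = lookup_commit_reference_gently(the_repository,
								  oid, 1);

		if (!c || parse_commit_gently(c, 1))
			return NULL;
		return c;
	}
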
diff --git a/config.c b/config.c
index 7968ef7..4446909 100644
--- a/config.c
+++ b/config.c
@@ -32,7 +32,7 @@ struct config_source {
enum config_origin_type origin_type;
const char *name;
const char *path;
- int die_on_error;
+ enum config_error_action default_error_action;
int linenr;
int eof;
struct strbuf value;
@@ -117,12 +117,12 @@ static long config_buf_ftell(struct config_source *conf)
}
#define MAX_INCLUDE_DEPTH 10
-static const char include_depth_advice[] =
+static const char include_depth_advice[] = N_(
"exceeded maximum include depth (%d) while including\n"
" %s\n"
"from\n"
" %s\n"
-"Do you have circular includes?";
+"Do you have circular includes?");
static int handle_path_include(const char *path, struct config_include_data *inc)
{
int ret = 0;
@@ -134,7 +134,7 @@ static int handle_path_include(const char *path, struct config_include_data *inc
expanded = expand_user_path(path, 0);
if (!expanded)
- return error("could not expand include path '%s'", path);
+ return error(_("could not expand include path '%s'"), path);
path = expanded;
/*
@@ -145,7 +145,7 @@ static int handle_path_include(const char *path, struct config_include_data *inc
char *slash;
if (!cf || !cf->path)
- return error("relative config includes must come from files");
+ return error(_("relative config includes must come from files"));
slash = find_last_dir_sep(cf->path);
if (slash)
@@ -156,7 +156,7 @@ static int handle_path_include(const char *path, struct config_include_data *inc
if (!access_or_die(path, R_OK, 0)) {
if (++inc->depth > MAX_INCLUDE_DEPTH)
- die(include_depth_advice, MAX_INCLUDE_DEPTH, path,
+ die(_(include_depth_advice), MAX_INCLUDE_DEPTH, path,
!cf ? "<unknown>" :
cf->name ? cf->name :
"the command line");
@@ -343,13 +343,13 @@ static int git_config_parse_key_1(const char *key, char **store_key, int *basele
if (last_dot == NULL || last_dot == key) {
if (!quiet)
- error("key does not contain a section: %s", key);
+ error(_("key does not contain a section: %s"), key);
return -CONFIG_NO_SECTION_OR_NAME;
}
if (!last_dot[1]) {
if (!quiet)
- error("key does not contain variable name: %s", key);
+ error(_("key does not contain variable name: %s"), key);
return -CONFIG_NO_SECTION_OR_NAME;
}
@@ -373,13 +373,13 @@ static int git_config_parse_key_1(const char *key, char **store_key, int *basele
if (!iskeychar(c) ||
(i == baselen + 1 && !isalpha(c))) {
if (!quiet)
- error("invalid key: %s", key);
+ error(_("invalid key: %s"), key);
goto out_free_ret_1;
}
c = tolower(c);
} else if (c == '\n') {
if (!quiet)
- error("invalid key (newline): %s", key);
+ error(_("invalid key (newline): %s"), key);
goto out_free_ret_1;
}
if (store_key)
@@ -415,7 +415,7 @@ int git_config_parse_parameter(const char *text,
pair = strbuf_split_str(text, '=', 2);
if (!pair[0])
- return error("bogus config parameter: %s", text);
+ return error(_("bogus config parameter: %s"), text);
if (pair[0]->len && pair[0]->buf[pair[0]->len - 1] == '=') {
strbuf_setlen(pair[0], pair[0]->len - 1);
@@ -427,7 +427,7 @@ int git_config_parse_parameter(const char *text,
strbuf_trim(pair[0]);
if (!pair[0]->len) {
strbuf_list_free(pair);
- return error("bogus config parameter: %s", text);
+ return error(_("bogus config parameter: %s"), text);
}
if (git_config_parse_key(pair[0]->buf, &canonical_name, NULL)) {
@@ -462,7 +462,7 @@ int git_config_from_parameters(config_fn_t fn, void *data)
envw = xstrdup(env);
if (sq_dequote_to_argv(envw, &argv, &nr, &alloc) < 0) {
- ret = error("bogus format in " CONFIG_DATA_ENVIRONMENT);
+ ret = error(_("bogus format in %s"), CONFIG_DATA_ENVIRONMENT);
goto out;
}
@@ -810,10 +810,21 @@ static int git_parse_source(config_fn_t fn, void *data,
cf->linenr, cf->name);
}
- if (cf->die_on_error)
+ switch (opts && opts->error_action ?
+ opts->error_action :
+ cf->default_error_action) {
+ case CONFIG_ERROR_DIE:
die("%s", error_msg);
- else
+ break;
+ case CONFIG_ERROR_ERROR:
error_return = error("%s", error_msg);
+ break;
+ case CONFIG_ERROR_SILENT:
+ error_return = -1;
+ break;
+ case CONFIG_ERROR_UNSET:
+ BUG("config error action unset");
+ }
free(error_msg);
return error_return;
@@ -922,7 +933,7 @@ int git_parse_ulong(const char *value, unsigned long *ret)
return 1;
}
-static int git_parse_ssize_t(const char *value, ssize_t *ret)
+int git_parse_ssize_t(const char *value, ssize_t *ret)
{
intmax_t tmp;
if (!git_parse_signed(value, &tmp, maximum_signed_value_of_type(ssize_t)))
@@ -1155,7 +1166,7 @@ static int git_default_core_config(const char *var, const char *value)
else {
int abbrev = git_config_int(var, value);
if (abbrev < minimum_abbrev || abbrev > 40)
- return error("abbrev length out of range: %d", abbrev);
+ return error(_("abbrev length out of range: %d"), abbrev);
default_abbrev = abbrev;
}
return 0;
@@ -1272,7 +1283,7 @@ static int git_default_core_config(const char *var, const char *value)
comment_line_char = value[0];
auto_comment_line_char = 0;
} else
- return error("core.commentChar should only be one character");
+ return error(_("core.commentChar should only be one character"));
return 0;
}
@@ -1309,11 +1320,6 @@ static int git_default_core_config(const char *var, const char *value)
return 0;
}
- if (!strcmp(var, "core.commitgraph")) {
- core_commit_graph = git_config_bool(var, value);
- return 0;
- }
-
if (!strcmp(var, "core.sparsecheckout")) {
core_apply_sparse_checkout = git_config_bool(var, value);
return 0;
@@ -1347,6 +1353,11 @@ static int git_default_core_config(const char *var, const char *value)
var, value);
}
+ if (!strcmp(var, "core.usereplacerefs")) {
+ read_replace_refs = git_config_bool(var, value);
+ return 0;
+ }
+
/* Add other config variables here and to Documentation/config.txt. */
return 0;
}
@@ -1385,7 +1396,7 @@ static int git_default_branch_config(const char *var, const char *value)
else if (!strcmp(value, "always"))
autorebase = AUTOREBASE_ALWAYS;
else
- return error("malformed value for %s", var);
+ return error(_("malformed value for %s"), var);
return 0;
}
@@ -1411,9 +1422,9 @@ static int git_default_push_config(const char *var, const char *value)
else if (!strcmp(value, "current"))
push_default = PUSH_DEFAULT_CURRENT;
else {
- error("malformed value for %s: %s", var, value);
- return error("Must be one of nothing, matching, simple, "
- "upstream or current.");
+ error(_("malformed value for %s: %s"), var, value);
+ return error(_("must be one of nothing, matching, simple, "
+ "upstream or current"));
}
return 0;
}
@@ -1521,7 +1532,7 @@ static int do_config_from_file(config_fn_t fn,
top.origin_type = origin_type;
top.name = name;
top.path = path;
- top.die_on_error = 1;
+ top.default_error_action = CONFIG_ERROR_DIE;
top.do_fgetc = config_file_fgetc;
top.do_ungetc = config_file_ungetc;
top.do_ftell = config_file_ftell;
@@ -1559,8 +1570,10 @@ int git_config_from_file(config_fn_t fn, const char *filename, void *data)
return git_config_from_file_with_options(fn, filename, data, NULL);
}
-int git_config_from_mem(config_fn_t fn, const enum config_origin_type origin_type,
- const char *name, const char *buf, size_t len, void *data)
+int git_config_from_mem(config_fn_t fn,
+ const enum config_origin_type origin_type,
+ const char *name, const char *buf, size_t len,
+ void *data, const struct config_options *opts)
{
struct config_source top;
@@ -1570,12 +1583,12 @@ int git_config_from_mem(config_fn_t fn, const enum config_origin_type origin_typ
top.origin_type = origin_type;
top.name = name;
top.path = NULL;
- top.die_on_error = 0;
+ top.default_error_action = CONFIG_ERROR_ERROR;
top.do_fgetc = config_buf_fgetc;
top.do_ungetc = config_buf_ungetc;
top.do_ftell = config_buf_ftell;
- return do_config_from(&top, fn, data, NULL);
+ return do_config_from(&top, fn, data, opts);
}
int git_config_from_blob_oid(config_fn_t fn,
@@ -1590,13 +1603,14 @@ int git_config_from_blob_oid(config_fn_t fn,
buf = read_object_file(oid, &type, &size);
if (!buf)
- return error("unable to load config blob object '%s'", name);
+ return error(_("unable to load config blob object '%s'"), name);
if (type != OBJ_BLOB) {
free(buf);
- return error("reference '%s' does not point to a blob", name);
+ return error(_("reference '%s' does not point to a blob"), name);
}
- ret = git_config_from_mem(fn, CONFIG_ORIGIN_BLOB, name, buf, size, data);
+ ret = git_config_from_mem(fn, CONFIG_ORIGIN_BLOB, name, buf, size,
+ data, NULL);
free(buf);
return ret;
@@ -1609,7 +1623,7 @@ static int git_config_from_blob_ref(config_fn_t fn,
struct object_id oid;
if (get_oid(name, &oid) < 0)
- return error("unable to resolve config blob '%s'", name);
+ return error(_("unable to resolve config blob '%s'"), name);
return git_config_from_blob_oid(fn, name, &oid, data);
}
@@ -1639,7 +1653,7 @@ unsigned long git_env_ulong(const char *k, unsigned long val)
{
const char *v = getenv(k);
if (v && !git_parse_ulong(v, &val))
- die("failed to parse %s", k);
+ die(_("failed to parse %s"), k);
return val;
}
@@ -2356,7 +2370,7 @@ static int store_aux_event(enum config_event_t type,
if (type == CONFIG_EVENT_SECTION) {
if (cf->var.len < 2 || cf->var.buf[cf->var.len - 1] != '.')
- return error("invalid section name '%s'", cf->var.buf);
+ return error(_("invalid section name '%s'"), cf->var.buf);
/* Is this the section we were looking for? */
store->is_keys_section =
@@ -2412,7 +2426,7 @@ static int store_aux(const char *key, const char *value, void *cb)
static int write_error(const char *filename)
{
- error("failed to write new configuration file %s", filename);
+ error(_("failed to write new configuration file %s"), filename);
/* Same error code as "failed to rename". */
return 4;
@@ -2663,7 +2677,7 @@ int git_config_set_multivar_in_file_gently(const char *config_filename,
*/
fd = hold_lock_file_for_update(&lock, config_filename, 0);
if (fd < 0) {
- error_errno("could not lock config file %s", config_filename);
+ error_errno(_("could not lock config file %s"), config_filename);
ret = CONFIG_NO_LOCK;
goto out_free;
}
@@ -2674,7 +2688,7 @@ int git_config_set_multivar_in_file_gently(const char *config_filename,
in_fd = open(config_filename, O_RDONLY);
if ( in_fd < 0 ) {
if ( ENOENT != errno ) {
- error_errno("opening %s", config_filename);
+ error_errno(_("opening %s"), config_filename);
ret = CONFIG_INVALID_FILE; /* same as "invalid config file" */
goto out_free;
}
@@ -2709,7 +2723,7 @@ int git_config_set_multivar_in_file_gently(const char *config_filename,
store.value_regex = (regex_t*)xmalloc(sizeof(regex_t));
if (regcomp(store.value_regex, value_regex,
REG_EXTENDED)) {
- error("invalid pattern: %s", value_regex);
+ error(_("invalid pattern: %s"), value_regex);
FREE_AND_NULL(store.value_regex);
ret = CONFIG_INVALID_PATTERN;
goto out_free;
@@ -2734,7 +2748,7 @@ int git_config_set_multivar_in_file_gently(const char *config_filename,
if (git_config_from_file_with_options(store_aux,
config_filename,
&store, &opts)) {
- error("invalid config file %s", config_filename);
+ error(_("invalid config file %s"), config_filename);
ret = CONFIG_INVALID_FILE;
goto out_free;
}
@@ -2758,7 +2772,7 @@ int git_config_set_multivar_in_file_gently(const char *config_filename,
if (contents == MAP_FAILED) {
if (errno == ENODEV && S_ISDIR(st.st_mode))
errno = EISDIR;
- error_errno("unable to mmap '%s'", config_filename);
+ error_errno(_("unable to mmap '%s'"), config_filename);
ret = CONFIG_INVALID_FILE;
contents = NULL;
goto out_free;
@@ -2767,7 +2781,7 @@ int git_config_set_multivar_in_file_gently(const char *config_filename,
in_fd = -1;
if (chmod(get_lock_file_path(&lock), st.st_mode & 07777) < 0) {
- error_errno("chmod on %s failed", get_lock_file_path(&lock));
+ error_errno(_("chmod on %s failed"), get_lock_file_path(&lock));
ret = CONFIG_NO_WRITE;
goto out_free;
}
@@ -2852,7 +2866,7 @@ int git_config_set_multivar_in_file_gently(const char *config_filename,
}
if (commit_lock_file(&lock) < 0) {
- error_errno("could not write config file %s", config_filename);
+ error_errno(_("could not write config file %s"), config_filename);
ret = CONFIG_NO_WRITE;
goto out_free;
}
@@ -2978,7 +2992,7 @@ static int git_config_copy_or_rename_section_in_file(const char *config_filename
memset(&store, 0, sizeof(store));
if (new_name && !section_name_is_ok(new_name)) {
- ret = error("invalid section name: %s", new_name);
+ ret = error(_("invalid section name: %s"), new_name);
goto out_no_rollback;
}
@@ -2987,7 +3001,7 @@ static int git_config_copy_or_rename_section_in_file(const char *config_filename
out_fd = hold_lock_file_for_update(&lock, config_filename, 0);
if (out_fd < 0) {
- ret = error("could not lock config file %s", config_filename);
+ ret = error(_("could not lock config file %s"), config_filename);
goto out;
}
@@ -3005,7 +3019,7 @@ static int git_config_copy_or_rename_section_in_file(const char *config_filename
}
if (chmod(get_lock_file_path(&lock), st.st_mode & 07777) < 0) {
- ret = error_errno("chmod on %s failed",
+ ret = error_errno(_("chmod on %s failed"),
get_lock_file_path(&lock));
goto out;
}
@@ -3102,7 +3116,7 @@ static int git_config_copy_or_rename_section_in_file(const char *config_filename
config_file = NULL;
commit_and_out:
if (commit_lock_file(&lock) < 0)
- ret = error_errno("could not write config file %s",
+ ret = error_errno(_("could not write config file %s"),
config_filename);
out:
if (config_file)
@@ -3145,7 +3159,7 @@ int git_config_copy_section(const char *old_name, const char *new_name)
#undef config_error_nonbool
int config_error_nonbool(const char *var)
{
- return error("missing value for '%s'", var);
+ return error(_("missing value for '%s'"), var);
}
int parse_config_key(const char *var,
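
git_parse_ssize_t() loses its static qualifier above and gains a declaration in config.h below, so it can be reused outside config.c. A small sketch (illustration only, not part of this patch; the helper name is invented) of a caller -- like git_parse_ulong() shown earlier, it is expected to return 1 on success:

	#include "cache.h"
	#include "config.h"

	static ssize_t parse_size_or_die(const char *value)
	{
		ssize_t sz;

		if (!git_parse_ssize_t(value, &sz))
			die(_("invalid size value '%s'"), value);
		return sz;
	}
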
diff --git a/config.h b/config.h
index b95bb76..2d5c18b 100644
--- a/config.h
+++ b/config.h
@@ -54,6 +54,12 @@ struct config_options {
const char *git_dir;
config_parser_event_fn_t event_fn;
void *event_fn_data;
+ enum config_error_action {
+ CONFIG_ERROR_UNSET = 0, /* use source-specific default */
+ CONFIG_ERROR_DIE, /* die() on error */
+ CONFIG_ERROR_ERROR, /* error() on error, return -1 */
+ CONFIG_ERROR_SILENT, /* return -1 */
+ } error_action;
};
typedef int (*config_fn_t)(const char *, const char *, void *);
@@ -62,8 +68,11 @@ extern int git_config_from_file(config_fn_t fn, const char *, void *);
extern int git_config_from_file_with_options(config_fn_t fn, const char *,
void *,
const struct config_options *);
-extern int git_config_from_mem(config_fn_t fn, const enum config_origin_type,
- const char *name, const char *buf, size_t len, void *data);
+extern int git_config_from_mem(config_fn_t fn,
+ const enum config_origin_type,
+ const char *name,
+ const char *buf, size_t len,
+ void *data, const struct config_options *opts);
extern int git_config_from_blob_oid(config_fn_t fn, const char *name,
const struct object_id *oid, void *data);
extern void git_config_push_parameter(const char *text);
@@ -73,6 +82,7 @@ extern void git_config(config_fn_t fn, void *);
extern int config_with_options(config_fn_t fn, void *,
struct git_config_source *config_source,
const struct config_options *opts);
+extern int git_parse_ssize_t(const char *, ssize_t *);
extern int git_parse_ulong(const char *, unsigned long *);
extern int git_parse_maybe_bool(const char *);
extern int git_config_int(const char *, const char *);
@@ -179,9 +189,14 @@ struct config_set {
extern void git_configset_init(struct config_set *cs);
extern int git_configset_add_file(struct config_set *cs, const char *filename);
-extern int git_configset_get_value(struct config_set *cs, const char *key, const char **value);
extern const struct string_list *git_configset_get_value_multi(struct config_set *cs, const char *key);
extern void git_configset_clear(struct config_set *cs);
+
+/*
+ * These functions return 1 if not found, and 0 if found, leaving the found
+ * value in the 'dest' pointer.
+ */
+extern int git_configset_get_value(struct config_set *cs, const char *key, const char **dest);
extern int git_configset_get_string_const(struct config_set *cs, const char *key, const char **dest);
extern int git_configset_get_string(struct config_set *cs, const char *key, char **dest);
extern int git_configset_get_int(struct config_set *cs, const char *key, int *dest);
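
The new config_options::error_action field, together with the extra opts parameter of git_config_from_mem(), lets a caller override the per-source default (die for files, error for in-memory buffers). A hedged sketch (illustration only, not part of this patch; the callback and helper names are invented) of parsing an in-memory config while silently tolerating syntax errors:

	#include "cache.h"
	#include "config.h"

	static int count_entries(const char *key, const char *value, void *data)
	{
		(*(int *)data)++;
		return 0;
	}

	static int count_config_entries(const char *buf, size_t len)
	{
		struct config_options opts = { 0 };
		int n = 0;

		/* just return -1 on parse errors, no message, no die() */
		opts.error_action = CONFIG_ERROR_SILENT;
		if (git_config_from_mem(count_entries, CONFIG_ORIGIN_BLOB,
					"<in-memory>", buf, len, &n, &opts) < 0)
			return -1;
		return n;
	}
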
diff --git a/config.mak.dev b/config.mak.dev
index 2d244ca..9a99814 100644
--- a/config.mak.dev
+++ b/config.mak.dev
@@ -1,6 +1,11 @@
ifeq ($(filter no-error,$(DEVOPTS)),)
CFLAGS += -Werror
endif
+ifneq ($(filter pedantic,$(DEVOPTS)),)
+CFLAGS += -pedantic
+# don't warn for each N_ use
+CFLAGS += -DUSE_PARENS_AROUND_GETTEXT_N=0
+endif
CFLAGS += -Wdeclaration-after-statement
CFLAGS += -Wno-format-zero-length
CFLAGS += -Wold-style-definition
diff --git a/config.mak.uname b/config.mak.uname
index 684fc5b..2be2f19 100644
--- a/config.mak.uname
+++ b/config.mak.uname
@@ -528,7 +528,7 @@ ifneq (,$(findstring MINGW,$(uname_S)))
COMPAT_OBJS += compat/mingw.o compat/winansi.o \
compat/win32/pthread.o compat/win32/syslog.o \
compat/win32/dirent.o
- BASIC_CFLAGS += -DPROTECT_NTFS_DEFAULT=1
+ BASIC_CFLAGS += -DWIN32 -DPROTECT_NTFS_DEFAULT=1
EXTLIBS += -lws2_32
GITLIBS += git.res
PTHREAD_LIBS =
diff --git a/connect.c b/connect.c
index 968e91b..94547e5 100644
--- a/connect.c
+++ b/connect.c
@@ -58,7 +58,7 @@ static NORETURN void die_initial_contact(int unexpected)
* response does not necessarily mean an ACL problem, though.
*/
if (unexpected)
- die(_("The remote end hung up upon initial contact"));
+ die(_("the remote end hung up upon initial contact"));
else
die(_("Could not read from remote repository.\n\n"
"Please make sure you have the correct access rights\n"
@@ -78,7 +78,7 @@ int server_supports_v2(const char *c, int die_on_error)
}
if (die_on_error)
- die("server doesn't support '%s'", c);
+ die(_("server doesn't support '%s'"), c);
return 0;
}
@@ -100,7 +100,7 @@ int server_supports_feature(const char *c, const char *feature,
}
if (die_on_error)
- die("server doesn't support feature '%s'", feature);
+ die(_("server doesn't support feature '%s'"), feature);
return 0;
}
@@ -111,7 +111,7 @@ static void process_capabilities_v2(struct packet_reader *reader)
argv_array_push(&server_capabilities_v2, reader->line);
if (reader->status != PACKET_READ_FLUSH)
- die("expected flush after capabilities");
+ die(_("expected flush after capabilities"));
}
enum protocol_version discover_version(struct packet_reader *reader)
@@ -230,7 +230,7 @@ static int process_dummy_ref(const char *line)
static void check_no_capabilities(const char *line, int len)
{
if (strlen(line) != len)
- warning("Ignoring capabilities after first line '%s'",
+ warning(_("ignoring capabilities after first line '%s'"),
line + strlen(line));
}
@@ -249,7 +249,7 @@ static int process_ref(const char *line, int len, struct ref ***list,
if (extra_have && !strcmp(name, ".have")) {
oid_array_append(extra_have, &old_oid);
} else if (!strcmp(name, "capabilities^{}")) {
- die("protocol error: unexpected capabilities^{}");
+ die(_("protocol error: unexpected capabilities^{}"));
} else if (check_ref(name, flags)) {
struct ref *ref = alloc_ref(name);
oidcpy(&ref->old_oid, &old_oid);
@@ -270,9 +270,9 @@ static int process_shallow(const char *line, int len,
return 0;
if (get_oid_hex(arg, &old_oid))
- die("protocol error: expected shallow sha-1, got '%s'", arg);
+ die(_("protocol error: expected shallow sha-1, got '%s'"), arg);
if (!shallow_points)
- die("repository on the other end cannot be shallow");
+ die(_("repository on the other end cannot be shallow"));
oid_array_append(shallow_points, &old_oid);
check_no_capabilities(line, len);
return 1;
@@ -307,13 +307,13 @@ struct ref **get_remote_heads(struct packet_reader *reader,
case PACKET_READ_NORMAL:
len = reader->pktlen;
if (len > 4 && skip_prefix(reader->line, "ERR ", &arg))
- die("remote error: %s", arg);
+ die(_("remote error: %s"), arg);
break;
case PACKET_READ_FLUSH:
state = EXPECTING_DONE;
break;
case PACKET_READ_DELIM:
- die("invalid packet");
+ die(_("invalid packet"));
}
switch (state) {
@@ -333,7 +333,7 @@ struct ref **get_remote_heads(struct packet_reader *reader,
case EXPECTING_SHALLOW:
if (process_shallow(reader->line, len, shallow_points))
break;
- die("protocol error: unexpected '%s'", reader->line);
+ die(_("protocol error: unexpected '%s'"), reader->line);
case EXPECTING_DONE:
break;
}
@@ -441,11 +441,11 @@ struct ref **get_remote_refs(int fd_out, struct packet_reader *reader,
/* Process response from server */
while (packet_reader_read(reader) == PACKET_READ_NORMAL) {
if (!process_ref_v2(reader->line, &list))
- die("invalid ls-refs response: %s", reader->line);
+ die(_("invalid ls-refs response: %s"), reader->line);
}
if (reader->status != PACKET_READ_FLUSH)
- die("expected flush after ref listing");
+ die(_("expected flush after ref listing"));
return list;
}
@@ -544,7 +544,7 @@ static enum protocol get_protocol(const char *name)
return PROTO_SSH;
if (!strcmp(name, "file"))
return PROTO_FILE;
- die("I don't handle protocol '%s'", name);
+ die(_("protocol '%s' is not supported"), name);
}
static char *host_end(char **hoststart, int removebrackets)
@@ -595,8 +595,7 @@ static void enable_keepalive(int sockfd)
int ka = 1;
if (setsockopt(sockfd, SOL_SOCKET, SO_KEEPALIVE, &ka, sizeof(ka)) < 0)
- fprintf(stderr, "unable to set SO_KEEPALIVE on socket: %s\n",
- strerror(errno));
+ error_errno(_("unable to set SO_KEEPALIVE on socket"));
}
#ifndef NO_IPV6
@@ -636,14 +635,15 @@ static int git_tcp_connect_sock(char *host, int flags)
hints.ai_protocol = IPPROTO_TCP;
if (flags & CONNECT_VERBOSE)
- fprintf(stderr, "Looking up %s ... ", host);
+ fprintf(stderr, _("Looking up %s ... "), host);
gai = getaddrinfo(host, port, &hints, &ai);
if (gai)
- die("Unable to look up %s (port %s) (%s)", host, port, gai_strerror(gai));
+ die(_("unable to look up %s (port %s) (%s)"), host, port, gai_strerror(gai));
if (flags & CONNECT_VERBOSE)
- fprintf(stderr, "done.\nConnecting to %s (port %s) ... ", host, port);
+ /* TRANSLATORS: this is the end of "Looking up %s ... " */
+ fprintf(stderr, _("done.\nConnecting to %s (port %s) ... "), host, port);
for (ai0 = ai; ai; ai = ai->ai_next, cnt++) {
sockfd = socket(ai->ai_family,
@@ -665,12 +665,13 @@ static int git_tcp_connect_sock(char *host, int flags)
freeaddrinfo(ai0);
if (sockfd < 0)
- die("unable to connect to %s:\n%s", host, error_message.buf);
+ die(_("unable to connect to %s:\n%s"), host, error_message.buf);
enable_keepalive(sockfd);
if (flags & CONNECT_VERBOSE)
- fprintf(stderr, "done.\n");
+ /* TRANSLATORS: this is the end of "Connecting to %s (port %s) ... " */
+ fprintf_ln(stderr, _("done."));
strbuf_release(&error_message);
@@ -697,22 +698,23 @@ static int git_tcp_connect_sock(char *host, int flags)
get_host_and_port(&host, &port);
if (flags & CONNECT_VERBOSE)
- fprintf(stderr, "Looking up %s ... ", host);
+ fprintf(stderr, _("Looking up %s ... "), host);
he = gethostbyname(host);
if (!he)
- die("Unable to look up %s (%s)", host, hstrerror(h_errno));
+ die(_("unable to look up %s (%s)"), host, hstrerror(h_errno));
nport = strtoul(port, &ep, 10);
if ( ep == port || *ep ) {
/* Not numeric */
struct servent *se = getservbyname(port,"tcp");
if ( !se )
- die("Unknown port %s", port);
+ die(_("unknown port %s"), port);
nport = se->s_port;
}
if (flags & CONNECT_VERBOSE)
- fprintf(stderr, "done.\nConnecting to %s (port %s) ... ", host, port);
+ /* TRANSLATORS: this is the end of "Looking up %s ... " */
+ fprintf(stderr, _("done.\nConnecting to %s (port %s) ... "), host, port);
for (cnt = 0, ap = he->h_addr_list; *ap; ap++, cnt++) {
memset(&sa, 0, sizeof sa);
@@ -740,12 +742,13 @@ static int git_tcp_connect_sock(char *host, int flags)
}
if (sockfd < 0)
- die("unable to connect to %s:\n%s", host, error_message.buf);
+ die(_("unable to connect to %s:\n%s"), host, error_message.buf);
enable_keepalive(sockfd);
if (flags & CONNECT_VERBOSE)
- fprintf(stderr, "done.\n");
+ /* TRANSLATORS: this is the end of "Connecting to %s (port %s) ... " */
+ fprintf_ln(stderr, _("done."));
return sockfd;
}
@@ -842,9 +845,9 @@ static struct child_process *git_proxy_connect(int fd[2], char *host)
get_host_and_port(&host, &port);
if (looks_like_command_line_option(host))
- die("strange hostname '%s' blocked", host);
+ die(_("strange hostname '%s' blocked"), host);
if (looks_like_command_line_option(port))
- die("strange port '%s' blocked", port);
+ die(_("strange port '%s' blocked"), port);
proxy = xmalloc(sizeof(*proxy));
child_process_init(proxy);
@@ -854,7 +857,7 @@ static struct child_process *git_proxy_connect(int fd[2], char *host)
proxy->in = -1;
proxy->out = -1;
if (start_command(proxy))
- die("cannot start proxy %s", git_proxy_command);
+ die(_("cannot start proxy %s"), git_proxy_command);
fd[0] = proxy->out; /* read from proxy stdout */
fd[1] = proxy->in; /* write to proxy stdin */
return proxy;
@@ -921,7 +924,7 @@ static enum protocol parse_connect_url(const char *url_orig, char **ret_host,
path = strchr(end, separator);
if (!path || !*path)
- die("No path specified. See 'man git-pull' for valid url syntax");
+ die(_("no path specified; see 'git help pull' for valid url syntax"));
/*
* null-terminate hostname and point path to ~ for URL's like this:
@@ -1116,7 +1119,7 @@ static void push_ssh_options(struct argv_array *args, struct argv_array *env,
case VARIANT_AUTO:
BUG("VARIANT_AUTO passed to push_ssh_options");
case VARIANT_SIMPLE:
- die("ssh variant 'simple' does not support -4");
+ die(_("ssh variant 'simple' does not support -4"));
case VARIANT_SSH:
case VARIANT_PLINK:
case VARIANT_PUTTY:
@@ -1128,7 +1131,7 @@ static void push_ssh_options(struct argv_array *args, struct argv_array *env,
case VARIANT_AUTO:
BUG("VARIANT_AUTO passed to push_ssh_options");
case VARIANT_SIMPLE:
- die("ssh variant 'simple' does not support -6");
+ die(_("ssh variant 'simple' does not support -6"));
case VARIANT_SSH:
case VARIANT_PLINK:
case VARIANT_PUTTY:
@@ -1145,7 +1148,7 @@ static void push_ssh_options(struct argv_array *args, struct argv_array *env,
case VARIANT_AUTO:
BUG("VARIANT_AUTO passed to push_ssh_options");
case VARIANT_SIMPLE:
- die("ssh variant 'simple' does not support setting port");
+ die(_("ssh variant 'simple' does not support setting port"));
case VARIANT_SSH:
argv_array_push(args, "-p");
break;
@@ -1168,7 +1171,7 @@ static void fill_ssh_args(struct child_process *conn, const char *ssh_host,
enum ssh_variant variant;
if (looks_like_command_line_option(ssh_host))
- die("strange hostname '%s' blocked", ssh_host);
+ die(_("strange hostname '%s' blocked"), ssh_host);
ssh = get_ssh_command();
if (ssh) {
@@ -1256,7 +1259,7 @@ struct child_process *git_connect(int fd[2], const char *url,
child_process_init(conn);
if (looks_like_command_line_option(path))
- die("strange pathname '%s' blocked", path);
+ die(_("strange pathname '%s' blocked"), path);
strbuf_addstr(&cmd, prog);
strbuf_addch(&cmd, ' ');
@@ -1301,7 +1304,7 @@ struct child_process *git_connect(int fd[2], const char *url,
argv_array_push(&conn->args, cmd.buf);
if (start_command(conn))
- die("unable to fork");
+ die(_("unable to fork"));
fd[0] = conn->out; /* read from child's stdout */
fd[1] = conn->in; /* write to child's stdin */
diff --git a/contrib/coccinelle/commit.cocci b/contrib/coccinelle/commit.cocci
index a7e9215..aec3345 100644
--- a/contrib/coccinelle/commit.cocci
+++ b/contrib/coccinelle/commit.cocci
@@ -12,7 +12,7 @@ expression c;
// These excluded functions must access c->maybe_tree directly.
@@
-identifier f !~ "^(get_commit_tree|get_commit_tree_in_graph|load_tree_for_commit)$";
+identifier f !~ "^(get_commit_tree|get_commit_tree_in_graph_one|load_tree_for_commit)$";
expression c;
@@
f(...) {...
diff --git a/contrib/mw-to-git/t/t9360-mw-to-git-clone.sh b/contrib/mw-to-git/t/t9360-mw-to-git-clone.sh
index 22f069d..cfbfe7d 100755
--- a/contrib/mw-to-git/t/t9360-mw-to-git-clone.sh
+++ b/contrib/mw-to-git/t/t9360-mw-to-git-clone.sh
@@ -247,7 +247,7 @@ test_expect_success 'Test of resistance to modification of category on wiki for
wiki_editpage Notconsidered "this page will not appear on local" false &&
wiki_editpage Othercategory "this page will not appear on local" false -c=Cattwo &&
wiki_editpage Tobeedited "this page have been modified" true -c=Catone &&
- wiki_delete_page Tobedeleted
+ wiki_delete_page Tobedeleted &&
git clone -c remote.origin.categories="Catone" \
mediawiki::'"$WIKI_URL"' mw_dir_14 &&
wiki_getallpage ref_page_14 Catone &&
diff --git a/contrib/subtree/t/t7900-subtree.sh b/contrib/subtree/t/t7900-subtree.sh
index d05c613..57ff4b2 100755
--- a/contrib/subtree/t/t7900-subtree.sh
+++ b/contrib/subtree/t/t7900-subtree.sh
@@ -540,26 +540,10 @@ test_expect_success 'make sure exactly the right set of files ends up in the sub
git fetch .. subproj-br &&
git merge FETCH_HEAD &&
- chks="sub1
-sub2
-sub3
-sub4" &&
- chks_sub=$(cat <<TXT | sed '\''s,^,sub dir/,'\''
-$chks
-TXT
-) &&
- chkms="main-sub1
-main-sub2
-main-sub3
-main-sub4" &&
- chkms_sub=$(cat <<TXT | sed '\''s,^,sub dir/,'\''
-$chkms
-TXT
-) &&
-
- subfiles=$(git ls-files) &&
- check_equal "$subfiles" "$chkms
-$chks"
+ test_write_lines main-sub1 main-sub2 main-sub3 main-sub4 \
+ sub1 sub2 sub3 sub4 >expect &&
+ git ls-files >actual &&
+ test_cmp expect actual
)
'
@@ -606,25 +590,11 @@ test_expect_success 'make sure the subproj *only* contains commits that affect t
git fetch .. subproj-br &&
git merge FETCH_HEAD &&
- chks="sub1
-sub2
-sub3
-sub4" &&
- chks_sub=$(cat <<TXT | sed '\''s,^,sub dir/,'\''
-$chks
-TXT
-) &&
- chkms="main-sub1
-main-sub2
-main-sub3
-main-sub4" &&
- chkms_sub=$(cat <<TXT | sed '\''s,^,sub dir/,'\''
-$chkms
-TXT
-) &&
- allchanges=$(git log --name-only --pretty=format:"" | sort | sed "/^$/d") &&
- check_equal "$allchanges" "$chkms
-$chks"
+ test_write_lines main-sub1 main-sub2 main-sub3 main-sub4 \
+ sub1 sub2 sub3 sub4 >expect &&
+ git log --name-only --pretty=format:"" >log &&
+ sort <log | sed "/^\$/ d" >actual &&
+ test_cmp expect actual
)
'
@@ -675,29 +645,16 @@ test_expect_success 'make sure exactly the right set of files ends up in the mai
cd "$subtree_test_count" &&
git subtree pull --prefix="sub dir" ./"sub proj" master &&
- chkm="main1
-main2" &&
- chks="sub1
-sub2
-sub3
-sub4" &&
- chks_sub=$(cat <<TXT | sed '\''s,^,sub dir/,'\''
-$chks
-TXT
-) &&
- chkms="main-sub1
-main-sub2
-main-sub3
-main-sub4" &&
- chkms_sub=$(cat <<TXT | sed '\''s,^,sub dir/,'\''
-$chkms
-TXT
-) &&
- mainfiles=$(git ls-files) &&
- check_equal "$mainfiles" "$chkm
-$chkms_sub
-$chks_sub"
-)
+ test_write_lines main1 main2 >chkm &&
+ test_write_lines main-sub1 main-sub2 main-sub3 main-sub4 >chkms &&
+ sed "s,^,sub dir/," chkms >chkms_sub &&
+ test_write_lines sub1 sub2 sub3 sub4 >chks &&
+ sed "s,^,sub dir/," chks >chks_sub &&
+
+ cat chkm chkms_sub chks_sub >expect &&
+ git ls-files >actual &&
+ test_cmp expect actual
+ )
'
next_test
@@ -708,7 +665,7 @@ test_expect_success 'make sure each filename changed exactly once in the entire
test_create_commit "$subtree_test_count/sub proj" sub1 &&
(
cd "$subtree_test_count" &&
- git config log.date relative
+ git config log.date relative &&
git fetch ./"sub proj" master &&
git subtree add --prefix="sub dir" FETCH_HEAD
) &&
@@ -748,37 +705,21 @@ test_expect_success 'make sure each filename changed exactly once in the entire
cd "$subtree_test_count" &&
git subtree pull --prefix="sub dir" ./"sub proj" master &&
- chkm="main1
-main2" &&
- chks="sub1
-sub2
-sub3
-sub4" &&
- chks_sub=$(cat <<TXT | sed '\''s,^,sub dir/,'\''
-$chks
-TXT
-) &&
- chkms="main-sub1
-main-sub2
-main-sub3
-main-sub4" &&
- chkms_sub=$(cat <<TXT | sed '\''s,^,sub dir/,'\''
-$chkms
-TXT
-) &&
+ test_write_lines main1 main2 >chkm &&
+ test_write_lines sub1 sub2 sub3 sub4 >chks &&
+ test_write_lines main-sub1 main-sub2 main-sub3 main-sub4 >chkms &&
+ sed "s,^,sub dir/," chkms >chkms_sub &&
# main-sub?? and /"sub dir"/main-sub?? both change, because those are the
# changes that were split into their own history. And "sub dir"/sub?? never
# change, since they were *only* changed in the subtree branch.
- allchanges=$(git log --name-only --pretty=format:"" | sort | sed "/^$/d") &&
- expected=''"$(cat <<TXT | sort
-$chkms
-$chkm
-$chks
-$chkms_sub
-TXT
-)"'' &&
- check_equal "$allchanges" "$expected"
+ git log --name-only --pretty=format:"" >log &&
+ sort <log >sorted-log &&
+ sed "/^$/ d" sorted-log >actual &&
+
+ cat chkms chkm chks chkms_sub >expect-unsorted &&
+ sort expect-unsorted >expect &&
+ test_cmp expect actual
)
'
diff --git a/contrib/vscode/.gitattributes b/contrib/vscode/.gitattributes
new file mode 100644
index 0000000..e89f223
--- /dev/null
+++ b/contrib/vscode/.gitattributes
@@ -0,0 +1 @@
+init.sh whitespace=-indent-with-non-tab
diff --git a/contrib/vscode/README.md b/contrib/vscode/README.md
new file mode 100644
index 0000000..8202d62
--- /dev/null
+++ b/contrib/vscode/README.md
@@ -0,0 +1,14 @@
+Configuration for VS Code
+=========================
+
+[VS Code](https://code.visualstudio.com/) is a lightweight but powerful source
+code editor which runs on your desktop and is available for
+[Windows](https://code.visualstudio.com/docs/setup/windows),
+[macOS](https://code.visualstudio.com/docs/setup/mac) and
+[Linux](https://code.visualstudio.com/docs/setup/linux). Among other languages,
+it has [support for C/C++ via an extension](https://github.com/Microsoft/vscode-cpptools).
+
+To start developing Git with VS Code, simply run the Unix shell script called
+`init.sh` in this directory, which creates the configuration files in
+`.vscode/` that VS Code consumes. `init.sh` needs access to `make` and `gcc`,
+so run the script in a Git SDK shell if you are using Windows.
diff --git a/contrib/vscode/init.sh b/contrib/vscode/init.sh
new file mode 100755
index 0000000..27de949
--- /dev/null
+++ b/contrib/vscode/init.sh
@@ -0,0 +1,375 @@
+#!/bin/sh
+
+die () {
+ echo "$*" >&2
+ exit 1
+}
+
+cd "$(dirname "$0")"/../.. ||
+die "Could not cd to top-level directory"
+
+mkdir -p .vscode ||
+die "Could not create .vscode/"
+
+# General settings
+
+cat >.vscode/settings.json.new <<\EOF ||
+{
+ "C_Cpp.intelliSenseEngine": "Default",
+ "C_Cpp.intelliSenseEngineFallback": "Disabled",
+ "[git-commit]": {
+ "editor.wordWrap": "wordWrapColumn",
+ "editor.wordWrapColumn": 72
+ },
+ "[c]": {
+ "editor.detectIndentation": false,
+ "editor.insertSpaces": false,
+ "editor.tabSize": 8,
+ "editor.wordWrap": "wordWrapColumn",
+ "editor.wordWrapColumn": 80,
+ "files.trimTrailingWhitespace": true
+ },
+ "files.associations": {
+ "*.h": "c",
+ "*.c": "c"
+ },
+ "cSpell.ignorePaths": [
+ ],
+ "cSpell.words": [
+ "DATAW",
+ "DBCACHED",
+ "DFCHECK",
+ "DTYPE",
+ "Hamano",
+ "HCAST",
+ "HEXSZ",
+ "HKEY",
+ "HKLM",
+ "IFGITLINK",
+ "IFINVALID",
+ "ISBROKEN",
+ "ISGITLINK",
+ "ISSYMREF",
+ "Junio",
+ "LPDWORD",
+ "LPPROC",
+ "LPWSTR",
+ "MSVCRT",
+ "NOARG",
+ "NOCOMPLETE",
+ "NOINHERIT",
+ "RENORMALIZE",
+ "STARTF",
+ "STARTUPINFOEXW",
+ "Schindelin",
+ "UCRT",
+ "YESNO",
+ "argcp",
+ "beginthreadex",
+ "committish",
+ "contentp",
+ "cpath",
+ "cpidx",
+ "ctim",
+ "dequote",
+ "envw",
+ "ewah",
+ "fdata",
+ "fherr",
+ "fhin",
+ "fhout",
+ "fragp",
+ "fsmonitor",
+ "hnsec",
+ "idents",
+ "includeif",
+ "interpr",
+ "iprog",
+ "isexe",
+ "iskeychar",
+ "kompare",
+ "mksnpath",
+ "mktag",
+ "mktree",
+ "mmblob",
+ "mmbuffer",
+ "mmfile",
+ "noenv",
+ "nparents",
+ "ntpath",
+ "ondisk",
+ "ooid",
+ "oplen",
+ "osdl",
+ "pnew",
+ "pold",
+ "ppinfo",
+ "pushf",
+ "pushv",
+ "rawsz",
+ "rebasing",
+ "reencode",
+ "repo",
+ "rerere",
+ "scld",
+ "sharedrepo",
+ "spawnv",
+ "spawnve",
+ "spawnvpe",
+ "strdup'ing",
+ "submodule",
+ "submodules",
+ "topath",
+ "topo",
+ "tpatch",
+ "unexecutable",
+ "unhide",
+ "unkc",
+ "unkv",
+ "unmark",
+ "unmatch",
+ "unsets",
+ "unshown",
+ "untracked",
+ "untrackedcache",
+ "unuse",
+ "upos",
+ "uval",
+ "vreportf",
+ "wargs",
+ "wargv",
+ "wbuffer",
+ "wcmd",
+ "wcsnicmp",
+ "wcstoutfdup",
+ "wdeltaenv",
+ "wdir",
+ "wenv",
+ "wenvblk",
+ "wenvcmp",
+ "wenviron",
+ "wenvpos",
+ "wenvsz",
+ "wfile",
+ "wfilename",
+ "wfopen",
+ "wfreopen",
+ "wfullpath",
+ "which'll",
+ "wlink",
+ "wmain",
+ "wmkdir",
+ "wmktemp",
+ "wnewpath",
+ "wotype",
+ "wpath",
+ "wpathname",
+ "wpgmptr",
+ "wpnew",
+ "wpointer",
+ "wpold",
+ "wpos",
+ "wputenv",
+ "wrmdir",
+ "wship",
+ "wtarget",
+ "wtemplate",
+ "wunlink",
+ "xcalloc",
+ "xgetcwd",
+ "xmallocz",
+ "xmemdupz",
+ "xmmap",
+ "xopts",
+ "xrealloc",
+ "xsnprintf",
+ "xutftowcs",
+ "xutftowcsn",
+ "xwcstoutf"
+ ],
+ "cSpell.ignoreRegExpList": [
+ "\\\"(DIRC|FSMN|REUC|UNTR)\\\"",
+ "\\\\u[0-9a-fA-Fx]{4}\\b",
+ "\\b(filfre|frotz|xyzzy)\\b",
+ "\\bCMIT_FMT_DEFAULT\\b",
+ "\\bde-munge\\b",
+ "\\bGET_OID_DISAMBIGUATORS\\b",
+ "\\bHASH_RENORMALIZE\\b",
+ "\\bTREESAMEness\\b",
+ "\\bUSE_STDEV\\b",
+ "\\Wchar *\\*\\W*utfs\\W",
+ "cURL's",
+ "nedmalloc'ed",
+ "ntifs\\.h",
+ ],
+}
+EOF
+die "Could not write settings.json"
+
+# Infer some setup-specific locations/names
+
+GCCPATH="$(which gcc)"
+GDBPATH="$(which gdb)"
+MAKECOMMAND="make -j5 DEVELOPER=1"
+OSNAME=
+X=
+case "$(uname -s)" in
+MINGW*)
+ GCCPATH="$(cygpath -am "$GCCPATH")"
+ GDBPATH="$(cygpath -am "$GDBPATH")"
+ MAKE_BASH="$(cygpath -am /git-cmd.exe) --command=usr\\\\bin\\\\bash.exe"
+ MAKECOMMAND="$MAKE_BASH -lc \\\"$MAKECOMMAND\\\""
+ OSNAME=Win32
+ X=.exe
+ ;;
+Linux)
+ OSNAME=Linux
+ ;;
+Darwin)
+ OSNAME=macOS
+ ;;
+esac
+
+# Default build task
+
+cat >.vscode/tasks.json.new <<EOF ||
+{
+ // See https://go.microsoft.com/fwlink/?LinkId=733558
+ // for the documentation about the tasks.json format
+ "version": "2.0.0",
+ "tasks": [
+ {
+ "label": "make",
+ "type": "shell",
+ "command": "$MAKECOMMAND",
+ "group": {
+ "kind": "build",
+ "isDefault": true
+ }
+ }
+ ]
+}
+EOF
+die "Could not install default build task"
+
+# Debugger settings
+
+cat >.vscode/launch.json.new <<EOF ||
+{
+ // Use IntelliSense to learn about possible attributes.
+ // Hover to view descriptions of existing attributes.
+ // For more information, visit:
+ // https://go.microsoft.com/fwlink/?linkid=830387
+ "version": "0.2.0",
+ "configurations": [
+ {
+ "name": "(gdb) Launch",
+ "type": "cppdbg",
+ "request": "launch",
+ "program": "\${workspaceFolder}/git$X",
+ "args": [],
+ "stopAtEntry": false,
+ "cwd": "\${workspaceFolder}",
+ "environment": [],
+ "externalConsole": true,
+ "MIMode": "gdb",
+ "miDebuggerPath": "$GDBPATH",
+ "setupCommands": [
+ {
+ "description": "Enable pretty-printing for gdb",
+ "text": "-enable-pretty-printing",
+ "ignoreFailures": true
+ }
+ ]
+ }
+ ]
+}
+EOF
+die "Could not write launch configuration"
+
+# C/C++ extension settings
+
+make -f - OSNAME=$OSNAME GCCPATH="$GCCPATH" vscode-init \
+ >.vscode/c_cpp_properties.json <<\EOF ||
+include Makefile
+
+vscode-init:
+ @mkdir -p .vscode && \
+ incs= && defs= && \
+ for e in $(ALL_CFLAGS) \
+ '-DGIT_EXEC_PATH="$(gitexecdir_SQ)"' \
+ '-DGIT_LOCALE_PATH="$(localedir_relative_SQ)"' \
+ '-DBINDIR="$(bindir_relative_SQ)"' \
+ '-DFALLBACK_RUNTIME_PREFIX="$(prefix_SQ)"' \
+ '-DDEFAULT_GIT_TEMPLATE_DIR="$(template_dir_SQ)"' \
+ '-DETC_GITCONFIG="$(ETC_GITCONFIG_SQ)"' \
+ '-DETC_GITATTRIBUTES="$(ETC_GITATTRIBUTES_SQ)"' \
+ '-DGIT_LOCALE_PATH="$(localedir_relative_SQ)"' \
+ '-DCURL_DISABLE_TYPECHECK', \
+ '-DGIT_HTML_PATH="$(htmldir_relative_SQ)"' \
+ '-DGIT_MAN_PATH="$(mandir_relative_SQ)"' \
+ '-DGIT_INFO_PATH="$(infodir_relative_SQ)"'; do \
+ case "$$e" in \
+ -I.) \
+ incs="$$(printf '% 16s"$${workspaceRoot}",\n%s' \
+ "" "$$incs")" \
+ ;; \
+ -I/*) \
+ incs="$$(printf '% 16s"%s",\n%s' \
+ "" "$${e#-I}" "$$incs")" \
+ ;; \
+ -I*) \
+ incs="$$(printf '% 16s"$${workspaceRoot}/%s",\n%s' \
+ "" "$${e#-I}" "$$incs")" \
+ ;; \
+ -D*) \
+ defs="$$(printf '% 16s"%s",\n%s' \
+ "" "$$(echo "$${e#-D}" | sed 's/"/\\&/g')" \
+ "$$defs")" \
+ ;; \
+ esac; \
+ done && \
+ echo '{' && \
+ echo ' "configurations": [' && \
+ echo ' {' && \
+ echo ' "name": "$(OSNAME)",' && \
+ echo ' "intelliSenseMode": "clang-x64",' && \
+ echo ' "includePath": [' && \
+ echo "$$incs" | sort | sed '$$s/,$$//' && \
+ echo ' ],' && \
+ echo ' "defines": [' && \
+ echo "$$defs" | sort | sed '$$s/,$$//' && \
+ echo ' ],' && \
+ echo ' "browse": {' && \
+ echo ' "limitSymbolsToIncludedHeaders": true,' && \
+ echo ' "databaseFilename": "",' && \
+ echo ' "path": [' && \
+ echo ' "$${workspaceRoot}"' && \
+ echo ' ]' && \
+ echo ' },' && \
+ echo ' "cStandard": "c11",' && \
+ echo ' "cppStandard": "c++17",' && \
+ echo ' "compilerPath": "$(GCCPATH)"' && \
+ echo ' }' && \
+ echo ' ],' && \
+ echo ' "version": 4' && \
+ echo '}'
+EOF
+die "Could not write settings for the C/C++ extension"
+
+for file in .vscode/settings.json .vscode/tasks.json .vscode/launch.json
+do
+ if test -f $file
+ then
+ if git diff --no-index --quiet --exit-code $file $file.new
+ then
+ rm $file.new
+ else
+ printf "The file $file.new has these changes:\n\n"
+ git --no-pager diff --no-index $file $file.new
+ printf "\n\nMaybe \`mv $file.new $file\`?\n\n"
+ fi
+ else
+ mv $file.new $file
+ fi
+done
diff --git a/convert.c b/convert.c
index 7907efd..ce7ea0d 100644
--- a/convert.c
+++ b/convert.c
@@ -191,7 +191,7 @@ static enum eol output_eol(enum crlf_action crlf_action)
/* fall through */
return text_eol_is_crlf() ? EOL_CRLF : EOL_LF;
}
- warning("Illegal crlf_action %d\n", (int)crlf_action);
+ warning(_("illegal crlf_action %d"), (int)crlf_action);
return core_eol;
}
@@ -204,11 +204,11 @@ static void check_global_conv_flags_eol(const char *path, enum crlf_action crlf_
* CRLFs would not be restored by checkout
*/
if (conv_flags & CONV_EOL_RNDTRP_DIE)
- die(_("CRLF would be replaced by LF in %s."), path);
+ die(_("CRLF would be replaced by LF in %s"), path);
else if (conv_flags & CONV_EOL_RNDTRP_WARN)
warning(_("CRLF will be replaced by LF in %s.\n"
"The file will have its original line"
- " endings in your working directory."), path);
+ " endings in your working directory"), path);
} else if (old_stats->lonelf && !new_stats->lonelf ) {
/*
* CRLFs would be added by checkout
@@ -218,7 +218,7 @@ static void check_global_conv_flags_eol(const char *path, enum crlf_action crlf_
else if (conv_flags & CONV_EOL_RNDTRP_WARN)
warning(_("LF will be replaced by CRLF in %s.\n"
"The file will have its original line"
- " endings in your working directory."), path);
+ " endings in your working directory"), path);
}
}
@@ -390,7 +390,7 @@ static int encode_to_git(const char *path, const char *src, size_t src_len,
struct strbuf *buf, const char *enc, int conv_flags)
{
char *dst;
- int dst_len;
+ size_t dst_len;
int die_on_error = conv_flags & CONV_WRITE_OBJECT;
/*
@@ -453,7 +453,7 @@ static int encode_to_git(const char *path, const char *src, size_t src_len,
*/
if (die_on_error && check_roundtrip(enc)) {
char *re_src;
- int re_src_len;
+ size_t re_src_len;
re_src = reencode_string_len(dst, dst_len,
enc, default_encoding,
@@ -481,7 +481,7 @@ static int encode_to_worktree(const char *path, const char *src, size_t src_len,
struct strbuf *buf, const char *enc)
{
char *dst;
- int dst_len;
+ size_t dst_len;
/*
* No encoding is specified or there is nothing to encode.
@@ -493,8 +493,8 @@ static int encode_to_worktree(const char *path, const char *src, size_t src_len,
dst = reencode_string_len(src, src_len, enc, default_encoding,
&dst_len);
if (!dst) {
- error("failed to encode '%s' from %s to %s",
- path, default_encoding, enc);
+ error(_("failed to encode '%s' from %s to %s"),
+ path, default_encoding, enc);
return 0;
}
@@ -671,7 +671,8 @@ static int filter_buffer_or_fd(int in, int out, void *data)
if (start_command(&child_process)) {
strbuf_release(&cmd);
- return error("cannot fork to run external filter '%s'", params->cmd);
+ return error(_("cannot fork to run external filter '%s'"),
+ params->cmd);
}
sigchain_push(SIGPIPE, SIG_IGN);
@@ -690,13 +691,14 @@ static int filter_buffer_or_fd(int in, int out, void *data)
if (close(child_process.in))
write_err = 1;
if (write_err)
- error("cannot feed the input to external filter '%s'", params->cmd);
+ error(_("cannot feed the input to external filter '%s'"),
+ params->cmd);
sigchain_pop(SIGPIPE);
status = finish_command(&child_process);
if (status)
- error("external filter '%s' failed %d", params->cmd, status);
+ error(_("external filter '%s' failed %d"), params->cmd, status);
strbuf_release(&cmd);
return (write_err || status);
@@ -731,13 +733,13 @@ static int apply_single_file_filter(const char *path, const char *src, size_t le
return 0; /* error was already reported */
if (strbuf_read(&nbuf, async.out, len) < 0) {
- err = error("read from external filter '%s' failed", cmd);
+ err = error(_("read from external filter '%s' failed"), cmd);
}
if (close(async.out)) {
- err = error("read from external filter '%s' failed", cmd);
+ err = error(_("read from external filter '%s' failed"), cmd);
}
if (finish_async(&async)) {
- err = error("external filter '%s' failed", cmd);
+ err = error(_("external filter '%s' failed"), cmd);
}
if (!err) {
@@ -791,7 +793,7 @@ static void handle_filter_error(const struct strbuf *filter_status,
* Something went wrong with the protocol filter.
* Force shutdown and restart if another blob requires filtering.
*/
- error("external filter '%s' failed", entry->subprocess.cmd);
+ error(_("external filter '%s' failed"), entry->subprocess.cmd);
subprocess_stop(&subprocess_map, &entry->subprocess);
free(entry);
}
@@ -839,7 +841,7 @@ static int apply_multi_file_filter(const char *path, const char *src, size_t len
else if (wanted_capability & CAP_SMUDGE)
filter_type = "smudge";
else
- die("unexpected filter type");
+ die(_("unexpected filter type"));
sigchain_push(SIGPIPE, SIG_IGN);
@@ -850,7 +852,7 @@ static int apply_multi_file_filter(const char *path, const char *src, size_t len
err = strlen(path) > LARGE_PACKET_DATA_MAX - strlen("pathname=\n");
if (err) {
- error("path name too long for external filter");
+ error(_("path name too long for external filter"));
goto done;
}
@@ -924,8 +926,8 @@ int async_query_available_blobs(const char *cmd, struct string_list *available_p
assert(subprocess_map_initialized);
entry = (struct cmd2process *)subprocess_find_entry(&subprocess_map, cmd);
if (!entry) {
- error("external filter '%s' is not available anymore although "
- "not all paths have been filtered", cmd);
+ error(_("external filter '%s' is not available anymore although "
+ "not all paths have been filtered"), cmd);
return 0;
}
process = &entry->subprocess.process;
@@ -1396,7 +1398,7 @@ int convert_to_git(const struct index_state *istate,
ret |= apply_filter(path, src, len, -1, dst, ca.drv, CAP_CLEAN, NULL);
if (!ret && ca.drv && ca.drv->required)
- die("%s: clean filter '%s' failed", path, ca.drv->name);
+ die(_("%s: clean filter '%s' failed"), path, ca.drv->name);
if (ret && dst) {
src = dst->buf;
@@ -1430,7 +1432,7 @@ void convert_to_git_filter_fd(const struct index_state *istate,
assert(ca.drv->clean || ca.drv->process);
if (!apply_filter(path, NULL, 0, fd, dst, ca.drv, CAP_CLEAN, NULL))
- die("%s: clean filter '%s' failed", path, ca.drv->name);
+ die(_("%s: clean filter '%s' failed"), path, ca.drv->name);
encode_to_git(path, dst->buf, dst->len, dst, ca.working_tree_encoding, conv_flags);
crlf_to_git(istate, path, dst->buf, dst->len, dst, ca.crlf_action, conv_flags);
@@ -1473,7 +1475,7 @@ static int convert_to_working_tree_internal(const char *path, const char *src,
ret_filter = apply_filter(
path, src, len, -1, dst, ca.drv, CAP_SMUDGE, dco);
if (!ret_filter && ca.drv && ca.drv->required)
- die("%s: smudge filter %s failed", path, ca.drv->name);
+ die(_("%s: smudge filter %s failed"), path, ca.drv->name);
return ret | ret_filter;
}
diff --git a/convert.h b/convert.h
index 01385d9..0a0fa15 100644
--- a/convert.h
+++ b/convert.h
@@ -57,35 +57,36 @@ struct delayed_checkout {
extern enum eol core_eol;
extern char *check_roundtrip_encoding;
-extern const char *get_cached_convert_stats_ascii(const struct index_state *istate,
- const char *path);
-extern const char *get_wt_convert_stats_ascii(const char *path);
-extern const char *get_convert_attr_ascii(const char *path);
+const char *get_cached_convert_stats_ascii(const struct index_state *istate,
+ const char *path);
+const char *get_wt_convert_stats_ascii(const char *path);
+const char *get_convert_attr_ascii(const char *path);
/* returns 1 if *dst was used */
-extern int convert_to_git(const struct index_state *istate,
- const char *path, const char *src, size_t len,
- struct strbuf *dst, int conv_flags);
-extern int convert_to_working_tree(const char *path, const char *src,
- size_t len, struct strbuf *dst);
-extern int async_convert_to_working_tree(const char *path, const char *src,
- size_t len, struct strbuf *dst,
- void *dco);
-extern int async_query_available_blobs(const char *cmd, struct string_list *available_paths);
-extern int renormalize_buffer(const struct index_state *istate,
- const char *path, const char *src, size_t len,
- struct strbuf *dst);
+int convert_to_git(const struct index_state *istate,
+ const char *path, const char *src, size_t len,
+ struct strbuf *dst, int conv_flags);
+int convert_to_working_tree(const char *path, const char *src,
+ size_t len, struct strbuf *dst);
+int async_convert_to_working_tree(const char *path, const char *src,
+ size_t len, struct strbuf *dst,
+ void *dco);
+int async_query_available_blobs(const char *cmd,
+ struct string_list *available_paths);
+int renormalize_buffer(const struct index_state *istate,
+ const char *path, const char *src, size_t len,
+ struct strbuf *dst);
static inline int would_convert_to_git(const struct index_state *istate,
const char *path)
{
return convert_to_git(istate, path, NULL, 0, NULL, 0);
}
/* Precondition: would_convert_to_git_filter_fd(path) == true */
-extern void convert_to_git_filter_fd(const struct index_state *istate,
- const char *path, int fd,
- struct strbuf *dst,
- int conv_flags);
-extern int would_convert_to_git_filter_fd(const char *path);
+void convert_to_git_filter_fd(const struct index_state *istate,
+ const char *path, int fd,
+ struct strbuf *dst,
+ int conv_flags);
+int would_convert_to_git_filter_fd(const char *path);
/*****************************************************************
*
@@ -95,9 +96,10 @@ extern int would_convert_to_git_filter_fd(const char *path);
struct stream_filter; /* opaque */
-extern struct stream_filter *get_stream_filter(const char *path, const struct object_id *);
-extern void free_stream_filter(struct stream_filter *);
-extern int is_null_stream_filter(struct stream_filter *);
+struct stream_filter *get_stream_filter(const char *path,
+ const struct object_id *);
+void free_stream_filter(struct stream_filter *);
+int is_null_stream_filter(struct stream_filter *);
/*
* Use as much input up to *isize_p and fill output up to *osize_p;
@@ -111,8 +113,8 @@ extern int is_null_stream_filter(struct stream_filter *);
* such filters know there is no more input coming and it is time for
* them to produce the remaining output based on the buffered input.
*/
-extern int stream_filter(struct stream_filter *,
- const char *input, size_t *isize_p,
- char *output, size_t *osize_p);
+int stream_filter(struct stream_filter *,
+ const char *input, size_t *isize_p,
+ char *output, size_t *osize_p);
#endif /* CONVERT_H */
diff --git a/diff-lib.c b/diff-lib.c
index a9f38eb..732f684 100644
--- a/diff-lib.c
+++ b/diff-lib.c
@@ -520,6 +520,9 @@ int run_diff_index(struct rev_info *revs, int cached)
struct object_array_entry *ent;
uint64_t start = getnanotime();
+ if (revs->pending.nr != 1)
+ BUG("run_diff_index must be passed exactly one tree");
+
ent = revs->pending.objects;
if (diff_cache(revs, &ent->item->oid, ent->name, cached))
exit(128);
diff --git a/diff.c b/diff.c
index dc53a19..f830afa 100644
--- a/diff.c
+++ b/diff.c
@@ -37,6 +37,7 @@ static int diff_rename_limit_default = 400;
static int diff_suppress_blank_empty;
static int diff_use_color_default = -1;
static int diff_color_moved_default;
+static int diff_color_moved_ws_default;
static int diff_context_default = 3;
static int diff_interhunk_context_default;
static const char *diff_word_regex_cfg;
@@ -264,14 +265,54 @@ static int parse_color_moved(const char *arg)
return COLOR_MOVED_NO;
else if (!strcmp(arg, "plain"))
return COLOR_MOVED_PLAIN;
+ else if (!strcmp(arg, "blocks"))
+ return COLOR_MOVED_BLOCKS;
else if (!strcmp(arg, "zebra"))
return COLOR_MOVED_ZEBRA;
else if (!strcmp(arg, "default"))
return COLOR_MOVED_DEFAULT;
+ else if (!strcmp(arg, "dimmed-zebra"))
+ return COLOR_MOVED_ZEBRA_DIM;
else if (!strcmp(arg, "dimmed_zebra"))
return COLOR_MOVED_ZEBRA_DIM;
else
- return error(_("color moved setting must be one of 'no', 'default', 'zebra', 'dimmed_zebra', 'plain'"));
+ return error(_("color moved setting must be one of 'no', 'default', 'blocks', 'zebra', 'dimmed-zebra', 'plain'"));
+}
+
+static int parse_color_moved_ws(const char *arg)
+{
+ int ret = 0;
+ struct string_list l = STRING_LIST_INIT_DUP;
+ struct string_list_item *i;
+
+ string_list_split(&l, arg, ',', -1);
+
+ for_each_string_list_item(i, &l) {
+ struct strbuf sb = STRBUF_INIT;
+ strbuf_addstr(&sb, i->string);
+ strbuf_trim(&sb);
+
+ if (!strcmp(sb.buf, "ignore-space-change"))
+ ret |= XDF_IGNORE_WHITESPACE_CHANGE;
+ else if (!strcmp(sb.buf, "ignore-space-at-eol"))
+ ret |= XDF_IGNORE_WHITESPACE_AT_EOL;
+ else if (!strcmp(sb.buf, "ignore-all-space"))
+ ret |= XDF_IGNORE_WHITESPACE;
+ else if (!strcmp(sb.buf, "allow-indentation-change"))
+ ret |= COLOR_MOVED_WS_ALLOW_INDENTATION_CHANGE;
+ else
+ error(_("ignoring unknown color-moved-ws mode '%s'"), sb.buf);
+
+ strbuf_release(&sb);
+ }
+
+ if ((ret & COLOR_MOVED_WS_ALLOW_INDENTATION_CHANGE) &&
+ (ret & XDF_WHITESPACE_FLAGS))
+ die(_("color-moved-ws: allow-indentation-change cannot be combined with other white space modes"));
+
+ string_list_clear(&l, 0);
+
+ return ret;
}
int git_diff_ui_config(const char *var, const char *value, void *cb)
@@ -287,6 +328,13 @@ int git_diff_ui_config(const char *var, const char *value, void *cb)
diff_color_moved_default = cm;
return 0;
}
+ if (!strcmp(var, "diff.colormovedws")) {
+ int cm = parse_color_moved_ws(value);
+ if (cm < 0)
+ return -1;
+ diff_color_moved_ws_default = cm;
+ return 0;
+ }
if (!strcmp(var, "diff.context")) {
diff_context_default = git_config_int(var, value);
if (diff_context_default < 0)
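
The keywords accepted for the new diff.colorMovedWS knob come straight from parse_color_moved_ws() above: ignore-space-change, ignore-space-at-eol, ignore-all-space and allow-indentation-change, with the last being mutually exclusive with the XDF whitespace modes. A small sketch (illustration only, not part of this patch; the helper name is invented, and it assumes diff.c's existing includes) that mirrors the keyword-to-flag mapping:

	static unsigned ws_mode_to_flag(const char *mode)
	{
		if (!strcmp(mode, "ignore-space-change"))
			return XDF_IGNORE_WHITESPACE_CHANGE;
		if (!strcmp(mode, "ignore-space-at-eol"))
			return XDF_IGNORE_WHITESPACE_AT_EOL;
		if (!strcmp(mode, "ignore-all-space"))
			return XDF_IGNORE_WHITESPACE;
		if (!strcmp(mode, "allow-indentation-change"))
			return COLOR_MOVED_WS_ALLOW_INDENTATION_CHANGE;
		return 0; /* unknown mode, reported by the real parser */
	}
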
@@ -698,16 +746,116 @@ struct moved_entry {
struct hashmap_entry ent;
const struct emitted_diff_symbol *es;
struct moved_entry *next_line;
+ struct ws_delta *wsd;
};
-static int moved_entry_cmp(const struct diff_options *diffopt,
- const struct moved_entry *a,
- const struct moved_entry *b,
+/**
+ * The struct ws_delta holds white space differences between moved lines, i.e.
+ * between '+' and '-' lines that have been detected to be a move.
+ * The string contains the difference in leading white spaces, before the
+ * rest of the line is compared using the white space config for move
+ * coloring. The current_longer indicates if the first string in the
+ * comparison is longer than the second.
+ */
+struct ws_delta {
+ char *string;
+ unsigned int current_longer : 1;
+};
+#define WS_DELTA_INIT { NULL, 0 }
+
+static int compute_ws_delta(const struct emitted_diff_symbol *a,
+ const struct emitted_diff_symbol *b,
+ struct ws_delta *out)
+{
+ const struct emitted_diff_symbol *longer = a->len > b->len ? a : b;
+ const struct emitted_diff_symbol *shorter = a->len > b->len ? b : a;
+ int d = longer->len - shorter->len;
+
+ out->string = xmemdupz(longer->line, d);
+ out->current_longer = (a == longer);
+
+ return !strncmp(longer->line + d, shorter->line, shorter->len);
+}
+
+static int cmp_in_block_with_wsd(const struct diff_options *o,
+ const struct moved_entry *cur,
+ const struct moved_entry *match,
+ struct moved_entry *pmb,
+ int n)
+{
+ struct emitted_diff_symbol *l = &o->emitted_symbols->buf[n];
+ int al = cur->es->len, cl = l->len;
+ const char *a = cur->es->line,
+ *b = match->es->line,
+ *c = l->line;
+
+ int wslen;
+
+ /*
+ * We need to check if 'cur' is equal to 'match'.
+ * As those are from the same (+/-) side, we do not need to adjust for
+ * indent changes. However these were found using fuzzy matching
+ * so we do have to check if they are equal.
+ */
+ if (strcmp(a, b))
+ return 1;
+
+ if (!pmb->wsd)
+ /*
+ * No white space delta was carried forward? This can happen
+ * when we exit early in this function and do not carry
+ * forward ws.
+ */
+ return 1;
+
+ /*
+ * The indent changes of the block are known and carried forward in
+ * pmb->wsd; however we need to check if the indent changes of the
+ * current line are still the same as before.
+ *
+ * To do so we need to compare 'l' to 'cur', adjusting one of
+ * them for the white space, depending on which was longer.
+ */
+
+ wslen = strlen(pmb->wsd->string);
+ if (pmb->wsd->current_longer) {
+ c += wslen;
+ cl -= wslen;
+ } else {
+ a += wslen;
+ al -= wslen;
+ }
+
+ if (strcmp(a, c))
+ return 1;
+
+ return 0;
+}
+
+static int moved_entry_cmp(const void *hashmap_cmp_fn_data,
+ const void *entry,
+ const void *entry_or_key,
const void *keydata)
{
+ const struct diff_options *diffopt = hashmap_cmp_fn_data;
+ const struct moved_entry *a = entry;
+ const struct moved_entry *b = entry_or_key;
+ unsigned flags = diffopt->color_moved_ws_handling
+ & XDF_WHITESPACE_FLAGS;
+
+ if (diffopt->color_moved_ws_handling &
+ COLOR_MOVED_WS_ALLOW_INDENTATION_CHANGE)
+ /*
+ * As there is no specific white space config given,
+ * we'd need to check for a new block, so ignore all
+ * white space. The setup of the white space
+ * configuration for the next block is done elsewhere.
+ */
+ flags |= XDF_IGNORE_WHITESPACE;
+
return !xdiff_compare_lines(a->es->line, a->es->len,
b->es->line, b->es->len,
- diffopt->xdl_opts);
+ flags);
}
static struct moved_entry *prepare_entry(struct diff_options *o,
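
To make the white space bookkeeping above concrete, the following standalone sketch re-implements the essence of compute_ws_delta() on plain C strings (simplified; the in-tree version works on emitted_diff_symbol buffers): the longer line donates its extra leading white space as the delta, and the match only counts if the remainder equals the shorter line.

    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    struct ws_delta {
        char *string;                    /* extra leading white space of the longer line */
        unsigned int current_longer : 1; /* was the first argument the longer one? */
    };

    /* returns 1 if 'a' and 'b' differ only by their leading white space */
    static int compute_ws_delta(const char *a, const char *b, struct ws_delta *out)
    {
        const char *longer  = strlen(a) > strlen(b) ? a : b;
        const char *shorter = strlen(a) > strlen(b) ? b : a;
        size_t d = strlen(longer) - strlen(shorter);

        out->string = strndup(longer, d);
        out->current_longer = (longer == a);

        return !strncmp(longer + d, shorter, strlen(shorter));
    }

    int main(void)
    {
        struct ws_delta wsd;
        int match = compute_ws_delta("\t\treturn 0;", "\treturn 0;", &wsd);

        /* prints: match=1 delta_len=1 current_longer=1 */
        printf("match=%d delta_len=%zu current_longer=%u\n",
               match, strlen(wsd.string), wsd.current_longer);
        free(wsd.string);
        return 0;
    }
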
@@ -715,10 +863,12 @@ static struct moved_entry *prepare_entry(struct diff_options *o,
{
struct moved_entry *ret = xmalloc(sizeof(*ret));
struct emitted_diff_symbol *l = &o->emitted_symbols->buf[line_no];
+ unsigned flags = o->color_moved_ws_handling & XDF_WHITESPACE_FLAGS;
- ret->ent.hash = xdiff_hash_string(l->line, l->len, o->xdl_opts);
+ ret->ent.hash = xdiff_hash_string(l->line, l->len, flags);
ret->es = l;
ret->next_line = NULL;
+ ret->wsd = NULL;
return ret;
}
@@ -755,6 +905,56 @@ static void add_lines_to_move_detection(struct diff_options *o,
}
}
+static void pmb_advance_or_null(struct diff_options *o,
+ struct moved_entry *match,
+ struct hashmap *hm,
+ struct moved_entry **pmb,
+ int pmb_nr)
+{
+ int i;
+ for (i = 0; i < pmb_nr; i++) {
+ struct moved_entry *prev = pmb[i];
+ struct moved_entry *cur = (prev && prev->next_line) ?
+ prev->next_line : NULL;
+ if (cur && !hm->cmpfn(o, cur, match, NULL)) {
+ pmb[i] = cur;
+ } else {
+ pmb[i] = NULL;
+ }
+ }
+}
+
+static void pmb_advance_or_null_multi_match(struct diff_options *o,
+ struct moved_entry *match,
+ struct hashmap *hm,
+ struct moved_entry **pmb,
+ int pmb_nr, int n)
+{
+ int i;
+ char *got_match = xcalloc(1, pmb_nr);
+
+ for (; match; match = hashmap_get_next(hm, match)) {
+ for (i = 0; i < pmb_nr; i++) {
+ struct moved_entry *prev = pmb[i];
+ struct moved_entry *cur = (prev && prev->next_line) ?
+ prev->next_line : NULL;
+ if (!cur)
+ continue;
+ if (!cmp_in_block_with_wsd(o, cur, match, pmb[i], n))
+ got_match[i] |= 1;
+ }
+ }
+
+ for (i = 0; i < pmb_nr; i++) {
+ if (got_match[i]) {
+ /* Carry the white space delta forward */
+ pmb[i]->next_line->wsd = pmb[i]->wsd;
+ pmb[i] = pmb[i]->next_line;
+ } else
+ pmb[i] = NULL;
+ }
+}
+
static int shrink_potential_moved_blocks(struct moved_entry **pmb,
int pmb_nr)
{
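
Both helpers above perform the same per-line step over the array of potential moved blocks: follow each block to its next line if that line still matches, otherwise drop the block. A rough standalone sketch of that step, using toy types instead of struct moved_entry and the hashmap:

    #include <string.h>

    struct line_entry {
        const char *text;
        struct line_entry *next_line;
    };

    /*
     * Advance every still-alive potential moved block whose next line equals
     * 'match'; blocks that no longer match are set to NULL and later compacted
     * (cf. shrink_potential_moved_blocks()).
     */
    static void advance_or_null(struct line_entry **pmb, int pmb_nr,
                                const char *match)
    {
        int i;

        for (i = 0; i < pmb_nr; i++) {
            struct line_entry *next = pmb[i] ? pmb[i]->next_line : NULL;

            if (next && !strcmp(next->text, match))
                pmb[i] = next;
            else
                pmb[i] = NULL;
        }
    }

    int main(void)
    {
        struct line_entry b = { "bar", NULL };
        struct line_entry a = { "foo", &b };
        struct line_entry *pmb[] = { &a };

        advance_or_null(pmb, 1, "bar"); /* block advances to 'bar' */
        advance_or_null(pmb, 1, "baz"); /* no match: block is dropped */
        return pmb[0] == NULL ? 0 : 1;
    }
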
@@ -772,6 +972,10 @@ static int shrink_potential_moved_blocks(struct moved_entry **pmb,
if (lp < pmb_nr && rp > -1 && lp < rp) {
pmb[lp] = pmb[rp];
+ if (pmb[rp]->wsd) {
+ free(pmb[rp]->wsd->string);
+ FREE_AND_NULL(pmb[rp]->wsd);
+ }
pmb[rp] = NULL;
rp--;
lp++;
@@ -829,19 +1033,18 @@ static void mark_color_as_moved(struct diff_options *o,
struct moved_entry *key;
struct moved_entry *match = NULL;
struct emitted_diff_symbol *l = &o->emitted_symbols->buf[n];
- int i;
switch (l->s) {
case DIFF_SYMBOL_PLUS:
hm = del_lines;
key = prepare_entry(o, n);
- match = hashmap_get(hm, key, o);
+ match = hashmap_get(hm, key, NULL);
free(key);
break;
case DIFF_SYMBOL_MINUS:
hm = add_lines;
key = prepare_entry(o, n);
- match = hashmap_get(hm, key, o);
+ match = hashmap_get(hm, key, NULL);
free(key);
break;
default:
@@ -860,17 +1063,11 @@ static void mark_color_as_moved(struct diff_options *o,
if (o->color_moved == COLOR_MOVED_PLAIN)
continue;
- /* Check any potential block runs, advance each or nullify */
- for (i = 0; i < pmb_nr; i++) {
- struct moved_entry *p = pmb[i];
- struct moved_entry *pnext = (p && p->next_line) ?
- p->next_line : NULL;
- if (pnext && !hm->cmpfn(o, pnext, match, NULL)) {
- pmb[i] = p->next_line;
- } else {
- pmb[i] = NULL;
- }
- }
+ if (o->color_moved_ws_handling &
+ COLOR_MOVED_WS_ALLOW_INDENTATION_CHANGE)
+ pmb_advance_or_null_multi_match(o, match, hm, pmb, pmb_nr, n);
+ else
+ pmb_advance_or_null(o, match, hm, pmb, pmb_nr);
pmb_nr = shrink_potential_moved_blocks(pmb, pmb_nr);
@@ -881,7 +1078,17 @@ static void mark_color_as_moved(struct diff_options *o,
*/
for (; match; match = hashmap_get_next(hm, match)) {
ALLOC_GROW(pmb, pmb_nr + 1, pmb_alloc);
- pmb[pmb_nr++] = match;
+ if (o->color_moved_ws_handling &
+ COLOR_MOVED_WS_ALLOW_INDENTATION_CHANGE) {
+ struct ws_delta *wsd = xmalloc(sizeof(*match->wsd));
+ if (compute_ws_delta(l, match->es, wsd)) {
+ match->wsd = wsd;
+ pmb[pmb_nr++] = match;
+ } else
+ free(wsd);
+ } else {
+ pmb[pmb_nr++] = match;
+ }
}
flipped_block = (flipped_block + 1) % 2;
@@ -892,7 +1099,7 @@ static void mark_color_as_moved(struct diff_options *o,
block_length++;
- if (flipped_block)
+ if (flipped_block && o->color_moved != COLOR_MOVED_BLOCKS)
l->flags |= DIFF_SYMBOL_MOVED_LINE_ALT;
}
adjust_last_block(o, n, block_length);
@@ -1864,8 +2071,8 @@ static void init_diff_words_data(struct emit_callback *ecbdata,
if (regcomp(ecbdata->diff_words->word_regex,
o->word_regex,
REG_EXTENDED | REG_NEWLINE))
- die ("Invalid regular expression: %s",
- o->word_regex);
+ die("invalid regular expression: %s",
+ o->word_regex);
}
for (i = 0; i < ARRAY_SIZE(diff_words_styles); i++) {
if (o->word_diff == diff_words_styles[i].type) {
@@ -3833,7 +4040,7 @@ static const char *diff_abbrev_oid(const struct object_id *oid, int abbrev)
char *hex = oid_to_hex(oid);
if (abbrev < 0)
abbrev = FALLBACK_DEFAULT_ABBREV;
- if (abbrev > GIT_SHA1_HEXSZ)
+ if (abbrev > the_hash_algo->hexsz)
BUG("oid abbreviation out of range: %d", abbrev);
if (abbrev)
hex[abbrev] = '\0';
@@ -4125,6 +4332,7 @@ void diff_setup(struct diff_options *options)
}
options->color_moved = diff_color_moved_default;
+ options->color_moved_ws_handling = diff_color_moved_ws_default;
}
void diff_setup_done(struct diff_options *options)
@@ -4704,6 +4912,8 @@ int diff_opt_parse(struct diff_options *options,
if (cm < 0)
die("bad --color-moved argument: %s", arg);
options->color_moved = cm;
+ } else if (skip_prefix(arg, "--color-moved-ws=", &arg)) {
+ options->color_moved_ws_handling = parse_color_moved_ws(arg);
} else if (skip_to_optional_arg_default(arg, "--color-words", &options->word_regex, NULL)) {
options->use_color = 1;
options->word_diff = DIFF_WORDS_COLOR;
@@ -4948,7 +5158,7 @@ const char *diff_aligned_abbrev(const struct object_id *oid, int len)
const char *abbrev;
/* Do we want all 40 hex characters? */
- if (len == GIT_SHA1_HEXSZ)
+ if (len == the_hash_algo->hexsz)
return oid_to_hex(oid);
/* An abbreviated value is fine, possibly followed by an ellipsis. */
@@ -4978,7 +5188,7 @@ const char *diff_aligned_abbrev(const struct object_id *oid, int len)
* the automatic sizing is supposed to give abblen that ensures
* uniqueness across all objects (statistically speaking).
*/
- if (abblen < GIT_SHA1_HEXSZ - 3) {
+ if (abblen < the_hash_algo->hexsz - 3) {
static char hex[GIT_MAX_HEXSZ + 1];
if (len < abblen && abblen <= len + 2)
xsnprintf(hex, sizeof(hex), "%s%.*s", abbrev, len+3-abblen, "..");
@@ -5534,10 +5744,12 @@ static void diff_flush_patch_all_file_pairs(struct diff_options *o)
if (o->color_moved) {
struct hashmap add_lines, del_lines;
- hashmap_init(&del_lines,
- (hashmap_cmp_fn)moved_entry_cmp, o, 0);
- hashmap_init(&add_lines,
- (hashmap_cmp_fn)moved_entry_cmp, o, 0);
+ if (o->color_moved_ws_handling &
+ COLOR_MOVED_WS_ALLOW_INDENTATION_CHANGE)
+ o->color_moved_ws_handling |= XDF_IGNORE_WHITESPACE;
+
+ hashmap_init(&del_lines, moved_entry_cmp, o, 0);
+ hashmap_init(&add_lines, moved_entry_cmp, o, 0);
add_lines_to_move_detection(o, &add_lines, &del_lines);
mark_color_as_moved(o, &add_lines, &del_lines);
diff --git a/diff.h b/diff.h
index dedac47..20c697d 100644
--- a/diff.h
+++ b/diff.h
@@ -208,11 +208,16 @@ struct diff_options {
enum {
COLOR_MOVED_NO = 0,
COLOR_MOVED_PLAIN = 1,
- COLOR_MOVED_ZEBRA = 2,
- COLOR_MOVED_ZEBRA_DIM = 3,
+ COLOR_MOVED_BLOCKS = 2,
+ COLOR_MOVED_ZEBRA = 3,
+ COLOR_MOVED_ZEBRA_DIM = 4,
} color_moved;
#define COLOR_MOVED_DEFAULT COLOR_MOVED_ZEBRA
#define COLOR_MOVED_MIN_ALNUM_COUNT 20
+
+ /* The XDF_WHITESPACE_FLAGS used for block detection occupy bits 2, 3 and 4 */
+ #define COLOR_MOVED_WS_ALLOW_INDENTATION_CHANGE (1<<5)
+ int color_moved_ws_handling;
};
void diff_emit_submodule_del(struct diff_options *o, const char *line);
@@ -253,15 +258,15 @@ const char *diff_line_prefix(struct diff_options *);
extern const char mime_boundary_leader[];
-extern struct combine_diff_path *diff_tree_paths(
+struct combine_diff_path *diff_tree_paths(
struct combine_diff_path *p, const struct object_id *oid,
const struct object_id **parents_oid, int nparent,
struct strbuf *base, struct diff_options *opt);
-extern int diff_tree_oid(const struct object_id *old_oid,
- const struct object_id *new_oid,
- const char *base, struct diff_options *opt);
-extern int diff_root_tree_oid(const struct object_id *new_oid, const char *base,
- struct diff_options *opt);
+int diff_tree_oid(const struct object_id *old_oid,
+ const struct object_id *new_oid,
+ const char *base, struct diff_options *opt);
+int diff_root_tree_oid(const struct object_id *new_oid, const char *base,
+ struct diff_options *opt);
struct combine_diff_path {
struct combine_diff_path *next;
@@ -278,33 +283,33 @@ struct combine_diff_path {
st_add4(sizeof(struct combine_diff_path), (l), 1, \
st_mult(sizeof(struct combine_diff_parent), (n)))
-extern void show_combined_diff(struct combine_diff_path *elem, int num_parent,
- int dense, struct rev_info *);
+void show_combined_diff(struct combine_diff_path *elem, int num_parent,
+ int dense, struct rev_info *);
-extern void diff_tree_combined(const struct object_id *oid, const struct oid_array *parents, int dense, struct rev_info *rev);
+void diff_tree_combined(const struct object_id *oid, const struct oid_array *parents, int dense, struct rev_info *rev);
-extern void diff_tree_combined_merge(const struct commit *commit, int dense, struct rev_info *rev);
+void diff_tree_combined_merge(const struct commit *commit, int dense, struct rev_info *rev);
void diff_set_mnemonic_prefix(struct diff_options *options, const char *a, const char *b);
-extern int diff_can_quit_early(struct diff_options *);
+int diff_can_quit_early(struct diff_options *);
-extern void diff_addremove(struct diff_options *,
- int addremove,
- unsigned mode,
- const struct object_id *oid,
- int oid_valid,
- const char *fullpath, unsigned dirty_submodule);
+void diff_addremove(struct diff_options *,
+ int addremove,
+ unsigned mode,
+ const struct object_id *oid,
+ int oid_valid,
+ const char *fullpath, unsigned dirty_submodule);
-extern void diff_change(struct diff_options *,
- unsigned mode1, unsigned mode2,
- const struct object_id *old_oid,
- const struct object_id *new_oid,
- int old_oid_valid, int new_oid_valid,
- const char *fullpath,
- unsigned dirty_submodule1, unsigned dirty_submodule2);
+void diff_change(struct diff_options *,
+ unsigned mode1, unsigned mode2,
+ const struct object_id *old_oid,
+ const struct object_id *new_oid,
+ int old_oid_valid, int new_oid_valid,
+ const char *fullpath,
+ unsigned dirty_submodule1, unsigned dirty_submodule2);
-extern struct diff_filepair *diff_unmerge(struct diff_options *, const char *path);
+struct diff_filepair *diff_unmerge(struct diff_options *, const char *path);
#define DIFF_SETUP_REVERSE 1
#define DIFF_SETUP_USE_CACHE 2
@@ -314,17 +319,17 @@ extern struct diff_filepair *diff_unmerge(struct diff_options *, const char *pat
* Poor man's alternative to parse-option, to allow both stuck form
* (--option=value) and separate form (--option value).
*/
-extern int parse_long_opt(const char *opt, const char **argv,
- const char **optarg);
-
-extern int git_diff_basic_config(const char *var, const char *value, void *cb);
-extern int git_diff_heuristic_config(const char *var, const char *value, void *cb);
-extern void init_diff_ui_defaults(void);
-extern int git_diff_ui_config(const char *var, const char *value, void *cb);
-extern void diff_setup(struct diff_options *);
-extern int diff_opt_parse(struct diff_options *, const char **, int, const char *);
-extern void diff_setup_done(struct diff_options *);
-extern int git_config_rename(const char *var, const char *value);
+int parse_long_opt(const char *opt, const char **argv,
+ const char **optarg);
+
+int git_diff_basic_config(const char *var, const char *value, void *cb);
+int git_diff_heuristic_config(const char *var, const char *value, void *cb);
+void init_diff_ui_defaults(void);
+int git_diff_ui_config(const char *var, const char *value, void *cb);
+void diff_setup(struct diff_options *);
+int diff_opt_parse(struct diff_options *, const char **, int, const char *);
+void diff_setup_done(struct diff_options *);
+int git_config_rename(const char *var, const char *value);
#define DIFF_DETECT_RENAME 1
#define DIFF_DETECT_COPY 2
@@ -342,8 +347,8 @@ extern int git_config_rename(const char *var, const char *value);
#define DIFF_PICKAXE_IGNORE_CASE 32
-extern void diffcore_std(struct diff_options *);
-extern void diffcore_fix_diff_index(struct diff_options *);
+void diffcore_std(struct diff_options *);
+void diffcore_fix_diff_index(struct diff_options *);
#define COMMON_DIFF_OPTIONS_HELP \
"\ncommon diff options:\n" \
@@ -373,9 +378,9 @@ extern void diffcore_fix_diff_index(struct diff_options *);
" show all files diff when -S is used and hit is found.\n" \
" -a --text treat all files as text.\n"
-extern int diff_queue_is_empty(void);
-extern void diff_flush(struct diff_options*);
-extern void diff_warn_rename_limit(const char *varname, int needed, int degraded_cc);
+int diff_queue_is_empty(void);
+void diff_flush(struct diff_options*);
+void diff_warn_rename_limit(const char *varname, int needed, int degraded_cc);
/* diff-raw status letters */
#define DIFF_STATUS_ADDED 'A'
@@ -397,24 +402,24 @@ extern void diff_warn_rename_limit(const char *varname, int needed, int degraded
* This is different from find_unique_abbrev() in that
* it stuffs the result with dots for alignment.
*/
-extern const char *diff_aligned_abbrev(const struct object_id *sha1, int);
+const char *diff_aligned_abbrev(const struct object_id *sha1, int);
/* do not report anything on removed paths */
#define DIFF_SILENT_ON_REMOVED 01
/* report racily-clean paths as modified */
#define DIFF_RACY_IS_MODIFIED 02
-extern int run_diff_files(struct rev_info *revs, unsigned int option);
-extern int run_diff_index(struct rev_info *revs, int cached);
+int run_diff_files(struct rev_info *revs, unsigned int option);
+int run_diff_index(struct rev_info *revs, int cached);
-extern int do_diff_cache(const struct object_id *, struct diff_options *);
-extern int diff_flush_patch_id(struct diff_options *, struct object_id *, int);
+int do_diff_cache(const struct object_id *, struct diff_options *);
+int diff_flush_patch_id(struct diff_options *, struct object_id *, int);
-extern int diff_result_code(struct diff_options *, int);
+int diff_result_code(struct diff_options *, int);
-extern void diff_no_index(struct rev_info *, int, const char **);
+void diff_no_index(struct rev_info *, int, const char **);
-extern int index_differs_from(const char *def, const struct diff_flags *flags,
- int ita_invisible_in_index);
+int index_differs_from(const char *def, const struct diff_flags *flags,
+ int ita_invisible_in_index);
/*
* Fill the contents of the filespec "df", respecting any textconv defined by
@@ -427,30 +432,30 @@ extern int index_differs_from(const char *def, const struct diff_flags *flags,
* struct. If it is non-NULL, then "outbuf" points to a newly allocated buffer
* that should be freed by the caller.
*/
-extern size_t fill_textconv(struct userdiff_driver *driver,
- struct diff_filespec *df,
- char **outbuf);
+size_t fill_textconv(struct userdiff_driver *driver,
+ struct diff_filespec *df,
+ char **outbuf);
/*
* Look up the userdiff driver for the given filespec, and return it if
* and only if it has textconv enabled (otherwise return NULL). The result
* can be passed to fill_textconv().
*/
-extern struct userdiff_driver *get_textconv(struct diff_filespec *one);
+struct userdiff_driver *get_textconv(struct diff_filespec *one);
/*
* Prepare diff_filespec and convert it using diff textconv API
* if the textconv driver exists.
* Return 1 if the conversion succeeds, 0 otherwise.
*/
-extern int textconv_object(const char *path, unsigned mode, const struct object_id *oid, int oid_valid, char **buf, unsigned long *buf_size);
+int textconv_object(const char *path, unsigned mode, const struct object_id *oid, int oid_valid, char **buf, unsigned long *buf_size);
-extern int parse_rename_score(const char **cp_p);
+int parse_rename_score(const char **cp_p);
-extern long parse_algorithm_value(const char *value);
+long parse_algorithm_value(const char *value);
-extern void print_stat_summary(FILE *fp, int files,
- int insertions, int deletions);
-extern void setup_diff_pager(struct diff_options *);
+void print_stat_summary(FILE *fp, int files,
+ int insertions, int deletions);
+void setup_diff_pager(struct diff_options *);
#endif /* DIFF_H */
diff --git a/diffcore.h b/diffcore.h
index a30da16..81281a3 100644
--- a/diffcore.h
+++ b/diffcore.h
@@ -50,17 +50,17 @@ struct diff_filespec {
struct userdiff_driver *driver;
};
-extern struct diff_filespec *alloc_filespec(const char *);
-extern void free_filespec(struct diff_filespec *);
-extern void fill_filespec(struct diff_filespec *, const struct object_id *,
- int, unsigned short);
+struct diff_filespec *alloc_filespec(const char *);
+void free_filespec(struct diff_filespec *);
+void fill_filespec(struct diff_filespec *, const struct object_id *,
+ int, unsigned short);
#define CHECK_SIZE_ONLY 1
#define CHECK_BINARY 2
-extern int diff_populate_filespec(struct diff_filespec *, unsigned int);
-extern void diff_free_filespec_data(struct diff_filespec *);
-extern void diff_free_filespec_blob(struct diff_filespec *);
-extern int diff_filespec_is_binary(struct diff_filespec *);
+int diff_populate_filespec(struct diff_filespec *, unsigned int);
+void diff_free_filespec_data(struct diff_filespec *);
+void diff_free_filespec_blob(struct diff_filespec *);
+int diff_filespec_is_binary(struct diff_filespec *);
struct diff_filepair {
struct diff_filespec *one;
@@ -86,9 +86,9 @@ struct diff_filepair {
#define DIFF_PAIR_MODE_CHANGED(p) ((p)->one->mode != (p)->two->mode)
-extern void diff_free_filepair(struct diff_filepair *);
+void diff_free_filepair(struct diff_filepair *);
-extern int diff_unmodified_pair(struct diff_filepair *);
+int diff_unmodified_pair(struct diff_filepair *);
struct diff_queue_struct {
struct diff_filepair **queue;
@@ -102,16 +102,16 @@ struct diff_queue_struct {
} while (0)
extern struct diff_queue_struct diff_queued_diff;
-extern struct diff_filepair *diff_queue(struct diff_queue_struct *,
- struct diff_filespec *,
- struct diff_filespec *);
-extern void diff_q(struct diff_queue_struct *, struct diff_filepair *);
+struct diff_filepair *diff_queue(struct diff_queue_struct *,
+ struct diff_filespec *,
+ struct diff_filespec *);
+void diff_q(struct diff_queue_struct *, struct diff_filepair *);
-extern void diffcore_break(int);
-extern void diffcore_rename(struct diff_options *);
-extern void diffcore_merge_broken(void);
-extern void diffcore_pickaxe(struct diff_options *);
-extern void diffcore_order(const char *orderfile);
+void diffcore_break(int);
+void diffcore_rename(struct diff_options *);
+void diffcore_merge_broken(void);
+void diffcore_pickaxe(struct diff_options *);
+void diffcore_order(const char *orderfile);
/* low-level interface to diffcore_order */
struct obj_order {
@@ -138,11 +138,11 @@ void diff_debug_queue(const char *, struct diff_queue_struct *);
#define diff_debug_queue(a,b) do { /* nothing */ } while (0)
#endif
-extern int diffcore_count_changes(struct diff_filespec *src,
- struct diff_filespec *dst,
- void **src_count_p,
- void **dst_count_p,
- unsigned long *src_copied,
- unsigned long *literal_added);
+int diffcore_count_changes(struct diff_filespec *src,
+ struct diff_filespec *dst,
+ void **src_count_p,
+ void **dst_count_p,
+ unsigned long *src_copied,
+ unsigned long *literal_added);
#endif
diff --git a/dir.c b/dir.c
index 21e6f25..32f5f72 100644
--- a/dir.c
+++ b/dir.c
@@ -561,7 +561,7 @@ int report_path_error(const char *ps_matched,
if (found_dup)
continue;
- error("pathspec '%s' did not match any file(s) known to git.",
+ error(_("pathspec '%s' did not match any file(s) known to git"),
pathspec->items[num].original);
errors++;
}
@@ -950,7 +950,7 @@ static void add_excludes_from_file_1(struct dir_struct *dir, const char *fname,
dir->unmanaged_exclude_files++;
el = add_exclude_list(dir, EXC_FILE, fname);
if (add_excludes(fname, "", 0, el, NULL, oid_stat) < 0)
- die("cannot use %s as an exclude file", fname);
+ die(_("cannot use %s as an exclude file"), fname);
}
void add_excludes_from_file(struct dir_struct *dir, const char *fname)
@@ -2231,7 +2231,7 @@ static struct untracked_cache_dir *validate_untracked_cache(struct dir_struct *d
return NULL;
if (!ident_in_untracked(dir->untracked)) {
- warning(_("Untracked cache is disabled on this system or location."));
+ warning(_("untracked cache is disabled on this system or location"));
return NULL;
}
@@ -3029,7 +3029,7 @@ static void connect_wt_gitdir_in_nested(const char *sub_worktree,
return;
if (repo_read_index(&subrepo) < 0)
- die("index file corrupt in repo %s", subrepo.gitdir);
+ die(_("index file corrupt in repo %s"), subrepo.gitdir);
for (i = 0; i < subrepo.index->cache_nr; i++) {
const struct cache_entry *ce = subrepo.index->cache[i];
diff --git a/environment.c b/environment.c
index 013e845..68523b6b 100644
--- a/environment.c
+++ b/environment.c
@@ -51,7 +51,7 @@ const char *editor_program;
const char *askpass_program;
const char *excludes_file;
enum auto_crlf auto_crlf = AUTO_CRLF_FALSE;
-int check_replace_refs = 1; /* NEEDSWORK: rename to read_replace_refs */
+int read_replace_refs = 1;
char *git_replace_ref_base;
enum eol core_eol = EOL_UNSET;
int global_conv_flags_eol = CONV_EOL_RNDTRP_WARN;
@@ -66,7 +66,6 @@ enum push_default_type push_default = PUSH_DEFAULT_UNSPECIFIED;
enum object_creation_mode object_creation_mode = OBJECT_CREATION_MODE;
char *notes_ref_name;
int grafts_replace_parents = 1;
-int core_commit_graph;
int core_apply_sparse_checkout;
int merge_log_config = -1;
int precomposed_unicode = -1; /* see probe_utf8_pathname_composition() */
@@ -147,7 +146,7 @@ static char *expand_namespace(const char *raw_namespace)
strbuf_addf(&buf, "refs/namespaces/%s", (*c)->buf);
strbuf_list_free(components);
if (check_refname_format(buf.buf, 0))
- die("bad git namespace path \"%s\"", raw_namespace);
+ die(_("bad git namespace path \"%s\""), raw_namespace);
strbuf_addch(&buf, '/');
return strbuf_detach(&buf, NULL);
}
@@ -183,7 +182,7 @@ void setup_git_env(const char *git_dir)
argv_array_clear(&to_free);
if (getenv(NO_REPLACE_OBJECTS_ENVIRONMENT))
- check_replace_refs = 0;
+ read_replace_refs = 0;
replace_ref_base = getenv(GIT_REPLACE_REF_BASE_ENVIRONMENT);
free(git_replace_ref_base);
git_replace_ref_base = xstrdup(replace_ref_base ? replace_ref_base
@@ -329,7 +328,7 @@ char *get_graft_file(struct repository *r)
static void set_git_dir_1(const char *path)
{
if (setenv(GIT_DIR_ENVIRONMENT, path, 1))
- die("could not set GIT_DIR to '%s'", path);
+ die(_("could not set GIT_DIR to '%s'"), path);
setup_git_env(path);
}
diff --git a/exec-cmd.c b/exec-cmd.c
index 02d31ee..4f81f44 100644
--- a/exec-cmd.c
+++ b/exec-cmd.c
@@ -358,7 +358,7 @@ int execl_git_cmd(const char *cmd, ...)
}
va_end(param);
if (MAX_ARGS <= argc)
- return error("too many args to run %s", cmd);
+ return error(_("too many args to run %s"), cmd);
argv[argc] = NULL;
return execv_git_cmd(argv);
diff --git a/fast-import.c b/fast-import.c
index 12195d5..89bb0c9 100644
--- a/fast-import.c
+++ b/fast-import.c
@@ -1724,8 +1724,10 @@ static int update_branch(struct branch *b)
if (!force_update && !is_null_oid(&old_oid)) {
struct commit *old_cmit, *new_cmit;
- old_cmit = lookup_commit_reference_gently(&old_oid, 0);
- new_cmit = lookup_commit_reference_gently(&b->oid, 0);
+ old_cmit = lookup_commit_reference_gently(the_repository,
+ &old_oid, 0);
+ new_cmit = lookup_commit_reference_gently(the_repository,
+ &b->oid, 0);
if (!old_cmit || !new_cmit)
return error("Branch %s is missing commits.", b->name);
diff --git a/fetch-negotiator.c b/fetch-negotiator.c
new file mode 100644
index 0000000..d6d685c
--- /dev/null
+++ b/fetch-negotiator.c
@@ -0,0 +1,20 @@
+#include "git-compat-util.h"
+#include "fetch-negotiator.h"
+#include "negotiator/default.h"
+#include "negotiator/skipping.h"
+
+void fetch_negotiator_init(struct fetch_negotiator *negotiator,
+ const char *algorithm)
+{
+ if (algorithm) {
+ if (!strcmp(algorithm, "skipping")) {
+ skipping_negotiator_init(negotiator);
+ return;
+ } else if (!strcmp(algorithm, "default")) {
+ /* Fall through to default initialization */
+ } else {
+ die("unknown fetch negotiation algorithm '%s'", algorithm);
+ }
+ }
+ default_negotiator_init(negotiator);
+}
diff --git a/fetch-negotiator.h b/fetch-negotiator.h
new file mode 100644
index 0000000..ddb44a2
--- /dev/null
+++ b/fetch-negotiator.h
@@ -0,0 +1,58 @@
+#ifndef FETCH_NEGOTIATOR
+#define FETCH_NEGOTIATOR
+
+struct commit;
+
+/*
+ * An object that supplies the information needed to negotiate the contents of
+ * the to-be-sent packfile during a fetch.
+ *
+ * To set up the negotiator, call fetch_negotiator_init(), then known_common()
+ * (0 or more times), then add_tip() (0 or more times).
+ *
+ * Then, when "have" lines are required, call next(). Call ack() to report what
+ * the server tells us.
+ *
+ * Once negotiation is done, call release(). The negotiator then cannot be used
+ * (unless reinitialized with fetch_negotiator_init()).
+ */
+struct fetch_negotiator {
+ /*
+ * Before negotiation starts, indicate that the server is known to have
+ * this commit.
+ */
+ void (*known_common)(struct fetch_negotiator *, struct commit *);
+
+ /*
+ * Once this function is invoked, known_common() cannot be invoked any
+ * more.
+ *
+ * Indicate that this commit and all its ancestors are to be checked
+ * for commonality with the server.
+ */
+ void (*add_tip)(struct fetch_negotiator *, struct commit *);
+
+ /*
+ * Once this function is invoked, known_common() and add_tip() cannot
+ * be invoked any more.
+ *
+ * Return the next commit that the client should send as a "have" line.
+ */
+ const struct object_id *(*next)(struct fetch_negotiator *);
+
+ /*
+ * Inform the negotiator that the server has the given commit. This
+ * method must only be called on commits returned by next().
+ */
+ int (*ack)(struct fetch_negotiator *, struct commit *);
+
+ void (*release)(struct fetch_negotiator *);
+
+ /* internal use */
+ void *data;
+};
+
+void fetch_negotiator_init(struct fetch_negotiator *negotiator,
+ const char *algorithm);
+
+#endif
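
As a rough illustration of the lifecycle described in the header comment above (a sketch against the in-tree API, not standalone code; lookup_commit() and the_repository are used the same way the fetch-pack changes below use them, and server_acked() is a hypothetical stand-in for reading an ACK line):

    static void negotiate_sketch(struct commit *local_tip,
                                 struct commit *already_common)
    {
        struct fetch_negotiator negotiator;
        const struct object_id *oid;

        fetch_negotiator_init(&negotiator, "default");

        /* 1. seed with what we already know the server has ... */
        negotiator.known_common(&negotiator, already_common);
        /* 2. ... and with the tips whose ancestry we want to walk */
        negotiator.add_tip(&negotiator, local_tip);

        /* 3. emit "have"s; ack() feeds back what the server reports */
        while ((oid = negotiator.next(&negotiator))) {
            if (server_acked(oid)) /* stand-in for reading an ACK line */
                negotiator.ack(&negotiator,
                               lookup_commit(the_repository, oid));
        }

        /* 4. done: the negotiator must not be used again after this */
        negotiator.release(&negotiator);
    }
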
diff --git a/fetch-object.c b/fetch-object.c
index 48fe63d..853624f 100644
--- a/fetch-object.c
+++ b/fetch-object.c
@@ -19,7 +19,7 @@ static void fetch_refs(const char *remote_name, struct ref *ref)
transport_set_option(transport, TRANS_OPT_FROM_PROMISOR, "1");
transport_set_option(transport, TRANS_OPT_NO_DEPENDENTS, "1");
- transport_fetch_refs(transport, ref, NULL);
+ transport_fetch_refs(transport, ref);
fetch_if_missing = original_fetch_if_missing;
}
diff --git a/fetch-pack.c b/fetch-pack.c
index 7ccb9c0..88a078e 100644
--- a/fetch-pack.c
+++ b/fetch-pack.c
@@ -15,12 +15,13 @@
#include "connect.h"
#include "transport.h"
#include "version.h"
-#include "prio-queue.h"
#include "sha1-array.h"
#include "oidset.h"
#include "packfile.h"
#include "object-store.h"
#include "connected.h"
+#include "fetch-negotiator.h"
+#include "fsck.h"
static int transfer_unpack_limit = -1;
static int fetch_unpack_limit = -1;
@@ -35,16 +36,12 @@ static int agent_supported;
static int server_supports_filtering;
static struct lock_file shallow_lock;
static const char *alternate_shallow_file;
+static char *negotiation_algorithm;
+static struct strbuf fsck_msg_types = STRBUF_INIT;
/* Remember to update object flag allocation in object.h */
#define COMPLETE (1U << 0)
-#define COMMON (1U << 1)
-#define COMMON_REF (1U << 2)
-#define SEEN (1U << 3)
-#define POPPED (1U << 4)
-#define ALTERNATE (1U << 5)
-
-static int marked;
+#define ALTERNATE (1U << 1)
/*
* After sending this many "have"s if we do not get any new ACK , we
@@ -52,8 +49,7 @@ static int marked;
*/
#define MAX_IN_VAIN 256
-static struct prio_queue rev_list = { compare_commits_by_commit_date };
-static int non_common_revs, multi_ack, use_sideband;
+static int multi_ack, use_sideband;
/* Allow specifying sha1 if it is a ref tip. */
#define ALLOW_TIP_SHA1 01
/* Allow request of a sha1 if it is reachable from a ref (possibly hidden ref). */
@@ -85,7 +81,7 @@ static void cache_one_alternate(const char *refname,
void *vcache)
{
struct alternate_object_cache *cache = vcache;
- struct object *obj = parse_object(oid);
+ struct object *obj = parse_object(the_repository, oid);
if (!obj || (obj->flags & ALTERNATE))
return;
@@ -95,7 +91,9 @@ static void cache_one_alternate(const char *refname,
cache->items[cache->nr++] = obj;
}
-static void for_each_cached_alternate(void (*cb)(struct object *))
+static void for_each_cached_alternate(struct fetch_negotiator *negotiator,
+ void (*cb)(struct fetch_negotiator *,
+ struct object *))
{
static int initialized;
static struct alternate_object_cache cache;
@@ -107,30 +105,19 @@ static void for_each_cached_alternate(void (*cb)(struct object *))
}
for (i = 0; i < cache.nr; i++)
- cb(cache.items[i]);
-}
-
-static void rev_list_push(struct commit *commit, int mark)
-{
- if (!(commit->object.flags & mark)) {
- commit->object.flags |= mark;
-
- if (parse_commit(commit))
- return;
-
- prio_queue_put(&rev_list, commit);
-
- if (!(commit->object.flags & COMMON))
- non_common_revs++;
- }
+ cb(negotiator, cache.items[i]);
}
-static int rev_list_insert_ref(const char *refname, const struct object_id *oid)
+static int rev_list_insert_ref(struct fetch_negotiator *negotiator,
+ const char *refname,
+ const struct object_id *oid)
{
- struct object *o = deref_tag(parse_object(oid), refname, 0);
+ struct object *o = deref_tag(the_repository,
+ parse_object(the_repository, oid),
+ refname, 0);
if (o && o->type == OBJ_COMMIT)
- rev_list_push((struct commit *)o, SEEN);
+ negotiator->add_tip(negotiator, (struct commit *)o);
return 0;
}
@@ -138,98 +125,7 @@ static int rev_list_insert_ref(const char *refname, const struct object_id *oid)
static int rev_list_insert_ref_oid(const char *refname, const struct object_id *oid,
int flag, void *cb_data)
{
- return rev_list_insert_ref(refname, oid);
-}
-
-static int clear_marks(const char *refname, const struct object_id *oid,
- int flag, void *cb_data)
-{
- struct object *o = deref_tag(parse_object(oid), refname, 0);
-
- if (o && o->type == OBJ_COMMIT)
- clear_commit_marks((struct commit *)o,
- COMMON | COMMON_REF | SEEN | POPPED);
- return 0;
-}
-
-/*
- This function marks a rev and its ancestors as common.
- In some cases, it is desirable to mark only the ancestors (for example
- when only the server does not yet know that they are common).
-*/
-
-static void mark_common(struct commit *commit,
- int ancestors_only, int dont_parse)
-{
- if (commit != NULL && !(commit->object.flags & COMMON)) {
- struct object *o = (struct object *)commit;
-
- if (!ancestors_only)
- o->flags |= COMMON;
-
- if (!(o->flags & SEEN))
- rev_list_push(commit, SEEN);
- else {
- struct commit_list *parents;
-
- if (!ancestors_only && !(o->flags & POPPED))
- non_common_revs--;
- if (!o->parsed && !dont_parse)
- if (parse_commit(commit))
- return;
-
- for (parents = commit->parents;
- parents;
- parents = parents->next)
- mark_common(parents->item, 0, dont_parse);
- }
- }
-}
-
-/*
- Get the next rev to send, ignoring the common.
-*/
-
-static const struct object_id *get_rev(void)
-{
- struct commit *commit = NULL;
-
- while (commit == NULL) {
- unsigned int mark;
- struct commit_list *parents;
-
- if (rev_list.nr == 0 || non_common_revs == 0)
- return NULL;
-
- commit = prio_queue_get(&rev_list);
- parse_commit(commit);
- parents = commit->parents;
-
- commit->object.flags |= POPPED;
- if (!(commit->object.flags & COMMON))
- non_common_revs--;
-
- if (commit->object.flags & COMMON) {
- /* do not send "have", and ignore ancestors */
- commit = NULL;
- mark = COMMON | SEEN;
- } else if (commit->object.flags & COMMON_REF)
- /* send "have", and ignore ancestors */
- mark = COMMON | SEEN;
- else
- /* send "have", also for its ancestors */
- mark = SEEN;
-
- while (parents) {
- if (!(parents->item->object.flags & SEEN))
- rev_list_push(parents->item, mark);
- if (mark & COMMON)
- mark_common(parents->item, 1, 0);
- parents = parents->next;
- }
- }
-
- return &commit->object.oid;
+ return rev_list_insert_ref(cb_data, refname, oid);
}
enum ack_type {
@@ -298,9 +194,10 @@ static void send_request(struct fetch_pack_args *args,
write_or_die(fd, buf->buf, buf->len);
}
-static void insert_one_alternate_object(struct object *obj)
+static void insert_one_alternate_object(struct fetch_negotiator *negotiator,
+ struct object *obj)
{
- rev_list_insert_ref(NULL, &obj->oid);
+ rev_list_insert_ref(negotiator, NULL, &obj->oid);
}
#define INITIAL_FLUSH 16
@@ -323,7 +220,24 @@ static int next_flush(int stateless_rpc, int count)
return count;
}
-static int find_common(struct fetch_pack_args *args,
+static void mark_tips(struct fetch_negotiator *negotiator,
+ const struct oid_array *negotiation_tips)
+{
+ int i;
+
+ if (!negotiation_tips) {
+ for_each_ref(rev_list_insert_ref_oid, negotiator);
+ return;
+ }
+
+ for (i = 0; i < negotiation_tips->nr; i++)
+ rev_list_insert_ref(negotiator, NULL,
+ &negotiation_tips->oid[i]);
+ return;
+}
+
+static int find_common(struct fetch_negotiator *negotiator,
+ struct fetch_pack_args *args,
int fd[2], struct object_id *result_oid,
struct ref *refs)
{
@@ -338,12 +252,9 @@ static int find_common(struct fetch_pack_args *args,
if (args->stateless_rpc && multi_ack == 1)
die(_("--stateless-rpc requires multi_ack_detailed"));
- if (marked)
- for_each_ref(clear_marks, NULL);
- marked = 1;
- for_each_ref(rev_list_insert_ref_oid, NULL);
- for_each_cached_alternate(insert_one_alternate_object);
+ mark_tips(negotiator, args->negotiation_tips);
+ for_each_cached_alternate(negotiator, insert_one_alternate_object);
fetching = 0;
for ( ; refs ; refs = refs->next) {
@@ -361,7 +272,7 @@ static int find_common(struct fetch_pack_args *args,
* interested in the case we *know* the object is
* reachable and we have already scanned it.
*/
- if (((o = lookup_object(remote->hash)) != NULL) &&
+ if (((o = lookup_object(the_repository, remote->hash)) != NULL) &&
(o->flags & COMPLETE)) {
continue;
}
@@ -435,10 +346,10 @@ static int find_common(struct fetch_pack_args *args,
if (skip_prefix(line, "unshallow ", &arg)) {
if (get_oid_hex(arg, &oid))
die(_("invalid unshallow line: %s"), line);
- if (!lookup_object(oid.hash))
+ if (!lookup_object(the_repository, oid.hash))
die(_("object not found: %s"), line);
/* make sure that it is parsed as shallow */
- if (!parse_object(&oid))
+ if (!parse_object(the_repository, &oid))
die(_("error in object: %s"), line);
if (unregister_shallow(&oid))
die(_("no shallow found: %s"), line);
@@ -461,7 +372,7 @@ static int find_common(struct fetch_pack_args *args,
retval = -1;
if (args->no_dependents)
goto done;
- while ((oid = get_rev())) {
+ while ((oid = negotiator->next(negotiator))) {
packet_buf_write(&req_buf, "have %s\n", oid_to_hex(oid));
print_verbose(args, "have %s", oid_to_hex(oid));
in_vain++;
@@ -497,12 +408,16 @@ static int find_common(struct fetch_pack_args *args,
case ACK_ready:
case ACK_continue: {
struct commit *commit =
- lookup_commit(result_oid);
+ lookup_commit(the_repository,
+ result_oid);
+ int was_common;
+
if (!commit)
die(_("invalid commit %s"), oid_to_hex(result_oid));
+ was_common = negotiator->ack(negotiator, commit);
if (args->stateless_rpc
&& ack == ACK_common
- && !(commit->object.flags & COMMON)) {
+ && !was_common) {
/* We need to replay the have for this object
* on the next RPC request so the peer knows
* it is in common with us.
@@ -519,13 +434,10 @@ static int find_common(struct fetch_pack_args *args,
} else if (!args->stateless_rpc
|| ack != ACK_common)
in_vain = 0;
- mark_common(commit, 0, 1);
retval = 0;
got_continue = 1;
- if (ack == ACK_ready) {
- clear_prio_queue(&rev_list);
+ if (ack == ACK_ready)
got_ready = 1;
- }
break;
}
}
@@ -535,6 +447,8 @@ static int find_common(struct fetch_pack_args *args,
print_verbose(args, _("giving up"));
break; /* give up */
}
+ if (got_ready)
+ break;
}
}
done:
@@ -571,14 +485,14 @@ static struct commit_list *complete;
static int mark_complete(const struct object_id *oid)
{
- struct object *o = parse_object(oid);
+ struct object *o = parse_object(the_repository, oid);
while (o && o->type == OBJ_TAG) {
struct tag *t = (struct tag *) o;
if (!t->tagged)
break; /* broken repository */
o->flags |= COMPLETE;
- o = parse_object(&t->tagged->oid);
+ o = parse_object(the_repository, &t->tagged->oid);
}
if (o && o->type == OBJ_COMMIT) {
struct commit *commit = (struct commit *)o;
@@ -709,7 +623,8 @@ static void filter_refs(struct fetch_pack_args *args,
*refs = newlist;
}
-static void mark_alternate_complete(struct object *obj)
+static void mark_alternate_complete(struct fetch_negotiator *unused,
+ struct object *obj)
{
mark_complete(&obj->oid);
}
@@ -736,12 +651,21 @@ static int add_loose_objects_to_set(const struct object_id *oid,
return 0;
}
-static int everything_local(struct fetch_pack_args *args,
- struct ref **refs,
- struct ref **sought, int nr_sought)
+/*
+ * Mark recent commits available locally and reachable from a local ref as
+ * COMPLETE. If args->no_dependents is false, also tell the negotiator that
+ * COMPLETE remote refs are known to the server (otherwise, we are not planning
+ * to participate in negotiation, and thus do not need to do so).
+ *
+ * The cutoff time for recency is determined by this heuristic: it is the
+ * earliest commit time of the objects in refs that are commits and that we know
+ * the commit time of.
+ */
+static void mark_complete_and_common_ref(struct fetch_negotiator *negotiator,
+ struct fetch_pack_args *args,
+ struct ref **refs)
{
struct ref *ref;
- int retval;
int old_save_commit_buffer = save_commit_buffer;
timestamp_t cutoff = 0;
struct oidset loose_oid_set = OIDSET_INIT;
@@ -769,7 +693,7 @@ static int everything_local(struct fetch_pack_args *args,
if (!has_object_file_with_flags(&ref->old_oid, flags))
continue;
- o = parse_object(&ref->old_oid);
+ o = parse_object(the_repository, &ref->old_oid);
if (!o)
continue;
@@ -789,7 +713,7 @@ static int everything_local(struct fetch_pack_args *args,
if (!args->no_dependents) {
if (!args->deepen) {
for_each_ref(mark_complete_oid, NULL);
- for_each_cached_alternate(mark_alternate_complete);
+ for_each_cached_alternate(NULL, mark_alternate_complete);
commit_list_sort_by_date(&complete);
if (cutoff)
mark_recent_complete_commits(args, cutoff);
@@ -800,27 +724,37 @@ static int everything_local(struct fetch_pack_args *args,
* Don't mark them common yet; the server has to be told so first.
*/
for (ref = *refs; ref; ref = ref->next) {
- struct object *o = deref_tag(lookup_object(ref->old_oid.hash),
+ struct object *o = deref_tag(the_repository,
+ lookup_object(the_repository,
+ ref->old_oid.hash),
NULL, 0);
if (!o || o->type != OBJ_COMMIT || !(o->flags & COMPLETE))
continue;
- if (!(o->flags & SEEN)) {
- rev_list_push((struct commit *)o, COMMON_REF | SEEN);
-
- mark_common((struct commit *)o, 1, 1);
- }
+ negotiator->known_common(negotiator,
+ (struct commit *)o);
}
}
- filter_refs(args, refs, sought, nr_sought);
+ save_commit_buffer = old_save_commit_buffer;
+}
+
+/*
+ * Returns 1 if every object pointed to by the given remote refs is available
+ * locally and reachable from a local ref, and 0 otherwise.
+ */
+static int everything_local(struct fetch_pack_args *args,
+ struct ref **refs)
+{
+ struct ref *ref;
+ int retval;
for (retval = 1, ref = *refs; ref ; ref = ref->next) {
const struct object_id *remote = &ref->old_oid;
struct object *o;
- o = lookup_object(remote->hash);
+ o = lookup_object(the_repository, remote->hash);
if (!o || !(o->flags & COMPLETE)) {
retval = 0;
print_verbose(args, "want %s (%s)", oid_to_hex(remote),
@@ -831,8 +765,6 @@ static int everything_local(struct fetch_pack_args *args,
ref->name);
}
- save_commit_buffer = old_save_commit_buffer;
-
return retval;
}
@@ -937,7 +869,8 @@ static int get_pack(struct fetch_pack_args *args,
*/
argv_array_push(&cmd.args, "--fsck-objects");
else
- argv_array_push(&cmd.args, "--strict");
+ argv_array_pushf(&cmd.args, "--strict%s",
+ fsck_msg_types.buf);
}
cmd.in = demux.out;
@@ -983,6 +916,8 @@ static struct ref *do_fetch_pack(struct fetch_pack_args *args,
struct object_id oid;
const char *agent_feature;
int agent_len;
+ struct fetch_negotiator negotiator;
+ fetch_negotiator_init(&negotiator, negotiation_algorithm);
sort_ref_list(&ref, ref_compare_name);
QSORT(sought, nr_sought, cmp_ref_by_name);
@@ -1055,11 +990,13 @@ static struct ref *do_fetch_pack(struct fetch_pack_args *args,
if (!server_supports("deepen-relative") && args->deepen_relative)
die(_("Server does not support --deepen"));
- if (everything_local(args, &ref, sought, nr_sought)) {
+ mark_complete_and_common_ref(&negotiator, args, &ref);
+ filter_refs(args, &ref, sought, nr_sought);
+ if (everything_local(args, &ref)) {
packet_flush(fd[1]);
goto all_done;
}
- if (find_common(args, fd, &oid, ref) < 0)
+ if (find_common(&negotiator, args, fd, &oid, ref) < 0)
if (!args->keep_pack)
/* When cloning, it is not unusual to have
* no common commit.
@@ -1079,6 +1016,7 @@ static struct ref *do_fetch_pack(struct fetch_pack_args *args,
die(_("git fetch-pack: fetch failed."));
all_done:
+ negotiator.release(&negotiator);
return ref;
}
@@ -1120,7 +1058,7 @@ static void add_wants(const struct ref *wants, struct strbuf *req_buf)
* interested in the case we *know* the object is
* reachable and we have already scanned it.
*/
- if (((o = lookup_object(remote->hash)) != NULL) &&
+ if (((o = lookup_object(the_repository, remote->hash)) != NULL) &&
(o->flags & COMPLETE)) {
continue;
}
@@ -1143,13 +1081,15 @@ static void add_common(struct strbuf *req_buf, struct oidset *common)
}
}
-static int add_haves(struct strbuf *req_buf, int *haves_to_send, int *in_vain)
+static int add_haves(struct fetch_negotiator *negotiator,
+ struct strbuf *req_buf,
+ int *haves_to_send, int *in_vain)
{
int ret = 0;
int haves_added = 0;
const struct object_id *oid;
- while ((oid = get_rev())) {
+ while ((oid = negotiator->next(negotiator))) {
packet_buf_write(req_buf, "have %s\n", oid_to_hex(oid));
if (++haves_added >= *haves_to_send)
break;
@@ -1168,7 +1108,8 @@ static int add_haves(struct strbuf *req_buf, int *haves_to_send, int *in_vain)
return ret;
}
-static int send_fetch_request(int fd_out, const struct fetch_pack_args *args,
+static int send_fetch_request(struct fetch_negotiator *negotiator, int fd_out,
+ const struct fetch_pack_args *args,
const struct ref *wants, struct oidset *common,
int *haves_to_send, int *in_vain)
{
@@ -1224,7 +1165,7 @@ static int send_fetch_request(int fd_out, const struct fetch_pack_args *args,
add_common(&req_buf, common);
/* Add initial haves */
- ret = add_haves(&req_buf, haves_to_send, in_vain);
+ ret = add_haves(negotiator, &req_buf, haves_to_send, in_vain);
}
/* Send request */
@@ -1247,13 +1188,13 @@ static int process_section_header(struct packet_reader *reader,
int ret;
if (packet_reader_peek(reader) != PACKET_READ_NORMAL)
- die("error reading section header '%s'", section);
+ die(_("error reading section header '%s'"), section);
ret = !strcmp(reader->line, section);
if (!peek) {
if (!ret)
- die("expected '%s', received '%s'",
+ die(_("expected '%s', received '%s'"),
section, reader->line);
packet_reader_read(reader);
}
@@ -1261,7 +1202,9 @@ static int process_section_header(struct packet_reader *reader,
return ret;
}
-static int process_acks(struct packet_reader *reader, struct oidset *common)
+static int process_acks(struct fetch_negotiator *negotiator,
+ struct packet_reader *reader,
+ struct oidset *common)
{
/* received */
int received_ready = 0;
@@ -1279,24 +1222,23 @@ static int process_acks(struct packet_reader *reader, struct oidset *common)
if (!get_oid_hex(arg, &oid)) {
struct commit *commit;
oidset_insert(common, &oid);
- commit = lookup_commit(&oid);
- mark_common(commit, 0, 1);
+ commit = lookup_commit(the_repository, &oid);
+ negotiator->ack(negotiator, commit);
}
continue;
}
if (!strcmp(reader->line, "ready")) {
- clear_prio_queue(&rev_list);
received_ready = 1;
continue;
}
- die("unexpected acknowledgment line: '%s'", reader->line);
+ die(_("unexpected acknowledgment line: '%s'"), reader->line);
}
if (reader->status != PACKET_READ_FLUSH &&
reader->status != PACKET_READ_DELIM)
- die("error processing acks: %d", reader->status);
+ die(_("error processing acks: %d"), reader->status);
/* return 0 if no common, 1 if there are common, or 2 if ready */
return received_ready ? 2 : (received_ack ? 1 : 0);
@@ -1319,10 +1261,10 @@ static void receive_shallow_info(struct fetch_pack_args *args,
if (skip_prefix(reader->line, "unshallow ", &arg)) {
if (get_oid_hex(arg, &oid))
die(_("invalid unshallow line: %s"), reader->line);
- if (!lookup_object(oid.hash))
+ if (!lookup_object(the_repository, oid.hash))
die(_("object not found: %s"), reader->line);
/* make sure that it is parsed as shallow */
- if (!parse_object(&oid))
+ if (!parse_object(the_repository, &oid))
die(_("error in object: %s"), reader->line);
if (unregister_shallow(&oid))
die(_("no shallow found: %s"), reader->line);
@@ -1333,36 +1275,37 @@ static void receive_shallow_info(struct fetch_pack_args *args,
if (reader->status != PACKET_READ_FLUSH &&
reader->status != PACKET_READ_DELIM)
- die("error processing shallow info: %d", reader->status);
+ die(_("error processing shallow info: %d"), reader->status);
setup_alternate_shallow(&shallow_lock, &alternate_shallow_file, NULL);
args->deepen = 1;
}
-static void receive_wanted_refs(struct packet_reader *reader, struct ref *refs)
+static void receive_wanted_refs(struct packet_reader *reader,
+ struct ref **sought, int nr_sought)
{
process_section_header(reader, "wanted-refs", 0);
while (packet_reader_read(reader) == PACKET_READ_NORMAL) {
struct object_id oid;
const char *end;
- struct ref *r = NULL;
+ int i;
if (parse_oid_hex(reader->line, &oid, &end) || *end++ != ' ')
- die("expected wanted-ref, got '%s'", reader->line);
+ die(_("expected wanted-ref, got '%s'"), reader->line);
- for (r = refs; r; r = r->next) {
- if (!strcmp(end, r->name)) {
- oidcpy(&r->old_oid, &oid);
+ for (i = 0; i < nr_sought; i++) {
+ if (!strcmp(end, sought[i]->name)) {
+ oidcpy(&sought[i]->old_oid, &oid);
break;
}
}
- if (!r)
- die("unexpected wanted-ref: '%s'", reader->line);
+ if (i == nr_sought)
+ die(_("unexpected wanted-ref: '%s'"), reader->line);
}
if (reader->status != PACKET_READ_DELIM)
- die("error processing wanted refs: %d", reader->status);
+ die(_("error processing wanted refs: %d"), reader->status);
}
enum fetch_state {
@@ -1385,6 +1328,8 @@ static struct ref *do_fetch_pack_v2(struct fetch_pack_args *args,
struct packet_reader reader;
int in_vain = 0;
int haves_to_send = INITIAL_FLUSH;
+ struct fetch_negotiator negotiator;
+ fetch_negotiator_init(&negotiator, negotiation_algorithm);
packet_reader_init(&reader, fd[0], NULL, 0,
PACKET_READ_CHOMP_NEWLINE);
@@ -1400,21 +1345,21 @@ static struct ref *do_fetch_pack_v2(struct fetch_pack_args *args,
if (args->depth > 0 || args->deepen_since || args->deepen_not)
args->deepen = 1;
- if (marked)
- for_each_ref(clear_marks, NULL);
- marked = 1;
-
- for_each_ref(rev_list_insert_ref_oid, NULL);
- for_each_cached_alternate(insert_one_alternate_object);
-
/* Filter 'ref' by 'sought' and those that aren't local */
- if (everything_local(args, &ref, sought, nr_sought))
+ mark_complete_and_common_ref(&negotiator, args, &ref);
+ filter_refs(args, &ref, sought, nr_sought);
+ if (everything_local(args, &ref))
state = FETCH_DONE;
else
state = FETCH_SEND_REQUEST;
+
+ mark_tips(&negotiator, args->negotiation_tips);
+ for_each_cached_alternate(&negotiator,
+ insert_one_alternate_object);
break;
case FETCH_SEND_REQUEST:
- if (send_fetch_request(fd[1], args, ref, &common,
+ if (send_fetch_request(&negotiator, fd[1], args, ref,
+ &common,
&haves_to_send, &in_vain))
state = FETCH_GET_PACK;
else
@@ -1422,7 +1367,7 @@ static struct ref *do_fetch_pack_v2(struct fetch_pack_args *args,
break;
case FETCH_PROCESS_ACKS:
/* Process ACKs/NAKs */
- switch (process_acks(&reader, &common)) {
+ switch (process_acks(&negotiator, &reader, &common)) {
case 2:
state = FETCH_GET_PACK;
break;
@@ -1440,7 +1385,7 @@ static struct ref *do_fetch_pack_v2(struct fetch_pack_args *args,
receive_shallow_info(args, &reader);
if (process_section_header(&reader, "wanted-refs", 1))
- receive_wanted_refs(&reader, ref);
+ receive_wanted_refs(&reader, sought, nr_sought);
/* get the pack */
process_section_header(&reader, "packfile", 0);
@@ -1454,10 +1399,36 @@ static struct ref *do_fetch_pack_v2(struct fetch_pack_args *args,
}
}
+ negotiator.release(&negotiator);
oidset_clear(&common);
return ref;
}
+static int fetch_pack_config_cb(const char *var, const char *value, void *cb)
+{
+ if (strcmp(var, "fetch.fsck.skiplist") == 0) {
+ const char *path;
+
+ if (git_config_pathname(&path, var, value))
+ return 1;
+ strbuf_addf(&fsck_msg_types, "%cskiplist=%s",
+ fsck_msg_types.len ? ',' : '=', path);
+ free((char *)path);
+ return 0;
+ }
+
+ if (skip_prefix(var, "fetch.fsck.", &var)) {
+ if (is_valid_msg_type(var, value))
+ strbuf_addf(&fsck_msg_types, "%c%s=%s",
+ fsck_msg_types.len ? ',' : '=', var, value);
+ else
+ warning("Skipping unknown msg id '%s'", var);
+ return 0;
+ }
+
+ return git_default_config(var, value, cb);
+}
+
static void fetch_pack_config(void)
{
git_config_get_int("fetch.unpacklimit", &fetch_unpack_limit);
@@ -1465,8 +1436,10 @@ static void fetch_pack_config(void)
git_config_get_bool("repack.usedeltabaseoffset", &prefer_ofs_delta);
git_config_get_bool("fetch.fsckobjects", &fetch_fsck_objects);
git_config_get_bool("transfer.fsckobjects", &transfer_fsck_objects);
+ git_config_get_string("fetch.negotiationalgorithm",
+ &negotiation_algorithm);
- git_config(git_default_config, NULL);
+ git_config(fetch_pack_config_cb, NULL);
}
static void fetch_pack_setup(void)
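
The callback above only accumulates a string that is later appended to index-pack's --strict option; a standalone sketch of that separator logic (buffer handling simplified, strbuf replaced by snprintf, settings chosen purely as examples): the first entry is attached with '=', later ones with ','.

    #include <stdio.h>
    #include <string.h>

    int main(void)
    {
        char msg_types[256] = "";
        const char *settings[][2] = {
            { "missingEmail", "ignore" },
            { "badTagName", "warn" },
        };
        size_t i, len;

        for (i = 0; i < sizeof(settings) / sizeof(settings[0]); i++) {
            len = strlen(msg_types);
            snprintf(msg_types + len, sizeof(msg_types) - len, "%c%s=%s",
                     len ? ',' : '=', settings[i][0], settings[i][1]);
        }

        /* prints: --strict=missingEmail=ignore,badTagName=warn */
        printf("--strict%s\n", msg_types);
        return 0;
    }
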
@@ -1504,13 +1477,12 @@ static int remove_duplicates_in_refs(struct ref **ref, int nr)
}
static void update_shallow(struct fetch_pack_args *args,
- struct ref *refs,
+ struct ref **sought, int nr_sought,
struct shallow_info *si)
{
struct oid_array ref = OID_ARRAY_INIT;
int *status;
int i;
- struct ref *r;
if (args->deepen && alternate_shallow_file) {
if (*alternate_shallow_file == '\0') { /* --unshallow */
@@ -1552,8 +1524,8 @@ static void update_shallow(struct fetch_pack_args *args,
remove_nonexistent_theirs_shallow(si);
if (!si->nr_ours && !si->nr_theirs)
return;
- for (r = refs; r; r = r->next)
- oid_array_append(&ref, &r->old_oid);
+ for (i = 0; i < nr_sought; i++)
+ oid_array_append(&ref, &sought[i]->old_oid);
si->ref = &ref;
if (args->update_shallow) {
@@ -1587,12 +1559,12 @@ static void update_shallow(struct fetch_pack_args *args,
* remote is also shallow, check what ref is safe to update
* without updating .git/shallow
*/
- status = xcalloc(ref.nr, sizeof(*status));
+ status = xcalloc(nr_sought, sizeof(*status));
assign_shallow_commits_to_refs(si, NULL, status);
if (si->nr_ours || si->nr_theirs) {
- for (r = refs, i = 0; r; r = r->next, i++)
+ for (i = 0; i < nr_sought; i++)
if (status[i])
- r->status = REF_STATUS_REJECT_SHALLOW;
+ sought[i]->status = REF_STATUS_REJECT_SHALLOW;
}
free(status);
oid_array_clear(&ref);
@@ -1655,7 +1627,7 @@ struct ref *fetch_pack(struct fetch_pack_args *args,
args->connectivity_checked = 1;
}
- update_shallow(args, ref_cpy, &si);
+ update_shallow(args, sought, nr_sought, &si);
cleanup:
clear_shallow_info(&si);
return ref_cpy;
diff --git a/fetch-pack.h b/fetch-pack.h
index 2160be9..5b6e868 100644
--- a/fetch-pack.h
+++ b/fetch-pack.h
@@ -16,6 +16,13 @@ struct fetch_pack_args {
const struct string_list *deepen_not;
struct list_objects_filter_options filter_options;
const struct string_list *server_options;
+
+ /*
+ * If not NULL, during packfile negotiation, fetch-pack will send "have"
+ * lines only with these tips and their ancestors.
+ */
+ const struct oid_array *negotiation_tips;
+
unsigned deepen_relative:1;
unsigned quiet:1;
unsigned keep_pack:1;
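
For callers of fetch_pack(), the new field is meant to be filled with an oid_array, roughly like the following sketch against the in-tree API (illustrative only; the ref name and the use of get_oid() for resolution are assumptions of this example):

    static void limit_negotiation_sketch(struct fetch_pack_args *args)
    {
        static struct oid_array tips = OID_ARRAY_INIT;
        struct object_id oid;

        /* only advertise "have"s reachable from refs/heads/master */
        if (!get_oid("refs/heads/master", &oid))
            oid_array_append(&tips, &oid);

        args->negotiation_tips = &tips;
    }
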
diff --git a/fsck.c b/fsck.c
index ae4b1f3..a0cee0b 100644
--- a/fsck.c
+++ b/fsck.c
@@ -1,5 +1,6 @@
#include "cache.h"
#include "object-store.h"
+#include "repository.h"
#include "object.h"
#include "blob.h"
#include "tree.h"
@@ -63,7 +64,7 @@ static struct oidset gitmodules_done = OIDSET_INIT;
FUNC(ZERO_PADDED_DATE, ERROR) \
FUNC(GITMODULES_MISSING, ERROR) \
FUNC(GITMODULES_BLOB, ERROR) \
- FUNC(GITMODULES_PARSE, ERROR) \
+ FUNC(GITMODULES_LARGE, ERROR) \
FUNC(GITMODULES_NAME, ERROR) \
FUNC(GITMODULES_SYMLINK, ERROR) \
/* warnings */ \
@@ -77,6 +78,7 @@ static struct oidset gitmodules_done = OIDSET_INIT;
FUNC(ZERO_PADDED_FILEMODE, WARN) \
FUNC(NUL_IN_COMMIT, WARN) \
/* infos (reported as warnings, but ignored by default) */ \
+ FUNC(GITMODULES_PARSE, INFO) \
FUNC(BAD_TAG_NAME, INFO) \
FUNC(MISSING_TAGGER_ENTRY, INFO)
@@ -412,14 +414,14 @@ static int fsck_walk_tree(struct tree *tree, void *data, struct fsck_options *op
continue;
if (S_ISDIR(entry.mode)) {
- obj = (struct object *)lookup_tree(entry.oid);
+ obj = (struct object *)lookup_tree(the_repository, entry.oid);
if (name && obj)
put_object_name(options, obj, "%s%s/", name,
entry.path);
result = options->walk(obj, OBJ_TREE, data, options);
}
else if (S_ISREG(entry.mode) || S_ISLNK(entry.mode)) {
- obj = (struct object *)lookup_blob(entry.oid);
+ obj = (struct object *)lookup_blob(the_repository, entry.oid);
if (name && obj)
put_object_name(options, obj, "%s%s", name,
entry.path);
@@ -517,7 +519,7 @@ int fsck_walk(struct object *obj, void *data, struct fsck_options *options)
return -1;
if (obj->type == OBJ_NONE)
- parse_object(&obj->oid);
+ parse_object(the_repository, &obj->oid);
switch (obj->type) {
case OBJ_BLOB:
@@ -999,6 +1001,7 @@ static int fsck_blob(struct blob *blob, const char *buf,
unsigned long size, struct fsck_options *options)
{
struct fsck_gitmodules_data data;
+ struct config_options config_opts = { 0 };
if (!oidset_contains(&gitmodules_found, &blob->object.oid))
return 0;
@@ -1014,15 +1017,16 @@ static int fsck_blob(struct blob *blob, const char *buf,
* that an error.
*/
return report(options, &blob->object,
- FSCK_MSG_GITMODULES_PARSE,
+ FSCK_MSG_GITMODULES_LARGE,
".gitmodules too large to parse");
}
data.obj = &blob->object;
data.options = options;
data.ret = 0;
+ config_opts.error_action = CONFIG_ERROR_SILENT;
if (git_config_from_mem(fsck_gitmodules_fn, CONFIG_ORIGIN_BLOB,
- ".gitmodules", buf, size, &data))
+ ".gitmodules", buf, size, &data, &config_opts))
data.ret |= report(options, &blob->object,
FSCK_MSG_GITMODULES_PARSE,
"could not parse gitmodules blob");
@@ -1078,7 +1082,7 @@ int fsck_finish(struct fsck_options *options)
if (oidset_contains(&gitmodules_done, oid))
continue;
- blob = lookup_blob(oid);
+ blob = lookup_blob(the_repository, oid);
if (!blob) {
struct object *obj = lookup_unknown_object(oid->hash);
ret |= report(options, obj,
diff --git a/git-compat-util.h b/git-compat-util.h
index 9a64998..89d3709 100644
--- a/git-compat-util.h
+++ b/git-compat-util.h
@@ -1239,4 +1239,10 @@ extern void unleak_memory(const void *ptr, size_t len);
#define UNLEAK(var) do {} while (0)
#endif
+/*
+ * This include must come after system headers, since it introduces macros that
+ * replace system names.
+ */
+#include "banned.h"
+
#endif
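
The include-order constraint exists because of how a "banned function" header works: it redefines selected libc names into something that cannot resolve, so any later use fails loudly at build time. The exact macros live in banned.h; the sketch below only demonstrates the technique with one function and hypothetical names.

    #include <stdio.h>
    #include <string.h> /* must come first, or the macro would break its prototype */

    /* poison strcpy: any use below this point fails to compile or link */
    #undef strcpy
    #define strcpy(x, y) do_not_use_strcpy_use_memcpy_or_strlcpy(x, y)

    int main(void)
    {
        char buf[16];

        /* strcpy(buf, "hi");   <- would now expand to an undeclared function */
        memcpy(buf, "hi", 3);   /* copies the two characters plus the NUL */
        printf("%s\n", buf);
        return 0;
    }
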
diff --git a/git-p4.py b/git-p4.py
index b449db1..7fab255 100755
--- a/git-p4.py
+++ b/git-p4.py
@@ -1494,7 +1494,13 @@ class P4Submit(Command, P4UserMap):
optparse.make_option("--disable-p4sync", dest="disable_p4sync", action="store_true",
help="Skip Perforce sync of p4/master after submit or shelve"),
]
- self.description = "Submit changes from git to the perforce depot."
+ self.description = """Submit changes from git to the perforce depot.\n
+ The `p4-pre-submit` hook is executed if it exists and is executable.
+ The hook takes no parameters and reads nothing from standard input. Exiting
+ with a non-zero status from the hook prevents `git-p4 submit` from launching.
+
+ One usage scenario is to run unit tests in the hook."""
+
self.usage += " [name of git branch to submit into perforce depot]"
self.origin = ""
self.detectRenames = False
@@ -2303,6 +2309,14 @@ class P4Submit(Command, P4UserMap):
sys.exit("number of commits (%d) must match number of shelved changelist (%d)" %
(len(commits), num_shelves))
+ hooks_path = gitConfig("core.hooksPath")
+ if len(hooks_path) <= 0:
+ hooks_path = os.path.join(os.environ.get("GIT_DIR", ".git"), "hooks")
+
+ hook_file = os.path.join(hooks_path, "p4-pre-submit")
+ if os.path.isfile(hook_file) and os.access(hook_file, os.X_OK) and subprocess.call([hook_file]) != 0:
+ sys.exit(1)
+
#
# Apply the commits, one at a time. On failure, ask if should
# continue to try the rest of the patches, or quit.
diff --git a/git-send-email.perl b/git-send-email.perl
index f4c0790..2be5dac 100755
--- a/git-send-email.perl
+++ b/git-send-email.perl
@@ -1479,7 +1479,7 @@ EOF
SSL => 1);
}
}
- else {
+ elsif (!$smtp) {
$smtp_server_port ||= 25;
$smtp ||= Net::SMTP->new($smtp_server,
Hello => $smtp_domain,
@@ -1501,7 +1501,6 @@ EOF
$smtp->starttls(ssl_verify_params())
or die sprintf(__("STARTTLS failed! %s"), IO::Socket::SSL::errstr());
}
- $smtp_encryption = '';
# Send EHLO again to receive fresh
# supported commands
$smtp->hello($smtp_domain);
diff --git a/git.c b/git.c
index 3fded74..cc0fad9 100644
--- a/git.c
+++ b/git.c
@@ -164,7 +164,7 @@ static int handle_options(const char ***argv, int *argc, int *envchanged)
if (envchanged)
*envchanged = 1;
} else if (!strcmp(cmd, "--no-replace-objects")) {
- check_replace_refs = 0;
+ read_replace_refs = 0;
setenv(NO_REPLACE_OBJECTS_ENVIRONMENT, "1", 1);
if (envchanged)
*envchanged = 1;
@@ -414,7 +414,10 @@ static int run_builtin(struct cmd_struct *p, int argc, const char **argv)
trace_argv_printf(argv, "trace: built-in: git");
+ validate_cache_entries(&the_index);
status = p->fn(argc, argv, prefix);
+ validate_cache_entries(&the_index);
+
if (status)
return status;
diff --git a/gpg-interface.c b/gpg-interface.c
index 09ddfbc..bb8ea66 100644
--- a/gpg-interface.c
+++ b/gpg-interface.c
@@ -7,10 +7,64 @@
#include "tempfile.h"
static char *configured_signing_key;
-static const char *gpg_program = "gpg";
+struct gpg_format {
+ const char *name;
+ const char *program;
+ const char **verify_args;
+ const char **sigs;
+};
+
+static const char *openpgp_verify_args[] = {
+ "--keyid-format=long",
+ NULL
+};
+static const char *openpgp_sigs[] = {
+ "-----BEGIN PGP SIGNATURE-----",
+ "-----BEGIN PGP MESSAGE-----",
+ NULL
+};
+
+static const char *x509_verify_args[] = {
+ NULL
+};
+static const char *x509_sigs[] = {
+ "-----BEGIN SIGNED MESSAGE-----",
+ NULL
+};
-#define PGP_SIGNATURE "-----BEGIN PGP SIGNATURE-----"
-#define PGP_MESSAGE "-----BEGIN PGP MESSAGE-----"
+static struct gpg_format gpg_format[] = {
+ { .name = "openpgp", .program = "gpg",
+ .verify_args = openpgp_verify_args,
+ .sigs = openpgp_sigs
+ },
+ { .name = "x509", .program = "gpgsm",
+ .verify_args = x509_verify_args,
+ .sigs = x509_sigs
+ },
+};
+
+static struct gpg_format *use_format = &gpg_format[0];
+
+static struct gpg_format *get_format_by_name(const char *str)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(gpg_format); i++)
+ if (!strcmp(gpg_format[i].name, str))
+ return gpg_format + i;
+ return NULL;
+}
+
+static struct gpg_format *get_format_by_sig(const char *sig)
+{
+ int i, j;
+
+ for (i = 0; i < ARRAY_SIZE(gpg_format); i++)
+ for (j = 0; gpg_format[i].sigs[j]; j++)
+ if (starts_with(sig, gpg_format[i].sigs[j]))
+ return gpg_format + i;
+ return NULL;
+}
void signature_check_clear(struct signature_check *sigc)
{
@@ -53,10 +107,11 @@ static void parse_gpg_output(struct signature_check *sigc)
sigc->result = sigcheck_gpg_status[i].result;
/* The trust messages are not followed by key/signer information */
if (sigc->result != 'U') {
- sigc->key = xmemdupz(found, 16);
+ next = strchrnul(found, ' ');
+ sigc->key = xmemdupz(found, next - found);
/* The ERRSIG message is not followed by signer information */
- if (sigc-> result != 'E') {
- found += 17;
+ if (*next && sigc-> result != 'E') {
+ found = next + 1;
next = strchrnul(found, '\n');
sigc->signer = xmemdupz(found, next - found);
}
@@ -101,12 +156,6 @@ void print_signature_buffer(const struct signature_check *sigc, unsigned flags)
fputs(output, stderr);
}
-static int is_gpg_start(const char *line)
-{
- return starts_with(line, PGP_SIGNATURE) ||
- starts_with(line, PGP_MESSAGE);
-}
-
size_t parse_signature(const char *buf, size_t size)
{
size_t len = 0;
@@ -114,7 +163,7 @@ size_t parse_signature(const char *buf, size_t size)
while (len < size) {
const char *eol;
- if (is_gpg_start(buf + len))
+ if (get_format_by_sig(buf + len))
match = len;
eol = memchr(buf + len, '\n', size - len);
@@ -131,6 +180,9 @@ void set_signing_key(const char *key)
int git_gpg_config(const char *var, const char *value, void *cb)
{
+ struct gpg_format *fmt = NULL;
+ char *fmtname = NULL;
+
if (!strcmp(var, "user.signingkey")) {
if (!value)
return config_error_nonbool(var);
@@ -138,13 +190,28 @@ int git_gpg_config(const char *var, const char *value, void *cb)
return 0;
}
- if (!strcmp(var, "gpg.program")) {
+ if (!strcmp(var, "gpg.format")) {
if (!value)
return config_error_nonbool(var);
- gpg_program = xstrdup(value);
+ fmt = get_format_by_name(value);
+ if (!fmt)
+ return error("unsupported value for %s: %s",
+ var, value);
+ use_format = fmt;
return 0;
}
+ if (!strcmp(var, "gpg.program") || !strcmp(var, "gpg.openpgp.program"))
+ fmtname = "openpgp";
+
+ if (!strcmp(var, "gpg.x509.program"))
+ fmtname = "x509";
+
+ if (fmtname) {
+ fmt = get_format_by_name(fmtname);
+ return git_config_string(&fmt->program, var, value);
+ }
+
return 0;
}
@@ -163,7 +230,7 @@ int sign_buffer(struct strbuf *buffer, struct strbuf *signature, const char *sig
struct strbuf gpg_status = STRBUF_INIT;
argv_array_pushl(&gpg.args,
- gpg_program,
+ use_format->program,
"--status-fd=2",
"-bsau", signing_key,
NULL);
@@ -201,6 +268,7 @@ int verify_signed_buffer(const char *payload, size_t payload_size,
struct strbuf *gpg_output, struct strbuf *gpg_status)
{
struct child_process gpg = CHILD_PROCESS_INIT;
+ struct gpg_format *fmt;
struct tempfile *temp;
int ret;
struct strbuf buf = STRBUF_INIT;
@@ -216,10 +284,14 @@ int verify_signed_buffer(const char *payload, size_t payload_size,
return -1;
}
+ fmt = get_format_by_sig(signature);
+ if (!fmt)
+ BUG("bad signature '%s'", signature);
+
+ argv_array_push(&gpg.args, fmt->program);
+ argv_array_pushv(&gpg.args, fmt->verify_args);
argv_array_pushl(&gpg.args,
- gpg_program,
"--status-fd=1",
- "--keyid-format=long",
"--verify", temp->filename.buf, "-",
NULL);
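
The gpg-interface changes above replace the single hard-coded "gpg" program with a table of signing formats: gpg.format selects "openpgp" (driven by gpg or gpg.openpgp.program) or "x509" (driven by gpgsm via gpg.x509.program), and verification picks the backend by matching the first line of the signature against each format's known armor headers. The standalone sketch below is not part of the patch; it only mirrors the prefix dispatch done by get_format_by_sig(), with illustrative names:

#include <stdio.h>
#include <string.h>

/* Illustrative stand-in for get_format_by_sig(); not git code. */
struct fmt {
	const char *name;
	const char *program;
	const char **sigs;
};

static const char *openpgp_sigs[] = {
	"-----BEGIN PGP SIGNATURE-----",
	"-----BEGIN PGP MESSAGE-----",
	NULL
};
static const char *x509_sigs[] = {
	"-----BEGIN SIGNED MESSAGE-----",
	NULL
};
static struct fmt formats[] = {
	{ "openpgp", "gpg",   openpgp_sigs },
	{ "x509",    "gpgsm", x509_sigs },
};

static const struct fmt *format_by_sig(const char *sig)
{
	size_t i, j;

	for (i = 0; i < sizeof(formats) / sizeof(formats[0]); i++)
		for (j = 0; formats[i].sigs[j]; j++)
			if (!strncmp(sig, formats[i].sigs[j],
				     strlen(formats[i].sigs[j])))
				return &formats[i];
	return NULL;
}

int main(void)
{
	const struct fmt *f =
		format_by_sig("-----BEGIN SIGNED MESSAGE-----\n...");

	printf("verify with: %s\n", f ? f->program : "(unknown)");
	return 0;
}

Fed an X.509 signature it prints "verify with: gpgsm"; a PGP armor header would select "gpg", which is how verify_signed_buffer() above now chooses fmt->program and fmt->verify_args.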
diff --git a/grep.c b/grep.c
index cd7fc6f..2b26cee 100644
--- a/grep.c
+++ b/grep.c
@@ -65,6 +65,7 @@ void init_grep_defaults(void)
color_set(opt->colors[GREP_COLOR_MATCH_SELECTED], GIT_COLOR_BOLD_RED);
color_set(opt->colors[GREP_COLOR_SELECTED], "");
color_set(opt->colors[GREP_COLOR_SEP], GIT_COLOR_CYAN);
+ opt->only_matching = 0;
opt->color = -1;
opt->output = std_output;
}
@@ -159,6 +160,7 @@ void grep_init(struct grep_opt *opt, const char *prefix)
opt->pattern_tail = &opt->pattern_list;
opt->header_tail = &opt->header_list;
+ opt->only_matching = def->only_matching;
opt->color = def->color;
opt->extended_regexp_option = def->extended_regexp_option;
opt->pattern_type_option = def->pattern_type_option;
@@ -1404,26 +1406,9 @@ static int next_match(struct grep_opt *opt, char *bol, char *eol,
return hit;
}
-static void show_line(struct grep_opt *opt, char *bol, char *eol,
- const char *name, unsigned lno, ssize_t cno, char sign)
+static void show_line_header(struct grep_opt *opt, const char *name,
+ unsigned lno, ssize_t cno, char sign)
{
- int rest = eol - bol;
- const char *match_color, *line_color = NULL;
-
- if (opt->file_break && opt->last_shown == 0) {
- if (opt->show_hunk_mark)
- opt->output(opt, "\n", 1);
- } else if (opt->pre_context || opt->post_context || opt->funcbody) {
- if (opt->last_shown == 0) {
- if (opt->show_hunk_mark) {
- output_color(opt, "--", 2, opt->colors[GREP_COLOR_SEP]);
- opt->output(opt, "\n", 1);
- }
- } else if (lno > opt->last_shown + 1) {
- output_color(opt, "--", 2, opt->colors[GREP_COLOR_SEP]);
- opt->output(opt, "\n", 1);
- }
- }
if (opt->heading && opt->last_shown == 0) {
output_color(opt, name, strlen(name), opt->colors[GREP_COLOR_FILENAME]);
opt->output(opt, "\n", 1);
@@ -1451,38 +1436,78 @@ static void show_line(struct grep_opt *opt, char *bol, char *eol,
output_color(opt, buf, strlen(buf), opt->colors[GREP_COLOR_COLUMNNO]);
output_sep(opt, sign);
}
- if (opt->color) {
+}
+
+static void show_line(struct grep_opt *opt, char *bol, char *eol,
+ const char *name, unsigned lno, ssize_t cno, char sign)
+{
+ int rest = eol - bol;
+ const char *match_color = NULL;
+ const char *line_color = NULL;
+
+ if (opt->file_break && opt->last_shown == 0) {
+ if (opt->show_hunk_mark)
+ opt->output(opt, "\n", 1);
+ } else if (opt->pre_context || opt->post_context || opt->funcbody) {
+ if (opt->last_shown == 0) {
+ if (opt->show_hunk_mark) {
+ output_color(opt, "--", 2, opt->colors[GREP_COLOR_SEP]);
+ opt->output(opt, "\n", 1);
+ }
+ } else if (lno > opt->last_shown + 1) {
+ output_color(opt, "--", 2, opt->colors[GREP_COLOR_SEP]);
+ opt->output(opt, "\n", 1);
+ }
+ }
+ if (!opt->only_matching) {
+ /*
+ * In case the line we're being called with contains more than
+ * one match, leave printing each header to the loop below.
+ */
+ show_line_header(opt, name, lno, cno, sign);
+ }
+ if (opt->color || opt->only_matching) {
regmatch_t match;
enum grep_context ctx = GREP_CONTEXT_BODY;
int ch = *eol;
int eflags = 0;
- if (sign == ':')
- match_color = opt->colors[GREP_COLOR_MATCH_SELECTED];
- else
- match_color = opt->colors[GREP_COLOR_MATCH_CONTEXT];
- if (sign == ':')
- line_color = opt->colors[GREP_COLOR_SELECTED];
- else if (sign == '-')
- line_color = opt->colors[GREP_COLOR_CONTEXT];
- else if (sign == '=')
- line_color = opt->colors[GREP_COLOR_FUNCTION];
+ if (opt->color) {
+ if (sign == ':')
+ match_color = opt->colors[GREP_COLOR_MATCH_SELECTED];
+ else
+ match_color = opt->colors[GREP_COLOR_MATCH_CONTEXT];
+ if (sign == ':')
+ line_color = opt->colors[GREP_COLOR_SELECTED];
+ else if (sign == '-')
+ line_color = opt->colors[GREP_COLOR_CONTEXT];
+ else if (sign == '=')
+ line_color = opt->colors[GREP_COLOR_FUNCTION];
+ }
*eol = '\0';
while (next_match(opt, bol, eol, ctx, &match, eflags)) {
if (match.rm_so == match.rm_eo)
break;
- output_color(opt, bol, match.rm_so, line_color);
+ if (opt->only_matching)
+ show_line_header(opt, name, lno, cno, sign);
+ else
+ output_color(opt, bol, match.rm_so, line_color);
output_color(opt, bol + match.rm_so,
match.rm_eo - match.rm_so, match_color);
+ if (opt->only_matching)
+ opt->output(opt, "\n", 1);
bol += match.rm_eo;
+ cno += match.rm_eo;
rest -= match.rm_eo;
eflags = REG_NOTBOL;
}
*eol = ch;
}
- output_color(opt, bol, rest, line_color);
- opt->output(opt, "\n", 1);
+ if (!opt->only_matching) {
+ output_color(opt, bol, rest, line_color);
+ opt->output(opt, "\n", 1);
+ }
}
#ifndef NO_PTHREADS
diff --git a/grep.h b/grep.h
index 01d2cba..0ba62a1 100644
--- a/grep.h
+++ b/grep.h
@@ -163,6 +163,7 @@ struct grep_opt {
int relative;
int pathname;
int null_following_name;
+ int only_matching;
int color;
int max_depth;
int funcname;
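
Together these grep changes implement --only-matching: when opt->only_matching is set, show_line() prints a header for every match on the line, advances the column (cno) by match.rm_eo, and retries with REG_NOTBOL. The sketch below is illustrative only, using plain POSIX regex and no git types; it reproduces that per-match loop to show how the reported column moves across a line with several hits:

#include <regex.h>
#include <stdio.h>

/*
 * Illustrative only: the per-match loop of "git grep -o", reduced to
 * plain POSIX regex. Each match gets its own header and the column
 * advances by rm_eo, like "cno += match.rm_eo" in show_line().
 */
int main(void)
{
	const char *line = "foo bar foo";
	regex_t re;
	regmatch_t m;
	long col = 0;	/* consumed prefix of the line, like cno */
	int eflags = 0;

	if (regcomp(&re, "foo", REG_EXTENDED))
		return 1;
	while (!regexec(&re, line + col, 1, &m, eflags)) {
		if (m.rm_so == m.rm_eo)
			break;
		printf("line 1, col %ld: %.*s\n",
		       (long)(col + m.rm_so + 1),
		       (int)(m.rm_eo - m.rm_so), line + col + m.rm_so);
		col += m.rm_eo;		/* cno += match.rm_eo */
		eflags = REG_NOTBOL;	/* later matches are not at BOL */
	}
	regfree(&re);
	return 0;
}

It prints one line per hit ("col 1" and "col 9" here), which is the behavior the new only_matching branch produces for every match on a single input line.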
diff --git a/help.c b/help.c
index 3ebf056..9c0b5a8 100644
--- a/help.c
+++ b/help.c
@@ -693,6 +693,7 @@ int cmd_version(int argc, const char **argv, const char *prefix)
else
printf("no commit associated with this build\n");
printf("sizeof-long: %d\n", (int)sizeof(long));
+ printf("sizeof-size_t: %d\n", (int)sizeof(size_t));
/* NEEDSWORK: also save and output GIT-BUILD_OPTIONS? */
}
return 0;
diff --git a/hex.c b/hex.c
index 8df2d63..10af1a2 100644
--- a/hex.c
+++ b/hex.c
@@ -50,7 +50,7 @@ int hex_to_bytes(unsigned char *binary, const char *hex, size_t len)
int get_sha1_hex(const char *hex, unsigned char *sha1)
{
int i;
- for (i = 0; i < GIT_SHA1_RAWSZ; i++) {
+ for (i = 0; i < the_hash_algo->rawsz; i++) {
int val = hex2chr(hex);
if (val < 0)
return -1;
@@ -69,7 +69,7 @@ int parse_oid_hex(const char *hex, struct object_id *oid, const char **end)
{
int ret = get_oid_hex(hex, oid);
if (!ret)
- *end = hex + GIT_SHA1_HEXSZ;
+ *end = hex + the_hash_algo->hexsz;
return ret;
}
@@ -79,7 +79,7 @@ char *sha1_to_hex_r(char *buffer, const unsigned char *sha1)
char *buf = buffer;
int i;
- for (i = 0; i < GIT_SHA1_RAWSZ; i++) {
+ for (i = 0; i < the_hash_algo->rawsz; i++) {
unsigned int val = *sha1++;
*buf++ = hex[val >> 4];
*buf++ = hex[val & 0xf];
diff --git a/http-backend.c b/http-backend.c
index adaef16..88c38c8 100644
--- a/http-backend.c
+++ b/http-backend.c
@@ -279,12 +279,18 @@ static struct rpc_service *select_service(struct strbuf *hdr, const char *name)
return svc;
}
+static void write_to_child(int out, const unsigned char *buf, ssize_t len, const char *prog_name)
+{
+ if (write_in_full(out, buf, len) < 0)
+ die("unable to write to '%s'", prog_name);
+}
+
/*
* This is basically strbuf_read(), except that if we
* hit max_request_buffer we die (we'd rather reject a
* maliciously large request than chew up infinite memory).
*/
-static ssize_t read_request(int fd, unsigned char **out)
+static ssize_t read_request_eof(int fd, unsigned char **out)
{
size_t len = 0, alloc = 8192;
unsigned char *buf = xmalloc(alloc);
@@ -321,13 +327,54 @@ static ssize_t read_request(int fd, unsigned char **out)
}
}
-static void inflate_request(const char *prog_name, int out, int buffer_input)
+static ssize_t read_request_fixed_len(int fd, ssize_t req_len, unsigned char **out)
+{
+ unsigned char *buf = NULL;
+ ssize_t cnt = 0;
+
+ if (max_request_buffer < req_len) {
+ die("request was larger than our maximum size (%lu): "
+ "%" PRIuMAX "; try setting GIT_HTTP_MAX_REQUEST_BUFFER",
+ max_request_buffer, (uintmax_t)req_len);
+ }
+
+ buf = xmalloc(req_len);
+ cnt = read_in_full(fd, buf, req_len);
+ if (cnt < 0) {
+ free(buf);
+ return -1;
+ }
+ *out = buf;
+ return cnt;
+}
+
+static ssize_t get_content_length(void)
+{
+ ssize_t val = -1;
+ const char *str = getenv("CONTENT_LENGTH");
+
+ if (str && !git_parse_ssize_t(str, &val))
+ die("failed to parse CONTENT_LENGTH: %s", str);
+ return val;
+}
+
+static ssize_t read_request(int fd, unsigned char **out, ssize_t req_len)
+{
+ if (req_len < 0)
+ return read_request_eof(fd, out);
+ else
+ return read_request_fixed_len(fd, req_len, out);
+}
+
+static void inflate_request(const char *prog_name, int out, int buffer_input, ssize_t req_len)
{
git_zstream stream;
unsigned char *full_request = NULL;
unsigned char in_buf[8192];
unsigned char out_buf[8192];
unsigned long cnt = 0;
+ int req_len_defined = req_len >= 0;
+ size_t req_remaining_len = req_len;
memset(&stream, 0, sizeof(stream));
git_inflate_init_gzip_only(&stream);
@@ -339,11 +386,18 @@ static void inflate_request(const char *prog_name, int out, int buffer_input)
if (full_request)
n = 0; /* nothing left to read */
else
- n = read_request(0, &full_request);
+ n = read_request(0, &full_request, req_len);
stream.next_in = full_request;
} else {
- n = xread(0, in_buf, sizeof(in_buf));
+ ssize_t buffer_len;
+ if (req_len_defined && req_remaining_len <= sizeof(in_buf))
+ buffer_len = req_remaining_len;
+ else
+ buffer_len = sizeof(in_buf);
+ n = xread(0, in_buf, buffer_len);
stream.next_in = in_buf;
+ if (req_len_defined && n > 0)
+ req_remaining_len -= n;
}
if (n <= 0)
@@ -361,9 +415,8 @@ static void inflate_request(const char *prog_name, int out, int buffer_input)
die("zlib error inflating request, result %d", ret);
n = stream.total_out - cnt;
- if (write_in_full(out, out_buf, n) < 0)
- die("%s aborted reading request", prog_name);
- cnt += n;
+ write_to_child(out, out_buf, stream.total_out - cnt, prog_name);
+ cnt = stream.total_out;
if (ret == Z_STREAM_END)
goto done;
@@ -376,18 +429,34 @@ done:
free(full_request);
}
-static void copy_request(const char *prog_name, int out)
+static void copy_request(const char *prog_name, int out, ssize_t req_len)
{
unsigned char *buf;
- ssize_t n = read_request(0, &buf);
+ ssize_t n = read_request(0, &buf, req_len);
if (n < 0)
die_errno("error reading request body");
- if (write_in_full(out, buf, n) < 0)
- die("%s aborted reading request", prog_name);
+ write_to_child(out, buf, n, prog_name);
close(out);
free(buf);
}
+static void pipe_fixed_length(const char *prog_name, int out, size_t req_len)
+{
+ unsigned char buf[8192];
+ size_t remaining_len = req_len;
+
+ while (remaining_len > 0) {
+ size_t chunk_length = remaining_len > sizeof(buf) ? sizeof(buf) : remaining_len;
+ ssize_t n = xread(0, buf, chunk_length);
+ if (n < 0)
+ die_errno("Reading request failed");
+ write_to_child(out, buf, n, prog_name);
+ remaining_len -= n;
+ }
+
+ close(out);
+}
+
static void run_service(const char **argv, int buffer_input)
{
const char *encoding = getenv("HTTP_CONTENT_ENCODING");
@@ -395,6 +464,7 @@ static void run_service(const char **argv, int buffer_input)
const char *host = getenv("REMOTE_ADDR");
int gzipped_request = 0;
struct child_process cld = CHILD_PROCESS_INIT;
+ ssize_t req_len = get_content_length();
if (encoding && !strcmp(encoding, "gzip"))
gzipped_request = 1;
@@ -413,7 +483,7 @@ static void run_service(const char **argv, int buffer_input)
"GIT_COMMITTER_EMAIL=%s@http.%s", user, host);
cld.argv = argv;
- if (buffer_input || gzipped_request)
+ if (buffer_input || gzipped_request || req_len >= 0)
cld.in = -1;
cld.git_cmd = 1;
if (start_command(&cld))
@@ -421,9 +491,11 @@ static void run_service(const char **argv, int buffer_input)
close(1);
if (gzipped_request)
- inflate_request(argv[0], cld.in, buffer_input);
+ inflate_request(argv[0], cld.in, buffer_input, req_len);
else if (buffer_input)
- copy_request(argv[0], cld.in);
+ copy_request(argv[0], cld.in, req_len);
+ else if (req_len >= 0)
+ pipe_fixed_length(argv[0], cld.in, req_len);
else
close(0);
@@ -436,13 +508,13 @@ static int show_text_ref(const char *name, const struct object_id *oid,
{
const char *name_nons = strip_namespace(name);
struct strbuf *buf = cb_data;
- struct object *o = parse_object(oid);
+ struct object *o = parse_object(the_repository, oid);
if (!o)
return 0;
strbuf_addf(buf, "%s\t%s\n", oid_to_hex(oid), name_nons);
if (o->type == OBJ_TAG) {
- o = deref_tag(o, name, 0);
+ o = deref_tag(the_repository, o, name, 0);
if (!o)
return 0;
strbuf_addf(buf, "%s\t%s^{}\n", oid_to_hex(&o->oid),
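
The http-backend.c changes make the CGI honor CONTENT_LENGTH: when the variable is present, the request body is read as exactly that many bytes (read_request_fixed_len() / pipe_fixed_length()), and only when it is absent does the code fall back to reading until EOF. The sketch below is not from the patch; it is a minimal illustration of that dispatch for a generic CGI body reader, with git's max_request_buffer safety check omitted:

#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>

/*
 * Illustrative CGI body reader, not git code: read exactly
 * CONTENT_LENGTH bytes when the variable is set, otherwise read until
 * EOF (git additionally caps the EOF case at GIT_HTTP_MAX_REQUEST_BUFFER,
 * which is omitted here).
 */
int main(void)
{
	const char *str = getenv("CONTENT_LENGTH");
	char buf[8192];
	ssize_t n;

	if (str && *str) {
		long remaining = strtol(str, NULL, 10);

		while (remaining > 0) {
			size_t chunk = remaining > (long)sizeof(buf)
				? sizeof(buf) : (size_t)remaining;
			n = read(0, buf, chunk);
			if (n <= 0)
				break;
			fwrite(buf, 1, n, stdout);
			remaining -= n;
		}
	} else {
		while ((n = read(0, buf, sizeof(buf))) > 0)
			fwrite(buf, 1, n, stdout);
	}
	return 0;
}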
diff --git a/http-push.c b/http-push.c
index 7e38522..5eaf551 100644
--- a/http-push.c
+++ b/http-push.c
@@ -1,4 +1,5 @@
#include "cache.h"
+#include "repository.h"
#include "commit.h"
#include "tag.h"
#include "blob.h"
@@ -14,6 +15,7 @@
#include "packfile.h"
#include "object-store.h"
+
#ifdef EXPAT_NEEDS_XMLPARSE_H
#include <xmlparse.h>
#else
@@ -720,9 +722,9 @@ static void one_remote_object(const struct object_id *oid)
{
struct object *obj;
- obj = lookup_object(oid->hash);
+ obj = lookup_object(the_repository, oid->hash);
if (!obj)
- obj = parse_object(oid);
+ obj = parse_object(the_repository, oid);
/* Ignore remote objects that don't exist locally */
if (!obj)
@@ -1309,10 +1311,12 @@ static struct object_list **process_tree(struct tree *tree,
while (tree_entry(&desc, &entry))
switch (object_type(entry.mode)) {
case OBJ_TREE:
- p = process_tree(lookup_tree(entry.oid), p);
+ p = process_tree(lookup_tree(the_repository, entry.oid),
+ p);
break;
case OBJ_BLOB:
- p = process_blob(lookup_blob(entry.oid), p);
+ p = process_blob(lookup_blob(the_repository, entry.oid),
+ p);
break;
default:
/* Subproject commit - not in this repository */
@@ -1459,7 +1463,7 @@ static void add_remote_info_ref(struct remote_ls_ctx *ls)
return;
}
- o = parse_object(&ref->old_oid);
+ o = parse_object(the_repository, &ref->old_oid);
if (!o) {
fprintf(stderr,
"Unable to parse object %s for remote ref %s\n",
@@ -1473,7 +1477,7 @@ static void add_remote_info_ref(struct remote_ls_ctx *ls)
oid_to_hex(&ref->old_oid), ls->dentry_name);
if (o->type == OBJ_TAG) {
- o = deref_tag(o, ls->dentry_name, 0);
+ o = deref_tag(the_repository, o, ls->dentry_name, 0);
if (o)
strbuf_addf(buf, "%s\t%s^{}\n",
oid_to_hex(&o->oid), ls->dentry_name);
diff --git a/json-writer.c b/json-writer.c
new file mode 100644
index 0000000..aadb9db
--- /dev/null
+++ b/json-writer.c
@@ -0,0 +1,414 @@
+#include "cache.h"
+#include "json-writer.h"
+
+void jw_init(struct json_writer *jw)
+{
+ strbuf_init(&jw->json, 0);
+ strbuf_init(&jw->open_stack, 0);
+ jw->need_comma = 0;
+ jw->pretty = 0;
+}
+
+void jw_release(struct json_writer *jw)
+{
+ strbuf_release(&jw->json);
+ strbuf_release(&jw->open_stack);
+}
+
+/*
+ * Append JSON-quoted version of the given string to 'out'.
+ */
+static void append_quoted_string(struct strbuf *out, const char *in)
+{
+ unsigned char c;
+
+ strbuf_addch(out, '"');
+ while ((c = *in++) != '\0') {
+ if (c == '"')
+ strbuf_addstr(out, "\\\"");
+ else if (c == '\\')
+ strbuf_addstr(out, "\\\\");
+ else if (c == '\n')
+ strbuf_addstr(out, "\\n");
+ else if (c == '\r')
+ strbuf_addstr(out, "\\r");
+ else if (c == '\t')
+ strbuf_addstr(out, "\\t");
+ else if (c == '\f')
+ strbuf_addstr(out, "\\f");
+ else if (c == '\b')
+ strbuf_addstr(out, "\\b");
+ else if (c < 0x20)
+ strbuf_addf(out, "\\u%04x", c);
+ else
+ strbuf_addch(out, c);
+ }
+ strbuf_addch(out, '"');
+}
+
+static void indent_pretty(struct json_writer *jw)
+{
+ int k;
+
+ for (k = 0; k < jw->open_stack.len; k++)
+ strbuf_addstr(&jw->json, " ");
+}
+
+/*
+ * Begin an object or array (either top-level or nested within the currently
+ * open object or array).
+ */
+static void begin(struct json_writer *jw, char ch_open, int pretty)
+{
+ jw->pretty = pretty;
+
+ strbuf_addch(&jw->json, ch_open);
+
+ strbuf_addch(&jw->open_stack, ch_open);
+ jw->need_comma = 0;
+}
+
+/*
+ * Assert that the top of the open-stack is an object.
+ */
+static void assert_in_object(const struct json_writer *jw, const char *key)
+{
+ if (!jw->open_stack.len)
+ BUG("json-writer: object: missing jw_object_begin(): '%s'", key);
+ if (jw->open_stack.buf[jw->open_stack.len - 1] != '{')
+ BUG("json-writer: object: not in object: '%s'", key);
+}
+
+/*
+ * Assert that the top of the open-stack is an array.
+ */
+static void assert_in_array(const struct json_writer *jw)
+{
+ if (!jw->open_stack.len)
+ BUG("json-writer: array: missing jw_array_begin()");
+ if (jw->open_stack.buf[jw->open_stack.len - 1] != '[')
+ BUG("json-writer: array: not in array");
+}
+
+/*
+ * Add comma if we have already seen a member at this level.
+ */
+static void maybe_add_comma(struct json_writer *jw)
+{
+ if (jw->need_comma)
+ strbuf_addch(&jw->json, ',');
+ else
+ jw->need_comma = 1;
+}
+
+static void fmt_double(struct json_writer *jw, int precision,
+ double value)
+{
+ if (precision < 0) {
+ strbuf_addf(&jw->json, "%f", value);
+ } else {
+ struct strbuf fmt = STRBUF_INIT;
+ strbuf_addf(&fmt, "%%.%df", precision);
+ strbuf_addf(&jw->json, fmt.buf, value);
+ strbuf_release(&fmt);
+ }
+}
+
+static void object_common(struct json_writer *jw, const char *key)
+{
+ assert_in_object(jw, key);
+ maybe_add_comma(jw);
+
+ if (jw->pretty) {
+ strbuf_addch(&jw->json, '\n');
+ indent_pretty(jw);
+ }
+
+ append_quoted_string(&jw->json, key);
+ strbuf_addch(&jw->json, ':');
+ if (jw->pretty)
+ strbuf_addch(&jw->json, ' ');
+}
+
+static void array_common(struct json_writer *jw)
+{
+ assert_in_array(jw);
+ maybe_add_comma(jw);
+
+ if (jw->pretty) {
+ strbuf_addch(&jw->json, '\n');
+ indent_pretty(jw);
+ }
+}
+
+/*
+ * Assert that the given JSON object or JSON array has been properly
+ * terminated. (Has closing bracket.)
+ */
+static void assert_is_terminated(const struct json_writer *jw)
+{
+ if (jw->open_stack.len)
+ BUG("json-writer: object: missing jw_end(): '%s'",
+ jw->json.buf);
+}
+
+void jw_object_begin(struct json_writer *jw, int pretty)
+{
+ begin(jw, '{', pretty);
+}
+
+void jw_object_string(struct json_writer *jw, const char *key, const char *value)
+{
+ object_common(jw, key);
+ append_quoted_string(&jw->json, value);
+}
+
+void jw_object_intmax(struct json_writer *jw, const char *key, intmax_t value)
+{
+ object_common(jw, key);
+ strbuf_addf(&jw->json, "%"PRIdMAX, value);
+}
+
+void jw_object_double(struct json_writer *jw, const char *key, int precision,
+ double value)
+{
+ object_common(jw, key);
+ fmt_double(jw, precision, value);
+}
+
+void jw_object_true(struct json_writer *jw, const char *key)
+{
+ object_common(jw, key);
+ strbuf_addstr(&jw->json, "true");
+}
+
+void jw_object_false(struct json_writer *jw, const char *key)
+{
+ object_common(jw, key);
+ strbuf_addstr(&jw->json, "false");
+}
+
+void jw_object_bool(struct json_writer *jw, const char *key, int value)
+{
+ if (value)
+ jw_object_true(jw, key);
+ else
+ jw_object_false(jw, key);
+}
+
+void jw_object_null(struct json_writer *jw, const char *key)
+{
+ object_common(jw, key);
+ strbuf_addstr(&jw->json, "null");
+}
+
+static void increase_indent(struct strbuf *sb,
+ const struct json_writer *jw,
+ int indent)
+{
+ int k;
+
+ strbuf_reset(sb);
+ for (k = 0; k < jw->json.len; k++) {
+ char ch = jw->json.buf[k];
+ strbuf_addch(sb, ch);
+ if (ch == '\n')
+ strbuf_addchars(sb, ' ', indent);
+ }
+}
+
+static void kill_indent(struct strbuf *sb,
+ const struct json_writer *jw)
+{
+ int k;
+ int eat_it = 0;
+
+ strbuf_reset(sb);
+ for (k = 0; k < jw->json.len; k++) {
+ char ch = jw->json.buf[k];
+ if (eat_it && ch == ' ')
+ continue;
+ if (ch == '\n') {
+ eat_it = 1;
+ continue;
+ }
+ eat_it = 0;
+ strbuf_addch(sb, ch);
+ }
+}
+
+static void append_sub_jw(struct json_writer *jw,
+ const struct json_writer *value)
+{
+ /*
+ * If both are pretty, increase the indentation of the sub_jw
+ * to better fit under the super.
+ *
+ * If the super is pretty, but the sub_jw is compact, leave the
+ * sub_jw compact. (We don't want to parse and rebuild the sub_jw
+ * for this debug-ish feature.)
+ *
+ * If the super is compact, and the sub_jw is pretty, convert
+ * the sub_jw to compact.
+ *
+ * If both are compact, keep the sub_jw compact.
+ */
+ if (jw->pretty && jw->open_stack.len && value->pretty) {
+ struct strbuf sb = STRBUF_INIT;
+ increase_indent(&sb, value, jw->open_stack.len * 2);
+ strbuf_addbuf(&jw->json, &sb);
+ strbuf_release(&sb);
+ return;
+ }
+ if (!jw->pretty && value->pretty) {
+ struct strbuf sb = STRBUF_INIT;
+ kill_indent(&sb, value);
+ strbuf_addbuf(&jw->json, &sb);
+ strbuf_release(&sb);
+ return;
+ }
+
+ strbuf_addbuf(&jw->json, &value->json);
+}
+
+/*
+ * Append existing (properly terminated) JSON sub-data (object or array)
+ * as-is onto the given JSON data.
+ */
+void jw_object_sub_jw(struct json_writer *jw, const char *key,
+ const struct json_writer *value)
+{
+ assert_is_terminated(value);
+
+ object_common(jw, key);
+ append_sub_jw(jw, value);
+}
+
+void jw_object_inline_begin_object(struct json_writer *jw, const char *key)
+{
+ object_common(jw, key);
+
+ jw_object_begin(jw, jw->pretty);
+}
+
+void jw_object_inline_begin_array(struct json_writer *jw, const char *key)
+{
+ object_common(jw, key);
+
+ jw_array_begin(jw, jw->pretty);
+}
+
+void jw_array_begin(struct json_writer *jw, int pretty)
+{
+ begin(jw, '[', pretty);
+}
+
+void jw_array_string(struct json_writer *jw, const char *value)
+{
+ array_common(jw);
+ append_quoted_string(&jw->json, value);
+}
+
+void jw_array_intmax(struct json_writer *jw, intmax_t value)
+{
+ array_common(jw);
+ strbuf_addf(&jw->json, "%"PRIdMAX, value);
+}
+
+void jw_array_double(struct json_writer *jw, int precision, double value)
+{
+ array_common(jw);
+ fmt_double(jw, precision, value);
+}
+
+void jw_array_true(struct json_writer *jw)
+{
+ array_common(jw);
+ strbuf_addstr(&jw->json, "true");
+}
+
+void jw_array_false(struct json_writer *jw)
+{
+ array_common(jw);
+ strbuf_addstr(&jw->json, "false");
+}
+
+void jw_array_bool(struct json_writer *jw, int value)
+{
+ if (value)
+ jw_array_true(jw);
+ else
+ jw_array_false(jw);
+}
+
+void jw_array_null(struct json_writer *jw)
+{
+ array_common(jw);
+ strbuf_addstr(&jw->json, "null");
+}
+
+void jw_array_sub_jw(struct json_writer *jw, const struct json_writer *value)
+{
+ assert_is_terminated(value);
+
+ array_common(jw);
+ append_sub_jw(jw, value);
+}
+
+void jw_array_argc_argv(struct json_writer *jw, int argc, const char **argv)
+{
+ int k;
+
+ for (k = 0; k < argc; k++)
+ jw_array_string(jw, argv[k]);
+}
+
+void jw_array_argv(struct json_writer *jw, const char **argv)
+{
+ while (*argv)
+ jw_array_string(jw, *argv++);
+}
+
+void jw_array_inline_begin_object(struct json_writer *jw)
+{
+ array_common(jw);
+
+ jw_object_begin(jw, jw->pretty);
+}
+
+void jw_array_inline_begin_array(struct json_writer *jw)
+{
+ array_common(jw);
+
+ jw_array_begin(jw, jw->pretty);
+}
+
+int jw_is_terminated(const struct json_writer *jw)
+{
+ return !jw->open_stack.len;
+}
+
+void jw_end(struct json_writer *jw)
+{
+ char ch_open;
+ int len;
+
+ if (!jw->open_stack.len)
+ BUG("json-writer: too many jw_end(): '%s'", jw->json.buf);
+
+ len = jw->open_stack.len - 1;
+ ch_open = jw->open_stack.buf[len];
+
+ strbuf_setlen(&jw->open_stack, len);
+ jw->need_comma = 1;
+
+ if (jw->pretty) {
+ strbuf_addch(&jw->json, '\n');
+ indent_pretty(jw);
+ }
+
+ if (ch_open == '{')
+ strbuf_addch(&jw->json, '}');
+ else
+ strbuf_addch(&jw->json, ']');
+}
diff --git a/json-writer.h b/json-writer.h
new file mode 100644
index 0000000..fc18acc
--- /dev/null
+++ b/json-writer.h
@@ -0,0 +1,105 @@
+#ifndef JSON_WRITER_H
+#define JSON_WRITER_H
+
+/*
+ * JSON data structures are defined at:
+ * [1] http://www.ietf.org/rfc/rfc7159.txt
+ * [2] http://json.org/
+ *
+ * The JSON-writer API allows one to build JSON data structures using a
+ * simple wrapper around a "struct strbuf" buffer. It is intended as a
+ * simple API to build output strings; it is not intended to be a general
+ * object model for JSON data. In particular, it does not re-order keys
+ * in an object (dictionary), it does not de-dup keys in an object, and
+ * it does not allow lookup or parsing of JSON data.
+ *
+ * All string values (both keys and string r-values) are properly quoted
+ * and escaped if they contain special characters.
+ *
+ * These routines create compact JSON data (with no unnecessary whitespace,
+ * newlines, or indenting). If you get an unexpected response, verify
+ * that you're not expecting a pretty JSON string.
+ *
+ * Both "JSON objects" (aka sets of k/v pairs) and "JSON array" can be
+ * constructed using a 'begin append* end' model.
+ *
+ * Nested objects and arrays can either be constructed bottom up (by
+ * creating sub object/arrays first and appending them to the super
+ * object/array) -or- by building them inline in one pass. This is a
+ * personal style and/or data shape choice.
+ *
+ * See t/helper/test-json-writer.c for various usage examples.
+ *
+ * LIMITATIONS:
+ * ============
+ *
+ * The JSON specification [1,2] defines string values as Unicode data
+ * and probably UTF-8 encoded. The current json-writer API does not
+ * enforce this and will write any string as received. However, it will
+ * properly quote and backslash-escape them as necessary. It is up to
+ * the caller to UTF-8 encode their strings *before* passing them to this
+ * API. This layer should not have to try to guess the encoding or locale
+ * of the given strings.
+ */
+
+struct json_writer
+{
+ /*
+ * Buffer of the in-progress JSON currently being composed.
+ */
+ struct strbuf json;
+
+ /*
+ * Simple stack of the currently open array and object forms.
+ * This is a string of '{' and '[' characters indicating the
+ * currently unterminated forms. This is used to ensure the
+ * properly closing character is used when popping a level and
+ * to know when the JSON is completely closed.
+ */
+ struct strbuf open_stack;
+
+ unsigned int need_comma:1;
+ unsigned int pretty:1;
+};
+
+#define JSON_WRITER_INIT { STRBUF_INIT, STRBUF_INIT, 0, 0 }
+
+void jw_init(struct json_writer *jw);
+void jw_release(struct json_writer *jw);
+
+void jw_object_begin(struct json_writer *jw, int pretty);
+void jw_array_begin(struct json_writer *jw, int pretty);
+
+void jw_object_string(struct json_writer *jw, const char *key,
+ const char *value);
+void jw_object_intmax(struct json_writer *jw, const char *key, intmax_t value);
+void jw_object_double(struct json_writer *jw, const char *key, int precision,
+ double value);
+void jw_object_true(struct json_writer *jw, const char *key);
+void jw_object_false(struct json_writer *jw, const char *key);
+void jw_object_bool(struct json_writer *jw, const char *key, int value);
+void jw_object_null(struct json_writer *jw, const char *key);
+void jw_object_sub_jw(struct json_writer *jw, const char *key,
+ const struct json_writer *value);
+
+void jw_object_inline_begin_object(struct json_writer *jw, const char *key);
+void jw_object_inline_begin_array(struct json_writer *jw, const char *key);
+
+void jw_array_string(struct json_writer *jw, const char *value);
+void jw_array_intmax(struct json_writer *jw, intmax_t value);
+void jw_array_double(struct json_writer *jw, int precision, double value);
+void jw_array_true(struct json_writer *jw);
+void jw_array_false(struct json_writer *jw);
+void jw_array_bool(struct json_writer *jw, int value);
+void jw_array_null(struct json_writer *jw);
+void jw_array_sub_jw(struct json_writer *jw, const struct json_writer *value);
+void jw_array_argc_argv(struct json_writer *jw, int argc, const char **argv);
+void jw_array_argv(struct json_writer *jw, const char **argv);
+
+void jw_array_inline_begin_object(struct json_writer *jw);
+void jw_array_inline_begin_array(struct json_writer *jw);
+
+int jw_is_terminated(const struct json_writer *jw);
+void jw_end(struct json_writer *jw);
+
+#endif /* JSON_WRITER_H */
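
The header above points to t/helper/test-json-writer.c for real examples; as a quick orientation, here is a hedged sketch of the 'begin, append, end' model using only the functions declared in this header (it assumes git's usual build environment for cache.h and strbuf):

#include "cache.h"
#include "json-writer.h"

/* Build {"name":"value","count":3,"tags":["a","b"]} and print it. */
static void demo(void)
{
	struct json_writer jw = JSON_WRITER_INIT;

	jw_object_begin(&jw, 0);		/* 0 = compact, 1 = pretty */
	jw_object_string(&jw, "name", "value");
	jw_object_intmax(&jw, "count", 3);
	jw_object_inline_begin_array(&jw, "tags");
	jw_array_string(&jw, "a");
	jw_array_string(&jw, "b");
	jw_end(&jw);				/* closes the array */
	jw_end(&jw);				/* closes the object */

	if (!jw_is_terminated(&jw))
		BUG("unterminated JSON");
	printf("%s\n", jw.json.buf);
	jw_release(&jw);
}

With pretty = 0 this produces the compact string {"name":"value","count":3,"tags":["a","b"]}; passing 1 instead yields the indented form built by indent_pretty().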
diff --git a/line-log.c b/line-log.c
index fa9cfd5..72a5fed 100644
--- a/line-log.c
+++ b/line-log.c
@@ -479,7 +479,7 @@ static struct commit *check_single_commit(struct rev_info *revs)
struct object *obj = revs->pending.objects[i].item;
if (obj->flags & UNINTERESTING)
continue;
- obj = deref_tag(obj, NULL, 0);
+ obj = deref_tag(the_repository, obj, NULL, 0);
if (obj->type != OBJ_COMMIT)
die("Non commit %s?", revs->pending.objects[i].name);
if (commit)
@@ -598,11 +598,11 @@ parse_lines(struct commit *commit, const char *prefix, struct string_list *args)
lines, anchor, &begin, &end,
full_name))
die("malformed -L argument '%s'", range_part);
- if (lines < end || ((lines || begin) && lines < begin))
+ if ((!lines && (begin || end)) || lines < begin)
die("file %s has only %lu lines", name_part, lines);
if (begin < 1)
begin = 1;
- if (end < 1)
+ if (end < 1 || lines < end)
end = lines;
begin--;
line_log_data_insert(&ranges, full_name, begin, end);
diff --git a/line-range.c b/line-range.c
index 323399d..232c390 100644
--- a/line-range.c
+++ b/line-range.c
@@ -47,7 +47,7 @@ static const char *parse_loc(const char *spec, nth_line_fn_t nth_line,
else if (!num)
*ret = begin;
else
- *ret = begin + num;
+ *ret = begin + num > 0 ? begin + num : 1;
return term;
}
return spec;
diff --git a/line-range.h b/line-range.h
index 83ba3c2..d3c54e4 100644
--- a/line-range.h
+++ b/line-range.h
@@ -19,11 +19,11 @@
typedef const char *(*nth_line_fn_t)(void *data, long lno);
-extern int parse_range_arg(const char *arg,
- nth_line_fn_t nth_line_cb,
- void *cb_data, long lines, long anchor,
- long *begin, long *end,
- const char *path);
+int parse_range_arg(const char *arg,
+ nth_line_fn_t nth_line_cb,
+ void *cb_data, long lines, long anchor,
+ long *begin, long *end,
+ const char *path);
/*
* Scan past a range argument that could be parsed by
@@ -34,6 +34,6 @@ extern int parse_range_arg(const char *arg,
* NULL in case the argument is obviously malformed.
*/
-extern const char *skip_range_arg(const char *arg);
+const char *skip_range_arg(const char *arg);
#endif /* LINE_RANGE_H */
diff --git a/list-objects.c b/list-objects.c
index b1f2138..c99c47a 100644
--- a/list-objects.c
+++ b/list-objects.c
@@ -158,7 +158,7 @@ static void process_tree(struct rev_info *revs,
if (S_ISDIR(entry.mode))
process_tree(revs,
- lookup_tree(entry.oid),
+ lookup_tree(the_repository, entry.oid),
show, base, entry.path,
cb_data, filter_fn, filter_data);
else if (S_ISGITLINK(entry.mode))
@@ -167,7 +167,7 @@ static void process_tree(struct rev_info *revs,
cb_data);
else
process_blob(revs,
- lookup_blob(entry.oid),
+ lookup_blob(the_repository, entry.oid),
show, base, entry.path,
cb_data, filter_fn, filter_data);
}
diff --git a/log-tree.c b/log-tree.c
index 4a3907f..7443e5f 100644
--- a/log-tree.c
+++ b/log-tree.c
@@ -2,6 +2,7 @@
#include "config.h"
#include "diff.h"
#include "object-store.h"
+#include "repository.h"
#include "commit.h"
#include "tag.h"
#include "graph.h"
@@ -91,20 +92,20 @@ static int add_ref_decoration(const char *refname, const struct object_id *oid,
if (starts_with(refname, git_replace_ref_base)) {
struct object_id original_oid;
- if (!check_replace_refs)
+ if (!read_replace_refs)
return 0;
if (get_oid_hex(refname + strlen(git_replace_ref_base),
&original_oid)) {
warning("invalid replace ref %s", refname);
return 0;
}
- obj = parse_object(&original_oid);
+ obj = parse_object(the_repository, &original_oid);
if (obj)
add_name_decoration(DECORATION_GRAFTED, "replaced", obj);
return 0;
}
- obj = parse_object(oid);
+ obj = parse_object(the_repository, oid);
if (!obj)
return 0;
@@ -125,7 +126,7 @@ static int add_ref_decoration(const char *refname, const struct object_id *oid,
if (!obj)
break;
if (!obj->parsed)
- parse_object(&obj->oid);
+ parse_object(the_repository, &obj->oid);
add_name_decoration(DECORATION_REF_TAG, refname, obj);
}
return 0;
@@ -133,7 +134,7 @@ static int add_ref_decoration(const char *refname, const struct object_id *oid,
static int add_graft_decoration(const struct commit_graft *graft, void *cb_data)
{
- struct commit *commit = lookup_commit(&graft->oid);
+ struct commit *commit = lookup_commit(the_repository, &graft->oid);
if (!commit)
return 0;
add_name_decoration(DECORATION_GRAFTED, "grafted", &commit->object);
@@ -497,12 +498,12 @@ static int show_one_mergetag(struct commit *commit,
size_t payload_size, gpg_message_offset;
hash_object_file(extra->value, extra->len, type_name(OBJ_TAG), &oid);
- tag = lookup_tag(&oid);
+ tag = lookup_tag(the_repository, &oid);
if (!tag)
return -1; /* error message already given */
strbuf_init(&verify_message, 256);
- if (parse_tag_buffer(tag, extra->value, extra->len))
+ if (parse_tag_buffer(the_repository, tag, extra->value, extra->len))
strbuf_addstr(&verify_message, "malformed mergetag\n");
else if (is_common_merge(commit) &&
!oidcmp(&tag->tagged->oid,
@@ -546,7 +547,7 @@ void show_log(struct rev_info *opt)
struct strbuf msgbuf = STRBUF_INIT;
struct log_info *log = opt->loginfo;
struct commit *commit = log->commit, *parent = log->parent;
- int abbrev_commit = opt->abbrev_commit ? opt->abbrev : GIT_SHA1_HEXSZ;
+ int abbrev_commit = opt->abbrev_commit ? opt->abbrev : the_hash_algo->hexsz;
const char *extra_headers = opt->extra_headers;
struct pretty_print_context ctx = {0};
diff --git a/match-trees.c b/match-trees.c
index 4cdeff5..3765330 100644
--- a/match-trees.c
+++ b/match-trees.c
@@ -83,34 +83,43 @@ static int score_trees(const struct object_id *hash1, const struct object_id *ha
int score = 0;
for (;;) {
- struct name_entry e1, e2;
- int got_entry_from_one = tree_entry(&one, &e1);
- int got_entry_from_two = tree_entry(&two, &e2);
int cmp;
- if (got_entry_from_one && got_entry_from_two)
- cmp = base_name_entries_compare(&e1, &e2);
- else if (got_entry_from_one)
+ if (one.size && two.size)
+ cmp = base_name_entries_compare(&one.entry, &two.entry);
+ else if (one.size)
/* two lacks this entry */
cmp = -1;
- else if (got_entry_from_two)
+ else if (two.size)
/* two has more entries */
cmp = 1;
else
break;
- if (cmp < 0)
+ if (cmp < 0) {
/* path1 does not appear in two */
- score += score_missing(e1.mode, e1.path);
- else if (cmp > 0)
+ score += score_missing(one.entry.mode, one.entry.path);
+ update_tree_entry(&one);
+ } else if (cmp > 0) {
/* path2 does not appear in one */
- score += score_missing(e2.mode, e2.path);
- else if (oidcmp(e1.oid, e2.oid))
- /* they are different */
- score += score_differs(e1.mode, e2.mode, e1.path);
- else
- /* same subtree or blob */
- score += score_matches(e1.mode, e2.mode, e1.path);
+ score += score_missing(two.entry.mode, two.entry.path);
+ update_tree_entry(&two);
+ } else {
+ /* path appears in both */
+ if (oidcmp(one.entry.oid, two.entry.oid)) {
+ /* they are different */
+ score += score_differs(one.entry.mode,
+ two.entry.mode,
+ one.entry.path);
+ } else {
+ /* same subtree or blob */
+ score += score_matches(one.entry.mode,
+ two.entry.mode,
+ one.entry.path);
+ }
+ update_tree_entry(&one);
+ update_tree_entry(&two);
+ }
}
free(one_buf);
free(two_buf);
diff --git a/mem-pool.c b/mem-pool.c
index 389d7af..a2841a4 100644
--- a/mem-pool.c
+++ b/mem-pool.c
@@ -5,40 +5,88 @@
#include "cache.h"
#include "mem-pool.h"
-static struct mp_block *mem_pool_alloc_block(struct mem_pool *mem_pool, size_t block_alloc)
+#define BLOCK_GROWTH_SIZE 1024*1024 - sizeof(struct mp_block);
+
+/*
+ * Allocate a new mp_block and insert it after the block specified in
+ * `insert_after`. If `insert_after` is NULL, then insert block at the
+ * head of the linked list.
+ */
+static struct mp_block *mem_pool_alloc_block(struct mem_pool *mem_pool, size_t block_alloc, struct mp_block *insert_after)
{
struct mp_block *p;
mem_pool->pool_alloc += sizeof(struct mp_block) + block_alloc;
p = xmalloc(st_add(sizeof(struct mp_block), block_alloc));
- p->next_block = mem_pool->mp_block;
+
p->next_free = (char *)p->space;
p->end = p->next_free + block_alloc;
- mem_pool->mp_block = p;
+
+ if (insert_after) {
+ p->next_block = insert_after->next_block;
+ insert_after->next_block = p;
+ } else {
+ p->next_block = mem_pool->mp_block;
+ mem_pool->mp_block = p;
+ }
return p;
}
+void mem_pool_init(struct mem_pool **mem_pool, size_t initial_size)
+{
+ struct mem_pool *pool;
+
+ if (*mem_pool)
+ return;
+
+ pool = xcalloc(1, sizeof(*pool));
+
+ pool->block_alloc = BLOCK_GROWTH_SIZE;
+
+ if (initial_size > 0)
+ mem_pool_alloc_block(pool, initial_size, NULL);
+
+ *mem_pool = pool;
+}
+
+void mem_pool_discard(struct mem_pool *mem_pool, int invalidate_memory)
+{
+ struct mp_block *block, *block_to_free;
+
+ block = mem_pool->mp_block;
+ while (block)
+ {
+ block_to_free = block;
+ block = block->next_block;
+
+ if (invalidate_memory)
+ memset(block_to_free->space, 0xDD, ((char *)block_to_free->end) - ((char *)block_to_free->space));
+
+ free(block_to_free);
+ }
+
+ free(mem_pool);
+}
+
void *mem_pool_alloc(struct mem_pool *mem_pool, size_t len)
{
- struct mp_block *p;
+ struct mp_block *p = NULL;
void *r;
/* round up to a 'uintmax_t' alignment */
if (len & (sizeof(uintmax_t) - 1))
len += sizeof(uintmax_t) - (len & (sizeof(uintmax_t) - 1));
- for (p = mem_pool->mp_block; p; p = p->next_block)
- if (p->end - p->next_free >= len)
- break;
+ if (mem_pool->mp_block &&
+ mem_pool->mp_block->end - mem_pool->mp_block->next_free >= len)
+ p = mem_pool->mp_block;
if (!p) {
- if (len >= (mem_pool->block_alloc / 2)) {
- mem_pool->pool_alloc += len;
- return xmalloc(len);
- }
+ if (len >= (mem_pool->block_alloc / 2))
+ return mem_pool_alloc_block(mem_pool, len, mem_pool->mp_block);
- p = mem_pool_alloc_block(mem_pool, mem_pool->block_alloc);
+ p = mem_pool_alloc_block(mem_pool, mem_pool->block_alloc, NULL);
}
r = p->next_free;
@@ -53,3 +101,45 @@ void *mem_pool_calloc(struct mem_pool *mem_pool, size_t count, size_t size)
memset(r, 0, len);
return r;
}
+
+int mem_pool_contains(struct mem_pool *mem_pool, void *mem)
+{
+ struct mp_block *p;
+
+ /* Check if memory is allocated in a block */
+ for (p = mem_pool->mp_block; p; p = p->next_block)
+ if ((mem >= ((void *)p->space)) &&
+ (mem < ((void *)p->end)))
+ return 1;
+
+ return 0;
+}
+
+void mem_pool_combine(struct mem_pool *dst, struct mem_pool *src)
+{
+ struct mp_block *p;
+
+ /* Append the blocks from src to dst */
+ if (dst->mp_block && src->mp_block) {
+ /*
+ * src and dst have blocks, append
+ * blocks from src to dst.
+ */
+ p = dst->mp_block;
+ while (p->next_block)
+ p = p->next_block;
+
+ p->next_block = src->mp_block;
+ } else if (src->mp_block) {
+ /*
+ * src has blocks, dst is empty.
+ */
+ dst->mp_block = src->mp_block;
+ } else {
+ /* src is empty, nothing to do. */
+ }
+
+ dst->pool_alloc += src->pool_alloc;
+ src->pool_alloc = 0;
+ src->mp_block = NULL;
+}
diff --git a/mem-pool.h b/mem-pool.h
index 829ad58..999d3c3 100644
--- a/mem-pool.h
+++ b/mem-pool.h
@@ -22,6 +22,16 @@ struct mem_pool {
};
/*
+ * Initialize mem_pool with specified initial size.
+ */
+void mem_pool_init(struct mem_pool **mem_pool, size_t initial_size);
+
+/*
+ * Discard a memory pool and free all the memory it is responsible for.
+ */
+void mem_pool_discard(struct mem_pool *mem_pool, int invalidate_memory);
+
+/*
* Alloc memory from the mem_pool.
*/
void *mem_pool_alloc(struct mem_pool *pool, size_t len);
@@ -31,4 +41,17 @@ void *mem_pool_alloc(struct mem_pool *pool, size_t len);
*/
void *mem_pool_calloc(struct mem_pool *pool, size_t count, size_t size);
+/*
+ * Move the memory associated with the 'src' pool to the 'dst' pool. The 'src'
+ * pool will be empty and not contain any memory. It still needs to be free'd
+ * with a call to `mem_pool_discard`.
+ */
+void mem_pool_combine(struct mem_pool *dst, struct mem_pool *src);
+
+/*
+ * Check if a memory pointed at by 'mem' is part of the range of
+ * memory managed by the specified mem_pool.
+ */
+int mem_pool_contains(struct mem_pool *mem_pool, void *mem);
+
#endif
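
The new mem-pool entry points cover the pool's whole life cycle: mem_pool_init()/mem_pool_discard() create and free a pool, mem_pool_combine() moves every block from one pool into another, and mem_pool_contains() answers whether a pointer came from a given pool. The sketch below is not part of the patch; it only strings the declared functions together in the intended order:

#include "cache.h"
#include "mem-pool.h"

/* Sketch of the intended life cycle; not taken from the patch. */
static void demo(void)
{
	struct mem_pool *pool = NULL, *scratch = NULL;
	char *msg;

	mem_pool_init(&pool, 4 * 1024);	/* start with one 4KB block */
	mem_pool_init(&scratch, 0);

	msg = mem_pool_calloc(scratch, 1, 64);
	xsnprintf(msg, 64, "hello from the pool");

	if (!mem_pool_contains(scratch, msg))
		BUG("allocation not tracked by its pool");

	/* hand everything owned by 'scratch' over to 'pool' */
	mem_pool_combine(pool, scratch);
	if (!mem_pool_contains(pool, msg))
		BUG("combine did not move the block");

	mem_pool_discard(scratch, 0);	/* now empty, but still must be freed */
	mem_pool_discard(pool, 1);	/* poison and free every block */
}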
diff --git a/merge-recursive.c b/merge-recursive.c
index 113c1d6..bd053c7 100644
--- a/merge-recursive.c
+++ b/merge-recursive.c
@@ -9,6 +9,7 @@
#include "lockfile.h"
#include "cache-tree.h"
#include "object-store.h"
+#include "repository.h"
#include "commit.h"
#include "blob.h"
#include "builtin.h"
@@ -157,7 +158,7 @@ static struct tree *shift_tree_object(struct tree *one, struct tree *two,
}
if (!oidcmp(&two->object.oid, &shifted))
return two;
- return lookup_tree(&shifted);
+ return lookup_tree(the_repository, &shifted);
}
static struct commit *make_virtual_commit(struct tree *tree, const char *comment)
@@ -319,7 +320,7 @@ static int add_cacheinfo(struct merge_options *o,
struct cache_entry *ce;
int ret;
- ce = make_cache_entry(mode, oid ? oid->hash : null_sha1, path, stage, 0);
+ ce = make_cache_entry(&the_index, mode, oid ? oid : &null_oid, path, stage, 0);
if (!ce)
return err(o, _("add_cacheinfo failed for path '%s'; merge aborting."), path);
@@ -327,7 +328,7 @@ static int add_cacheinfo(struct merge_options *o,
if (refresh) {
struct cache_entry *nce;
- nce = refresh_cache_entry(ce, CE_MATCH_REFRESH | CE_MATCH_IGNORE_MISSING);
+ nce = refresh_cache_entry(&the_index, ce, CE_MATCH_REFRESH | CE_MATCH_IGNORE_MISSING);
if (!nce)
return err(o, _("add_cacheinfo failed to refresh for path '%s'; merge aborting."), path);
if (nce != ce)
@@ -415,7 +416,7 @@ struct tree *write_tree_from_memory(struct merge_options *o)
return NULL;
}
- result = lookup_tree(&active_cache_tree->oid);
+ result = lookup_tree(the_repository, &active_cache_tree->oid);
return result;
}
@@ -1191,9 +1192,9 @@ static int merge_submodule(struct merge_options *o,
return 0;
}
- if (!(commit_base = lookup_commit_reference(base)) ||
- !(commit_a = lookup_commit_reference(a)) ||
- !(commit_b = lookup_commit_reference(b))) {
+ if (!(commit_base = lookup_commit_reference(the_repository, base)) ||
+ !(commit_a = lookup_commit_reference(the_repository, a)) ||
+ !(commit_b = lookup_commit_reference(the_repository, b))) {
output(o, 1, _("Failed to merge submodule %s (commits not present)"), path);
return 0;
}
@@ -3069,10 +3070,26 @@ static int merge_content(struct merge_options *o,
if (mfi.clean &&
was_tracked_and_matches(o, path, &mfi.oid, mfi.mode) &&
!df_conflict_remains) {
+ int pos;
+ struct cache_entry *ce;
+
output(o, 3, _("Skipped %s (merged same as existing)"), path);
if (add_cacheinfo(o, mfi.mode, &mfi.oid, path,
0, (!o->call_depth && !is_dirty), 0))
return -1;
+ /*
+ * However, add_cacheinfo() will delete the old cache entry
+ * and add a new one. We need to copy over any skip_worktree
+ * flag to avoid making the file appear as if it were
+ * deleted by the user.
+ */
+ pos = index_name_pos(&o->orig_index, path, strlen(path));
+ ce = o->orig_index.cache[pos];
+ if (ce_skip_worktree(ce)) {
+ pos = index_name_pos(&the_index, path, strlen(path));
+ ce = the_index.cache[pos];
+ ce->ce_flags |= CE_SKIP_WORKTREE;
+ }
return mfi.clean;
}
@@ -3280,6 +3297,13 @@ int merge_trees(struct merge_options *o,
struct tree **result)
{
int code, clean;
+ struct strbuf sb = STRBUF_INIT;
+
+ if (!o->call_depth && index_has_changes(&the_index, head, &sb)) {
+ err(o, _("Your local changes to the following files would be overwritten by merge:\n %s"),
+ sb.buf);
+ return -1;
+ }
if (o->subtree_shift) {
merge = shift_tree_object(head, merge, o->subtree_shift);
@@ -3287,13 +3311,6 @@ int merge_trees(struct merge_options *o,
}
if (oid_eq(&common->object.oid, &merge->object.oid)) {
- struct strbuf sb = STRBUF_INIT;
-
- if (!o->call_depth && index_has_changes(&sb)) {
- err(o, _("Dirty index: cannot merge (dirty: %s)"),
- sb.buf);
- return 0;
- }
output(o, 0, _("Already up to date!"));
*result = head;
return 1;
@@ -3426,7 +3443,7 @@ int merge_recursive(struct merge_options *o,
/* if there is no common ancestor, use an empty tree */
struct tree *tree;
- tree = lookup_tree(the_hash_algo->empty_tree);
+ tree = lookup_tree(the_repository, the_repository->hash_algo->empty_tree);
merged_common_ancestors = make_virtual_commit(tree, "ancestor");
}
@@ -3488,7 +3505,9 @@ static struct commit *get_ref(const struct object_id *oid, const char *name)
{
struct object *object;
- object = deref_tag(parse_object(oid), name, strlen(name));
+ object = deref_tag(the_repository, parse_object(the_repository, oid),
+ name,
+ strlen(name));
if (!object)
return NULL;
if (object->type == OBJ_TREE)
diff --git a/merge.c b/merge.c
index 0783858..e30e03f 100644
--- a/merge.c
+++ b/merge.c
@@ -14,37 +14,6 @@ static const char *merge_argument(struct commit *commit)
return oid_to_hex(commit ? &commit->object.oid : the_hash_algo->empty_tree);
}
-int index_has_changes(struct strbuf *sb)
-{
- struct object_id head;
- int i;
-
- if (!get_oid_tree("HEAD", &head)) {
- struct diff_options opt;
-
- diff_setup(&opt);
- opt.flags.exit_with_status = 1;
- if (!sb)
- opt.flags.quick = 1;
- do_diff_cache(&head, &opt);
- diffcore_std(&opt);
- for (i = 0; sb && i < diff_queued_diff.nr; i++) {
- if (i)
- strbuf_addch(sb, ' ');
- strbuf_addstr(sb, diff_queued_diff.queue[i]->two->path);
- }
- diff_flush(&opt);
- return opt.flags.has_changes != 0;
- } else {
- for (i = 0; sb && i < active_nr; i++) {
- if (i)
- strbuf_addch(sb, ' ');
- strbuf_addstr(sb, active_cache[i]->name);
- }
- return !!active_nr;
- }
-}
-
int try_merge_command(const char *strategy, size_t xopts_nr,
const char **xopts, struct commit_list *common,
const char *head_arg, struct commit_list *remotes)
diff --git a/negotiator/default.c b/negotiator/default.c
new file mode 100644
index 0000000..4b78f6b
--- /dev/null
+++ b/negotiator/default.c
@@ -0,0 +1,176 @@
+#include "cache.h"
+#include "default.h"
+#include "../commit.h"
+#include "../fetch-negotiator.h"
+#include "../prio-queue.h"
+#include "../refs.h"
+#include "../tag.h"
+
+/* Remember to update object flag allocation in object.h */
+#define COMMON (1U << 2)
+#define COMMON_REF (1U << 3)
+#define SEEN (1U << 4)
+#define POPPED (1U << 5)
+
+static int marked;
+
+struct negotiation_state {
+ struct prio_queue rev_list;
+ int non_common_revs;
+};
+
+static void rev_list_push(struct negotiation_state *ns,
+ struct commit *commit, int mark)
+{
+ if (!(commit->object.flags & mark)) {
+ commit->object.flags |= mark;
+
+ if (parse_commit(commit))
+ return;
+
+ prio_queue_put(&ns->rev_list, commit);
+
+ if (!(commit->object.flags & COMMON))
+ ns->non_common_revs++;
+ }
+}
+
+static int clear_marks(const char *refname, const struct object_id *oid,
+ int flag, void *cb_data)
+{
+ struct object *o = deref_tag(the_repository, parse_object(the_repository, oid), refname, 0);
+
+ if (o && o->type == OBJ_COMMIT)
+ clear_commit_marks((struct commit *)o,
+ COMMON | COMMON_REF | SEEN | POPPED);
+ return 0;
+}
+
+/*
+ * This function marks a rev and its ancestors as common.
+ * In some cases, it is desirable to mark only the ancestors (for example
+ * when only the server does not yet know that they are common).
+ */
+static void mark_common(struct negotiation_state *ns, struct commit *commit,
+ int ancestors_only, int dont_parse)
+{
+ if (commit != NULL && !(commit->object.flags & COMMON)) {
+ struct object *o = (struct object *)commit;
+
+ if (!ancestors_only)
+ o->flags |= COMMON;
+
+ if (!(o->flags & SEEN))
+ rev_list_push(ns, commit, SEEN);
+ else {
+ struct commit_list *parents;
+
+ if (!ancestors_only && !(o->flags & POPPED))
+ ns->non_common_revs--;
+ if (!o->parsed && !dont_parse)
+ if (parse_commit(commit))
+ return;
+
+ for (parents = commit->parents;
+ parents;
+ parents = parents->next)
+ mark_common(ns, parents->item, 0,
+ dont_parse);
+ }
+ }
+}
+
+/*
+ * Get the next rev to send, ignoring the common.
+ */
+static const struct object_id *get_rev(struct negotiation_state *ns)
+{
+ struct commit *commit = NULL;
+
+ while (commit == NULL) {
+ unsigned int mark;
+ struct commit_list *parents;
+
+ if (ns->rev_list.nr == 0 || ns->non_common_revs == 0)
+ return NULL;
+
+ commit = prio_queue_get(&ns->rev_list);
+ parse_commit(commit);
+ parents = commit->parents;
+
+ commit->object.flags |= POPPED;
+ if (!(commit->object.flags & COMMON))
+ ns->non_common_revs--;
+
+ if (commit->object.flags & COMMON) {
+ /* do not send "have", and ignore ancestors */
+ commit = NULL;
+ mark = COMMON | SEEN;
+ } else if (commit->object.flags & COMMON_REF)
+ /* send "have", and ignore ancestors */
+ mark = COMMON | SEEN;
+ else
+ /* send "have", also for its ancestors */
+ mark = SEEN;
+
+ while (parents) {
+ if (!(parents->item->object.flags & SEEN))
+ rev_list_push(ns, parents->item, mark);
+ if (mark & COMMON)
+ mark_common(ns, parents->item, 1, 0);
+ parents = parents->next;
+ }
+ }
+
+ return &commit->object.oid;
+}
+
+static void known_common(struct fetch_negotiator *n, struct commit *c)
+{
+ if (!(c->object.flags & SEEN)) {
+ rev_list_push(n->data, c, COMMON_REF | SEEN);
+ mark_common(n->data, c, 1, 1);
+ }
+}
+
+static void add_tip(struct fetch_negotiator *n, struct commit *c)
+{
+ n->known_common = NULL;
+ rev_list_push(n->data, c, SEEN);
+}
+
+static const struct object_id *next(struct fetch_negotiator *n)
+{
+ n->known_common = NULL;
+ n->add_tip = NULL;
+ return get_rev(n->data);
+}
+
+static int ack(struct fetch_negotiator *n, struct commit *c)
+{
+ int known_to_be_common = !!(c->object.flags & COMMON);
+ mark_common(n->data, c, 0, 1);
+ return known_to_be_common;
+}
+
+static void release(struct fetch_negotiator *n)
+{
+ clear_prio_queue(&((struct negotiation_state *)n->data)->rev_list);
+ FREE_AND_NULL(n->data);
+}
+
+void default_negotiator_init(struct fetch_negotiator *negotiator)
+{
+ struct negotiation_state *ns;
+ negotiator->known_common = known_common;
+ negotiator->add_tip = add_tip;
+ negotiator->next = next;
+ negotiator->ack = ack;
+ negotiator->release = release;
+ negotiator->data = ns = xcalloc(1, sizeof(*ns));
+ ns->rev_list.compare = compare_commits_by_commit_date;
+
+ if (marked)
+ for_each_ref(clear_marks, NULL);
+ marked = 1;
+}
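
default.c is the traditional negotiation algorithm repackaged behind the new fetch_negotiator vtable. The sketch below shows how a caller such as fetch-pack might drive it; the negotiate() helper and its parameters are hypothetical, and the vtable members (known_common, add_tip, next, ack, release) are inferred from the assignments in default_negotiator_init() above:

#include "cache.h"
#include "fetch-negotiator.h"
#include "negotiator/default.h"

/*
 * Hypothetical driver, not from this patch: announce commits both
 * sides already share, seed the walk with our ref tips, then pull
 * "have" candidates until the walk runs out.
 */
static void negotiate(struct commit **tips, int nr_tips,
		      struct commit **known, int nr_known)
{
	struct fetch_negotiator negotiator;
	const struct object_id *oid;
	int i;

	default_negotiator_init(&negotiator);

	for (i = 0; i < nr_known; i++)
		negotiator.known_common(&negotiator, known[i]);
	for (i = 0; i < nr_tips; i++)
		negotiator.add_tip(&negotiator, tips[i]);

	while ((oid = negotiator.next(&negotiator))) {
		/* a real caller would send "have <oid>" to the server here */
		trace_printf("have %s\n", oid_to_hex(oid));
	}

	negotiator.release(&negotiator);
}

A real negotiation would also feed each commit the server ACKs back through negotiator.ack(), which is what lets mark_common() prune whole ancestries from the queue.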
diff --git a/negotiator/default.h b/negotiator/default.h
new file mode 100644
index 0000000..d23a8f2
--- /dev/null
+++ b/negotiator/default.h
@@ -0,0 +1,8 @@
+#ifndef NEGOTIATOR_DEFAULT_H
+#define NEGOTIATOR_DEFAULT_H
+
+struct fetch_negotiator;
+
+void default_negotiator_init(struct fetch_negotiator *negotiator);
+
+#endif
diff --git a/negotiator/skipping.c b/negotiator/skipping.c
new file mode 100644
index 0000000..dffbc76
--- /dev/null
+++ b/negotiator/skipping.c
@@ -0,0 +1,250 @@
+#include "cache.h"
+#include "skipping.h"
+#include "../commit.h"
+#include "../fetch-negotiator.h"
+#include "../prio-queue.h"
+#include "../refs.h"
+#include "../tag.h"
+
+/* Remember to update object flag allocation in object.h */
+/*
+ * Both us and the server know that both parties have this object.
+ */
+#define COMMON (1U << 2)
+/*
+ * The server has told us that it has this object. We still need to tell the
+ * server that we have this object (or one of its descendants), but since we are
+ * going to do that, we do not need to tell the server about its ancestors.
+ */
+#define ADVERTISED (1U << 3)
+/*
+ * This commit has entered the priority queue.
+ */
+#define SEEN (1U << 4)
+/*
+ * This commit has left the priority queue.
+ */
+#define POPPED (1U << 5)
+
+static int marked;
+
+/*
+ * An entry in the priority queue.
+ */
+struct entry {
+ struct commit *commit;
+
+ /*
+ * Used only if commit is not COMMON.
+ */
+ uint16_t original_ttl;
+ uint16_t ttl;
+};
+
+struct data {
+ struct prio_queue rev_list;
+
+ /*
+ * The number of non-COMMON commits in rev_list.
+ */
+ int non_common_revs;
+};
+
+static int compare(const void *a_, const void *b_, void *unused)
+{
+ const struct entry *a = a_;
+ const struct entry *b = b_;
+ return compare_commits_by_commit_date(a->commit, b->commit, NULL);
+}
+
+static struct entry *rev_list_push(struct data *data, struct commit *commit, int mark)
+{
+ struct entry *entry;
+ commit->object.flags |= mark | SEEN;
+
+ entry = xcalloc(1, sizeof(*entry));
+ entry->commit = commit;
+ prio_queue_put(&data->rev_list, entry);
+
+ if (!(mark & COMMON))
+ data->non_common_revs++;
+ return entry;
+}
+
+static int clear_marks(const char *refname, const struct object_id *oid,
+ int flag, void *cb_data)
+{
+ struct object *o = deref_tag(the_repository, parse_object(the_repository, oid), refname, 0);
+
+ if (o && o->type == OBJ_COMMIT)
+ clear_commit_marks((struct commit *)o,
+ COMMON | ADVERTISED | SEEN | POPPED);
+ return 0;
+}
+
+/*
+ * Mark this SEEN commit and all its SEEN ancestors as COMMON.
+ */
+static void mark_common(struct data *data, struct commit *c)
+{
+ struct commit_list *p;
+
+ if (c->object.flags & COMMON)
+ return;
+ c->object.flags |= COMMON;
+ if (!(c->object.flags & POPPED))
+ data->non_common_revs--;
+
+ if (!c->object.parsed)
+ return;
+ for (p = c->parents; p; p = p->next) {
+ if (p->item->object.flags & SEEN)
+ mark_common(data, p->item);
+ }
+}
+
+/*
+ * Ensure that the priority queue has an entry for to_push, and ensure that the
+ * entry has the correct flags and ttl.
+ *
+ * This function returns 1 if an entry was found or created, and 0 otherwise
+ * (because the entry for this commit had already been popped).
+ */
+static int push_parent(struct data *data, struct entry *entry,
+ struct commit *to_push)
+{
+ struct entry *parent_entry;
+
+ if (to_push->object.flags & SEEN) {
+ int i;
+ if (to_push->object.flags & POPPED)
+ /*
+ * The entry for this commit has already been popped,
+ * due to clock skew. Pretend that this parent does not
+ * exist.
+ */
+ return 0;
+ /*
+ * Find the existing entry and use it.
+ */
+ for (i = 0; i < data->rev_list.nr; i++) {
+ parent_entry = data->rev_list.array[i].data;
+ if (parent_entry->commit == to_push)
+ goto parent_found;
+ }
+ BUG("missing parent in priority queue");
+parent_found:
+ ;
+ } else {
+ parent_entry = rev_list_push(data, to_push, 0);
+ }
+
+ if (entry->commit->object.flags & (COMMON | ADVERTISED)) {
+ mark_common(data, to_push);
+ } else {
+ uint16_t new_original_ttl = entry->ttl
+ ? entry->original_ttl : entry->original_ttl * 3 / 2 + 1;
+ uint16_t new_ttl = entry->ttl
+ ? entry->ttl - 1 : new_original_ttl;
+ if (parent_entry->original_ttl < new_original_ttl) {
+ parent_entry->original_ttl = new_original_ttl;
+ parent_entry->ttl = new_ttl;
+ }
+ }
+
+ return 1;
+}
+
+static const struct object_id *get_rev(struct data *data)
+{
+ struct commit *to_send = NULL;
+
+ while (to_send == NULL) {
+ struct entry *entry;
+ struct commit *commit;
+ struct commit_list *p;
+ int parent_pushed = 0;
+
+ if (data->rev_list.nr == 0 || data->non_common_revs == 0)
+ return NULL;
+
+ entry = prio_queue_get(&data->rev_list);
+ commit = entry->commit;
+ commit->object.flags |= POPPED;
+ if (!(commit->object.flags & COMMON))
+ data->non_common_revs--;
+
+ if (!(commit->object.flags & COMMON) && !entry->ttl)
+ to_send = commit;
+
+ parse_commit(commit);
+ for (p = commit->parents; p; p = p->next)
+ parent_pushed |= push_parent(data, entry, p->item);
+
+ if (!(commit->object.flags & COMMON) && !parent_pushed)
+ /*
+ * This commit has no parents, or all of its parents
+ * have already been popped (due to clock skew), so send
+ * it anyway.
+ */
+ to_send = commit;
+
+ free(entry);
+ }
+
+ return &to_send->object.oid;
+}
+
+static void known_common(struct fetch_negotiator *n, struct commit *c)
+{
+ if (c->object.flags & SEEN)
+ return;
+ rev_list_push(n->data, c, ADVERTISED);
+}
+
+static void add_tip(struct fetch_negotiator *n, struct commit *c)
+{
+ n->known_common = NULL;
+ if (c->object.flags & SEEN)
+ return;
+ rev_list_push(n->data, c, 0);
+}
+
+static const struct object_id *next(struct fetch_negotiator *n)
+{
+ n->known_common = NULL;
+ n->add_tip = NULL;
+ return get_rev(n->data);
+}
+
+static int ack(struct fetch_negotiator *n, struct commit *c)
+{
+ int known_to_be_common = !!(c->object.flags & COMMON);
+ if (!(c->object.flags & SEEN))
+ die("received ack for commit %s not sent as 'have'\n",
+ oid_to_hex(&c->object.oid));
+ mark_common(n->data, c);
+ return known_to_be_common;
+}
+
+static void release(struct fetch_negotiator *n)
+{
+ clear_prio_queue(&((struct data *)n->data)->rev_list);
+ FREE_AND_NULL(n->data);
+}
+
+void skipping_negotiator_init(struct fetch_negotiator *negotiator)
+{
+ struct data *data;
+ negotiator->known_common = known_common;
+ negotiator->add_tip = add_tip;
+ negotiator->next = next;
+ negotiator->ack = ack;
+ negotiator->release = release;
+ negotiator->data = data = xcalloc(1, sizeof(*data));
+ data->rev_list.compare = compare;
+
+ if (marked)
+ for_each_ref(clear_marks, NULL);
+ marked = 1;
+}
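
Editorial note: the exponential back-off performed by push_parent() above is easier to see in isolation. The standalone sketch below (not part of the patch) replays the original_ttl/ttl updates along a strictly linear history and prints the depths at which a "have" would be sent; the gaps grow by roughly a factor of 1.5, so the number of "have" lines is logarithmic in the history length.

/*
 * Standalone illustration of skipping.c's ttl schedule.  Assumptions:
 * a linear history and a fresh entry (ttl = original_ttl = 0).
 */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint16_t original_ttl = 0, ttl = 0;
	int depth, sent = 0;

	for (depth = 0; sent < 10; depth++) {
		if (!ttl) {
			printf("send \"have\" at depth %d\n", depth);
			sent++;
			/* same update as push_parent() when ttl hits 0 */
			original_ttl = original_ttl * 3 / 2 + 1;
			ttl = original_ttl;
		} else {
			ttl--; /* skip this commit */
		}
	}
	return 0;
}
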
diff --git a/negotiator/skipping.h b/negotiator/skipping.h
new file mode 100644
index 0000000..d7dfa6c
--- /dev/null
+++ b/negotiator/skipping.h
@@ -0,0 +1,8 @@
+#ifndef NEGOTIATOR_SKIPPING_H
+#define NEGOTIATOR_SKIPPING_H
+
+struct fetch_negotiator;
+
+void skipping_negotiator_init(struct fetch_negotiator *negotiator);
+
+#endif
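
Editorial note: nothing in this excerpt shows how a caller chooses between the two negotiators. A plausible selection helper is sketched below; the helper and the "skipping" name lookup are assumptions of this sketch, and only default_negotiator_init() and skipping_negotiator_init() come from the patch.

/*
 * Hypothetical selection helper; assumes the usual git includes
 * (git-compat-util.h via cache.h) are already in effect.
 */
#include "negotiator/default.h"
#include "negotiator/skipping.h"

static void negotiator_init_by_name(struct fetch_negotiator *negotiator,
				    const char *algorithm)
{
	if (algorithm && !strcmp(algorithm, "skipping"))
		skipping_negotiator_init(negotiator);
	else
		default_negotiator_init(negotiator);
}
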
diff --git a/notes-cache.c b/notes-cache.c
index d577003..d87e7ca 100644
--- a/notes-cache.c
+++ b/notes-cache.c
@@ -1,6 +1,7 @@
#include "cache.h"
#include "notes-cache.h"
#include "object-store.h"
+#include "repository.h"
#include "commit.h"
#include "refs.h"
@@ -15,7 +16,7 @@ static int notes_cache_match_validity(const char *ref, const char *validity)
if (read_ref(ref, &oid) < 0)
return 0;
- commit = lookup_commit_reference_gently(&oid, 1);
+ commit = lookup_commit_reference_gently(the_repository, &oid, 1);
if (!commit)
return 0;
diff --git a/notes-merge.c b/notes-merge.c
index 9cc2ee1..76ab19e 100644
--- a/notes-merge.c
+++ b/notes-merge.c
@@ -2,6 +2,7 @@
#include "commit.h"
#include "refs.h"
#include "object-store.h"
+#include "repository.h"
#include "diff.h"
#include "diffcore.h"
#include "xdiff-interface.h"
@@ -553,7 +554,7 @@ int notes_merge(struct notes_merge_options *o,
else if (!check_refname_format(o->local_ref, 0) &&
is_null_oid(&local_oid))
local = NULL; /* local_oid == null_oid indicates unborn ref */
- else if (!(local = lookup_commit_reference(&local_oid)))
+ else if (!(local = lookup_commit_reference(the_repository, &local_oid)))
die("Could not parse local commit %s (%s)",
oid_to_hex(&local_oid), o->local_ref);
trace_printf("\tlocal commit: %.7s\n", oid_to_hex(&local_oid));
@@ -571,7 +572,7 @@ int notes_merge(struct notes_merge_options *o,
die("Failed to resolve remote notes ref '%s'",
o->remote_ref);
}
- } else if (!(remote = lookup_commit_reference(&remote_oid))) {
+ } else if (!(remote = lookup_commit_reference(the_repository, &remote_oid))) {
die("Could not parse remote commit %s (%s)",
oid_to_hex(&remote_oid), o->remote_ref);
}
diff --git a/notes-utils.c b/notes-utils.c
index 02407fe..14ea031 100644
--- a/notes-utils.c
+++ b/notes-utils.c
@@ -3,6 +3,7 @@
#include "commit.h"
#include "refs.h"
#include "notes-utils.h"
+#include "repository.h"
void create_notes_commit(struct notes_tree *t, struct commit_list *parents,
const char *msg, size_t msg_len,
@@ -19,7 +20,8 @@ void create_notes_commit(struct notes_tree *t, struct commit_list *parents,
/* Deduce parent commit from t->ref */
struct object_id parent_oid;
if (!read_ref(t->ref, &parent_oid)) {
- struct commit *parent = lookup_commit(&parent_oid);
+ struct commit *parent = lookup_commit(the_repository,
+ &parent_oid);
if (parse_commit(parent))
die("Failed to find/parse commit %s", t->ref);
commit_list_insert(parent, &parents);
diff --git a/object-store.h b/object-store.h
index a3db17b..e481f7a 100644
--- a/object-store.h
+++ b/object-store.h
@@ -2,6 +2,9 @@
#define OBJECT_STORE_H
#include "oidmap.h"
+#include "list.h"
+#include "sha1-array.h"
+#include "strbuf.h"
struct alternate_object_database {
struct alternate_object_database *next;
@@ -103,6 +106,9 @@ struct raw_object_store {
*/
struct oidmap *replace_map;
+ struct commit_graph *commit_graph;
+ unsigned commit_graph_attempted : 1; /* if loading has been attempted */
+
/*
* private data
*
diff --git a/object.c b/object.c
index 9b0f819..51c4594 100644
--- a/object.c
+++ b/object.c
@@ -9,6 +9,7 @@
#include "alloc.h"
#include "object-store.h"
#include "packfile.h"
+#include "commit-graph.h"
unsigned int get_max_object_index(void)
{
@@ -50,7 +51,7 @@ int type_from_string_gently(const char *str, ssize_t len, int gentle)
if (gentle)
return -1;
- die("invalid object type \"%s\"", str);
+ die(_("invalid object type \"%s\""), str);
}
/*
@@ -84,21 +85,20 @@ static void insert_obj_hash(struct object *obj, struct object **hash, unsigned i
* Look up the record for the given sha1 in the hash map stored in
* obj_hash. Return NULL if it was not found.
*/
-struct object *lookup_object(const unsigned char *sha1)
+struct object *lookup_object(struct repository *r, const unsigned char *sha1)
{
unsigned int i, first;
struct object *obj;
- if (!the_repository->parsed_objects->obj_hash)
+ if (!r->parsed_objects->obj_hash)
return NULL;
- first = i = hash_obj(sha1,
- the_repository->parsed_objects->obj_hash_size);
- while ((obj = the_repository->parsed_objects->obj_hash[i]) != NULL) {
+ first = i = hash_obj(sha1, r->parsed_objects->obj_hash_size);
+ while ((obj = r->parsed_objects->obj_hash[i]) != NULL) {
if (!hashcmp(sha1, obj->oid.hash))
break;
i++;
- if (i == the_repository->parsed_objects->obj_hash_size)
+ if (i == r->parsed_objects->obj_hash_size)
i = 0;
}
if (obj && i != first) {
@@ -107,8 +107,8 @@ struct object *lookup_object(const unsigned char *sha1)
* that we do not need to walk the hash table the next
* time we look for it.
*/
- SWAP(the_repository->parsed_objects->obj_hash[i],
- the_repository->parsed_objects->obj_hash[first]);
+ SWAP(r->parsed_objects->obj_hash[i],
+ r->parsed_objects->obj_hash[first]);
}
return obj;
}
@@ -158,19 +158,19 @@ void *create_object(struct repository *r, const unsigned char *sha1, void *o)
return obj;
}
-void *object_as_type(struct object *obj, enum object_type type, int quiet)
+void *object_as_type(struct repository *r, struct object *obj, enum object_type type, int quiet)
{
if (obj->type == type)
return obj;
else if (obj->type == OBJ_NONE) {
if (type == OBJ_COMMIT)
- ((struct commit *)obj)->index = alloc_commit_index(the_repository);
+ ((struct commit *)obj)->index = alloc_commit_index(r);
obj->type = type;
return obj;
}
else {
if (!quiet)
- error("object %s is a %s, not a %s",
+ error(_("object %s is a %s, not a %s"),
oid_to_hex(&obj->oid),
type_name(obj->type), type_name(type));
return NULL;
@@ -179,28 +179,28 @@ void *object_as_type(struct object *obj, enum object_type type, int quiet)
struct object *lookup_unknown_object(const unsigned char *sha1)
{
- struct object *obj = lookup_object(sha1);
+ struct object *obj = lookup_object(the_repository, sha1);
if (!obj)
obj = create_object(the_repository, sha1,
alloc_object_node(the_repository));
return obj;
}
-struct object *parse_object_buffer(const struct object_id *oid, enum object_type type, unsigned long size, void *buffer, int *eaten_p)
+struct object *parse_object_buffer(struct repository *r, const struct object_id *oid, enum object_type type, unsigned long size, void *buffer, int *eaten_p)
{
struct object *obj;
*eaten_p = 0;
obj = NULL;
if (type == OBJ_BLOB) {
- struct blob *blob = lookup_blob(oid);
+ struct blob *blob = lookup_blob(r, oid);
if (blob) {
if (parse_blob_buffer(blob, buffer, size))
return NULL;
obj = &blob->object;
}
} else if (type == OBJ_TREE) {
- struct tree *tree = lookup_tree(oid);
+ struct tree *tree = lookup_tree(r, oid);
if (tree) {
obj = &tree->object;
if (!tree->buffer)
@@ -212,25 +212,25 @@ struct object *parse_object_buffer(const struct object_id *oid, enum object_type
}
}
} else if (type == OBJ_COMMIT) {
- struct commit *commit = lookup_commit(oid);
+ struct commit *commit = lookup_commit(r, oid);
if (commit) {
- if (parse_commit_buffer(commit, buffer, size, 1))
+ if (parse_commit_buffer(r, commit, buffer, size, 1))
return NULL;
- if (!get_cached_commit_buffer(commit, NULL)) {
- set_commit_buffer(commit, buffer, size);
+ if (!get_cached_commit_buffer(r, commit, NULL)) {
+ set_commit_buffer(r, commit, buffer, size);
*eaten_p = 1;
}
obj = &commit->object;
}
} else if (type == OBJ_TAG) {
- struct tag *tag = lookup_tag(oid);
+ struct tag *tag = lookup_tag(r, oid);
if (tag) {
- if (parse_tag_buffer(tag, buffer, size))
+ if (parse_tag_buffer(r, tag, buffer, size))
return NULL;
obj = &tag->object;
}
} else {
- warning("object %s has unknown type id %d", oid_to_hex(oid), type);
+ warning(_("object %s has unknown type id %d"), oid_to_hex(oid), type);
obj = NULL;
}
return obj;
@@ -239,46 +239,47 @@ struct object *parse_object_buffer(const struct object_id *oid, enum object_type
struct object *parse_object_or_die(const struct object_id *oid,
const char *name)
{
- struct object *o = parse_object(oid);
+ struct object *o = parse_object(the_repository, oid);
if (o)
return o;
die(_("unable to parse object: %s"), name ? name : oid_to_hex(oid));
}
-struct object *parse_object(const struct object_id *oid)
+struct object *parse_object(struct repository *r, const struct object_id *oid)
{
unsigned long size;
enum object_type type;
int eaten;
- const struct object_id *repl = lookup_replace_object(the_repository, oid);
+ const struct object_id *repl = lookup_replace_object(r, oid);
void *buffer;
struct object *obj;
- obj = lookup_object(oid->hash);
+ obj = lookup_object(r, oid->hash);
if (obj && obj->parsed)
return obj;
if ((obj && obj->type == OBJ_BLOB && has_object_file(oid)) ||
(!obj && has_object_file(oid) &&
- oid_object_info(the_repository, oid, NULL) == OBJ_BLOB)) {
+ oid_object_info(r, oid, NULL) == OBJ_BLOB)) {
if (check_object_signature(repl, NULL, 0, NULL) < 0) {
- error("sha1 mismatch %s", oid_to_hex(oid));
+ error(_("sha1 mismatch %s"), oid_to_hex(oid));
return NULL;
}
- parse_blob_buffer(lookup_blob(oid), NULL, 0);
- return lookup_object(oid->hash);
+ parse_blob_buffer(lookup_blob(r, oid), NULL, 0);
+ return lookup_object(r, oid->hash);
}
buffer = read_object_file(oid, &type, &size);
if (buffer) {
if (check_object_signature(repl, buffer, size, type_name(type)) < 0) {
free(buffer);
- error("sha1 mismatch %s", oid_to_hex(repl));
+ error(_("sha1 mismatch %s"), oid_to_hex(repl));
return NULL;
}
- obj = parse_object_buffer(oid, type, size, buffer, &eaten);
+ obj = parse_object_buffer(r, oid, type, size,
+ buffer, &eaten);
if (!eaten)
free(buffer);
return obj;
@@ -467,6 +468,8 @@ struct parsed_object_pool *parsed_object_pool_new(void)
o->is_shallow = -1;
o->shallow_stat = xcalloc(1, sizeof(*o->shallow_stat));
+ o->buffer_slab = allocate_commit_buffer_slab();
+
return o;
}
@@ -505,6 +508,10 @@ void raw_object_store_clear(struct raw_object_store *o)
oidmap_free(o->replace_map, 1);
FREE_AND_NULL(o->replace_map);
+ free_commit_graph(o->commit_graph);
+ o->commit_graph = NULL;
+ o->commit_graph_attempted = 0;
+
free_alt_odbs(o);
o->alt_odb_tail = NULL;
@@ -541,6 +548,9 @@ void parsed_object_pool_clear(struct parsed_object_pool *o)
FREE_AND_NULL(o->obj_hash);
o->obj_hash_size = 0;
+ free_commit_buffer_slab(o->buffer_slab);
+ o->buffer_slab = NULL;
+
clear_alloc_state(o->blob_state);
clear_alloc_state(o->tree_state);
clear_alloc_state(o->commit_state);
diff --git a/object.h b/object.h
index 3afd123..177b1a4 100644
--- a/object.h
+++ b/object.h
@@ -1,6 +1,8 @@
#ifndef OBJECT_H
#define OBJECT_H
+struct buffer_slab;
+
struct parsed_object_pool {
struct object **obj_hash;
int nr_objs, obj_hash_size;
@@ -22,6 +24,8 @@ struct parsed_object_pool {
char *alternate_shallow_file;
int commit_graft_prepared;
+
+ struct buffer_slab *buffer_slab;
};
struct parsed_object_pool *parsed_object_pool_new(void);
@@ -54,7 +58,8 @@ struct object_array {
/*
* object flag allocation:
* revision.h: 0---------10 2526
- * fetch-pack.c: 0----5
+ * fetch-pack.c: 01
+ * negotiator/default.c: 2--5
* walker.c: 0-2
* upload-pack.c: 4 11----------------19
* builtin/blame.c: 12-13
@@ -109,18 +114,18 @@ extern struct object *get_indexed_object(unsigned int);
* half-initialised objects, the caller is expected to initialize them
* by calling parse_object() on them.
*/
-struct object *lookup_object(const unsigned char *sha1);
+struct object *lookup_object(struct repository *r, const unsigned char *sha1);
extern void *create_object(struct repository *r, const unsigned char *sha1, void *obj);
-void *object_as_type(struct object *obj, enum object_type type, int quiet);
+void *object_as_type(struct repository *r, struct object *obj, enum object_type type, int quiet);
/*
* Returns the object, having parsed it to find out what it is.
*
* Returns NULL if the object is missing or corrupt.
*/
-struct object *parse_object(const struct object_id *oid);
+struct object *parse_object(struct repository *r, const struct object_id *oid);
/*
* Like parse_object, but will die() instead of returning NULL. If the
@@ -133,7 +138,7 @@ struct object *parse_object_or_die(const struct object_id *oid, const char *name
* parsing it. eaten_p indicates if the object has a borrowed copy
* of buffer and the caller should not free() it.
*/
-struct object *parse_object_buffer(const struct object_id *oid, enum object_type type, unsigned long size, void *buffer, int *eaten_p);
+struct object *parse_object_buffer(struct repository *r, const struct object_id *oid, enum object_type type, unsigned long size, void *buffer, int *eaten_p);
/** Returns the object, with potentially excess memory allocated. **/
struct object *lookup_unknown_object(const unsigned char *sha1);
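
Editorial note: the signature changes above ripple through the rest of the patch mechanically. The helper below is hypothetical (not part of the patch) and only shows the caller-side pattern the new API expects: the repository is named explicitly rather than assumed to be the_repository inside object.c.

/* Hypothetical helper showing the repository-explicit calling pattern. */
static struct commit *peel_to_commit(struct repository *r,
				     const struct object_id *oid)
{
	struct object *o = parse_object(r, oid);

	if (!o)
		return NULL; /* missing or corrupt object */
	return object_as_type(r, o, OBJ_COMMIT, 1); /* NULL if not a commit */
}
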
diff --git a/packfile.c b/packfile.c
index 7cd45aa..6974903 100644
--- a/packfile.c
+++ b/packfile.c
@@ -1934,7 +1934,7 @@ static int add_promisor_object(const struct object_id *oid,
void *set_)
{
struct oidset *set = set_;
- struct object *obj = parse_object(oid);
+ struct object *obj = parse_object(the_repository, oid);
if (!obj)
return 1;
diff --git a/packfile.h b/packfile.h
index cc7eaff..fa36c47 100644
--- a/packfile.h
+++ b/packfile.h
@@ -1,12 +1,12 @@
#ifndef PACKFILE_H
#define PACKFILE_H
+#include "cache.h"
#include "oidset.h"
/* in object-store.h */
struct packed_git;
struct object_info;
-enum object_type;
/*
* Generate the filename to be used for a pack file with checksum "sha1" and
diff --git a/parse-options-cb.c b/parse-options-cb.c
index 0f9f311..e823653 100644
--- a/parse-options-cb.c
+++ b/parse-options-cb.c
@@ -91,7 +91,7 @@ int parse_opt_commits(const struct option *opt, const char *arg, int unset)
return -1;
if (get_oid(arg, &oid))
return error("malformed object name %s", arg);
- commit = lookup_commit_reference(&oid);
+ commit = lookup_commit_reference(the_repository, &oid);
if (!commit)
return error("no such commit %s", arg);
commit_list_insert(commit, opt->value);
diff --git a/parse-options.c b/parse-options.c
index 7db8422..3b874a8 100644
--- a/parse-options.c
+++ b/parse-options.c
@@ -660,7 +660,8 @@ int parse_options(int argc, const char **argv, const char *prefix,
static int usage_argh(const struct option *opts, FILE *outfile)
{
const char *s;
- int literal = (opts->flags & PARSE_OPT_LITERAL_ARGHELP) || !opts->argh;
+ int literal = (opts->flags & PARSE_OPT_LITERAL_ARGHELP) ||
+ !opts->argh || !!strpbrk(opts->argh, "()<>[]|");
if (opts->flags & PARSE_OPT_OPTARG)
if (opts->long_name)
s = literal ? "[=%s]" : "[=<%s>]";
diff --git a/pkt-line.c b/pkt-line.c
index a593c08..04d10bb 100644
--- a/pkt-line.c
+++ b/pkt-line.c
@@ -101,7 +101,7 @@ int packet_flush_gently(int fd)
{
packet_trace("0000", 4, 1);
if (write_in_full(fd, "0000", 4) < 0)
- return error("flush packet write failed");
+ return error(_("flush packet write failed"));
return 0;
}
@@ -139,7 +139,7 @@ static void format_packet(struct strbuf *out, const char *fmt, va_list args)
n = out->len - orig_len;
if (n > LARGE_PACKET_MAX)
- die("protocol error: impossibly long line");
+ die(_("protocol error: impossibly long line"));
set_packet_header(&out->buf[orig_len], n);
packet_trace(out->buf + orig_len + 4, n - 4, 1);
@@ -155,9 +155,9 @@ static int packet_write_fmt_1(int fd, int gently,
if (write_in_full(fd, buf.buf, buf.len) < 0) {
if (!gently) {
check_pipe(errno);
- die_errno("packet write with format failed");
+ die_errno(_("packet write with format failed"));
}
- return error("packet write with format failed");
+ return error(_("packet write with format failed"));
}
return 0;
@@ -189,21 +189,21 @@ static int packet_write_gently(const int fd_out, const char *buf, size_t size)
size_t packet_size;
if (size > sizeof(packet_write_buffer) - 4)
- return error("packet write failed - data exceeds max packet size");
+ return error(_("packet write failed - data exceeds max packet size"));
packet_trace(buf, size, 1);
packet_size = size + 4;
set_packet_header(packet_write_buffer, packet_size);
memcpy(packet_write_buffer + 4, buf, size);
if (write_in_full(fd_out, packet_write_buffer, packet_size) < 0)
- return error("packet write failed");
+ return error(_("packet write failed"));
return 0;
}
void packet_write(int fd_out, const char *buf, size_t size)
{
if (packet_write_gently(fd_out, buf, size))
- die_errno("packet write failed");
+ die_errno(_("packet write failed"));
}
void packet_buf_write(struct strbuf *buf, const char *fmt, ...)
@@ -225,7 +225,7 @@ void packet_buf_write_len(struct strbuf *buf, const char *data, size_t len)
n = buf->len - orig_len;
if (n > LARGE_PACKET_MAX)
- die("protocol error: impossibly long line");
+ die(_("protocol error: impossibly long line"));
set_packet_header(&buf->buf[orig_len], n);
packet_trace(data, len, 1);
@@ -288,7 +288,7 @@ static int get_packet_data(int fd, char **src_buf, size_t *src_size,
} else {
ret = read_in_full(fd, dst, size);
if (ret < 0)
- die_errno("read error");
+ die_errno(_("read error"));
}
/* And complain if we didn't get enough bytes to satisfy the read. */
@@ -296,7 +296,7 @@ static int get_packet_data(int fd, char **src_buf, size_t *src_size,
if (options & PACKET_READ_GENTLE_ON_EOF)
return -1;
- die("The remote end hung up unexpectedly");
+ die(_("the remote end hung up unexpectedly"));
}
return ret;
@@ -324,7 +324,7 @@ enum packet_read_status packet_read_with_status(int fd, char **src_buffer,
len = packet_length(linelen);
if (len < 0) {
- die("protocol error: bad line length character: %.4s", linelen);
+ die(_("protocol error: bad line length character: %.4s"), linelen);
} else if (!len) {
packet_trace("0000", 4, 0);
*pktlen = 0;
@@ -334,12 +334,12 @@ enum packet_read_status packet_read_with_status(int fd, char **src_buffer,
*pktlen = 0;
return PACKET_READ_DELIM;
} else if (len < 4) {
- die("protocol error: bad line length %d", len);
+ die(_("protocol error: bad line length %d"), len);
}
len -= 4;
if ((unsigned)len >= size)
- die("protocol error: bad line length %d", len);
+ die(_("protocol error: bad line length %d"), len);
if (get_packet_data(fd, src_buffer, src_len, buffer, len, options) < 0) {
*pktlen = -1;
diff --git a/pretty.c b/pretty.c
index 703fa6f..98cf522 100644
--- a/pretty.c
+++ b/pretty.c
@@ -630,7 +630,7 @@ const char *logmsg_reencode(const struct commit *commit,
* the cached copy from get_commit_buffer, we need to duplicate it
* to avoid munging the cached copy.
*/
- if (msg == get_cached_commit_buffer(commit, NULL))
+ if (msg == get_cached_commit_buffer(the_repository, commit, NULL))
out = xstrdup(msg);
else
out = (char *)msg;
@@ -1146,7 +1146,7 @@ static size_t format_commit_one(struct strbuf *sb, /* in UTF-8 */
/* these depend on the commit */
if (!commit->object.parsed)
- parse_object(&commit->object.oid);
+ parse_object(the_repository, &commit->object.oid);
switch (placeholder[0]) {
case 'H': /* commit hash */
@@ -1538,7 +1538,7 @@ void format_commit_message(const struct commit *commit,
}
if (output_enc) {
- int outsz;
+ size_t outsz;
char *out = reencode_string_len(sb->buf, sb->len,
output_enc, utf8, &outsz);
if (out)
@@ -1575,7 +1575,7 @@ static void pp_header(struct pretty_print_context *pp,
}
if (starts_with(line, "parent ")) {
- if (linelen != 48)
+ if (linelen != the_hash_algo->hexsz + 8)
die("bad parent line in commit");
continue;
}
@@ -1583,7 +1583,7 @@ static void pp_header(struct pretty_print_context *pp,
if (!parents_shown) {
unsigned num = commit_list_count(commit->parents);
/* with enough slop */
- strbuf_grow(sb, num * 50 + 20);
+ strbuf_grow(sb, num * (GIT_MAX_HEXSZ + 10) + 20);
add_merge_info(pp, sb, commit);
parents_shown = 1;
}
diff --git a/reachable.c b/reachable.c
index ffb976c..6e9b810 100644
--- a/reachable.c
+++ b/reachable.c
@@ -88,10 +88,10 @@ static void add_recent_object(const struct object_id *oid,
obj = parse_object_or_die(oid, NULL);
break;
case OBJ_TREE:
- obj = (struct object *)lookup_tree(oid);
+ obj = (struct object *)lookup_tree(the_repository, oid);
break;
case OBJ_BLOB:
- obj = (struct object *)lookup_blob(oid);
+ obj = (struct object *)lookup_blob(the_repository, oid);
break;
default:
die("unknown object type for %s: %s",
@@ -108,7 +108,7 @@ static int add_recent_loose(const struct object_id *oid,
const char *path, void *data)
{
struct stat st;
- struct object *obj = lookup_object(oid->hash);
+ struct object *obj = lookup_object(the_repository, oid->hash);
if (obj && obj->flags & SEEN)
return 0;
@@ -133,7 +133,7 @@ static int add_recent_packed(const struct object_id *oid,
struct packed_git *p, uint32_t pos,
void *data)
{
- struct object *obj = lookup_object(oid->hash);
+ struct object *obj = lookup_object(the_repository, oid->hash);
if (obj && obj->flags & SEEN)
return 0;
diff --git a/read-cache.c b/read-cache.c
index e865254..c5fabc8 100644
--- a/read-cache.c
+++ b/read-cache.c
@@ -6,6 +6,8 @@
#define NO_THE_INDEX_COMPATIBILITY_MACROS
#include "cache.h"
#include "config.h"
+#include "diff.h"
+#include "diffcore.h"
#include "tempfile.h"
#include "lockfile.h"
#include "cache-tree.h"
@@ -47,6 +49,48 @@
CE_ENTRY_ADDED | CE_ENTRY_REMOVED | CE_ENTRY_CHANGED | \
SPLIT_INDEX_ORDERED | UNTRACKED_CHANGED | FSMONITOR_CHANGED)
+
+/*
+ * This is an estimate of the pathname length in the index. We use
+ * this for V4 index files to guess the un-deltafied size of the index
+ * in memory because of pathname deltafication. This is not required
+ * for V2/V3 index formats because their pathnames are not compressed.
+ * If the initial amount of memory set aside is not sufficient, the
+ * mem pool will allocate extra memory.
+ */
+#define CACHE_ENTRY_PATH_LENGTH 80
+
+static inline struct cache_entry *mem_pool__ce_alloc(struct mem_pool *mem_pool, size_t len)
+{
+ struct cache_entry *ce;
+ ce = mem_pool_alloc(mem_pool, cache_entry_size(len));
+ ce->mem_pool_allocated = 1;
+ return ce;
+}
+
+static inline struct cache_entry *mem_pool__ce_calloc(struct mem_pool *mem_pool, size_t len)
+{
+ struct cache_entry *ce;
+ ce = mem_pool_calloc(mem_pool, 1, cache_entry_size(len));
+ ce->mem_pool_allocated = 1;
+ return ce;
+}
+
+static struct mem_pool *find_mem_pool(struct index_state *istate)
+{
+ struct mem_pool **pool_ptr;
+
+ if (istate->split_index && istate->split_index->base)
+ pool_ptr = &istate->split_index->base->ce_mem_pool;
+ else
+ pool_ptr = &istate->ce_mem_pool;
+
+ if (!*pool_ptr)
+ mem_pool_init(pool_ptr, 0);
+
+ return *pool_ptr;
+}
+
struct index_state the_index;
static const char *alternate_index_output;
@@ -62,7 +106,7 @@ static void replace_index_entry(struct index_state *istate, int nr, struct cache
replace_index_entry_in_base(istate, old, ce);
remove_name_hash(istate, old);
- free(old);
+ discard_cache_entry(old);
ce->ce_flags &= ~CE_HASHED;
set_index_entry(istate, nr, ce);
ce->ce_flags |= CE_UPDATE_IN_BASE;
@@ -75,7 +119,7 @@ void rename_index_entry_at(struct index_state *istate, int nr, const char *new_n
struct cache_entry *old_entry = istate->cache[nr], *new_entry;
int namelen = strlen(new_name);
- new_entry = xmalloc(cache_entry_size(namelen));
+ new_entry = make_empty_cache_entry(istate, namelen);
copy_cache_entry(new_entry, old_entry);
new_entry->ce_flags &= ~CE_HASHED;
new_entry->ce_namelen = namelen;
@@ -624,7 +668,7 @@ static struct cache_entry *create_alias_ce(struct index_state *istate,
/* Ok, create the new entry using the name of the existing alias */
len = ce_namelen(alias);
- new_entry = xcalloc(1, cache_entry_size(len));
+ new_entry = make_empty_cache_entry(istate, len);
memcpy(new_entry->name, alias->name, len);
copy_cache_entry(new_entry, ce);
save_or_free_index_entry(istate, ce);
@@ -641,7 +685,7 @@ void set_object_name_for_intent_to_add_entry(struct cache_entry *ce)
int add_to_index(struct index_state *istate, const char *path, struct stat *st, int flags)
{
- int size, namelen, was_same;
+ int namelen, was_same;
mode_t st_mode = st->st_mode;
struct cache_entry *ce, *alias = NULL;
unsigned ce_option = CE_MATCH_IGNORE_VALID|CE_MATCH_IGNORE_SKIP_WORKTREE|CE_MATCH_RACY_IS_DIRTY;
@@ -663,8 +707,7 @@ int add_to_index(struct index_state *istate, const char *path, struct stat *st,
while (namelen && path[namelen-1] == '/')
namelen--;
}
- size = cache_entry_size(namelen);
- ce = xcalloc(1, size);
+ ce = make_empty_cache_entry(istate, namelen);
memcpy(ce->name, path, namelen);
ce->ce_namelen = namelen;
if (!intent_only)
@@ -705,13 +748,13 @@ int add_to_index(struct index_state *istate, const char *path, struct stat *st,
ce_mark_uptodate(alias);
alias->ce_flags |= CE_ADDED;
- free(ce);
+ discard_cache_entry(ce);
return 0;
}
}
if (!intent_only) {
if (index_path(&ce->oid, path, st, newflags)) {
- free(ce);
+ discard_cache_entry(ce);
return error("unable to index file %s", path);
}
} else
@@ -728,9 +771,9 @@ int add_to_index(struct index_state *istate, const char *path, struct stat *st,
ce->ce_mode == alias->ce_mode);
if (pretend)
- free(ce);
+ discard_cache_entry(ce);
else if (add_index_entry(istate, ce, add_option)) {
- free(ce);
+ discard_cache_entry(ce);
return error("unable to add %s to index", path);
}
if (verbose && !was_same)
@@ -746,12 +789,25 @@ int add_file_to_index(struct index_state *istate, const char *path, int flags)
return add_to_index(istate, path, &st, flags);
}
-struct cache_entry *make_cache_entry(unsigned int mode,
- const unsigned char *sha1, const char *path, int stage,
- unsigned int refresh_options)
+struct cache_entry *make_empty_cache_entry(struct index_state *istate, size_t len)
+{
+ return mem_pool__ce_calloc(find_mem_pool(istate), len);
+}
+
+struct cache_entry *make_empty_transient_cache_entry(size_t len)
+{
+ return xcalloc(1, cache_entry_size(len));
+}
+
+struct cache_entry *make_cache_entry(struct index_state *istate,
+ unsigned int mode,
+ const struct object_id *oid,
+ const char *path,
+ int stage,
+ unsigned int refresh_options)
{
- int size, len;
struct cache_entry *ce, *ret;
+ int len;
if (!verify_path(path, mode)) {
error("Invalid path '%s'", path);
@@ -759,21 +815,43 @@ struct cache_entry *make_cache_entry(unsigned int mode,
}
len = strlen(path);
- size = cache_entry_size(len);
- ce = xcalloc(1, size);
+ ce = make_empty_cache_entry(istate, len);
- hashcpy(ce->oid.hash, sha1);
+ oidcpy(&ce->oid, oid);
memcpy(ce->name, path, len);
ce->ce_flags = create_ce_flags(stage);
ce->ce_namelen = len;
ce->ce_mode = create_ce_mode(mode);
- ret = refresh_cache_entry(ce, refresh_options);
+ ret = refresh_cache_entry(&the_index, ce, refresh_options);
if (ret != ce)
- free(ce);
+ discard_cache_entry(ce);
return ret;
}
+struct cache_entry *make_transient_cache_entry(unsigned int mode, const struct object_id *oid,
+ const char *path, int stage)
+{
+ struct cache_entry *ce;
+ int len;
+
+ if (!verify_path(path, mode)) {
+ error("Invalid path '%s'", path);
+ return NULL;
+ }
+
+ len = strlen(path);
+ ce = make_empty_transient_cache_entry(len);
+
+ oidcpy(&ce->oid, oid);
+ memcpy(ce->name, path, len);
+ ce->ce_flags = create_ce_flags(stage);
+ ce->ce_namelen = len;
+ ce->ce_mode = create_ce_mode(mode);
+
+ return ce;
+}
+
/*
* Chmod an index entry with either +x or -x.
*
@@ -1269,7 +1347,7 @@ static struct cache_entry *refresh_cache_ent(struct index_state *istate,
{
struct stat st;
struct cache_entry *updated;
- int changed, size;
+ int changed;
int refresh = options & CE_MATCH_REFRESH;
int ignore_valid = options & CE_MATCH_IGNORE_VALID;
int ignore_skip_worktree = options & CE_MATCH_IGNORE_SKIP_WORKTREE;
@@ -1349,8 +1427,7 @@ static struct cache_entry *refresh_cache_ent(struct index_state *istate,
return NULL;
}
- size = ce_size(ce);
- updated = xmalloc(size);
+ updated = make_empty_cache_entry(istate, ce_namelen(ce));
copy_cache_entry(updated, ce);
memcpy(updated->name, ce->name, ce->ce_namelen + 1);
fill_stat_cache_info(updated, &st);
@@ -1474,10 +1551,11 @@ int refresh_index(struct index_state *istate, unsigned int flags,
return has_errors;
}
-struct cache_entry *refresh_cache_entry(struct cache_entry *ce,
- unsigned int options)
+struct cache_entry *refresh_cache_entry(struct index_state *istate,
+ struct cache_entry *ce,
+ unsigned int options)
{
- return refresh_cache_ent(&the_index, ce, options, NULL, NULL);
+ return refresh_cache_ent(istate, ce, options, NULL, NULL);
}
@@ -1635,12 +1713,13 @@ int read_index(struct index_state *istate)
return read_index_from(istate, get_index_file(), get_git_dir());
}
-static struct cache_entry *cache_entry_from_ondisk(struct ondisk_cache_entry *ondisk,
+static struct cache_entry *cache_entry_from_ondisk(struct mem_pool *mem_pool,
+ struct ondisk_cache_entry *ondisk,
unsigned int flags,
const char *name,
size_t len)
{
- struct cache_entry *ce = xmalloc(cache_entry_size(len));
+ struct cache_entry *ce = mem_pool__ce_alloc(mem_pool, len);
ce->ce_stat_data.sd_ctime.sec = get_be32(&ondisk->ctime.sec);
ce->ce_stat_data.sd_mtime.sec = get_be32(&ondisk->mtime.sec);
@@ -1682,7 +1761,8 @@ static unsigned long expand_name_field(struct strbuf *name, const char *cp_)
return (const char *)ep + 1 - cp_;
}
-static struct cache_entry *create_from_disk(struct ondisk_cache_entry *ondisk,
+static struct cache_entry *create_from_disk(struct mem_pool *mem_pool,
+ struct ondisk_cache_entry *ondisk,
unsigned long *ent_size,
struct strbuf *previous_name)
{
@@ -1713,13 +1793,13 @@ static struct cache_entry *create_from_disk(struct ondisk_cache_entry *ondisk,
/* v3 and earlier */
if (len == CE_NAMEMASK)
len = strlen(name);
- ce = cache_entry_from_ondisk(ondisk, flags, name, len);
+ ce = cache_entry_from_ondisk(mem_pool, ondisk, flags, name, len);
*ent_size = ondisk_ce_size(ce);
} else {
unsigned long consumed;
consumed = expand_name_field(previous_name, name);
- ce = cache_entry_from_ondisk(ondisk, flags,
+ ce = cache_entry_from_ondisk(mem_pool, ondisk, flags,
previous_name->buf,
previous_name->len);
@@ -1793,6 +1873,22 @@ static void post_read_index_from(struct index_state *istate)
tweak_fsmonitor(istate);
}
+static size_t estimate_cache_size_from_compressed(unsigned int entries)
+{
+ return entries * (sizeof(struct cache_entry) + CACHE_ENTRY_PATH_LENGTH);
+}
+
+static size_t estimate_cache_size(size_t ondisk_size, unsigned int entries)
+{
+ long per_entry = sizeof(struct cache_entry) - sizeof(struct ondisk_cache_entry);
+
+ /*
+ * Account for potential alignment differences.
+ */
+ per_entry += align_padding_size(sizeof(struct cache_entry), -sizeof(struct ondisk_cache_entry));
+ return ondisk_size + entries * per_entry;
+}
+
/* remember to discard_cache() before reading a different cache! */
int do_read_index(struct index_state *istate, const char *path, int must_exist)
{
@@ -1839,10 +1935,15 @@ int do_read_index(struct index_state *istate, const char *path, int must_exist)
istate->cache = xcalloc(istate->cache_alloc, sizeof(*istate->cache));
istate->initialized = 1;
- if (istate->version == 4)
+ if (istate->version == 4) {
previous_name = &previous_name_buf;
- else
+ mem_pool_init(&istate->ce_mem_pool,
+ estimate_cache_size_from_compressed(istate->cache_nr));
+ } else {
previous_name = NULL;
+ mem_pool_init(&istate->ce_mem_pool,
+ estimate_cache_size(mmap_size, istate->cache_nr));
+ }
src_offset = sizeof(*hdr);
for (i = 0; i < istate->cache_nr; i++) {
@@ -1851,7 +1952,7 @@ int do_read_index(struct index_state *istate, const char *path, int must_exist)
unsigned long consumed;
disk_ce = (struct ondisk_cache_entry *)((char *)mmap + src_offset);
- ce = create_from_disk(disk_ce, &consumed, previous_name);
+ ce = create_from_disk(istate->ce_mem_pool, disk_ce, &consumed, previous_name);
set_index_entry(istate, i, ce);
src_offset += consumed;
@@ -1948,17 +2049,15 @@ int is_index_unborn(struct index_state *istate)
int discard_index(struct index_state *istate)
{
- int i;
+ /*
+ * Cache entries in istate->cache[] should have been allocated
+ * from the memory pool associated with this index, or from an
+ * associated split_index. There is no need to free individual
+ * cache entries. validate_cache_entries can detect when this
+ * assertion does not hold.
+ */
+ validate_cache_entries(istate);
- for (i = 0; i < istate->cache_nr; i++) {
- if (istate->cache[i]->index &&
- istate->split_index &&
- istate->split_index->base &&
- istate->cache[i]->index <= istate->split_index->base->cache_nr &&
- istate->cache[i] == istate->split_index->base->cache[istate->cache[i]->index - 1])
- continue;
- free(istate->cache[i]);
- }
resolve_undo_clear_index(istate);
istate->cache_nr = 0;
istate->cache_changed = 0;
@@ -1972,9 +2071,47 @@ int discard_index(struct index_state *istate)
discard_split_index(istate);
free_untracked_cache(istate->untracked);
istate->untracked = NULL;
+
+ if (istate->ce_mem_pool) {
+ mem_pool_discard(istate->ce_mem_pool, should_validate_cache_entries());
+ istate->ce_mem_pool = NULL;
+ }
+
return 0;
}
+/*
+ * Validate the cache entries of this index.
+ * All cache entries associated with this index
+ * should have been allocated by the memory pool
+ * associated with this index, or by a referenced
+ * split index.
+ */
+void validate_cache_entries(const struct index_state *istate)
+{
+ int i;
+
+ if (!should_validate_cache_entries() || !istate || !istate->initialized)
+ return;
+
+ for (i = 0; i < istate->cache_nr; i++) {
+ if (!istate) {
+ die("internal error: cache entry is not allocated from expected memory pool");
+ } else if (!istate->ce_mem_pool ||
+ !mem_pool_contains(istate->ce_mem_pool, istate->cache[i])) {
+ if (!istate->split_index ||
+ !istate->split_index->base ||
+ !istate->split_index->base->ce_mem_pool ||
+ !mem_pool_contains(istate->split_index->base->ce_mem_pool, istate->cache[i])) {
+ die("internal error: cache entry is not allocated from expected memory pool");
+ }
+ }
+ }
+
+ if (istate->split_index)
+ validate_cache_entries(istate->split_index->base);
+}
+
int unmerged_index(const struct index_state *istate)
{
int i;
@@ -1985,6 +2122,44 @@ int unmerged_index(const struct index_state *istate)
return 0;
}
+int index_has_changes(const struct index_state *istate,
+ struct tree *tree,
+ struct strbuf *sb)
+{
+ struct object_id cmp;
+ int i;
+
+ if (istate != &the_index) {
+ BUG("index_has_changes cannot yet accept istate != &the_index; do_diff_cache needs updating first.");
+ }
+ if (tree)
+ cmp = tree->object.oid;
+ if (tree || !get_oid_tree("HEAD", &cmp)) {
+ struct diff_options opt;
+
+ diff_setup(&opt);
+ opt.flags.exit_with_status = 1;
+ if (!sb)
+ opt.flags.quick = 1;
+ do_diff_cache(&cmp, &opt);
+ diffcore_std(&opt);
+ for (i = 0; sb && i < diff_queued_diff.nr; i++) {
+ if (i)
+ strbuf_addch(sb, ' ');
+ strbuf_addstr(sb, diff_queued_diff.queue[i]->two->path);
+ }
+ diff_flush(&opt);
+ return opt.flags.has_changes != 0;
+ } else {
+ for (i = 0; sb && i < istate->cache_nr; i++) {
+ if (i)
+ strbuf_addch(sb, ' ');
+ strbuf_addstr(sb, istate->cache[i]->name);
+ }
+ return !!istate->cache_nr;
+ }
+}
+
#define WRITE_BUFFER_SIZE 8192
static unsigned char write_buffer[WRITE_BUFFER_SIZE];
static unsigned long write_buffer_len;
@@ -2633,10 +2808,13 @@ out:
/*
* Read the index file that is potentially unmerged into given
- * index_state, dropping any unmerged entries. Returns true if
- * the index is unmerged. Callers who want to refuse to work
- * from an unmerged state can call this and check its return value,
- * instead of calling read_cache().
+ * index_state, dropping any unmerged entries to stage #0 (potentially
+ * resulting in a path appearing as both a file and a directory in the
+ * index; the caller is responsible for clearing out the extra entries
+ * before writing the index to a tree). Returns true if the index is
+ * unmerged. Callers who want to refuse to work from an unmerged
+ * state can call this and check its return value, instead of calling
+ * read_cache().
*/
int read_index_unmerged(struct index_state *istate)
{
@@ -2647,19 +2825,18 @@ int read_index_unmerged(struct index_state *istate)
for (i = 0; i < istate->cache_nr; i++) {
struct cache_entry *ce = istate->cache[i];
struct cache_entry *new_ce;
- int size, len;
+ int len;
if (!ce_stage(ce))
continue;
unmerged = 1;
len = ce_namelen(ce);
- size = cache_entry_size(len);
- new_ce = xcalloc(1, size);
+ new_ce = make_empty_cache_entry(istate, len);
memcpy(new_ce->name, ce->name, len);
new_ce->ce_flags = create_ce_flags(0) | CE_CONFLICTED;
new_ce->ce_namelen = len;
new_ce->ce_mode = ce->ce_mode;
- if (add_index_entry(istate, new_ce, 0))
+ if (add_index_entry(istate, new_ce, ADD_CACHE_SKIP_DFCHECK))
return error("%s: cannot drop to stage #0",
new_ce->name);
}
@@ -2763,3 +2940,41 @@ void move_index_extensions(struct index_state *dst, struct index_state *src)
dst->untracked = src->untracked;
src->untracked = NULL;
}
+
+struct cache_entry *dup_cache_entry(const struct cache_entry *ce,
+ struct index_state *istate)
+{
+ unsigned int size = ce_size(ce);
+ int mem_pool_allocated;
+ struct cache_entry *new_entry = make_empty_cache_entry(istate, ce_namelen(ce));
+ mem_pool_allocated = new_entry->mem_pool_allocated;
+
+ memcpy(new_entry, ce, size);
+ new_entry->mem_pool_allocated = mem_pool_allocated;
+ return new_entry;
+}
+
+void discard_cache_entry(struct cache_entry *ce)
+{
+ if (ce && should_validate_cache_entries())
+ memset(ce, 0xCD, cache_entry_size(ce->ce_namelen));
+
+ if (ce && ce->mem_pool_allocated)
+ return;
+
+ free(ce);
+}
+
+int should_validate_cache_entries(void)
+{
+ static int validate_index_cache_entries = -1;
+
+ if (validate_index_cache_entries < 0) {
+ if (getenv("GIT_TEST_VALIDATE_INDEX_CACHE_ENTRIES"))
+ validate_index_cache_entries = 1;
+ else
+ validate_index_cache_entries = 0;
+ }
+
+ return validate_index_cache_entries;
+}
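
Editorial note: the read-cache.c changes above replace per-entry malloc/free with a memory pool owned by the index. The sketch below is a hypothetical caller (not part of the patch) that summarizes the intended ownership rules using only functions declared in this patch.

/* Hypothetical caller illustrating cache-entry ownership after this patch. */
static void cache_entry_lifecycle(struct index_state *istate,
				  const struct object_id *oid)
{
	struct cache_entry *pooled, *transient;

	/* Pool-backed: owned by istate, released in bulk by discard_index(). */
	pooled = make_cache_entry(istate, 0100644, oid, "pooled.c", 0, 0);
	if (pooled)
		; /* add_index_entry(istate, pooled, ...) or let the pool reclaim it */

	/* Transient: heap-backed, must be released individually. */
	transient = make_transient_cache_entry(0100644, oid, "transient.c", 0);
	if (transient)
		discard_cache_entry(transient); /* frees heap entries, skips pool ones */
}
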
diff --git a/ref-filter.c b/ref-filter.c
index 492f2b7..0bccfce 100644
--- a/ref-filter.c
+++ b/ref-filter.c
@@ -4,6 +4,7 @@
#include "refs.h"
#include "wildmatch.h"
#include "object-store.h"
+#include "repository.h"
#include "commit.h"
#include "remote.h"
#include "color.h"
@@ -42,6 +43,7 @@ void setup_ref_filter_porcelain_msg(void)
typedef enum { FIELD_STR, FIELD_ULONG, FIELD_TIME } cmp_type;
typedef enum { COMPARE_EQUAL, COMPARE_UNEQUAL, COMPARE_NONE } cmp_status;
+typedef enum { SOURCE_NONE = 0, SOURCE_OBJ, SOURCE_OTHER } info_source;
struct align {
align_type position;
@@ -61,6 +63,17 @@ struct refname_atom {
int lstrip, rstrip;
};
+static struct expand_data {
+ struct object_id oid;
+ enum object_type type;
+ unsigned long size;
+ off_t disk_size;
+ struct object_id delta_base_oid;
+ void *content;
+
+ struct object_info info;
+} oi, oi_deref;
+
/*
* An atom is a valid field atom listed below, possibly prefixed with
* a "*" to denote deref_tag().
@@ -74,6 +87,7 @@ struct refname_atom {
static struct used_atom {
const char *name;
cmp_type type;
+ info_source source;
union {
char color[COLOR_MAXLEN];
struct align align;
@@ -201,6 +215,30 @@ static int remote_ref_atom_parser(const struct ref_format *format, struct used_a
return 0;
}
+static int objecttype_atom_parser(const struct ref_format *format, struct used_atom *atom,
+ const char *arg, struct strbuf *err)
+{
+ if (arg)
+ return strbuf_addf_ret(err, -1, _("%%(objecttype) does not take arguments"));
+ if (*atom->name == '*')
+ oi_deref.info.typep = &oi_deref.type;
+ else
+ oi.info.typep = &oi.type;
+ return 0;
+}
+
+static int objectsize_atom_parser(const struct ref_format *format, struct used_atom *atom,
+ const char *arg, struct strbuf *err)
+{
+ if (arg)
+ return strbuf_addf_ret(err, -1, _("%%(objectsize) does not take arguments"));
+ if (*atom->name == '*')
+ oi_deref.info.sizep = &oi_deref.size;
+ else
+ oi.info.sizep = &oi.size;
+ return 0;
+}
+
static int body_atom_parser(const struct ref_format *format, struct used_atom *atom,
const char *arg, struct strbuf *err)
{
@@ -381,49 +419,50 @@ static int head_atom_parser(const struct ref_format *format, struct used_atom *a
static struct {
const char *name;
+ info_source source;
cmp_type cmp_type;
int (*parser)(const struct ref_format *format, struct used_atom *atom,
const char *arg, struct strbuf *err);
} valid_atom[] = {
- { "refname" , FIELD_STR, refname_atom_parser },
- { "objecttype" },
- { "objectsize", FIELD_ULONG },
- { "objectname", FIELD_STR, objectname_atom_parser },
- { "tree" },
- { "parent" },
- { "numparent", FIELD_ULONG },
- { "object" },
- { "type" },
- { "tag" },
- { "author" },
- { "authorname" },
- { "authoremail" },
- { "authordate", FIELD_TIME },
- { "committer" },
- { "committername" },
- { "committeremail" },
- { "committerdate", FIELD_TIME },
- { "tagger" },
- { "taggername" },
- { "taggeremail" },
- { "taggerdate", FIELD_TIME },
- { "creator" },
- { "creatordate", FIELD_TIME },
- { "subject", FIELD_STR, subject_atom_parser },
- { "body", FIELD_STR, body_atom_parser },
- { "trailers", FIELD_STR, trailers_atom_parser },
- { "contents", FIELD_STR, contents_atom_parser },
- { "upstream", FIELD_STR, remote_ref_atom_parser },
- { "push", FIELD_STR, remote_ref_atom_parser },
- { "symref", FIELD_STR, refname_atom_parser },
- { "flag" },
- { "HEAD", FIELD_STR, head_atom_parser },
- { "color", FIELD_STR, color_atom_parser },
- { "align", FIELD_STR, align_atom_parser },
- { "end" },
- { "if", FIELD_STR, if_atom_parser },
- { "then" },
- { "else" },
+ { "refname", SOURCE_NONE, FIELD_STR, refname_atom_parser },
+ { "objecttype", SOURCE_OTHER, FIELD_STR, objecttype_atom_parser },
+ { "objectsize", SOURCE_OTHER, FIELD_ULONG, objectsize_atom_parser },
+ { "objectname", SOURCE_OTHER, FIELD_STR, objectname_atom_parser },
+ { "tree", SOURCE_OBJ },
+ { "parent", SOURCE_OBJ },
+ { "numparent", SOURCE_OBJ, FIELD_ULONG },
+ { "object", SOURCE_OBJ },
+ { "type", SOURCE_OBJ },
+ { "tag", SOURCE_OBJ },
+ { "author", SOURCE_OBJ },
+ { "authorname", SOURCE_OBJ },
+ { "authoremail", SOURCE_OBJ },
+ { "authordate", SOURCE_OBJ, FIELD_TIME },
+ { "committer", SOURCE_OBJ },
+ { "committername", SOURCE_OBJ },
+ { "committeremail", SOURCE_OBJ },
+ { "committerdate", SOURCE_OBJ, FIELD_TIME },
+ { "tagger", SOURCE_OBJ },
+ { "taggername", SOURCE_OBJ },
+ { "taggeremail", SOURCE_OBJ },
+ { "taggerdate", SOURCE_OBJ, FIELD_TIME },
+ { "creator", SOURCE_OBJ },
+ { "creatordate", SOURCE_OBJ, FIELD_TIME },
+ { "subject", SOURCE_OBJ, FIELD_STR, subject_atom_parser },
+ { "body", SOURCE_OBJ, FIELD_STR, body_atom_parser },
+ { "trailers", SOURCE_OBJ, FIELD_STR, trailers_atom_parser },
+ { "contents", SOURCE_OBJ, FIELD_STR, contents_atom_parser },
+ { "upstream", SOURCE_NONE, FIELD_STR, remote_ref_atom_parser },
+ { "push", SOURCE_NONE, FIELD_STR, remote_ref_atom_parser },
+ { "symref", SOURCE_NONE, FIELD_STR, refname_atom_parser },
+ { "flag", SOURCE_NONE },
+ { "HEAD", SOURCE_NONE, FIELD_STR, head_atom_parser },
+ { "color", SOURCE_NONE, FIELD_STR, color_atom_parser },
+ { "align", SOURCE_NONE, FIELD_STR, align_atom_parser },
+ { "end", SOURCE_NONE },
+ { "if", SOURCE_NONE, FIELD_STR, if_atom_parser },
+ { "then", SOURCE_NONE },
+ { "else", SOURCE_NONE },
};
#define REF_FORMATTING_STATE_INIT { 0, NULL }
@@ -499,6 +538,13 @@ static int parse_ref_filter_atom(const struct ref_format *format,
REALLOC_ARRAY(used_atom, used_atom_cnt);
used_atom[at].name = xmemdupz(atom, ep - atom);
used_atom[at].type = valid_atom[i].cmp_type;
+ used_atom[at].source = valid_atom[i].source;
+ if (used_atom[at].source == SOURCE_OBJ) {
+ if (*atom == '*')
+ oi_deref.info.contentp = &oi_deref.content;
+ else
+ oi.info.contentp = &oi.content;
+ }
if (arg) {
arg = used_atom[at].name + (arg - atom) + 1;
if (!*arg) {
@@ -794,24 +840,6 @@ int verify_ref_format(struct ref_format *format)
return 0;
}
-/*
- * Given an object name, read the object data and size, and return a
- * "struct object". If the object data we are returning is also borrowed
- * by the "struct object" representation, set *eaten as well---it is a
- * signal from parse_object_buffer to us not to free the buffer.
- */
-static void *get_obj(const struct object_id *oid, struct object **obj, unsigned long *sz, int *eaten)
-{
- enum object_type type;
- void *buf = read_object_file(oid, &type, sz);
-
- if (buf)
- *obj = parse_object_buffer(oid, type, *sz, buf, eaten);
- else
- *obj = NULL;
- return buf;
-}
-
static int grab_objectname(const char *name, const struct object_id *oid,
struct atom_value *v, struct used_atom *atom)
{
@@ -832,7 +860,7 @@ static int grab_objectname(const char *name, const struct object_id *oid,
}
/* See grab_values */
-static void grab_common_values(struct atom_value *val, int deref, struct object *obj, void *buf, unsigned long sz)
+static void grab_common_values(struct atom_value *val, int deref, struct expand_data *oi)
{
int i;
@@ -844,13 +872,13 @@ static void grab_common_values(struct atom_value *val, int deref, struct object
if (deref)
name++;
if (!strcmp(name, "objecttype"))
- v->s = type_name(obj->type);
+ v->s = type_name(oi->type);
else if (!strcmp(name, "objectsize")) {
- v->value = sz;
- v->s = xstrfmt("%lu", sz);
+ v->value = oi->size;
+ v->s = xstrfmt("%lu", oi->size);
}
else if (deref)
- grab_objectname(name, &obj->oid, v, &used_atom[i]);
+ grab_objectname(name, &oi->oid, v, &used_atom[i]);
}
}
@@ -1209,7 +1237,6 @@ static void fill_missing_values(struct atom_value *val)
*/
static void grab_values(struct atom_value *val, int deref, struct object *obj, void *buf, unsigned long sz)
{
- grab_common_values(val, deref, obj, buf, sz);
switch (obj->type) {
case OBJ_TAG:
grab_tag_values(val, deref, obj, buf, sz);
@@ -1433,24 +1460,36 @@ static const char *get_refname(struct used_atom *atom, struct ref_array_item *re
return show_ref(&atom->u.refname, ref->refname);
}
-static int get_object(struct ref_array_item *ref, const struct object_id *oid,
- int deref, struct object **obj, struct strbuf *err)
+static int get_object(struct ref_array_item *ref, int deref, struct object **obj,
+ struct expand_data *oi, struct strbuf *err)
{
- int eaten;
- int ret = 0;
- unsigned long size;
- void *buf = get_obj(oid, obj, &size, &eaten);
- if (!buf)
- ret = strbuf_addf_ret(err, -1, _("missing object %s for %s"),
- oid_to_hex(oid), ref->refname);
- else if (!*obj)
- ret = strbuf_addf_ret(err, -1, _("parse_object_buffer failed on %s for %s"),
- oid_to_hex(oid), ref->refname);
- else
- grab_values(ref->value, deref, *obj, buf, size);
+ /* parse_object_buffer() will set eaten to 0 if free() will be needed */
+ int eaten = 1;
+ if (oi->info.contentp) {
+ /* We need the type and size to use parse_object_buffer() properly */
+ oi->info.sizep = &oi->size;
+ oi->info.typep = &oi->type;
+ }
+ if (oid_object_info_extended(the_repository, &oi->oid, &oi->info,
+ OBJECT_INFO_LOOKUP_REPLACE))
+ return strbuf_addf_ret(err, -1, _("missing object %s for %s"),
+ oid_to_hex(&oi->oid), ref->refname);
+
+ if (oi->info.contentp) {
+ *obj = parse_object_buffer(the_repository, &oi->oid, oi->type, oi->size, oi->content, &eaten);
+ if (!*obj) {
+ if (!eaten)
+ free(oi->content);
+ return strbuf_addf_ret(err, -1, _("parse_object_buffer failed on %s for %s"),
+ oid_to_hex(&oi->oid), ref->refname);
+ }
+ grab_values(ref->value, deref, *obj, oi->content, oi->size);
+ }
+
+ grab_common_values(ref->value, deref, oi);
if (!eaten)
- free(buf);
- return ret;
+ free(oi->content);
+ return 0;
}
/*
@@ -1460,7 +1499,7 @@ static int populate_value(struct ref_array_item *ref, struct strbuf *err)
{
struct object *obj;
int i;
- const struct object_id *tagged;
+ struct object_info empty = OBJECT_INFO_INIT;
ref->value = xcalloc(used_atom_cnt, sizeof(struct atom_value));
@@ -1494,6 +1533,7 @@ static int populate_value(struct ref_array_item *ref, struct strbuf *err)
refname = get_symref(atom, ref);
else if (starts_with(name, "upstream")) {
const char *branch_name;
+ v->s = "";
/* only local branches may have an upstream */
if (!skip_prefix(ref->refname, "refs/heads/",
&branch_name))
@@ -1506,6 +1546,7 @@ static int populate_value(struct ref_array_item *ref, struct strbuf *err)
continue;
} else if (atom->u.remote_ref.push) {
const char *branch_name;
+ v->s = "";
if (!skip_prefix(ref->refname, "refs/heads/",
&branch_name))
continue;
@@ -1546,22 +1587,26 @@ static int populate_value(struct ref_array_item *ref, struct strbuf *err)
continue;
} else if (starts_with(name, "align")) {
v->handler = align_atom_handler;
+ v->s = "";
continue;
} else if (!strcmp(name, "end")) {
v->handler = end_atom_handler;
+ v->s = "";
continue;
} else if (starts_with(name, "if")) {
const char *s;
-
+ v->s = "";
if (skip_prefix(name, "if:", &s))
v->s = xstrdup(s);
v->handler = if_atom_handler;
continue;
} else if (!strcmp(name, "then")) {
v->handler = then_atom_handler;
+ v->s = "";
continue;
} else if (!strcmp(name, "else")) {
v->handler = else_atom_handler;
+ v->s = "";
continue;
} else
continue;
@@ -1574,13 +1619,20 @@ static int populate_value(struct ref_array_item *ref, struct strbuf *err)
for (i = 0; i < used_atom_cnt; i++) {
struct atom_value *v = &ref->value[i];
- if (v->s == NULL)
- break;
+ if (v->s == NULL && used_atom[i].source == SOURCE_NONE)
+ return strbuf_addf_ret(err, -1, _("missing object %s for %s"),
+ oid_to_hex(&ref->objectname), ref->refname);
}
- if (used_atom_cnt <= i)
+
+ if (need_tagged)
+ oi.info.contentp = &oi.content;
+ if (!memcmp(&oi.info, &empty, sizeof(empty)) &&
+ !memcmp(&oi_deref.info, &empty, sizeof(empty)))
return 0;
- if (get_object(ref, &ref->objectname, 0, &obj, err))
+
+ oi.oid = ref->objectname;
+ if (get_object(ref, 0, &obj, &oi, err))
return -1;
/*
@@ -1594,7 +1646,7 @@ static int populate_value(struct ref_array_item *ref, struct strbuf *err)
* If it is a tag object, see if we use a value that derefs
* the object, and if we do grab the object it refers to.
*/
- tagged = &((struct tag *)obj)->tagged->oid;
+ oi_deref.oid = ((struct tag *)obj)->tagged->oid;
/*
* NEEDSWORK: This derefs tag only once, which
@@ -1602,7 +1654,7 @@ static int populate_value(struct ref_array_item *ref, struct strbuf *err)
* is not consistent with what deref_tag() does
* which peels the onion to the core.
*/
- return get_object(ref, tagged, 1, &obj, err);
+ return get_object(ref, 1, &obj, &oi_deref, err);
}
/*
@@ -1711,7 +1763,7 @@ static enum contains_result contains_tag_algo(struct commit *candidate,
for (p = want; p; p = p->next) {
struct commit *c = p->item;
- load_commit_graph_info(c);
+ load_commit_graph_info(the_repository, c);
if (c->generation < cutoff)
cutoff = c->generation;
}
@@ -1923,7 +1975,7 @@ static const struct object_id *match_points_at(struct oid_array *points_at,
if (oid_array_lookup(points_at, oid) >= 0)
return oid;
- obj = parse_object(oid);
+ obj = parse_object(the_repository, oid);
if (!obj)
die(_("malformed object at '%s'"), refname);
if (obj->type == OBJ_TAG)
@@ -2033,7 +2085,8 @@ static int ref_filter_handler(const char *refname, const struct object_id *oid,
* non-commits early. The actual filtering is done later.
*/
if (filter->merge_commit || filter->with_commit || filter->no_commit || filter->verbose) {
- commit = lookup_commit_reference_gently(oid, 1);
+ commit = lookup_commit_reference_gently(the_repository, oid,
+ 1);
if (!commit)
return 0;
/* We perform the filtering for the '--contains' option... */
@@ -2390,7 +2443,8 @@ int parse_opt_merge_filter(const struct option *opt, const char *arg, int unset)
if (get_oid(arg, &oid))
die(_("malformed object name %s"), arg);
- rf->merge_commit = lookup_commit_reference_gently(&oid, 0);
+ rf->merge_commit = lookup_commit_reference_gently(the_repository,
+ &oid, 0);
if (!rf->merge_commit)
return opterror(opt, "must point to a commit", 0);
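
Editorial note: the point of the expand_data/oid_object_info_extended() rework above is that %(objecttype) and %(objectsize) no longer force the whole object to be read. The helper below is hypothetical; only oid_object_info_extended(), struct object_info and OBJECT_INFO_LOOKUP_REPLACE come from the tree. It shows the minimal request the new atom parsers build.

/* Hypothetical helper: ask the object store only for type and size. */
static int peek_type_and_size(const struct object_id *oid,
			      enum object_type *type, unsigned long *size)
{
	struct object_info info = OBJECT_INFO_INIT;

	info.typep = type;   /* filled without inflating the object */
	info.sizep = size;
	return oid_object_info_extended(the_repository, oid, &info,
					OBJECT_INFO_LOOKUP_REPLACE);
}
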
diff --git a/reflog-walk.c b/reflog-walk.c
index 5008bbf..3a25b27 100644
--- a/reflog-walk.c
+++ b/reflog-walk.c
@@ -128,7 +128,7 @@ int add_reflog_for_walk(struct reflog_walk_info *info,
enum selector_type selector = SELECTOR_NONE;
if (commit->object.flags & UNINTERESTING)
- die ("Cannot walk reflogs for %s", name);
+ die("cannot walk reflogs for %s", name);
branch = xstrdup(name);
if (at && at[1] == '{') {
@@ -153,7 +153,7 @@ int add_reflog_for_walk(struct reflog_walk_info *info,
free(branch);
branch = resolve_refdup("HEAD", 0, NULL, NULL);
if (!branch)
- die ("No current branch");
+ die("no current branch");
}
reflogs = read_complete_reflog(branch);
@@ -305,7 +305,8 @@ static struct commit *next_reflog_commit(struct commit_reflog *log)
{
for (; log->recno >= 0; log->recno--) {
struct reflog_info *entry = &log->reflogs->items[log->recno];
- struct object *obj = parse_object(&entry->noid);
+ struct object *obj = parse_object(the_repository,
+ &entry->noid);
if (obj && obj->type == OBJ_COMMIT)
return (struct commit *)obj;
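
Many hunks in this section belong to the same mechanical series: object-layer entry points such as parse_object(), deref_tag(), lookup_commit(), lookup_commit_reference(_gently)(), lookup_tree() and lookup_blob() now take an explicit struct repository argument, and every caller simply passes the_repository for now. Behaviour is unchanged; the extra parameter is groundwork for operating on repositories other than the main one (for example submodules) without relying on global state.
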
diff --git a/refs.c b/refs.c
index 08fb5a9..de81c7b 100644
--- a/refs.c
+++ b/refs.c
@@ -189,7 +189,7 @@ int ref_resolves_to_object(const char *refname,
if (flags & REF_ISBROKEN)
return 0;
if (!has_sha1_file(oid->hash)) {
- error("%s does not point to a valid object!", refname);
+ error(_("%s does not point to a valid object!"), refname);
return 0;
}
return 1;
@@ -305,7 +305,7 @@ enum peel_status peel_object(const struct object_id *name, struct object_id *oid
if (o->type == OBJ_NONE) {
int type = oid_object_info(the_repository, name, NULL);
- if (type < 0 || !object_as_type(o, type, 0))
+ if (type < 0 || !object_as_type(the_repository, o, type, 0))
return PEEL_INVALID;
}
@@ -490,16 +490,24 @@ static const char *ref_rev_parse_rules[] = {
NULL
};
+#define NUM_REV_PARSE_RULES (ARRAY_SIZE(ref_rev_parse_rules) - 1)
+
+/*
+ * Is it possible that the caller meant full_name with abbrev_name?
+ * If so return a non-zero value to signal "yes"; the magnitude of
+ * the returned value gives the precedence used for disambiguation.
+ *
+ * If abbrev_name cannot mean full_name, return 0.
+ */
int refname_match(const char *abbrev_name, const char *full_name)
{
const char **p;
const int abbrev_name_len = strlen(abbrev_name);
+ const int num_rules = NUM_REV_PARSE_RULES;
- for (p = ref_rev_parse_rules; *p; p++) {
- if (!strcmp(full_name, mkpath(*p, abbrev_name_len, abbrev_name))) {
- return 1;
- }
- }
+ for (p = ref_rev_parse_rules; *p; p++)
+ if (!strcmp(full_name, mkpath(*p, abbrev_name_len, abbrev_name)))
+ return &ref_rev_parse_rules[num_rules] - p;
return 0;
}
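
refname_match() used to answer only yes or no; it now reports how early in ref_rev_parse_rules the match was found, returning the distance from the matching rule to the end of the table (and 0 when the abbreviation cannot mean the full name at all). An exact spelling, matched by the literal "%.*s" rule at the top of the table, therefore outranks the same string matched through "refs/heads/%.*s" further down. remote.c's find_ref_by_name_abbrev(), changed later in this patch, keeps the candidate with the largest score instead of the first match it happens to see, so an abbreviated name on the remote side now resolves in the same precedence order that local short-ref disambiguation uses (exact name, then refs/<name>, refs/tags/<name>, refs/heads/<name>, and so on).
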
@@ -568,9 +576,9 @@ int expand_ref(const char *str, int len, struct object_id *oid, char **ref)
if (!warn_ambiguous_refs)
break;
} else if ((flag & REF_ISSYMREF) && strcmp(fullref.buf, "HEAD")) {
- warning("ignoring dangling symref %s.", fullref.buf);
+ warning(_("ignoring dangling symref %s"), fullref.buf);
} else if ((flag & REF_ISBROKEN) && strchr(fullref.buf, '/')) {
- warning("ignoring broken ref %s.", fullref.buf);
+ warning(_("ignoring broken ref %s"), fullref.buf);
}
}
strbuf_release(&fullref);
@@ -674,7 +682,7 @@ static int write_pseudoref(const char *pseudoref, const struct object_id *oid,
fd = hold_lock_file_for_update_timeout(&lock, filename, 0,
get_files_ref_lock_timeout_ms());
if (fd < 0) {
- strbuf_addf(err, "could not open '%s' for writing: %s",
+ strbuf_addf(err, _("could not open '%s' for writing: %s"),
filename, strerror(errno));
goto done;
}
@@ -684,18 +692,18 @@ static int write_pseudoref(const char *pseudoref, const struct object_id *oid,
if (read_ref(pseudoref, &actual_old_oid)) {
if (!is_null_oid(old_oid)) {
- strbuf_addf(err, "could not read ref '%s'",
+ strbuf_addf(err, _("could not read ref '%s'"),
pseudoref);
rollback_lock_file(&lock);
goto done;
}
} else if (is_null_oid(old_oid)) {
- strbuf_addf(err, "ref '%s' already exists",
+ strbuf_addf(err, _("ref '%s' already exists"),
pseudoref);
rollback_lock_file(&lock);
goto done;
} else if (oidcmp(&actual_old_oid, old_oid)) {
- strbuf_addf(err, "unexpected object ID when writing '%s'",
+ strbuf_addf(err, _("unexpected object ID when writing '%s'"),
pseudoref);
rollback_lock_file(&lock);
goto done;
@@ -703,7 +711,7 @@ static int write_pseudoref(const char *pseudoref, const struct object_id *oid,
}
if (write_in_full(fd, buf.buf, buf.len) < 0) {
- strbuf_addf(err, "could not write to '%s'", filename);
+ strbuf_addf(err, _("could not write to '%s'"), filename);
rollback_lock_file(&lock);
goto done;
}
@@ -735,9 +743,9 @@ static int delete_pseudoref(const char *pseudoref, const struct object_id *old_o
return -1;
}
if (read_ref(pseudoref, &actual_old_oid))
- die("could not read ref '%s'", pseudoref);
+ die(_("could not read ref '%s'"), pseudoref);
if (oidcmp(&actual_old_oid, old_oid)) {
- error("unexpected object ID when deleting '%s'",
+ error(_("unexpected object ID when deleting '%s'"),
pseudoref);
rollback_lock_file(&lock);
return -1;
@@ -868,13 +876,13 @@ static int read_ref_at_ent(struct object_id *ooid, struct object_id *noid,
if (!is_null_oid(&cb->ooid)) {
oidcpy(cb->oid, noid);
if (oidcmp(&cb->ooid, noid))
- warning("Log for ref %s has gap after %s.",
+ warning(_("log for ref %s has gap after %s"),
cb->refname, show_date(cb->date, cb->tz, DATE_MODE(RFC2822)));
}
else if (cb->date == cb->at_time)
oidcpy(cb->oid, noid);
else if (oidcmp(noid, cb->oid))
- warning("Log for ref %s unexpectedly ended on %s.",
+ warning(_("log for ref %s unexpectedly ended on %s"),
cb->refname, show_date(cb->date, cb->tz,
DATE_MODE(RFC2822)));
oidcpy(&cb->ooid, ooid);
@@ -932,7 +940,7 @@ int read_ref_at(const char *refname, unsigned int flags, timestamp_t at_time, in
if (flags & GET_OID_QUIETLY)
exit(128);
else
- die("Log for %s is empty.", refname);
+ die(_("log for %s is empty"), refname);
}
if (cb.found_it)
return 0;
@@ -1024,7 +1032,7 @@ int ref_transaction_update(struct ref_transaction *transaction,
if ((new_oid && !is_null_oid(new_oid)) ?
check_refname_format(refname, REFNAME_ALLOW_ONELEVEL) :
!refname_is_safe(refname)) {
- strbuf_addf(err, "refusing to update ref with bad name '%s'",
+ strbuf_addf(err, _("refusing to update ref with bad name '%s'"),
refname);
return -1;
}
@@ -1100,7 +1108,7 @@ int refs_update_ref(struct ref_store *refs, const char *msg,
}
}
if (ret) {
- const char *str = "update_ref failed for ref '%s': %s";
+ const char *str = _("update_ref failed for ref '%s': %s");
switch (onerr) {
case UPDATE_REFS_MSG_ON_ERR:
@@ -1842,7 +1850,7 @@ int ref_update_reject_duplicates(struct string_list *refnames,
if (!cmp) {
strbuf_addf(err,
- "multiple updates for ref '%s' not allowed.",
+ _("multiple updates for ref '%s' not allowed"),
refnames->items[i].string);
return 1;
} else if (cmp > 0) {
@@ -1970,13 +1978,13 @@ int refs_verify_refname_available(struct ref_store *refs,
continue;
if (!refs_read_raw_ref(refs, dirname.buf, &oid, &referent, &type)) {
- strbuf_addf(err, "'%s' exists; cannot create '%s'",
+ strbuf_addf(err, _("'%s' exists; cannot create '%s'"),
dirname.buf, refname);
goto cleanup;
}
if (extras && string_list_has_string(extras, dirname.buf)) {
- strbuf_addf(err, "cannot process '%s' and '%s' at the same time",
+ strbuf_addf(err, _("cannot process '%s' and '%s' at the same time"),
refname, dirname.buf);
goto cleanup;
}
@@ -2000,7 +2008,7 @@ int refs_verify_refname_available(struct ref_store *refs,
string_list_has_string(skip, iter->refname))
continue;
- strbuf_addf(err, "'%s' exists; cannot create '%s'",
+ strbuf_addf(err, _("'%s' exists; cannot create '%s'"),
iter->refname, refname);
ref_iterator_abort(iter);
goto cleanup;
@@ -2011,7 +2019,7 @@ int refs_verify_refname_available(struct ref_store *refs,
extra_refname = find_descendant_ref(dirname.buf, extras, skip);
if (extra_refname)
- strbuf_addf(err, "cannot process '%s' and '%s' at the same time",
+ strbuf_addf(err, _("cannot process '%s' and '%s' at the same time"),
refname, extra_refname);
else
ret = 0;
diff --git a/refs/files-backend.c b/refs/files-backend.c
index 054306d..1f1a98e 100644
--- a/refs/files-backend.c
+++ b/refs/files-backend.c
@@ -363,7 +363,7 @@ stat_ref:
/* Follow "normalized" - ie "refs/.." symlinks by hand */
if (S_ISLNK(st.st_mode)) {
strbuf_reset(&sb_contents);
- if (strbuf_readlink(&sb_contents, path, 0) < 0) {
+ if (strbuf_readlink(&sb_contents, path, st.st_size) < 0) {
if (errno == ENOENT || errno == EINVAL)
/* inconsistent with lstat; retry */
goto stat_ref;
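
Passing st.st_size, the link-target length reported by lstat(), as the hint to strbuf_readlink() lets the buffer be sized correctly on the first attempt instead of starting from a default guess; the behaviour of the symlink-following path is otherwise unchanged.
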
@@ -1651,7 +1651,7 @@ static int write_ref_to_lockfile(struct ref_lock *lock,
struct object *o;
int fd;
- o = parse_object(oid);
+ o = parse_object(the_repository, oid);
if (!o) {
strbuf_addf(err,
"trying to write ref '%s' with nonexistent object %s",
@@ -1667,7 +1667,7 @@ static int write_ref_to_lockfile(struct ref_lock *lock,
return -1;
}
fd = get_lock_file_fd(&lock->lk);
- if (write_in_full(fd, oid_to_hex(oid), GIT_SHA1_HEXSZ) < 0 ||
+ if (write_in_full(fd, oid_to_hex(oid), the_hash_algo->hexsz) < 0 ||
write_in_full(fd, &term, 1) < 0 ||
close_ref_gently(lock) < 0) {
strbuf_addf(err,
@@ -3061,7 +3061,7 @@ static int files_reflog_expire(struct ref_store *ref_store,
rollback_lock_file(&reflog_lock);
} else if (update &&
(write_in_full(get_lock_file_fd(&lock->lk),
- oid_to_hex(&cb.last_kept_oid), GIT_SHA1_HEXSZ) < 0 ||
+ oid_to_hex(&cb.last_kept_oid), the_hash_algo->hexsz) < 0 ||
write_str_in_full(get_lock_file_fd(&lock->lk), "\n") < 0 ||
close_ref_gently(lock) < 0)) {
status |= error("couldn't write %s",
diff --git a/refspec.c b/refspec.c
index e8010dc..f529092 100644
--- a/refspec.c
+++ b/refspec.c
@@ -134,7 +134,7 @@ void refspec_item_init_or_die(struct refspec_item *item, const char *refspec,
int fetch)
{
if (!refspec_item_init(item, refspec, fetch))
- die("Invalid refspec '%s'", refspec);
+ die(_("invalid refspec '%s'"), refspec);
}
void refspec_item_clear(struct refspec_item *item)
diff --git a/remote.c b/remote.c
index 3fd4345..7f6277a 100644
--- a/remote.c
+++ b/remote.c
@@ -1149,7 +1149,7 @@ static void add_to_tips(struct tips *tips, const struct object_id *oid)
if (is_null_oid(oid))
return;
- commit = lookup_commit_reference_gently(oid, 1);
+ commit = lookup_commit_reference_gently(the_repository, oid, 1);
if (!commit || (commit->object.flags & TMP_MARK))
return;
commit->object.flags |= TMP_MARK;
@@ -1211,7 +1211,8 @@ static void add_missing_tags(struct ref *src, struct ref **dst, struct ref ***ds
if (is_null_oid(&ref->new_oid))
continue;
- commit = lookup_commit_reference_gently(&ref->new_oid,
+ commit = lookup_commit_reference_gently(the_repository,
+ &ref->new_oid,
1);
if (!commit)
/* not pushing a commit, which is not an error */
@@ -1435,8 +1436,8 @@ void set_ref_status_for_push(struct ref *remote_refs, int send_mirror,
reject_reason = REF_STATUS_REJECT_ALREADY_EXISTS;
else if (!has_object_file(&ref->old_oid))
reject_reason = REF_STATUS_REJECT_FETCH_FIRST;
- else if (!lookup_commit_reference_gently(&ref->old_oid, 1) ||
- !lookup_commit_reference_gently(&ref->new_oid, 1))
+ else if (!lookup_commit_reference_gently(the_repository, &ref->old_oid, 1) ||
+ !lookup_commit_reference_gently(the_repository, &ref->new_oid, 1))
reject_reason = REF_STATUS_REJECT_NEEDS_FORCE;
else if (!ref_newer(&ref->new_oid, &ref->old_oid))
reject_reason = REF_STATUS_REJECT_NONFASTFORWARD;
@@ -1688,11 +1689,18 @@ static struct ref *get_expanded_map(const struct ref *remote_refs,
static const struct ref *find_ref_by_name_abbrev(const struct ref *refs, const char *name)
{
const struct ref *ref;
+ const struct ref *best_match = NULL;
+ int best_score = 0;
+
for (ref = refs; ref; ref = ref->next) {
- if (refname_match(name, ref->name))
- return ref;
+ int score = refname_match(name, ref->name);
+
+ if (best_score < score) {
+ best_match = ref;
+ best_score = score;
+ }
}
- return NULL;
+ return best_match;
}
struct ref *get_remote_ref(const struct ref *remote_refs, const char *name)
@@ -1802,12 +1810,14 @@ int ref_newer(const struct object_id *new_oid, const struct object_id *old_oid)
* Both new_commit and old_commit must be commit-ish and new_commit is descendant of
* old_commit. Otherwise we require --force.
*/
- o = deref_tag(parse_object(old_oid), NULL, 0);
+ o = deref_tag(the_repository, parse_object(the_repository, old_oid),
+ NULL, 0);
if (!o || o->type != OBJ_COMMIT)
return 0;
old_commit = (struct commit *) o;
- o = deref_tag(parse_object(new_oid), NULL, 0);
+ o = deref_tag(the_repository, parse_object(the_repository, new_oid),
+ NULL, 0);
if (!o || o->type != OBJ_COMMIT)
return 0;
new_commit = (struct commit *) o;
@@ -1865,13 +1875,13 @@ int stat_tracking_info(struct branch *branch, int *num_ours, int *num_theirs,
/* Cannot stat if what we used to build on no longer exists */
if (read_ref(base, &oid))
return -1;
- theirs = lookup_commit_reference(&oid);
+ theirs = lookup_commit_reference(the_repository, &oid);
if (!theirs)
return -1;
if (read_ref(branch->refname, &oid))
return -1;
- ours = lookup_commit_reference(&oid);
+ ours = lookup_commit_reference(the_repository, &oid);
if (!ours)
return -1;
diff --git a/replace-object.c b/replace-object.c
index 801b5c1..4ec77ce 100644
--- a/replace-object.c
+++ b/replace-object.c
@@ -17,7 +17,7 @@ static int register_replace_ref(const char *refname,
if (get_oid_hex(hash, &repl_obj->original.oid)) {
free(repl_obj);
- warning("bad replace ref name: %s", refname);
+ warning(_("bad replace ref name: %s"), refname);
return 0;
}
@@ -26,7 +26,7 @@ static int register_replace_ref(const char *refname,
/* Register new object */
if (oidmap_put(the_repository->objects->replace_map, repl_obj))
- die("duplicate replace ref: %s", refname);
+ die(_("duplicate replace ref: %s"), refname);
return 0;
}
@@ -51,7 +51,7 @@ static void prepare_replace_object(struct repository *r)
* replacement object's name (replaced recursively, if necessary).
* The return value is either oid or a pointer to a
* permanently-allocated value. This function always respects replace
- * references, regardless of the value of check_replace_refs.
+ * references, regardless of the value of read_replace_refs.
*/
const struct object_id *do_lookup_replace_object(struct repository *r,
const struct object_id *oid)
@@ -69,5 +69,5 @@ const struct object_id *do_lookup_replace_object(struct repository *r,
return cur;
cur = &repl_obj->replacement;
}
- die("replace depth too high for object %s", oid_to_hex(oid));
+ die(_("replace depth too high for object %s"), oid_to_hex(oid));
}
diff --git a/replace-object.h b/replace-object.h
index f996de3..9345e10 100644
--- a/replace-object.h
+++ b/replace-object.h
@@ -26,7 +26,7 @@ extern const struct object_id *do_lookup_replace_object(struct repository *r,
static inline const struct object_id *lookup_replace_object(struct repository *r,
const struct object_id *oid)
{
- if (!check_replace_refs ||
+ if (!read_replace_refs ||
(r->objects->replace_map &&
r->objects->replace_map->map.tablesize == 0))
return oid;
diff --git a/repository.h b/repository.h
index b9413be..5896103 100644
--- a/repository.h
+++ b/repository.h
@@ -108,19 +108,16 @@ struct set_gitdir_args {
const char *alternate_db;
};
-extern void repo_set_gitdir(struct repository *repo,
- const char *root,
- const struct set_gitdir_args *extra_args);
-extern void repo_set_worktree(struct repository *repo, const char *path);
-extern void repo_set_hash_algo(struct repository *repo, int algo);
-extern void initialize_the_repository(void);
-extern int repo_init(struct repository *r,
- const char *gitdir,
- const char *worktree);
-extern int repo_submodule_init(struct repository *submodule,
- struct repository *superproject,
- const char *path);
-extern void repo_clear(struct repository *repo);
+void repo_set_gitdir(struct repository *repo, const char *root,
+ const struct set_gitdir_args *extra_args);
+void repo_set_worktree(struct repository *repo, const char *path);
+void repo_set_hash_algo(struct repository *repo, int algo);
+void initialize_the_repository(void);
+int repo_init(struct repository *r, const char *gitdir, const char *worktree);
+int repo_submodule_init(struct repository *submodule,
+ struct repository *superproject,
+ const char *path);
+void repo_clear(struct repository *repo);
/*
* Populates the repository's index from its index_file, an index struct will
@@ -130,6 +127,6 @@ extern void repo_clear(struct repository *repo);
 * than zero if an error occurred. If the repository's index has already been
* populated then the number of entries will simply be returned.
*/
-extern int repo_read_index(struct repository *repo);
+int repo_read_index(struct repository *repo);
#endif /* REPOSITORY_H */
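
The repository.h, rerere.h and revision.h hunks in this patch are a purely textual cleanup: the redundant "extern" keyword is dropped from function declarations (functions are extern by default), with no change to any signature.
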
diff --git a/rerere.h b/rerere.h
index c2961fe..cd948f2 100644
--- a/rerere.h
+++ b/rerere.h
@@ -22,19 +22,19 @@ struct rerere_id {
int variant;
};
-extern int setup_rerere(struct string_list *, int);
-extern int rerere(int);
+int setup_rerere(struct string_list *, int);
+int rerere(int);
/*
* Given the conflict ID and the name of a "file" used for replaying
* the recorded resolution (e.g. "preimage", "postimage"), return the
* path to that filesystem entity. With "file" specified with NULL,
* return the path to the directory that houses these files.
*/
-extern const char *rerere_path(const struct rerere_id *, const char *file);
-extern int rerere_forget(struct pathspec *);
-extern int rerere_remaining(struct string_list *);
-extern void rerere_clear(struct string_list *);
-extern void rerere_gc(struct string_list *);
+const char *rerere_path(const struct rerere_id *, const char *file);
+int rerere_forget(struct pathspec *);
+int rerere_remaining(struct string_list *);
+void rerere_clear(struct string_list *);
+void rerere_gc(struct string_list *);
#define OPT_RERERE_AUTOUPDATE(v) OPT_UYN(0, "rerere-autoupdate", (v), \
N_("update the index with reused conflict resolution if possible"))
diff --git a/resolve-undo.c b/resolve-undo.c
index fc5b3b8..c30ae5c 100644
--- a/resolve-undo.c
+++ b/resolve-undo.c
@@ -146,7 +146,9 @@ int unmerge_index_entry_at(struct index_state *istate, int pos)
struct cache_entry *nce;
if (!ru->mode[i])
continue;
- nce = make_cache_entry(ru->mode[i], ru->oid[i].hash,
+ nce = make_cache_entry(istate,
+ ru->mode[i],
+ &ru->oid[i],
name, i + 1, 0);
if (matched)
nce->ce_flags |= CE_MATCHED;
diff --git a/revision.c b/revision.c
index a257039..0627494 100644
--- a/revision.c
+++ b/revision.c
@@ -63,10 +63,10 @@ static void mark_tree_contents_uninteresting(struct tree *tree)
while (tree_entry(&desc, &entry)) {
switch (object_type(entry.mode)) {
case OBJ_TREE:
- mark_tree_uninteresting(lookup_tree(entry.oid));
+ mark_tree_uninteresting(lookup_tree(the_repository, entry.oid));
break;
case OBJ_BLOB:
- mark_blob_uninteresting(lookup_blob(entry.oid));
+ mark_blob_uninteresting(lookup_blob(the_repository, entry.oid));
break;
default:
/* Subproject commit - not in this repository */
@@ -198,7 +198,7 @@ void add_head_to_pending(struct rev_info *revs)
struct object *obj;
if (get_oid("HEAD", &oid))
return;
- obj = parse_object(&oid);
+ obj = parse_object(the_repository, &oid);
if (!obj)
return;
add_pending_object(revs, obj, "HEAD");
@@ -210,7 +210,7 @@ static struct object *get_reference(struct rev_info *revs, const char *name,
{
struct object *object;
- object = parse_object(oid);
+ object = parse_object(the_repository, oid);
if (!object) {
if (revs->ignore_missing)
return object;
@@ -247,10 +247,13 @@ static struct commit *handle_commit(struct rev_info *revs,
add_pending_object(revs, object, tag->tag);
if (!tag->tagged)
die("bad tag");
- object = parse_object(&tag->tagged->oid);
+ object = parse_object(the_repository, &tag->tagged->oid);
if (!object) {
if (revs->ignore_missing_links || (flags & UNINTERESTING))
return NULL;
+ if (revs->exclude_promisor_objects &&
+ is_promisor_object(&tag->tagged->oid))
+ return NULL;
die("bad object %s", oid_to_hex(&tag->tagged->oid));
}
object->flags |= flags;
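
The new check in handle_commit() tolerates a tag whose tagged object is absent when the walk was started with --exclude-promisor-objects: in a partial clone such an object may legitimately be missing because it is "promised" by the remote, so the walker returns NULL instead of dying with "bad object".
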
@@ -1250,7 +1253,7 @@ static void handle_one_reflog_commit(struct object_id *oid, void *cb_data)
{
struct all_refs_cb *cb = cb_data;
if (!is_null_oid(oid)) {
- struct object *o = parse_object(oid);
+ struct object *o = parse_object(the_repository, oid);
if (o) {
o->flags |= cb->all_flags;
/* ??? CMDLINEFLAGS ??? */
@@ -1323,7 +1326,7 @@ static void add_cache_tree(struct cache_tree *it, struct rev_info *revs,
int i;
if (it->entry_count >= 0) {
- struct tree *tree = lookup_tree(&it->oid);
+ struct tree *tree = lookup_tree(the_repository, &it->oid);
add_pending_object_with_path(revs, &tree->object, "",
040000, path->buf);
}
@@ -1349,7 +1352,7 @@ static void do_add_index_objects_to_pending(struct rev_info *revs,
if (S_ISGITLINK(ce->ce_mode))
continue;
- blob = lookup_blob(&ce->oid);
+ blob = lookup_blob(the_repository, &ce->oid);
if (!blob)
die("unable to add index blob to traversal");
add_pending_object_with_path(revs, &blob->object, "",
@@ -1578,8 +1581,8 @@ static int handle_dotdot_1(const char *arg, char *dotdot,
*dotdot = '\0';
}
- a_obj = parse_object(&a_oid);
- b_obj = parse_object(&b_oid);
+ a_obj = parse_object(the_repository, &a_oid);
+ b_obj = parse_object(the_repository, &b_oid);
if (!a_obj || !b_obj)
return dotdot_missing(arg, dotdot, revs, symmetric);
@@ -1592,8 +1595,8 @@ static int handle_dotdot_1(const char *arg, char *dotdot,
struct commit *a, *b;
struct commit_list *exclude;
- a = lookup_commit_reference(&a_obj->oid);
- b = lookup_commit_reference(&b_obj->oid);
+ a = lookup_commit_reference(the_repository, &a_obj->oid);
+ b = lookup_commit_reference(the_repository, &b_obj->oid);
if (!a || !b)
return dotdot_missing(arg, dotdot, revs, symmetric);
@@ -2884,7 +2887,7 @@ static int mark_uninteresting(const struct object_id *oid,
uint32_t pos,
void *unused)
{
- struct object *o = parse_object(oid);
+ struct object *o = parse_object(the_repository, oid);
o->flags |= UNINTERESTING | SEEN;
return 0;
}
diff --git a/revision.h b/revision.h
index c599c34..51bb57d 100644
--- a/revision.h
+++ b/revision.h
@@ -230,7 +230,7 @@ struct rev_info {
struct revision_sources *sources;
};
-extern int ref_excluded(struct string_list *, const char *path);
+int ref_excluded(struct string_list *, const char *path);
void clear_ref_exclusion(struct string_list **);
void add_ref_exclusion(struct string_list **, const char *exclude);
@@ -252,39 +252,39 @@ struct setup_revision_opt {
unsigned revarg_opt;
};
-extern void init_revisions(struct rev_info *revs, const char *prefix);
-extern int setup_revisions(int argc, const char **argv, struct rev_info *revs,
- struct setup_revision_opt *);
-extern void parse_revision_opt(struct rev_info *revs, struct parse_opt_ctx_t *ctx,
- const struct option *options,
- const char * const usagestr[]);
+void init_revisions(struct rev_info *revs, const char *prefix);
+int setup_revisions(int argc, const char **argv, struct rev_info *revs,
+ struct setup_revision_opt *);
+void parse_revision_opt(struct rev_info *revs, struct parse_opt_ctx_t *ctx,
+ const struct option *options,
+ const char * const usagestr[]);
#define REVARG_CANNOT_BE_FILENAME 01
#define REVARG_COMMITTISH 02
-extern int handle_revision_arg(const char *arg, struct rev_info *revs,
- int flags, unsigned revarg_opt);
+int handle_revision_arg(const char *arg, struct rev_info *revs,
+ int flags, unsigned revarg_opt);
-extern void reset_revision_walk(void);
-extern int prepare_revision_walk(struct rev_info *revs);
-extern struct commit *get_revision(struct rev_info *revs);
-extern char *get_revision_mark(const struct rev_info *revs,
- const struct commit *commit);
-extern void put_revision_mark(const struct rev_info *revs,
- const struct commit *commit);
+void reset_revision_walk(void);
+int prepare_revision_walk(struct rev_info *revs);
+struct commit *get_revision(struct rev_info *revs);
+char *get_revision_mark(const struct rev_info *revs,
+ const struct commit *commit);
+void put_revision_mark(const struct rev_info *revs,
+ const struct commit *commit);
-extern void mark_parents_uninteresting(struct commit *commit);
-extern void mark_tree_uninteresting(struct tree *tree);
+void mark_parents_uninteresting(struct commit *commit);
+void mark_tree_uninteresting(struct tree *tree);
-extern void show_object_with_name(FILE *, struct object *, const char *);
+void show_object_with_name(FILE *, struct object *, const char *);
-extern void add_pending_object(struct rev_info *revs,
- struct object *obj, const char *name);
-extern void add_pending_oid(struct rev_info *revs,
- const char *name, const struct object_id *oid,
- unsigned int flags);
+void add_pending_object(struct rev_info *revs,
+ struct object *obj, const char *name);
+void add_pending_oid(struct rev_info *revs,
+ const char *name, const struct object_id *oid,
+ unsigned int flags);
-extern void add_head_to_pending(struct rev_info *);
-extern void add_reflogs_to_pending(struct rev_info *, unsigned int flags);
-extern void add_index_objects_to_pending(struct rev_info *, unsigned int flags);
+void add_head_to_pending(struct rev_info *);
+void add_reflogs_to_pending(struct rev_info *, unsigned int flags);
+void add_index_objects_to_pending(struct rev_info *, unsigned int flags);
enum commit_action {
commit_ignore,
@@ -292,10 +292,10 @@ enum commit_action {
commit_error
};
-extern enum commit_action get_commit_action(struct rev_info *revs,
- struct commit *commit);
-extern enum commit_action simplify_commit(struct rev_info *revs,
- struct commit *commit);
+enum commit_action get_commit_action(struct rev_info *revs,
+ struct commit *commit);
+enum commit_action simplify_commit(struct rev_info *revs,
+ struct commit *commit);
enum rewrite_result {
rewrite_one_ok,
@@ -305,8 +305,9 @@ enum rewrite_result {
typedef enum rewrite_result (*rewrite_parent_fn_t)(struct rev_info *revs, struct commit **pp);
-extern int rewrite_parents(struct rev_info *revs, struct commit *commit,
- rewrite_parent_fn_t rewrite_parent);
+int rewrite_parents(struct rev_info *revs,
+ struct commit *commit,
+ rewrite_parent_fn_t rewrite_parent);
/*
* The log machinery saves the original parent list so that
@@ -317,6 +318,6 @@ extern int rewrite_parents(struct rev_info *revs, struct commit *commit,
* get_saved_parents() will transparently return commit->parents if
* history simplification is off.
*/
-extern struct commit_list *get_saved_parents(struct rev_info *revs, const struct commit *commit);
+struct commit_list *get_saved_parents(struct rev_info *revs, const struct commit *commit);
#endif
diff --git a/sequencer.c b/sequencer.c
index 16c1411..f74dafb 100644
--- a/sequencer.c
+++ b/sequencer.c
@@ -307,7 +307,7 @@ static const char *action_name(const struct replay_opts *opts)
case REPLAY_INTERACTIVE_REBASE:
return N_("rebase -i");
}
- die(_("Unknown action: %d"), opts->action);
+ die(_("unknown action: %d"), opts->action);
}
struct commit_message {
@@ -433,7 +433,7 @@ static int read_oneliner(struct strbuf *buf,
static struct tree *empty_tree(void)
{
- return lookup_tree(the_hash_algo->empty_tree);
+ return lookup_tree(the_repository, the_repository->hash_algo->empty_tree);
}
static int error_dirty_index(struct replay_opts *opts)
@@ -594,7 +594,7 @@ static int is_index_unchanged(void)
if (!resolve_ref_unsafe("HEAD", RESOLVE_REF_READING, &head_oid, NULL))
return error(_("could not resolve HEAD commit"));
- head_commit = lookup_commit(&head_oid);
+ head_commit = lookup_commit(the_repository, &head_oid);
/*
* If head_commit is NULL, check_commit, called from
@@ -654,6 +654,7 @@ missing_author:
strbuf_addch(&buf, *(message++));
else
strbuf_addf(&buf, "'\\\\%c'", *(message++));
+ strbuf_addch(&buf, '\'');
res = write_message(buf.buf, buf.len, rebase_path_author_script(), 1);
strbuf_release(&buf);
return res;
@@ -708,43 +709,51 @@ static const char *read_author_ident(struct strbuf *buf)
const char *keys[] = {
"GIT_AUTHOR_NAME=", "GIT_AUTHOR_EMAIL=", "GIT_AUTHOR_DATE="
};
- char *in, *out, *eol;
- int i = 0, len;
+ struct strbuf out = STRBUF_INIT;
+ char *in, *eol;
+ const char *val[3];
+ int i = 0;
if (strbuf_read_file(buf, rebase_path_author_script(), 256) <= 0)
return NULL;
/* dequote values and construct ident line in-place */
- for (in = out = buf->buf; i < 3 && in - buf->buf < buf->len; i++) {
+ for (in = buf->buf; i < 3 && in - buf->buf < buf->len; i++) {
if (!skip_prefix(in, keys[i], (const char **)&in)) {
- warning("could not parse '%s' (looking for '%s'",
+ warning(_("could not parse '%s' (looking for '%s'"),
rebase_path_author_script(), keys[i]);
return NULL;
}
eol = strchrnul(in, '\n');
*eol = '\0';
- sq_dequote(in);
- len = strlen(in);
-
- if (i > 0) /* separate values by spaces */
- *(out++) = ' ';
- if (i == 1) /* email needs to be surrounded by <...> */
- *(out++) = '<';
- memmove(out, in, len);
- out += len;
- if (i == 1) /* email needs to be surrounded by <...> */
- *(out++) = '>';
+ if (!sq_dequote(in)) {
+ warning(_("bad quoting on %s value in '%s'"),
+ keys[i], rebase_path_author_script());
+ return NULL;
+ }
+ val[i] = in;
in = eol + 1;
}
if (i < 3) {
- warning("could not parse '%s' (looking for '%s')",
+ warning(_("could not parse '%s' (looking for '%s')"),
rebase_path_author_script(), keys[i]);
return NULL;
}
- buf->len = out - buf->buf;
+ /* validate date since fmt_ident() will die() on bad value */
+ if (parse_date(val[2], &out)){
+ warning(_("invalid date format '%s' in '%s'"),
+ val[2], rebase_path_author_script());
+ strbuf_release(&out);
+ return NULL;
+ }
+
+ strbuf_reset(&out);
+ strbuf_addstr(&out, fmt_ident(val[0], val[1], val[2], 0));
+ strbuf_swap(buf, &out);
+ strbuf_release(&out);
return buf->buf;
}
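
read_author_ident() is rewritten above to stop splicing the ident together in place: each GIT_AUTHOR_* value is dequoted with sq_dequote() and rejected on bad quoting, the date is validated with parse_date() up front (because fmt_ident() would die() on a malformed value), and the final "name <email> date" string is produced by fmt_ident() into a scratch strbuf that is then swapped into the caller's buffer. A rough standalone sketch of the first half of that loop, the key/value scan, with Git's sq_dequote() and the error messages left out:

#include <string.h>

/*
 * Illustrative only: scan one "KEY=value" line per expected key, in order,
 * and remember where each (still shell-quoted) value starts.
 */
static int split_author_script(char *buf, size_t len, const char *val[3])
{
	static const char *keys[] = {
		"GIT_AUTHOR_NAME=", "GIT_AUTHOR_EMAIL=", "GIT_AUTHOR_DATE="
	};
	char *in = buf, *eol;
	int i;

	for (i = 0; i < 3 && (size_t)(in - buf) < len; i++) {
		size_t keylen = strlen(keys[i]);

		if (strncmp(in, keys[i], keylen))
			return -1;		/* unexpected key or order */
		in += keylen;
		eol = strchr(in, '\n');
		if (!eol)
			eol = in + strlen(in);
		*eol = '\0';			/* terminate the value */
		val[i] = in;			/* still quoted at this point */
		in = eol + 1;
	}
	return i == 3 ? 0 : -1;
}

The patch then feeds the three dequoted values through parse_date() and fmt_ident(), which is where the real validation happens.
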
@@ -1101,7 +1110,7 @@ void print_commit_summary(const char *prefix, const struct object_id *oid,
struct strbuf author_ident = STRBUF_INIT;
struct strbuf committer_ident = STRBUF_INIT;
- commit = lookup_commit(oid);
+ commit = lookup_commit(the_repository, oid);
if (!commit)
die(_("couldn't look up newly created commit"));
if (parse_commit(commit))
@@ -1176,7 +1185,7 @@ static int parse_head(struct commit **head)
if (get_oid("HEAD", &oid)) {
current_head = NULL;
} else {
- current_head = lookup_commit_reference(&oid);
+ current_head = lookup_commit_reference(the_repository, &oid);
if (!current_head)
return error(_("could not parse HEAD"));
if (oidcmp(&oid, &current_head->object.oid)) {
@@ -1445,7 +1454,7 @@ static const char *command_to_string(const enum todo_command command)
{
if (command < TODO_COMMENT)
return todo_command_info[command].str;
- die("Unknown command: %d", command);
+ die(_("unknown command: %d"), command);
}
static char command_to_char(const enum todo_command command)
@@ -1511,7 +1520,7 @@ static int update_squash_messages(enum todo_command command,
if (get_oid("HEAD", &head))
return error(_("need a HEAD to fixup"));
- if (!(head_commit = lookup_commit_reference(&head)))
+ if (!(head_commit = lookup_commit_reference(the_repository, &head)))
return error(_("could not read HEAD"));
if (!(head_message = get_commit_buffer(head_commit, NULL)))
return error(_("could not read HEAD's commit message"));
@@ -2007,7 +2016,7 @@ static int parse_insn_line(struct todo_item *item, const char *bol, char *eol)
if (status < 0)
return -1;
- item->commit = lookup_commit_reference(&commit_oid);
+ item->commit = lookup_commit_reference(the_repository, &commit_oid);
return !item->commit;
}
@@ -2608,15 +2617,17 @@ static int error_with_patch(struct commit *commit,
if (intend_to_amend())
return -1;
- fprintf(stderr, "You can amend the commit now, with\n"
- "\n"
- " git commit --amend %s\n"
- "\n"
- "Once you are satisfied with your changes, run\n"
- "\n"
- " git rebase --continue\n", gpg_sign_opt_quoted(opts));
+ fprintf(stderr,
+ _("You can amend the commit now, with\n"
+ "\n"
+ " git commit --amend %s\n"
+ "\n"
+ "Once you are satisfied with your changes, run\n"
+ "\n"
+ " git rebase --continue\n"),
+ gpg_sign_opt_quoted(opts));
} else if (exit_code)
- fprintf(stderr, "Could not apply %s... %.*s\n",
+ fprintf_ln(stderr, _("Could not apply %s... %.*s"),
short_commit_name(commit), subject_len, subject);
return exit_code;
@@ -2645,6 +2656,8 @@ static int do_exec(const char *command_line)
fprintf(stderr, "Executing: %s\n", command_line);
child_argv[0] = command_line;
argv_array_pushf(&child_env, "GIT_DIR=%s", absolute_path(get_git_dir()));
+ argv_array_pushf(&child_env, "GIT_WORK_TREE=%s",
+ absolute_path(get_git_work_tree()));
status = run_command_v_opt_cd_env(child_argv, RUN_USING_SHELL, NULL,
child_env.argv);
@@ -2728,7 +2741,7 @@ static int do_label(const char *name, int len)
struct object_id head_oid;
if (len == 1 && *name == '#')
- return error("Illegal label name: '%.*s'", len, name);
+ return error(_("illegal label name: '%.*s'"), len, name);
strbuf_addf(&ref_name, "refs/rewritten/%.*s", len, name);
strbuf_addf(&msg, "rebase -i (label) '%.*s'", len, name);
@@ -2850,6 +2863,26 @@ static int do_reset(const char *name, int len, struct replay_opts *opts)
return ret;
}
+static struct commit *lookup_label(const char *label, int len,
+ struct strbuf *buf)
+{
+ struct commit *commit;
+
+ strbuf_reset(buf);
+ strbuf_addf(buf, "refs/rewritten/%.*s", len, label);
+ commit = lookup_commit_reference_by_name(buf->buf);
+ if (!commit) {
+ /* fall back to non-rewritten ref or commit */
+ strbuf_splice(buf, 0, strlen("refs/rewritten/"), "", 0);
+ commit = lookup_commit_reference_by_name(buf->buf);
+ }
+
+ if (!commit)
+ error(_("could not resolve '%s'"), buf->buf);
+
+ return commit;
+}
+
static int do_merge(struct commit *commit, const char *arg, int arg_len,
int flags, struct replay_opts *opts)
{
@@ -2858,8 +2891,9 @@ static int do_merge(struct commit *commit, const char *arg, int arg_len,
struct strbuf ref_name = STRBUF_INIT;
struct commit *head_commit, *merge_commit, *i;
struct commit_list *bases, *j, *reversed = NULL;
+ struct commit_list *to_merge = NULL, **tail = &to_merge;
struct merge_options o;
- int merge_arg_len, oneline_offset, can_fast_forward, ret;
+ int merge_arg_len, oneline_offset, can_fast_forward, ret, k;
static struct lock_file lock;
const char *p;
@@ -2874,26 +2908,34 @@ static int do_merge(struct commit *commit, const char *arg, int arg_len,
goto leave_merge;
}
- oneline_offset = arg_len;
- merge_arg_len = strcspn(arg, " \t\n");
- p = arg + merge_arg_len;
- p += strspn(p, " \t\n");
- if (*p == '#' && (!p[1] || isspace(p[1]))) {
- p += 1 + strspn(p + 1, " \t\n");
- oneline_offset = p - arg;
- } else if (p - arg < arg_len)
- BUG("octopus merges are not supported yet: '%s'", p);
-
- strbuf_addf(&ref_name, "refs/rewritten/%.*s", merge_arg_len, arg);
- merge_commit = lookup_commit_reference_by_name(ref_name.buf);
- if (!merge_commit) {
- /* fall back to non-rewritten ref or commit */
- strbuf_splice(&ref_name, 0, strlen("refs/rewritten/"), "", 0);
- merge_commit = lookup_commit_reference_by_name(ref_name.buf);
+ /*
+ * For octopus merges, the arg starts with the list of revisions to be
+ * merged. The list is optionally followed by '#' and the oneline.
+ */
+ merge_arg_len = oneline_offset = arg_len;
+ for (p = arg; p - arg < arg_len; p += strspn(p, " \t\n")) {
+ if (!*p)
+ break;
+ if (*p == '#' && (!p[1] || isspace(p[1]))) {
+ p += 1 + strspn(p + 1, " \t\n");
+ oneline_offset = p - arg;
+ break;
+ }
+ k = strcspn(p, " \t\n");
+ if (!k)
+ continue;
+ merge_commit = lookup_label(p, k, &ref_name);
+ if (!merge_commit) {
+ ret = error(_("unable to parse '%.*s'"), k, p);
+ goto leave_merge;
+ }
+ tail = &commit_list_insert(merge_commit, tail)->next;
+ p += k;
+ merge_arg_len = p - arg;
}
- if (!merge_commit) {
- ret = error(_("could not resolve '%s'"), ref_name.buf);
+ if (!to_merge) {
+ ret = error(_("nothing to merge: '%.*s'"), arg_len, arg);
goto leave_merge;
}
@@ -2904,8 +2946,13 @@ static int do_merge(struct commit *commit, const char *arg, int arg_len,
* "[new root]", let's simply fast-forward to the merge head.
*/
rollback_lock_file(&lock);
- ret = fast_forward_to(&merge_commit->object.oid,
- &head_commit->object.oid, 0, opts);
+ if (to_merge->next)
+ ret = error(_("octopus merge cannot be executed on "
+ "top of a [new root]"));
+ else
+ ret = fast_forward_to(&to_merge->item->object.oid,
+ &head_commit->object.oid, 0,
+ opts);
goto leave_merge;
}
@@ -2941,7 +2988,8 @@ static int do_merge(struct commit *commit, const char *arg, int arg_len,
p = arg + oneline_offset;
len = arg_len - oneline_offset;
} else {
- strbuf_addf(&buf, "Merge branch '%.*s'",
+ strbuf_addf(&buf, "Merge %s '%.*s'",
+ to_merge->next ? "branches" : "branch",
merge_arg_len, arg);
p = buf.buf;
len = buf.len;
@@ -2965,28 +3013,76 @@ static int do_merge(struct commit *commit, const char *arg, int arg_len,
&head_commit->object.oid);
/*
- * If the merge head is different from the original one, we cannot
+ * If any merge head is different from the original one, we cannot
* fast-forward.
*/
if (can_fast_forward) {
- struct commit_list *second_parent = commit->parents->next;
+ struct commit_list *p = commit->parents->next;
- if (second_parent && !second_parent->next &&
- oidcmp(&merge_commit->object.oid,
- &second_parent->item->object.oid))
+ for (j = to_merge; j && p; j = j->next, p = p->next)
+ if (oidcmp(&j->item->object.oid,
+ &p->item->object.oid)) {
+ can_fast_forward = 0;
+ break;
+ }
+ /*
+ * If the number of merge heads differs from the original merge
+ * commit, we cannot fast-forward.
+ */
+ if (j || p)
can_fast_forward = 0;
}
- if (can_fast_forward && commit->parents->next &&
- !commit->parents->next->next &&
- !oidcmp(&commit->parents->next->item->object.oid,
- &merge_commit->object.oid)) {
+ if (can_fast_forward) {
rollback_lock_file(&lock);
ret = fast_forward_to(&commit->object.oid,
&head_commit->object.oid, 0, opts);
goto leave_merge;
}
+ if (to_merge->next) {
+ /* Octopus merge */
+ struct child_process cmd = CHILD_PROCESS_INIT;
+
+ if (read_env_script(&cmd.env_array)) {
+ const char *gpg_opt = gpg_sign_opt_quoted(opts);
+
+ ret = error(_(staged_changes_advice), gpg_opt, gpg_opt);
+ goto leave_merge;
+ }
+
+ cmd.git_cmd = 1;
+ argv_array_push(&cmd.args, "merge");
+ argv_array_push(&cmd.args, "-s");
+ argv_array_push(&cmd.args, "octopus");
+ argv_array_push(&cmd.args, "--no-edit");
+ argv_array_push(&cmd.args, "--no-ff");
+ argv_array_push(&cmd.args, "--no-log");
+ argv_array_push(&cmd.args, "--no-stat");
+ argv_array_push(&cmd.args, "-F");
+ argv_array_push(&cmd.args, git_path_merge_msg(the_repository));
+ if (opts->gpg_sign)
+ argv_array_push(&cmd.args, opts->gpg_sign);
+
+ /* Add the tips to be merged */
+ for (j = to_merge; j; j = j->next)
+ argv_array_push(&cmd.args,
+ oid_to_hex(&j->item->object.oid));
+
+ strbuf_release(&ref_name);
+ unlink(git_path_cherry_pick_head(the_repository));
+ rollback_lock_file(&lock);
+
+ rollback_lock_file(&lock);
+ ret = run_command(&cmd);
+
+ /* force re-reading of the cache */
+ if (!ret && (discard_cache() < 0 || read_cache() < 0))
+ ret = error(_("could not read index"));
+ goto leave_merge;
+ }
+
+ merge_commit = to_merge->item;
write_message(oid_to_hex(&merge_commit->object.oid), GIT_SHA1_HEXSZ,
git_path_merge_head(the_repository), 0);
write_message("no-ff", 5, git_path_merge_mode(the_repository), 0);
@@ -3049,6 +3145,7 @@ static int do_merge(struct commit *commit, const char *arg, int arg_len,
leave_merge:
strbuf_release(&ref_name);
rollback_lock_file(&lock);
+ free_commit_list(to_merge);
return ret;
}
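
With these do_merge() changes, "git rebase -i --rebase-merges" can replay octopus merges. The argument of a todo line of the form "merge -C <commit> <label-1> <label-2> ... # <oneline>" is now parsed as a whitespace-separated list of labels, each resolved through the new lookup_label() helper (which tries refs/rewritten/<label> first and falls back to a plain ref or commit name) and collected into to_merge. A single merge head keeps the existing in-process recursive-merge path; two or more heads are handed to a child "git merge -s octopus --no-edit --no-ff --no-log --no-stat -F <merge-msg>" invocation with the tips appended, after which the index is re-read. Fast-forwarding is only attempted when both the number and the identity of the merge heads match the original commit's parents.
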
@@ -3634,7 +3731,7 @@ int sequencer_pick_revisions(struct replay_opts *opts)
continue;
if (!get_oid(name, &oid)) {
- if (!lookup_commit_reference_gently(&oid, 1)) {
+ if (!lookup_commit_reference_gently(the_repository, &oid, 1)) {
enum object_type type = oid_object_info(the_repository,
&oid,
NULL);
@@ -3905,7 +4002,6 @@ static int make_script_with_merges(struct pretty_print_context *pp,
*/
while ((commit = get_revision(revs))) {
struct commit_list *to_merge;
- int is_octopus;
const char *p1, *p2;
struct object_id *oid;
int is_empty;
@@ -3937,11 +4033,6 @@ static int make_script_with_merges(struct pretty_print_context *pp,
continue;
}
- is_octopus = to_merge && to_merge->next;
-
- if (is_octopus)
- BUG("Octopus merges not yet supported");
-
/* Create a label */
strbuf_reset(&label);
if (skip_prefix(oneline.buf, "Merge ", &p1) &&
@@ -3963,13 +4054,17 @@ static int make_script_with_merges(struct pretty_print_context *pp,
strbuf_addf(&buf, "%s -C %s",
cmd_merge, oid_to_hex(&commit->object.oid));
- /* label the tip of merged branch */
- oid = &to_merge->item->object.oid;
- strbuf_addch(&buf, ' ');
+ /* label the tips of merged branches */
+ for (; to_merge; to_merge = to_merge->next) {
+ oid = &to_merge->item->object.oid;
+ strbuf_addch(&buf, ' ');
+
+ if (!oidset_contains(&interesting, oid)) {
+ strbuf_addstr(&buf, label_oid(oid, NULL,
+ &state));
+ continue;
+ }
- if (!oidset_contains(&interesting, oid))
- strbuf_addstr(&buf, label_oid(oid, NULL, &state));
- else {
tips_tail = &commit_list_insert(to_merge->item,
tips_tail)->next;
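
On the generation side, make_script_with_merges() no longer refuses octopus merges: every merged tip now either gets its own label or is queued on tips_tail when it is among the commits being rebased, and the emitted todo line simply lists all of those labels after "merge -C <commit>".
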
diff --git a/server-info.c b/server-info.c
index 7ce6dcd..41050c2 100644
--- a/server-info.c
+++ b/server-info.c
@@ -56,7 +56,7 @@ static int add_info_ref(const char *path, const struct object_id *oid,
int flag, void *cb_data)
{
FILE *fp = cb_data;
- struct object *o = parse_object(oid);
+ struct object *o = parse_object(the_repository, oid);
if (!o)
return -1;
@@ -64,7 +64,7 @@ static int add_info_ref(const char *path, const struct object_id *oid,
return -1;
if (o->type == OBJ_TAG) {
- o = deref_tag(o, path, 0);
+ o = deref_tag(the_repository, o, path, 0);
if (o)
if (fprintf(fp, "%s %s^{}\n",
oid_to_hex(&o->oid), path) < 0)
diff --git a/sha1-file.c b/sha1-file.c
index de4839e..c6ca960 100644
--- a/sha1-file.c
+++ b/sha1-file.c
@@ -71,17 +71,17 @@ static void git_hash_sha1_final(unsigned char *hash, git_hash_ctx *ctx)
static void git_hash_unknown_init(git_hash_ctx *ctx)
{
- die("trying to init unknown hash");
+ BUG("trying to init unknown hash");
}
static void git_hash_unknown_update(git_hash_ctx *ctx, const void *data, size_t len)
{
- die("trying to update unknown hash");
+ BUG("trying to update unknown hash");
}
static void git_hash_unknown_final(unsigned char *hash, git_hash_ctx *ctx)
{
- die("trying to finalize unknown hash");
+ BUG("trying to finalize unknown hash");
}
const struct git_hash_algo hash_algos[GIT_HASH_NALGOS] = {
@@ -336,7 +336,7 @@ out:
static void fill_sha1_path(struct strbuf *buf, const unsigned char *sha1)
{
int i;
- for (i = 0; i < 20; i++) {
+ for (i = 0; i < the_hash_algo->rawsz; i++) {
static char hex[] = "0123456789abcdef";
unsigned int val = sha1[i];
strbuf_addch(buf, hex[val >> 4]);
@@ -378,8 +378,8 @@ static int alt_odb_usable(struct raw_object_store *o,
/* Detect cases where alternate disappeared */
if (!is_directory(path->buf)) {
- error("object directory %s does not exist; "
- "check .git/objects/info/alternates.",
+ error(_("object directory %s does not exist; "
+ "check .git/objects/info/alternates"),
path->buf);
return 0;
}
@@ -429,7 +429,7 @@ static int link_alt_odb_entry(struct repository *r, const char *entry,
strbuf_addstr(&pathbuf, entry);
if (strbuf_normalize_path(&pathbuf) < 0 && relative_base) {
- error("unable to normalize alternate object path: %s",
+ error(_("unable to normalize alternate object path: %s"),
pathbuf.buf);
strbuf_release(&pathbuf);
return -1;
@@ -500,14 +500,14 @@ static void link_alt_odb_entries(struct repository *r, const char *alt,
return;
if (depth > 5) {
- error("%s: ignoring alternate object stores, nesting too deep.",
+ error(_("%s: ignoring alternate object stores, nesting too deep"),
relative_base);
return;
}
strbuf_add_absolute_path(&objdirbuf, r->objects->objectdir);
if (strbuf_normalize_path(&objdirbuf) < 0)
- die("unable to normalize object directory: %s",
+ die(_("unable to normalize object directory: %s"),
objdirbuf.buf);
while (*alt) {
@@ -562,7 +562,7 @@ void add_to_alternates_file(const char *reference)
hold_lock_file_for_update(&lock, alts, LOCK_DIE_ON_ERROR);
out = fdopen_lock_file(&lock, "w");
if (!out)
- die_errno("unable to fdopen alternates lockfile");
+ die_errno(_("unable to fdopen alternates lockfile"));
in = fopen(alts, "r");
if (in) {
@@ -580,14 +580,14 @@ void add_to_alternates_file(const char *reference)
fclose(in);
}
else if (errno != ENOENT)
- die_errno("unable to read alternates file");
+ die_errno(_("unable to read alternates file"));
if (found) {
rollback_lock_file(&lock);
} else {
fprintf_or_die(out, "%s\n", reference);
if (commit_lock_file(&lock))
- die_errno("unable to move new alternates file into place");
+ die_errno(_("unable to move new alternates file into place"));
if (the_repository->objects->alt_odb_tail)
link_alt_odb_entries(the_repository, reference,
'\n', NULL, 0);
@@ -778,7 +778,7 @@ static void mmap_limit_check(size_t length)
limit = SIZE_MAX;
}
if (length > limit)
- die("attempting to mmap %"PRIuMAX" over limit %"PRIuMAX,
+ die(_("attempting to mmap %"PRIuMAX" over limit %"PRIuMAX),
(uintmax_t)length, (uintmax_t)limit);
}
@@ -803,7 +803,7 @@ void *xmmap(void *start, size_t length,
{
void *ret = xmmap_gently(start, length, prot, flags, fd, offset);
if (ret == MAP_FAILED)
- die_errno("mmap failed");
+ die_errno(_("mmap failed"));
return ret;
}
@@ -970,7 +970,7 @@ static void *map_sha1_file_1(struct repository *r, const char *path,
*size = xsize_t(st.st_size);
if (!*size) {
/* mmap() is forbidden on empty files */
- error("object file %s is empty", path);
+ error(_("object file %s is empty"), path);
return NULL;
}
map = xmmap(NULL, *size, PROT_READ, MAP_PRIVATE, fd, 0);
@@ -1090,9 +1090,9 @@ static void *unpack_sha1_rest(git_zstream *stream, void *buffer, unsigned long s
}
if (status < 0)
- error("corrupt loose object '%s'", sha1_to_hex(sha1));
+ error(_("corrupt loose object '%s'"), sha1_to_hex(sha1));
else if (stream->avail_in)
- error("garbage at end of loose object '%s'",
+ error(_("garbage at end of loose object '%s'"),
sha1_to_hex(sha1));
free(buf);
return NULL;
@@ -1134,7 +1134,7 @@ static int parse_sha1_header_extended(const char *hdr, struct object_info *oi,
if ((flags & OBJECT_INFO_ALLOW_UNKNOWN_TYPE) && (type < 0))
type = 0;
else if (type < 0)
- die("invalid object type");
+ die(_("invalid object type"));
if (oi->typep)
*oi->typep = type;
@@ -1216,19 +1216,19 @@ static int sha1_loose_object_info(struct repository *r,
*oi->disk_sizep = mapsize;
if ((flags & OBJECT_INFO_ALLOW_UNKNOWN_TYPE)) {
if (unpack_sha1_header_to_strbuf(&stream, map, mapsize, hdr, sizeof(hdr), &hdrbuf) < 0)
- status = error("unable to unpack %s header with --allow-unknown-type",
+ status = error(_("unable to unpack %s header with --allow-unknown-type"),
sha1_to_hex(sha1));
} else if (unpack_sha1_header(&stream, map, mapsize, hdr, sizeof(hdr)) < 0)
- status = error("unable to unpack %s header",
+ status = error(_("unable to unpack %s header"),
sha1_to_hex(sha1));
if (status < 0)
; /* Do nothing */
else if (hdrbuf.len) {
if ((status = parse_sha1_header_extended(hdrbuf.buf, oi, flags)) < 0)
- status = error("unable to parse %s header with --allow-unknown-type",
+ status = error(_("unable to parse %s header with --allow-unknown-type"),
sha1_to_hex(sha1));
} else if ((status = parse_sha1_header_extended(hdr, oi, flags)) < 0)
- status = error("unable to parse %s header", sha1_to_hex(sha1));
+ status = error(_("unable to parse %s header"), sha1_to_hex(sha1));
if (status >= 0 && oi->contentp) {
*oi->contentp = unpack_sha1_rest(&stream, hdr,
@@ -1419,19 +1419,19 @@ void *read_object_file_extended(const struct object_id *oid,
return data;
if (errno && errno != ENOENT)
- die_errno("failed to read object %s", oid_to_hex(oid));
+ die_errno(_("failed to read object %s"), oid_to_hex(oid));
/* die if we replaced an object with one that does not exist */
if (repl != oid)
- die("replacement %s not found for %s",
+ die(_("replacement %s not found for %s"),
oid_to_hex(repl), oid_to_hex(oid));
if (!stat_sha1_file(the_repository, repl->hash, &st, &path))
- die("loose object %s (stored in %s) is corrupt",
+ die(_("loose object %s (stored in %s) is corrupt"),
oid_to_hex(repl), path);
if ((p = has_packed_and_bad(repl->hash)) != NULL)
- die("packed object %s (stored in %s) is corrupt",
+ die(_("packed object %s (stored in %s) is corrupt"),
oid_to_hex(repl), p->pack_name);
return NULL;
@@ -1473,7 +1473,7 @@ void *read_object_with_reference(const struct object_id *oid,
}
ref_length = strlen(ref_type);
- if (ref_length + GIT_SHA1_HEXSZ > isize ||
+ if (ref_length + the_hash_algo->hexsz > isize ||
memcmp(buffer, ref_type, ref_length) ||
get_oid_hex((char *) buffer + ref_length, &actual_oid)) {
free(buffer);
@@ -1533,21 +1533,21 @@ int finalize_object_file(const char *tmpfile, const char *filename)
unlink_or_warn(tmpfile);
if (ret) {
if (ret != EEXIST) {
- return error_errno("unable to write sha1 filename %s", filename);
+ return error_errno(_("unable to write sha1 filename %s"), filename);
}
/* FIXME!!! Collision check here ? */
}
out:
if (adjust_shared_perm(filename))
- return error("unable to set permission to '%s'", filename);
+ return error(_("unable to set permission to '%s'"), filename);
return 0;
}
static int write_buffer(int fd, const void *buf, size_t len)
{
if (write_in_full(fd, buf, len) < 0)
- return error_errno("file write error");
+ return error_errno(_("file write error"));
return 0;
}
@@ -1566,7 +1566,7 @@ static void close_sha1_file(int fd)
if (fsync_object_files)
fsync_or_die(fd, "sha1 file");
if (close(fd) != 0)
- die_errno("error when closing sha1 file");
+ die_errno(_("error when closing sha1 file"));
}
/* Size of directory component, including the ending '/' */
@@ -1632,9 +1632,9 @@ static int write_loose_object(const struct object_id *oid, char *hdr,
fd = create_tmpfile(&tmp_file, filename.buf);
if (fd < 0) {
if (errno == EACCES)
- return error("insufficient permission for adding an object to repository database %s", get_object_directory());
+ return error(_("insufficient permission for adding an object to repository database %s"), get_object_directory());
else
- return error_errno("unable to create temporary file");
+ return error_errno(_("unable to create temporary file"));
}
/* Set it up */
@@ -1658,21 +1658,21 @@ static int write_loose_object(const struct object_id *oid, char *hdr,
ret = git_deflate(&stream, Z_FINISH);
the_hash_algo->update_fn(&c, in0, stream.next_in - in0);
if (write_buffer(fd, compressed, stream.next_out - compressed) < 0)
- die("unable to write sha1 file");
+ die(_("unable to write sha1 file"));
stream.next_out = compressed;
stream.avail_out = sizeof(compressed);
} while (ret == Z_OK);
if (ret != Z_STREAM_END)
- die("unable to deflate new object %s (%d)", oid_to_hex(oid),
+ die(_("unable to deflate new object %s (%d)"), oid_to_hex(oid),
ret);
ret = git_deflate_end_gently(&stream);
if (ret != Z_OK)
- die("deflateEnd on object %s failed (%d)", oid_to_hex(oid),
+ die(_("deflateEnd on object %s failed (%d)"), oid_to_hex(oid),
ret);
the_hash_algo->final_fn(parano_oid.hash, &c);
if (oidcmp(oid, &parano_oid) != 0)
- die("confused by unstable object source data for %s",
+ die(_("confused by unstable object source data for %s"),
oid_to_hex(oid));
close_sha1_file(fd);
@@ -1682,7 +1682,7 @@ static int write_loose_object(const struct object_id *oid, char *hdr,
utb.actime = mtime;
utb.modtime = mtime;
if (utime(tmp_file.buf, &utb) < 0)
- warning_errno("failed utime() on %s", tmp_file.buf);
+ warning_errno(_("failed utime() on %s"), tmp_file.buf);
}
return finalize_object_file(tmp_file.buf, filename.buf);
@@ -1757,7 +1757,7 @@ int force_object_loose(const struct object_id *oid, time_t mtime)
return 0;
buf = read_object(oid->hash, &type, &len);
if (!buf)
- return error("cannot read sha1_file for %s", oid_to_hex(oid));
+ return error(_("cannot read sha1_file for %s"), oid_to_hex(oid));
hdrlen = xsnprintf(hdr, sizeof(hdr), "%s %lu", type_name(type), len) + 1;
ret = write_loose_object(oid, hdr, hdrlen, buf, len, mtime);
free(buf);
@@ -1801,16 +1801,16 @@ static void check_commit(const void *buf, size_t size)
{
struct commit c;
memset(&c, 0, sizeof(c));
- if (parse_commit_buffer(&c, buf, size, 0))
- die("corrupt commit");
+ if (parse_commit_buffer(the_repository, &c, buf, size, 0))
+ die(_("corrupt commit"));
}
static void check_tag(const void *buf, size_t size)
{
struct tag t;
memset(&t, 0, sizeof(t));
- if (parse_tag_buffer(&t, buf, size))
- die("corrupt tag");
+ if (parse_tag_buffer(the_repository, &t, buf, size))
+ die(_("corrupt tag"));
}
static int index_mem(struct object_id *oid, void *buf, size_t size,
@@ -1903,10 +1903,10 @@ static int index_core(struct object_id *oid, int fd, size_t size,
char *buf = xmalloc(size);
ssize_t read_result = read_in_full(fd, buf, size);
if (read_result < 0)
- ret = error_errno("read error while indexing %s",
+ ret = error_errno(_("read error while indexing %s"),
path ? path : "<unknown>");
else if (read_result != size)
- ret = error("short read while indexing %s",
+ ret = error(_("short read while indexing %s"),
path ? path : "<unknown>");
else
ret = index_mem(oid, buf, size, type, path, flags);
@@ -1977,7 +1977,7 @@ int index_path(struct object_id *oid, const char *path, struct stat *st, unsigne
if (fd < 0)
return error_errno("open(\"%s\")", path);
if (index_fd(oid, fd, st, OBJ_BLOB, path, flags) < 0)
- return error("%s: failed to insert into database",
+ return error(_("%s: failed to insert into database"),
path);
break;
case S_IFLNK:
@@ -1986,13 +1986,13 @@ int index_path(struct object_id *oid, const char *path, struct stat *st, unsigne
if (!(flags & HASH_WRITE_OBJECT))
hash_object_file(sb.buf, sb.len, blob_type, oid);
else if (write_object_file(sb.buf, sb.len, blob_type, oid))
- rc = error("%s: failed to insert into database", path);
+ rc = error(_("%s: failed to insert into database"), path);
strbuf_release(&sb);
break;
case S_IFDIR:
return resolve_gitlink_ref(path, "HEAD", oid);
default:
- return error("%s: unsupported file type", path);
+ return error(_("%s: unsupported file type"), path);
}
return rc;
}
@@ -2016,9 +2016,9 @@ void assert_oid_type(const struct object_id *oid, enum object_type expect)
{
enum object_type type = oid_object_info(the_repository, oid, NULL);
if (type < 0)
- die("%s is not a valid object", oid_to_hex(oid));
+ die(_("%s is not a valid object"), oid_to_hex(oid));
if (type != expect)
- die("%s is not a valid '%s' object", oid_to_hex(oid),
+ die(_("%s is not a valid '%s' object"), oid_to_hex(oid),
type_name(expect));
}
@@ -2045,7 +2045,7 @@ int for_each_file_in_obj_subdir(unsigned int subdir_nr,
dir = opendir(path->buf);
if (!dir) {
if (errno != ENOENT)
- r = error_errno("unable to open %s", path->buf);
+ r = error_errno(_("unable to open %s"), path->buf);
strbuf_setlen(path, origlen);
return r;
}
@@ -2062,9 +2062,9 @@ int for_each_file_in_obj_subdir(unsigned int subdir_nr,
namelen = strlen(de->d_name);
strbuf_setlen(path, baselen);
strbuf_add(path, de->d_name, namelen);
- if (namelen == GIT_SHA1_HEXSZ - 2 &&
+ if (namelen == the_hash_algo->hexsz - 2 &&
!hex_to_bytes(oid.hash + 1, de->d_name,
- GIT_SHA1_RAWSZ - 1)) {
+ the_hash_algo->rawsz - 1)) {
if (obj_cb) {
r = obj_cb(&oid, path->buf, data);
if (r)
@@ -2202,18 +2202,18 @@ static int check_stream_sha1(git_zstream *stream,
git_inflate_end(stream);
if (status != Z_STREAM_END) {
- error("corrupt loose object '%s'", sha1_to_hex(expected_sha1));
+ error(_("corrupt loose object '%s'"), sha1_to_hex(expected_sha1));
return -1;
}
if (stream->avail_in) {
- error("garbage at end of loose object '%s'",
+ error(_("garbage at end of loose object '%s'"),
sha1_to_hex(expected_sha1));
return -1;
}
the_hash_algo->final_fn(real_sha1, &c);
if (hashcmp(expected_sha1, real_sha1)) {
- error("sha1 mismatch for %s (expected %s)", path,
+ error(_("sha1 mismatch for %s (expected %s)"), path,
sha1_to_hex(expected_sha1));
return -1;
}
@@ -2237,18 +2237,18 @@ int read_loose_object(const char *path,
map = map_sha1_file_1(the_repository, path, NULL, &mapsize);
if (!map) {
- error_errno("unable to mmap %s", path);
+ error_errno(_("unable to mmap %s"), path);
goto out;
}
if (unpack_sha1_header(&stream, map, mapsize, hdr, sizeof(hdr)) < 0) {
- error("unable to unpack header of %s", path);
+ error(_("unable to unpack header of %s"), path);
goto out;
}
*type = parse_sha1_header(hdr, size);
if (*type < 0) {
- error("unable to parse header of %s", path);
+ error(_("unable to parse header of %s"), path);
git_inflate_end(&stream);
goto out;
}
@@ -2259,13 +2259,13 @@ int read_loose_object(const char *path,
} else {
*contents = unpack_sha1_rest(&stream, hdr, *size, expected_oid->hash);
if (!*contents) {
- error("unable to unpack contents of %s", path);
+ error(_("unable to unpack contents of %s"), path);
git_inflate_end(&stream);
goto out;
}
if (check_object_signature(expected_oid, *contents,
*size, type_name(*type))) {
- error("sha1 mismatch for %s (expected %s)", path,
+ error(_("sha1 mismatch for %s (expected %s)"), path,
oid_to_hex(expected_oid));
free(*contents);
goto out;
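
Two themes run through the sha1-file.c hunks: user-facing messages are marked for translation (while the unreachable unknown-hash stubs are converted from die() to BUG(), since reaching them indicates a programming error rather than bad input), and hard-coded SHA-1 sizes (the literal 20, GIT_SHA1_HEXSZ, GIT_SHA1_RAWSZ) are replaced with the_hash_algo->rawsz and the_hash_algo->hexsz so the code keeps working once a second hash function is wired in.
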
diff --git a/sha1-name.c b/sha1-name.c
index 641ca12..c9cc131 100644
--- a/sha1-name.c
+++ b/sha1-name.c
@@ -239,7 +239,8 @@ static int disambiguate_committish_only(const struct object_id *oid, void *cb_da
return 0;
/* We need to do this the hard way... */
- obj = deref_tag(parse_object(oid), NULL, 0);
+ obj = deref_tag(the_repository, parse_object(the_repository, oid),
+ NULL, 0);
if (obj && obj->type == OBJ_COMMIT)
return 1;
return 0;
@@ -263,7 +264,8 @@ static int disambiguate_treeish_only(const struct object_id *oid, void *cb_data_
return 0;
/* We need to do this the hard way... */
- obj = deref_tag(parse_object(oid), NULL, 0);
+ obj = deref_tag(the_repository, parse_object(the_repository, oid),
+ NULL, 0);
if (obj && (obj->type == OBJ_TREE || obj->type == OBJ_COMMIT))
return 1;
return 0;
@@ -310,7 +312,7 @@ static int init_object_disambiguation(const char *name, int len,
{
int i;
- if (len < MINIMUM_ABBREV || len > GIT_SHA1_HEXSZ)
+ if (len < MINIMUM_ABBREV || len > the_hash_algo->hexsz)
return -1;
memset(ds, 0, sizeof(*ds));
@@ -351,14 +353,14 @@ static int show_ambiguous_object(const struct object_id *oid, void *data)
type = oid_object_info(the_repository, oid, NULL);
if (type == OBJ_COMMIT) {
- struct commit *commit = lookup_commit(oid);
+ struct commit *commit = lookup_commit(the_repository, oid);
if (commit) {
struct pretty_print_context pp = {0};
pp.date_mode.type = DATE_SHORT;
format_commit_message(commit, " %ad - %s", &desc, &pp);
}
} else if (type == OBJ_TAG) {
- struct tag *tag = lookup_tag(oid);
+ struct tag *tag = lookup_tag(the_repository, oid);
if (!parse_tag(tag) && tag->tag)
strbuf_addf(&desc, " %s", tag->tag);
}
@@ -576,6 +578,8 @@ int find_unique_abbrev_r(char *hex, const struct object_id *oid, int len)
struct disambiguate_state ds;
struct min_abbrev_data mad;
struct object_id oid_ret;
+ const unsigned hexsz = the_hash_algo->hexsz;
+
if (len < 0) {
unsigned long count = approximate_object_count();
/*
@@ -599,8 +603,8 @@ int find_unique_abbrev_r(char *hex, const struct object_id *oid, int len)
}
oid_to_hex_r(hex, oid);
- if (len == GIT_SHA1_HEXSZ || !len)
- return GIT_SHA1_HEXSZ;
+ if (len == hexsz || !len)
+ return hexsz;
mad.init_len = len;
mad.cur_len = len;
@@ -706,7 +710,7 @@ static int get_oid_basic(const char *str, int len, struct object_id *oid,
int refs_found = 0;
int at, reflog_len, nth_prior = 0;
- if (len == GIT_SHA1_HEXSZ && !get_oid_hex(str, oid)) {
+ if (len == the_hash_algo->hexsz && !get_oid_hex(str, oid)) {
if (warn_ambiguous_refs && warn_on_object_refname_ambiguity) {
refs_found = dwim_ref(str, len, &tmp_oid, &real_ref);
if (refs_found > 0) {
@@ -750,7 +754,7 @@ static int get_oid_basic(const char *str, int len, struct object_id *oid,
int detached;
if (interpret_nth_prior_checkout(str, len, &buf) > 0) {
- detached = (buf.len == GIT_SHA1_HEXSZ && !get_oid_hex(buf.buf, oid));
+ detached = (buf.len == the_hash_algo->hexsz && !get_oid_hex(buf.buf, oid));
strbuf_release(&buf);
if (detached)
return 0;
@@ -844,7 +848,7 @@ static int get_parent(const char *name, int len,
if (ret)
return ret;
- commit = lookup_commit_reference(&oid);
+ commit = lookup_commit_reference(the_repository, &oid);
if (parse_commit(commit))
return -1;
if (!idx) {
@@ -872,7 +876,7 @@ static int get_nth_ancestor(const char *name, int len,
ret = get_oid_1(name, len, &oid, GET_OID_COMMITTISH);
if (ret)
return ret;
- commit = lookup_commit_reference(&oid);
+ commit = lookup_commit_reference(the_repository, &oid);
if (!commit)
return -1;
@@ -891,7 +895,7 @@ struct object *peel_to_type(const char *name, int namelen,
if (name && !namelen)
namelen = strlen(name);
while (1) {
- if (!o || (!o->parsed && !parse_object(&o->oid)))
+ if (!o || (!o->parsed && !parse_object(the_repository, &o->oid)))
return NULL;
if (expected_type == OBJ_ANY || o->type == expected_type)
return o;
@@ -964,12 +968,12 @@ static int peel_onion(const char *name, int len, struct object_id *oid,
if (get_oid_1(name, sp - name - 2, &outer, lookup_flags))
return -1;
- o = parse_object(&outer);
+ o = parse_object(the_repository, &outer);
if (!o)
return -1;
if (!expected_type) {
- o = deref_tag(o, name, sp - name - 2);
- if (!o || (!o->parsed && !parse_object(&o->oid)))
+ o = deref_tag(the_repository, o, name, sp - name - 2);
+ if (!o || (!o->parsed && !parse_object(the_repository, &o->oid)))
return -1;
oidcpy(oid, &o->oid);
return 0;
@@ -1096,11 +1100,12 @@ static int handle_one_ref(const char *path, const struct object_id *oid,
int flag, void *cb_data)
{
struct commit_list **list = cb_data;
- struct object *object = parse_object(oid);
+ struct object *object = parse_object(the_repository, oid);
if (!object)
return 0;
if (object->type == OBJ_TAG) {
- object = deref_tag(object, path, strlen(path));
+ object = deref_tag(the_repository, object, path,
+ strlen(path));
if (!object)
return 0;
}
@@ -1142,7 +1147,7 @@ static int get_oid_oneline(const char *prefix, struct object_id *oid,
int matches;
commit = pop_most_recent_commit(&list, ONELINE_SEEN);
- if (!parse_object(&commit->object.oid))
+ if (!parse_object(the_repository, &commit->object.oid))
continue;
buf = get_commit_buffer(commit, NULL);
p = strstr(buf, "\n\n");
@@ -1251,13 +1256,13 @@ int get_oid_mb(const char *name, struct object_id *oid)
}
if (st)
return st;
- one = lookup_commit_reference_gently(&oid_tmp, 0);
+ one = lookup_commit_reference_gently(the_repository, &oid_tmp, 0);
if (!one)
return -1;
if (get_oid_committish(dots[3] ? (dots + 3) : "HEAD", &oid_tmp))
return -1;
- two = lookup_commit_reference_gently(&oid_tmp, 0);
+ two = lookup_commit_reference_gently(the_repository, &oid_tmp, 0);
if (!two)
return -1;
mbs = get_merge_bases(one, two);
diff --git a/sha1collisiondetection b/sha1collisiondetection
-Subproject 19d97bf5af05312267c2e874ee6bcf584d9e968
+Subproject 232357eb2ea0397388254a4b188333a227bf5b1
diff --git a/sha1dc/sha1.c b/sha1dc/sha1.c
index 25eded1..df0630b 100644
--- a/sha1dc/sha1.c
+++ b/sha1dc/sha1.c
@@ -93,13 +93,23 @@
#define SHA1DC_BIGENDIAN
/* Not under GCC-alike or glibc or *BSD or newlib or <processor whitelist> */
+#elif (defined(_AIX))
+
+/*
+ * Defines Big Endian on a whitelist of OSs that are known to be Big
+ * Endian-only. See
+ * https://public-inbox.org/git/93056823-2740-d072-1ebd-46b440b33d7e@felt.demon.nl/
+ */
+#define SHA1DC_BIGENDIAN
+
+/* Not under GCC-alike or glibc or *BSD or newlib or <processor whitelist> or <os whitelist> */
#elif defined(SHA1DC_ON_INTEL_LIKE_PROCESSOR)
/*
* As a last resort before we do anything else we're not 100% sure
* about below, we blacklist specific processors here. We could add
* more, see e.g. https://wiki.debian.org/ArchitectureSpecificsMemo
*/
-#else /* Not under GCC-alike or glibc or *BSD or newlib or <processor whitelist> or <processor blacklist> */
+#else /* Not under GCC-alike or glibc or *BSD or newlib or <processor whitelist> or <os whitelist> or <processor blacklist> */
/* We do nothing more here for now */
/*#error "Uncomment this to see if you fall through all the detection"*/
diff --git a/shallow.c b/shallow.c
index e53067c..dbe8a2a 100644
--- a/shallow.c
+++ b/shallow.c
@@ -31,7 +31,7 @@ int register_shallow(struct repository *r, const struct object_id *oid)
{
struct commit_graft *graft =
xmalloc(sizeof(struct commit_graft));
- struct commit *commit = lookup_commit(oid);
+ struct commit *commit = lookup_commit(the_repository, oid);
oidcpy(&graft->oid, oid);
graft->nr_parent = -1;
@@ -96,7 +96,9 @@ struct commit_list *get_shallow_commits(struct object_array *heads, int depth,
if (i < heads->nr) {
int **depth_slot;
commit = (struct commit *)
- deref_tag(heads->objects[i++].item, NULL, 0);
+ deref_tag(the_repository,
+ heads->objects[i++].item,
+ NULL, 0);
if (!commit || commit->object.type != OBJ_COMMIT) {
commit = NULL;
continue;
@@ -259,7 +261,7 @@ static int write_one_shallow(const struct commit_graft *graft, void *cb_data)
if (graft->nr_parent != -1)
return 0;
if (data->flags & SEEN_ONLY) {
- struct commit *c = lookup_commit(&graft->oid);
+ struct commit *c = lookup_commit(the_repository, &graft->oid);
if (!c || !(c->object.flags & SEEN)) {
if (data->flags & VERBOSE)
printf("Removing %s from .git/shallow\n",
@@ -492,7 +494,8 @@ static void paint_down(struct paint_info *info, const struct object_id *oid,
struct commit_list *head = NULL;
int bitmap_nr = DIV_ROUND_UP(info->nr_bits, 32);
size_t bitmap_size = st_mult(sizeof(uint32_t), bitmap_nr);
- struct commit *c = lookup_commit_reference_gently(oid, 1);
+ struct commit *c = lookup_commit_reference_gently(the_repository, oid,
+ 1);
uint32_t *tmp; /* to be freed before return */
uint32_t *bitmap;
@@ -554,7 +557,8 @@ static void paint_down(struct paint_info *info, const struct object_id *oid,
static int mark_uninteresting(const char *refname, const struct object_id *oid,
int flags, void *cb_data)
{
- struct commit *commit = lookup_commit_reference_gently(oid, 1);
+ struct commit *commit = lookup_commit_reference_gently(the_repository,
+ oid, 1);
if (!commit)
return 0;
commit->object.flags |= UNINTERESTING;
@@ -622,7 +626,8 @@ void assign_shallow_commits_to_refs(struct shallow_info *info,
/* Mark potential bottoms so we won't go out of bound */
for (i = 0; i < nr_shallow; i++) {
- struct commit *c = lookup_commit(&oid[shallow[i]]);
+ struct commit *c = lookup_commit(the_repository,
+ &oid[shallow[i]]);
c->object.flags |= BOTTOM;
}
@@ -633,7 +638,8 @@ void assign_shallow_commits_to_refs(struct shallow_info *info,
int bitmap_size = DIV_ROUND_UP(pi.nr_bits, 32) * sizeof(uint32_t);
memset(used, 0, sizeof(*used) * info->shallow->nr);
for (i = 0; i < nr_shallow; i++) {
- const struct commit *c = lookup_commit(&oid[shallow[i]]);
+ const struct commit *c = lookup_commit(the_repository,
+ &oid[shallow[i]]);
uint32_t **map = ref_bitmap_at(&pi.ref_bitmap, c);
if (*map)
used[shallow[i]] = xmemdupz(*map, bitmap_size);
@@ -664,7 +670,8 @@ static int add_ref(const char *refname, const struct object_id *oid,
{
struct commit_array *ca = cb_data;
ALLOC_GROW(ca->commits, ca->nr + 1, ca->alloc);
- ca->commits[ca->nr] = lookup_commit_reference_gently(oid, 1);
+ ca->commits[ca->nr] = lookup_commit_reference_gently(the_repository,
+ oid, 1);
if (ca->commits[ca->nr])
ca->nr++;
return 0;
@@ -702,7 +709,7 @@ static void post_assign_shallow(struct shallow_info *info,
for (i = dst = 0; i < info->nr_theirs; i++) {
if (i != dst)
info->theirs[dst] = info->theirs[i];
- c = lookup_commit(&oid[info->theirs[i]]);
+ c = lookup_commit(the_repository, &oid[info->theirs[i]]);
bitmap = ref_bitmap_at(ref_bitmap, c);
if (!*bitmap)
continue;
@@ -723,7 +730,7 @@ static void post_assign_shallow(struct shallow_info *info,
for (i = dst = 0; i < info->nr_ours; i++) {
if (i != dst)
info->ours[dst] = info->ours[i];
- c = lookup_commit(&oid[info->ours[i]]);
+ c = lookup_commit(the_repository, &oid[info->ours[i]]);
bitmap = ref_bitmap_at(ref_bitmap, c);
if (!*bitmap)
continue;
@@ -745,7 +752,8 @@ static void post_assign_shallow(struct shallow_info *info,
int delayed_reachability_test(struct shallow_info *si, int c)
{
if (si->need_reachability_test[c]) {
- struct commit *commit = lookup_commit(&si->shallow->oid[c]);
+ struct commit *commit = lookup_commit(the_repository,
+ &si->shallow->oid[c]);
if (!si->commits) {
struct commit_array ca;
diff --git a/split-index.c b/split-index.c
index 660c75f..84f067e 100644
--- a/split-index.c
+++ b/split-index.c
@@ -73,16 +73,31 @@ void move_cache_to_base_index(struct index_state *istate)
int i;
/*
- * do not delete old si->base, its index entries may be shared
- * with istate->cache[]. Accept a bit of leaking here because
- * this code is only used by short-lived update-index.
+ * If there was a previous base index, then transfer ownership of allocated
+ * entries to the parent index.
*/
+ if (si->base &&
+ si->base->ce_mem_pool) {
+
+ if (!istate->ce_mem_pool)
+ mem_pool_init(&istate->ce_mem_pool, 0);
+
+ mem_pool_combine(istate->ce_mem_pool, istate->split_index->base->ce_mem_pool);
+ }
+
si->base = xcalloc(1, sizeof(*si->base));
si->base->version = istate->version;
/* zero timestamp disables racy test in ce_write_index() */
si->base->timestamp = istate->timestamp;
ALLOC_GROW(si->base->cache, istate->cache_nr, si->base->cache_alloc);
si->base->cache_nr = istate->cache_nr;
+
+ /*
+ * The mem_pool needs to move with the allocated entries.
+ */
+ si->base->ce_mem_pool = istate->ce_mem_pool;
+ istate->ce_mem_pool = NULL;
+
COPY_ARRAY(si->base->cache, istate->cache, istate->cache_nr);
mark_base_index_entries(si->base);
for (i = 0; i < si->base->cache_nr; i++)
@@ -123,7 +138,7 @@ static void replace_entry(size_t pos, void *data)
src->ce_flags |= CE_UPDATE_IN_BASE;
src->ce_namelen = dst->ce_namelen;
copy_cache_entry(dst, src);
- free(src);
+ discard_cache_entry(src);
si->nr_replacements++;
}
@@ -224,7 +239,7 @@ void prepare_to_write_split_index(struct index_state *istate)
base->ce_flags = base_flags;
if (ret)
ce->ce_flags |= CE_UPDATE_IN_BASE;
- free(base);
+ discard_cache_entry(base);
si->base->cache[ce->index - 1] = ce;
}
for (i = 0; i < si->base->cache_nr; i++) {
@@ -301,7 +316,7 @@ void save_or_free_index_entry(struct index_state *istate, struct cache_entry *ce
ce == istate->split_index->base->cache[ce->index - 1])
ce->ce_flags |= CE_REMOVE;
else
- free(ce);
+ discard_cache_entry(ce);
}
void replace_index_entry_in_base(struct index_state *istate,
@@ -314,7 +329,7 @@ void replace_index_entry_in_base(struct index_state *istate,
old_entry->index <= istate->split_index->base->cache_nr) {
new_entry->index = old_entry->index;
if (old_entry != istate->split_index->base->cache[new_entry->index - 1])
- free(istate->split_index->base->cache[new_entry->index - 1]);
+ discard_cache_entry(istate->split_index->base->cache[new_entry->index - 1]);
istate->split_index->base->cache[new_entry->index - 1] = new_entry;
}
}
@@ -331,12 +346,31 @@ void remove_split_index(struct index_state *istate)
{
if (istate->split_index) {
/*
- * can't discard_split_index(&the_index); because that
- * will destroy split_index->base->cache[], which may
- * be shared with the_index.cache[]. So yeah we're
- * leaking a bit here.
+ * When removing the split index, we need to move
+ * ownership of the mem_pool associated with the
+ * base index to the main index. There may be cache entries
+ * allocated from the base's memory pool that are shared with
+ * the_index.cache[].
*/
- istate->split_index = NULL;
+ mem_pool_combine(istate->ce_mem_pool, istate->split_index->base->ce_mem_pool);
+
+ /*
+ * The split index no longer owns the mem_pool backing
+ * its cache array. As we are discarding this index,
+ * mark the index as having no cache entries, so it
+ * will not attempt to clean up the cache entries or
+ * validate them.
+ */
+ if (istate->split_index->base)
+ istate->split_index->base->cache_nr = 0;
+
+ /*
+ * We can discard the split index because its
+ * memory pool has been incorporated into the
+ * memory pool associated with the_index.
+ */
+ discard_split_index(istate);
+
istate->cache_changed |= SOMETHING_CHANGED;
}
}
diff --git a/strbuf.c b/strbuf.c
index b0716ac..64041c3 100644
--- a/strbuf.c
+++ b/strbuf.c
@@ -134,7 +134,7 @@ void strbuf_ltrim(struct strbuf *sb)
int strbuf_reencode(struct strbuf *sb, const char *from, const char *to)
{
char *out;
- int len;
+ size_t len;
if (same_encoding(from, to))
return 0;
@@ -209,7 +209,7 @@ void strbuf_list_free(struct strbuf **sbs)
int strbuf_cmp(const struct strbuf *a, const struct strbuf *b)
{
- int len = a->len < b->len ? a->len: b->len;
+ size_t len = a->len < b->len ? a->len: b->len;
int cmp = memcmp(a->buf, b->buf, len);
if (cmp)
return cmp;
@@ -389,7 +389,7 @@ size_t strbuf_expand_dict_cb(struct strbuf *sb, const char *placeholder,
void strbuf_addbuf_percentquote(struct strbuf *dst, const struct strbuf *src)
{
- int i, len = src->len;
+ size_t i, len = src->len;
for (i = 0; i < len; i++) {
if (src->buf[i] == '%')
@@ -469,7 +469,7 @@ int strbuf_readlink(struct strbuf *sb, const char *path, size_t hint)
hint = 32;
while (hint < STRBUF_MAXLINK) {
- int len;
+ ssize_t len;
strbuf_grow(sb, hint);
len = readlink(path, sb->buf, hint);
@@ -734,18 +734,18 @@ void strbuf_humanise_bytes(struct strbuf *buf, off_t bytes)
{
if (bytes > 1 << 30) {
strbuf_addf(buf, "%u.%2.2u GiB",
- (int)(bytes >> 30),
- (int)(bytes & ((1 << 30) - 1)) / 10737419);
+ (unsigned)(bytes >> 30),
+ (unsigned)(bytes & ((1 << 30) - 1)) / 10737419);
} else if (bytes > 1 << 20) {
- int x = bytes + 5243; /* for rounding */
+ unsigned x = bytes + 5243; /* for rounding */
strbuf_addf(buf, "%u.%2.2u MiB",
x >> 20, ((x & ((1 << 20) - 1)) * 100) >> 20);
} else if (bytes > 1 << 10) {
- int x = bytes + 5; /* for rounding */
+ unsigned x = bytes + 5; /* for rounding */
strbuf_addf(buf, "%u.%2.2u KiB",
x >> 10, ((x & ((1 << 10) - 1)) * 100) >> 10);
} else {
- strbuf_addf(buf, "%u bytes", (int)bytes);
+ strbuf_addf(buf, "%u bytes", (unsigned)bytes);
}
}
@@ -921,7 +921,7 @@ void strbuf_add_unique_abbrev(struct strbuf *sb, const struct object_id *oid,
int abbrev_len)
{
int r;
- strbuf_grow(sb, GIT_SHA1_HEXSZ + 1);
+ strbuf_grow(sb, GIT_MAX_HEXSZ + 1);
r = find_unique_abbrev_r(sb->buf + sb->len, oid, abbrev_len);
strbuf_setlen(sb, sb->len + r);
}
@@ -960,7 +960,7 @@ static size_t cleanup(char *line, size_t len)
*/
void strbuf_stripspace(struct strbuf *sb, int skip_comments)
{
- int empties = 0;
+ size_t empties = 0;
size_t i, j, len, newlen;
char *eol;
diff --git a/submodule-config.c b/submodule-config.c
index 2a7259b..fc2c41b 100644
--- a/submodule-config.c
+++ b/submodule-config.c
@@ -562,7 +562,7 @@ static const struct submodule *config_from(struct submodule_cache *cache,
parameter.gitmodules_oid = &oid;
parameter.overwrite = 0;
git_config_from_mem(parse_config, CONFIG_ORIGIN_SUBMODULE_BLOB, rev.buf,
- config, config_size, &parameter);
+ config, config_size, &parameter, NULL);
strbuf_release(&rev);
free(config);
diff --git a/submodule.c b/submodule.c
index 2a63818..6e14547 100644
--- a/submodule.c
+++ b/submodule.c
@@ -517,8 +517,8 @@ static void show_submodule_header(struct diff_options *o, const char *path,
* Attempt to lookup the commit references, and determine if this is
* a fast forward or fast backwards update.
*/
- *left = lookup_commit_reference(one);
- *right = lookup_commit_reference(two);
+ *left = lookup_commit_reference(the_repository, one);
+ *right = lookup_commit_reference(the_repository, two);
/*
* Warn about missing commits in the submodule project, but only if
diff --git a/submodule.h b/submodule.h
index 4644683..c98cd98 100644
--- a/submodule.h
+++ b/submodule.h
@@ -33,62 +33,62 @@ struct submodule_update_strategy {
};
#define SUBMODULE_UPDATE_STRATEGY_INIT {SM_UPDATE_UNSPECIFIED, NULL}
-extern int is_gitmodules_unmerged(const struct index_state *istate);
-extern int is_staging_gitmodules_ok(struct index_state *istate);
-extern int update_path_in_gitmodules(const char *oldpath, const char *newpath);
-extern int remove_path_from_gitmodules(const char *path);
-extern void stage_updated_gitmodules(struct index_state *istate);
-extern void set_diffopt_flags_from_submodule_config(struct diff_options *,
- const char *path);
-extern int git_default_submodule_config(const char *var, const char *value, void *cb);
+int is_gitmodules_unmerged(const struct index_state *istate);
+int is_staging_gitmodules_ok(struct index_state *istate);
+int update_path_in_gitmodules(const char *oldpath, const char *newpath);
+int remove_path_from_gitmodules(const char *path);
+void stage_updated_gitmodules(struct index_state *istate);
+void set_diffopt_flags_from_submodule_config(struct diff_options *,
+ const char *path);
+int git_default_submodule_config(const char *var, const char *value, void *cb);
struct option;
int option_parse_recurse_submodules_worktree_updater(const struct option *opt,
const char *arg, int unset);
-extern int is_submodule_active(struct repository *repo, const char *path);
+int is_submodule_active(struct repository *repo, const char *path);
/*
* Determine if a submodule has been populated at a given 'path' by checking if
* the <path>/.git resolves to a valid git repository.
* If return_error_code is NULL, die on error.
* Otherwise the return error code is the same as of resolve_gitdir_gently.
*/
-extern int is_submodule_populated_gently(const char *path, int *return_error_code);
-extern void die_in_unpopulated_submodule(const struct index_state *istate,
- const char *prefix);
-extern void die_path_inside_submodule(const struct index_state *istate,
- const struct pathspec *ps);
-extern enum submodule_update_type parse_submodule_update_type(const char *value);
-extern int parse_submodule_update_strategy(const char *value,
- struct submodule_update_strategy *dst);
-extern const char *submodule_strategy_to_string(const struct submodule_update_strategy *s);
-extern void handle_ignore_submodules_arg(struct diff_options *, const char *);
-extern void show_submodule_summary(struct diff_options *o, const char *path,
- struct object_id *one, struct object_id *two,
- unsigned dirty_submodule);
-extern void show_submodule_inline_diff(struct diff_options *o, const char *path,
- struct object_id *one, struct object_id *two,
- unsigned dirty_submodule);
+int is_submodule_populated_gently(const char *path, int *return_error_code);
+void die_in_unpopulated_submodule(const struct index_state *istate,
+ const char *prefix);
+void die_path_inside_submodule(const struct index_state *istate,
+ const struct pathspec *ps);
+enum submodule_update_type parse_submodule_update_type(const char *value);
+int parse_submodule_update_strategy(const char *value,
+ struct submodule_update_strategy *dst);
+const char *submodule_strategy_to_string(const struct submodule_update_strategy *s);
+void handle_ignore_submodules_arg(struct diff_options *, const char *);
+void show_submodule_summary(struct diff_options *o, const char *path,
+ struct object_id *one, struct object_id *two,
+ unsigned dirty_submodule);
+void show_submodule_inline_diff(struct diff_options *o, const char *path,
+ struct object_id *one, struct object_id *two,
+ unsigned dirty_submodule);
/* Check if we want to update any submodule.*/
-extern int should_update_submodules(void);
+int should_update_submodules(void);
/*
* Returns the submodule struct if the given ce entry is a submodule
* and it should be updated. Returns NULL otherwise.
*/
-extern const struct submodule *submodule_from_ce(const struct cache_entry *ce);
-extern void check_for_new_submodule_commits(struct object_id *oid);
-extern int fetch_populated_submodules(struct repository *r,
- const struct argv_array *options,
- const char *prefix,
- int command_line_option,
- int default_option,
- int quiet, int max_parallel_jobs);
-extern unsigned is_submodule_modified(const char *path, int ignore_untracked);
-extern int submodule_uses_gitfile(const char *path);
+const struct submodule *submodule_from_ce(const struct cache_entry *ce);
+void check_for_new_submodule_commits(struct object_id *oid);
+int fetch_populated_submodules(struct repository *r,
+ const struct argv_array *options,
+ const char *prefix,
+ int command_line_option,
+ int default_option,
+ int quiet, int max_parallel_jobs);
+unsigned is_submodule_modified(const char *path, int ignore_untracked);
+int submodule_uses_gitfile(const char *path);
#define SUBMODULE_REMOVAL_DIE_ON_ERROR (1<<0)
#define SUBMODULE_REMOVAL_IGNORE_UNTRACKED (1<<1)
#define SUBMODULE_REMOVAL_IGNORE_IGNORED_UNTRACKED (1<<2)
-extern int bad_to_remove_submodule(const char *path, unsigned flags);
+int bad_to_remove_submodule(const char *path, unsigned flags);
int add_submodule_odb(const char *path);
@@ -96,17 +96,17 @@ int add_submodule_odb(const char *path);
* Checks if there are submodule changes in a..b. If a is the null OID,
* checks b and all its ancestors instead.
*/
-extern int submodule_touches_in_range(struct object_id *a,
- struct object_id *b);
-extern int find_unpushed_submodules(struct oid_array *commits,
- const char *remotes_name,
- struct string_list *needs_pushing);
+int submodule_touches_in_range(struct object_id *a,
+ struct object_id *b);
+int find_unpushed_submodules(struct oid_array *commits,
+ const char *remotes_name,
+ struct string_list *needs_pushing);
struct refspec;
-extern int push_unpushed_submodules(struct oid_array *commits,
- const struct remote *remote,
- const struct refspec *rs,
- const struct string_list *push_options,
- int dry_run);
+int push_unpushed_submodules(struct oid_array *commits,
+ const struct remote *remote,
+ const struct refspec *rs,
+ const struct string_list *push_options,
+ int dry_run);
/*
* Given a submodule path (as in the index), return the repository
* path of that submodule in 'buf'. Return -1 on error or when the
@@ -116,10 +116,10 @@ int submodule_to_gitdir(struct strbuf *buf, const char *submodule);
#define SUBMODULE_MOVE_HEAD_DRY_RUN (1<<0)
#define SUBMODULE_MOVE_HEAD_FORCE (1<<1)
-extern int submodule_move_head(const char *path,
- const char *old,
- const char *new_head,
- unsigned flags);
+int submodule_move_head(const char *path,
+ const char *old,
+ const char *new_head,
+ unsigned flags);
void submodule_unset_core_worktree(const struct submodule *sub);
@@ -128,18 +128,18 @@ void submodule_unset_core_worktree(const struct submodule *sub);
* a submodule by clearing any repo-specific environment variables, but
* retaining any config in the environment.
*/
-extern void prepare_submodule_repo_env(struct argv_array *out);
+void prepare_submodule_repo_env(struct argv_array *out);
#define ABSORB_GITDIR_RECURSE_SUBMODULES (1<<0)
-extern void absorb_git_dir_into_superproject(const char *prefix,
- const char *path,
- unsigned flags);
+void absorb_git_dir_into_superproject(const char *prefix,
+ const char *path,
+ unsigned flags);
/*
* Return the absolute path of the working tree of the superproject, which this
* project is a submodule of. If this repository is not a submodule of
* another repository, return NULL.
*/
-extern const char *get_superproject_working_tree(void);
+const char *get_superproject_working_tree(void);
#endif
diff --git a/t/.gitignore b/t/.gitignore
index 4e731dc..348715f 100644
--- a/t/.gitignore
+++ b/t/.gitignore
@@ -1,3 +1,4 @@
/trash directory*
/test-results
/.prove
+/chainlinttmp
diff --git a/t/Makefile b/t/Makefile
index 96317a3..c83fd18 100644
--- a/t/Makefile
+++ b/t/Makefile
@@ -18,8 +18,10 @@ TEST_LINT ?= test-lint
ifdef TEST_OUTPUT_DIRECTORY
TEST_RESULTS_DIRECTORY = $(TEST_OUTPUT_DIRECTORY)/test-results
+CHAINLINTTMP = $(TEST_OUTPUT_DIRECTORY)/chainlinttmp
else
TEST_RESULTS_DIRECTORY = test-results
+CHAINLINTTMP = chainlinttmp
endif
# Shell quote;
@@ -27,14 +29,17 @@ SHELL_PATH_SQ = $(subst ','\'',$(SHELL_PATH))
TEST_SHELL_PATH_SQ = $(subst ','\'',$(TEST_SHELL_PATH))
PERL_PATH_SQ = $(subst ','\'',$(PERL_PATH))
TEST_RESULTS_DIRECTORY_SQ = $(subst ','\'',$(TEST_RESULTS_DIRECTORY))
+CHAINLINTTMP_SQ = $(subst ','\'',$(CHAINLINTTMP))
T = $(sort $(wildcard t[0-9][0-9][0-9][0-9]-*.sh))
TGITWEB = $(sort $(wildcard t95[0-9][0-9]-*.sh))
THELPERS = $(sort $(filter-out $(T),$(wildcard *.sh)))
+CHAINLINTTESTS = $(sort $(patsubst chainlint/%.test,%,$(wildcard chainlint/*.test)))
+CHAINLINT = sed -f chainlint.sed
all: $(DEFAULT_TEST_TARGET)
-test: pre-clean $(TEST_LINT)
+test: pre-clean check-chainlint $(TEST_LINT)
$(MAKE) aggregate-results-and-cleanup
failed:
@@ -43,7 +48,7 @@ failed:
sed -n 's/\.counts$$/.sh/p') && \
test -z "$$failed" || $(MAKE) $$failed
-prove: pre-clean $(TEST_LINT)
+prove: pre-clean check-chainlint $(TEST_LINT)
@echo "*** prove ***"; $(PROVE) --exec '$(TEST_SHELL_PATH_SQ)' $(GIT_PROVE_OPTS) $(T) :: $(GIT_TEST_OPTS)
$(MAKE) clean-except-prove-cache
@@ -53,13 +58,25 @@ $(T):
pre-clean:
$(RM) -r '$(TEST_RESULTS_DIRECTORY_SQ)'
-clean-except-prove-cache:
+clean-except-prove-cache: clean-chainlint
$(RM) -r 'trash directory'.* '$(TEST_RESULTS_DIRECTORY_SQ)'
$(RM) -r valgrind/bin
clean: clean-except-prove-cache
$(RM) .prove
+clean-chainlint:
+ $(RM) -r '$(CHAINLINTTMP_SQ)'
+
+check-chainlint:
+ @mkdir -p '$(CHAINLINTTMP_SQ)' && \
+ err=0 && \
+ for i in $(CHAINLINTTESTS); do \
+ $(CHAINLINT) <chainlint/$$i.test | \
+ sed -e '/^# LINT: /d' >'$(CHAINLINTTMP_SQ)'/$$i.actual && \
+ diff -u chainlint/$$i.expect '$(CHAINLINTTMP_SQ)'/$$i.actual || err=1; \
+ done && exit $$err
+
test-lint: test-lint-duplicates test-lint-executable test-lint-shell-syntax \
test-lint-filenames
@@ -102,4 +119,4 @@ valgrind:
perf:
$(MAKE) -C perf/ all
-.PHONY: pre-clean $(T) aggregate-results clean valgrind perf
+.PHONY: pre-clean $(T) aggregate-results clean valgrind perf check-chainlint clean-chainlint
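
For a single self-test, the check-chainlint recipe above boils down to the following; a minimal sketch, assuming it is run from the t/ directory and using the broken-chain test added further below as the example:

mkdir -p chainlinttmp &&
sed -f chainlint.sed <chainlint/broken-chain.test |
sed -e '/^# LINT: /d' >chainlinttmp/broken-chain.actual &&
diff -u chainlint/broken-chain.expect chainlinttmp/broken-chain.actual

A clean run prints nothing; any diff output means chainlint.sed no longer produces the annotations recorded in the .expect file. Because check-chainlint is now a prerequisite of the test and prove targets, these self-tests run before the main suite.
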
diff --git a/t/annotate-tests.sh b/t/annotate-tests.sh
index 093832f..6da48a2 100644
--- a/t/annotate-tests.sh
+++ b/t/annotate-tests.sh
@@ -320,11 +320,11 @@ test_expect_success 'blame -L ,Y (Y == nlines)' '
test_expect_success 'blame -L ,Y (Y == nlines + 1)' '
n=$(expr $(wc -l <file) + 2) &&
- test_must_fail $PROG -L,$n file
+ check_count -L,$n A 1 B 1 B1 1 B2 1 "A U Thor" 1 C 1 D 1 E 1
'
test_expect_success 'blame -L ,Y (Y > nlines)' '
- test_must_fail $PROG -L,12345 file
+ check_count -L,12345 A 1 B 1 B1 1 B2 1 "A U Thor" 1 C 1 D 1 E 1
'
test_expect_success 'blame -L multiple (disjoint)' '
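
The two updated blame tests above flip from expecting failure to checking annotation counts: a -L range whose end goes past the last line is now accepted. Roughly, and judging only from the counts the tests now expect ("file" is the fixture the suite sets up; $PROG is git blame or git annotate):

# formerly rejected because 12345 exceeds the number of lines in the file;
# the range now appears to be truncated at end-of-file instead
git blame -L ,12345 file
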
diff --git a/t/chainlint.sed b/t/chainlint.sed
new file mode 100644
index 0000000..5f0882c
--- /dev/null
+++ b/t/chainlint.sed
@@ -0,0 +1,346 @@
+#------------------------------------------------------------------------------
+# Detect broken &&-chains in tests.
+#
+# At present, only &&-chains in subshells are examined by this linter;
+# top-level &&-chains are instead checked directly by the test framework. Like
+# the top-level &&-chain linter, the subshell linter (intentionally) does not
+# check &&-chains within {...} blocks.
+#
+# Checking for &&-chain breakage is done line-by-line by pure textual
+# inspection.
+#
+# Incomplete lines (those ending with "\") are stitched together with following
+# lines to simplify processing, particularly of "one-liner" statements.
+# Top-level here-docs are swallowed to avoid false positives within the
+# here-doc body, although the statement to which the here-doc is attached is
+# retained.
+#
+# Heuristics are used to detect end-of-subshell when the closing ")" is cuddled
+# with the final subshell statement on the same line:
+#
+# (cd foo &&
+# bar)
+#
+# in order to avoid misinterpreting the ")" in constructs such as "x=$(...)"
+# and "case $x in *)" as ending the subshell.
+#
+# Lines missing a final "&&" are flagged with "?!AMP?!", and lines which chain
+# commands with ";" internally rather than "&&" are flagged "?!SEMI?!". A line
+# may be flagged for both violations.
+#
+# Detection of a missing &&-link in a multi-line subshell is complicated by the
+# fact that the last statement before the closing ")" must not end with "&&".
+# Since processing is line-by-line, it is not known whether a missing "&&" is
+# legitimate or not until the _next_ line is seen. To accommodate this, within
+# multi-line subshells, each line is stored in sed's "hold" area until after
+# the next line is seen and processed. If the next line is a stand-alone ")",
+# then a missing "&&" on the previous line is legitimate; otherwise a missing
+# "&&" is a break in the &&-chain.
+#
+# (
+# cd foo &&
+# bar
+# )
+#
+# In practical terms, when "bar" is encountered, it is flagged with "?!AMP?!",
+# but when the stand-alone ")" line is seen which closes the subshell, the
+# "?!AMP?!" violation is removed from the "bar" line (retrieved from the "hold"
+# area) since the final statement of a subshell must not end with "&&". The
+# final line of a subshell may still break the &&-chain by using ";" internally
+# to chain commands together rather than "&&", so "?!SEMI?!" is never removed
+# from a line (even though "?!AMP?!" might be).
+#
+# Care is taken to recognize the last _statement_ of a multi-line subshell, not
+# necessarily the last textual _line_ within the subshell, since &&-chaining
+# applies to statements, not to lines. Consequently, blank lines, comment
+# lines, and here-docs are swallowed (but not the command to which the here-doc
+# is attached), leaving the last statement in the "hold" area, not the last
+# line, thus simplifying &&-link checking.
+#
+# The final statement before "done" in for- and while-loops, and before "elif",
+# "else", and "fi" in if-then-else likewise must not end with "&&", thus
+# receives similar treatment.
+#
+# To facilitate regression testing (and manual debugging), a ">" annotation is
+# applied to the line containing ")" which closes a subshell, ">>" to a line
+# closing a nested subshell, and ">>>" to a line closing both at once. This
+# makes it easy to detect whether the heuristics correctly identify
+# end-of-subshell.
+#------------------------------------------------------------------------------
+
+# incomplete line -- slurp up next line
+:squash
+/\\$/ {
+ N
+ s/\\\n//
+ bsquash
+}
+
+# here-doc -- swallow it to avoid false hits within its body (but keep the
+# command to which it was attached)
+/<<[ ]*[-\\]*EOF[ ]*/ {
+ s/[ ]*<<[ ]*[-\\]*EOF//
+ h
+ :hereslurp
+ N
+ s/.*\n//
+ /^[ ]*EOF[ ]*$/!bhereslurp
+ x
+}
+
+# one-liner "(...) &&"
+/^[ ]*!*[ ]*(..*)[ ]*&&[ ]*$/boneline
+
+# same as above but without trailing "&&"
+/^[ ]*!*[ ]*(..*)[ ]*$/boneline
+
+# one-liner "(...) >x" (or "2>x" or "<x" or "|x" or "&"
+/^[ ]*!*[ ]*(..*)[ ]*[0-9]*[<>|&]/boneline
+
+# multi-line "(...\n...)"
+/^[ ]*(/bsubshell
+
+# innocuous line -- print it and advance to next line
+b
+
+# found one-liner "(...)" -- mark suspect if it uses ";" internally rather than
+# "&&" (but not ";" in a string)
+:oneline
+/;/{
+ /"[^"]*;[^"]*"/!s/^/?!SEMI?!/
+}
+b
+
+:subshell
+# bare "(" line?
+/^[ ]*([ ]*$/ {
+ # stash for later printing
+ h
+ bnextline
+}
+# "(..." line -- split off and stash "(", then process "..." as its own line
+x
+s/.*/(/
+x
+s/(//
+bslurp
+
+:nextline
+N
+s/.*\n//
+
+:slurp
+# incomplete line "...\"
+/\\$/bincomplete
+# multi-line quoted string "...\n..."
+/^[^"]*"[^"]*$/bdqstring
+# multi-line quoted string '...\n...' (but not contraction in string "it's so")
+/^[^']*'[^']*$/{
+ /"[^'"]*'[^'"]*"/!bsqstring
+}
+# here-doc -- swallow it
+/<<[ ]*[-\\]*EOF/bheredoc
+/<<[ ]*[-\\]*EOT/bheredoc
+/<<[ ]*[-\\]*INPUT_END/bheredoc
+# comment or empty line -- discard since final non-comment, non-empty line
+# before closing ")", "done", "elsif", "else", or "fi" will need to be
+# re-visited to drop "suspect" marking since final line of those constructs
+# legitimately lacks "&&", so "suspect" mark must be removed
+/^[ ]*#/bnextline
+/^[ ]*$/bnextline
+# in-line comment -- strip it (but not "#" in a string, Bash ${#...} array
+# length, or Perforce "//depot/path#42" revision in filespec)
+/[ ]#/{
+ /"[^"]*#[^"]*"/!s/[ ]#.*$//
+}
+# one-liner "case ... esac"
+/^[ ]*case[ ]*..*esac/bcheckchain
+# multi-line "case ... esac"
+/^[ ]*case[ ]..*[ ]in/bcase
+# multi-line "for ... done" or "while ... done"
+/^[ ]*for[ ]..*[ ]in/bcontinue
+/^[ ]*while[ ]/bcontinue
+/^[ ]*do[ ]/bcontinue
+/^[ ]*do[ ]*$/bcontinue
+/;[ ]*do/bcontinue
+/^[ ]*done[ ]*&&[ ]*$/bdone
+/^[ ]*done[ ]*$/bdone
+/^[ ]*done[ ]*[<>|]/bdone
+/^[ ]*done[ ]*)/bdone
+/||[ ]*exit[ ]/bcontinue
+/||[ ]*exit[ ]*$/bcontinue
+# multi-line "if...elsif...else...fi"
+/^[ ]*if[ ]/bcontinue
+/^[ ]*then[ ]/bcontinue
+/^[ ]*then[ ]*$/bcontinue
+/;[ ]*then/bcontinue
+/^[ ]*elif[ ]/belse
+/^[ ]*elif[ ]*$/belse
+/^[ ]*else[ ]/belse
+/^[ ]*else[ ]*$/belse
+/^[ ]*fi[ ]*&&[ ]*$/bdone
+/^[ ]*fi[ ]*$/bdone
+/^[ ]*fi[ ]*[<>|]/bdone
+/^[ ]*fi[ ]*)/bdone
+# nested one-liner "(...) &&"
+/^[ ]*(.*)[ ]*&&[ ]*$/bcheckchain
+# nested one-liner "(...)"
+/^[ ]*(.*)[ ]*$/bcheckchain
+# nested one-liner "(...) >x" (or "2>x" or "<x" or "|x")
+/^[ ]*(.*)[ ]*[0-9]*[<>|]/bcheckchain
+# nested multi-line "(...\n...)"
+/^[ ]*(/bnest
+# multi-line "{...\n...}"
+/^[ ]*{/bblock
+# closing ")" on own line -- exit subshell
+/^[ ]*)/bclosesolo
+# "$((...))" -- arithmetic expansion; not closing ")"
+/\$(([^)][^)]*))[^)]*$/bcheckchain
+# "$(...)" -- command substitution; not closing ")"
+/\$([^)][^)]*)[^)]*$/bcheckchain
+# multi-line "$(...\n...)" -- command substitution; treat as nested subshell
+/\$([ ]*$/bnest
+# "=(...)" -- Bash array assignment; not closing ")"
+/=(/bcheckchain
+# closing "...) &&"
+/)[ ]*&&[ ]*$/bclose
+# closing "...)"
+/)[ ]*$/bclose
+# closing "...) >x" (or "2>x" or "<x" or "|x")
+/)[ ]*[<>|]/bclose
+:checkchain
+# mark suspect if line uses ";" internally rather than "&&" (but not ";" in a
+# string and not ";;" in one-liner "case...esac")
+/;/{
+ /;;/!{
+ /"[^"]*;[^"]*"/!s/^/?!SEMI?!/
+ }
+}
+# line ends with pipe "...|" -- valid; not missing "&&"
+/|[ ]*$/bcontinue
+# missing end-of-line "&&" -- mark suspect
+/&&[ ]*$/!s/^/?!AMP?!/
+:continue
+# retrieve and print previous line
+x
+n
+bslurp
+
+# found incomplete line "...\" -- slurp up next line
+:incomplete
+N
+s/\\\n//
+bslurp
+
+# found multi-line double-quoted string "...\n..." -- slurp until end of string
+:dqstring
+s/"//g
+N
+s/\n//
+/"/!bdqstring
+bcheckchain
+
+# found multi-line single-quoted string '...\n...' -- slurp until end of string
+:sqstring
+s/'//g
+N
+s/\n//
+/'/!bsqstring
+bcheckchain
+
+# found here-doc -- swallow it to avoid false hits within its body (but keep
+# the command to which it was attached); take care to handle here-docs nested
+# within here-docs by only recognizing closing tag matching outer here-doc
+# opening tag
+:heredoc
+/EOF/{ s/[ ]*<<[ ]*[-\\]*EOF//; s/^/EOF/; }
+/EOT/{ s/[ ]*<<[ ]*[-\\]*EOT//; s/^/EOT/; }
+/INPUT_END/{ s/[ ]*<<[ ]*[-\\]*INPUT_END//; s/^/INPUT_END/; }
+:hereslurpsub
+N
+/^EOF.*\n[ ]*EOF[ ]*$/bhereclose
+/^EOT.*\n[ ]*EOT[ ]*$/bhereclose
+/^INPUT_END.*\n[ ]*INPUT_END[ ]*$/bhereclose
+bhereslurpsub
+:hereclose
+s/^EOF//
+s/^EOT//
+s/^INPUT_END//
+s/\n.*$//
+bcheckchain
+
+# found "case ... in" -- pass through untouched
+:case
+x
+n
+/^[ ]*esac/bslurp
+bcase
+
+# found "else" or "elif" -- drop "suspect" from final line before "else" since
+# that line legitimately lacks "&&"
+:else
+x
+s/?!AMP?!//
+x
+bcontinue
+
+# found "done" closing for-loop or while-loop, or "fi" closing if-then -- drop
+# "suspect" from final contained line since that line legitimately lacks "&&"
+:done
+x
+s/?!AMP?!//
+x
+# is 'done' or 'fi' cuddled with ")" to close subshell?
+/done.*)/bclose
+/fi.*)/bclose
+bcheckchain
+
+# found nested multi-line "(...\n...)" -- pass through untouched
+:nest
+x
+:nestslurp
+n
+# closing ")" on own line -- stop nested slurp
+/^[ ]*)/bnestclose
+# comment -- not closing ")" if in comment
+/^[ ]*#/bnestcontinue
+# "$((...))" -- arithmetic expansion; not closing ")"
+/\$(([^)][^)]*))[^)]*$/bnestcontinue
+# "$(...)" -- command substitution; not closing ")"
+/\$([^)][^)]*)[^)]*$/bnestcontinue
+# closing "...)" -- stop nested slurp
+/)/bnestclose
+:nestcontinue
+x
+bnestslurp
+:nestclose
+s/^/>>/
+# is it "))" which closes nested and parent subshells?
+/)[ ]*)/bslurp
+bcheckchain
+
+# found multi-line "{...\n...}" block -- pass through untouched
+:block
+x
+n
+# closing "}" -- stop block slurp
+/}/bcheckchain
+bblock
+
+# found closing ")" on own line -- drop "suspect" from final line of subshell
+# since that line legitimately lacks "&&" and exit subshell loop
+:closesolo
+x
+s/?!AMP?!//
+p
+x
+s/^/>/
+b
+
+# found closing "...)" -- exit subshell loop
+:close
+x
+p
+x
+s/^/>/
+b
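
For ad-hoc debugging, the script can also be fed a snippet directly and its annotations read off by hand; a small sketch, run from t/, whose input deliberately mirrors the broken-chain self-test added below:

sed -f chainlint.sed <<\EOF
(
	foo &&
	bar
	baz &&
	wop
)
EOF

This should echo the subshell body with "?!AMP?!" prepended to the unchained "bar" line and ">" marking the closing ")", as in chainlint/broken-chain.expect.
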
diff --git a/t/chainlint/arithmetic-expansion.expect b/t/chainlint/arithmetic-expansion.expect
new file mode 100644
index 0000000..09457d3
--- /dev/null
+++ b/t/chainlint/arithmetic-expansion.expect
@@ -0,0 +1,9 @@
+(
+ foo &&
+ bar=$((42 + 1)) &&
+ baz
+>) &&
+(
+?!AMP?! bar=$((42 + 1))
+ baz
+>)
diff --git a/t/chainlint/arithmetic-expansion.test b/t/chainlint/arithmetic-expansion.test
new file mode 100644
index 0000000..1620696
--- /dev/null
+++ b/t/chainlint/arithmetic-expansion.test
@@ -0,0 +1,11 @@
+(
+ foo &&
+# LINT: closing ")" of $((...)) not misinterpreted as subshell-closing ")"
+ bar=$((42 + 1)) &&
+ baz
+) &&
+(
+# LINT: missing "&&" on $((...))
+ bar=$((42 + 1))
+ baz
+)
diff --git a/t/chainlint/bash-array.expect b/t/chainlint/bash-array.expect
new file mode 100644
index 0000000..c4a830d
--- /dev/null
+++ b/t/chainlint/bash-array.expect
@@ -0,0 +1,10 @@
+(
+ foo &&
+ bar=(gumbo stumbo wumbo) &&
+ baz
+>) &&
+(
+ foo &&
+ bar=${#bar[@]} &&
+ baz
+>)
diff --git a/t/chainlint/bash-array.test b/t/chainlint/bash-array.test
new file mode 100644
index 0000000..92bbb77
--- /dev/null
+++ b/t/chainlint/bash-array.test
@@ -0,0 +1,12 @@
+(
+ foo &&
+# LINT: ")" in Bash array assignment not misinterpreted as subshell-closing ")"
+ bar=(gumbo stumbo wumbo) &&
+ baz
+) &&
+(
+ foo &&
+# LINT: Bash array length operator not misinterpreted as comment
+ bar=${#bar[@]} &&
+ baz
+)
diff --git a/t/chainlint/blank-line.expect b/t/chainlint/blank-line.expect
new file mode 100644
index 0000000..3be939e
--- /dev/null
+++ b/t/chainlint/blank-line.expect
@@ -0,0 +1,4 @@
+(
+ nothing &&
+ something
+>)
diff --git a/t/chainlint/blank-line.test b/t/chainlint/blank-line.test
new file mode 100644
index 0000000..f6dd143
--- /dev/null
+++ b/t/chainlint/blank-line.test
@@ -0,0 +1,10 @@
+(
+
+ nothing &&
+
+ something
+# LINT: swallow blank lines since final _statement_ before subshell end is
+# LINT: significant to "&&"-check, not final _line_ (which might be blank)
+
+
+)
diff --git a/t/chainlint/block.expect b/t/chainlint/block.expect
new file mode 100644
index 0000000..fed7e89
--- /dev/null
+++ b/t/chainlint/block.expect
@@ -0,0 +1,12 @@
+(
+ foo &&
+ {
+ echo a
+ echo b
+ } &&
+ bar &&
+ {
+ echo c
+?!AMP?! }
+ baz
+>)
diff --git a/t/chainlint/block.test b/t/chainlint/block.test
new file mode 100644
index 0000000..d859151
--- /dev/null
+++ b/t/chainlint/block.test
@@ -0,0 +1,15 @@
+(
+# LINT: missing "&&" in block not currently detected (for consistency with
+# LINT: --chain-lint at top level and to provide escape hatch if needed)
+ foo &&
+ {
+ echo a
+ echo b
+ } &&
+ bar &&
+# LINT: missing "&&" at closing "}"
+ {
+ echo c
+ }
+ baz
+)
diff --git a/t/chainlint/broken-chain.expect b/t/chainlint/broken-chain.expect
new file mode 100644
index 0000000..55b0f42
--- /dev/null
+++ b/t/chainlint/broken-chain.expect
@@ -0,0 +1,6 @@
+(
+ foo &&
+?!AMP?! bar
+ baz &&
+ wop
+>)
diff --git a/t/chainlint/broken-chain.test b/t/chainlint/broken-chain.test
new file mode 100644
index 0000000..3cc67b6
--- /dev/null
+++ b/t/chainlint/broken-chain.test
@@ -0,0 +1,8 @@
+(
+ foo &&
+# LINT: missing "&&" from 'bar'
+ bar
+ baz &&
+# LINT: final statement before closing ")" legitimately lacks "&&"
+ wop
+)
diff --git a/t/chainlint/case.expect b/t/chainlint/case.expect
new file mode 100644
index 0000000..41f121f
--- /dev/null
+++ b/t/chainlint/case.expect
@@ -0,0 +1,19 @@
+(
+ case "$x" in
+ x) foo ;;
+ *) bar ;;
+ esac &&
+ foobar
+>) &&
+(
+ case "$x" in
+ x) foo ;;
+ *) bar ;;
+?!AMP?! esac
+ foobar
+>) &&
+(
+ case "$x" in 1) true;; esac &&
+?!AMP?! case "$y" in 2) false;; esac
+ foobar
+>)
diff --git a/t/chainlint/case.test b/t/chainlint/case.test
new file mode 100644
index 0000000..5ef6ff7
--- /dev/null
+++ b/t/chainlint/case.test
@@ -0,0 +1,23 @@
+(
+# LINT: "...)" arms in 'case' not misinterpreted as subshell-closing ")"
+ case "$x" in
+ x) foo ;;
+ *) bar ;;
+ esac &&
+ foobar
+) &&
+(
+# LINT: missing "&&" on 'esac'
+ case "$x" in
+ x) foo ;;
+ *) bar ;;
+ esac
+ foobar
+) &&
+(
+# LINT: "...)" arm in one-liner 'case' not misinterpreted as closing ")"
+ case "$x" in 1) true;; esac &&
+# LINT: same but missing "&&"
+ case "$y" in 2) false;; esac
+ foobar
+)
diff --git a/t/chainlint/close-nested-and-parent-together.expect b/t/chainlint/close-nested-and-parent-together.expect
new file mode 100644
index 0000000..2a910f9
--- /dev/null
+++ b/t/chainlint/close-nested-and-parent-together.expect
@@ -0,0 +1,4 @@
+(
+cd foo &&
+ (bar &&
+>>> baz))
diff --git a/t/chainlint/close-nested-and-parent-together.test b/t/chainlint/close-nested-and-parent-together.test
new file mode 100644
index 0000000..72d482f
--- /dev/null
+++ b/t/chainlint/close-nested-and-parent-together.test
@@ -0,0 +1,3 @@
+(cd foo &&
+ (bar &&
+ baz))
diff --git a/t/chainlint/close-subshell.expect b/t/chainlint/close-subshell.expect
new file mode 100644
index 0000000..1846887
--- /dev/null
+++ b/t/chainlint/close-subshell.expect
@@ -0,0 +1,25 @@
+(
+ foo
+>) &&
+(
+ bar
+>) >out &&
+(
+ baz
+>) 2>err &&
+(
+ boo
+>) <input &&
+(
+ bip
+>) | wuzzle &&
+(
+ bop
+>) | fazz fozz &&
+(
+ bup
+>) |
+fuzzle &&
+(
+ yop
+>)
diff --git a/t/chainlint/close-subshell.test b/t/chainlint/close-subshell.test
new file mode 100644
index 0000000..508ca44
--- /dev/null
+++ b/t/chainlint/close-subshell.test
@@ -0,0 +1,27 @@
+# LINT: closing ")" with various decorations ("&&", ">", "|", etc.)
+(
+ foo
+) &&
+(
+ bar
+) >out &&
+(
+ baz
+) 2>err &&
+(
+ boo
+) <input &&
+(
+ bip
+) | wuzzle &&
+(
+ bop
+) | fazz \
+ fozz &&
+(
+ bup
+) |
+fuzzle &&
+(
+ yop
+)
diff --git a/t/chainlint/command-substitution.expect b/t/chainlint/command-substitution.expect
new file mode 100644
index 0000000..ad4118e
--- /dev/null
+++ b/t/chainlint/command-substitution.expect
@@ -0,0 +1,9 @@
+(
+ foo &&
+ bar=$(gobble) &&
+ baz
+>) &&
+(
+?!AMP?! bar=$(gobble blocks)
+ baz
+>)
diff --git a/t/chainlint/command-substitution.test b/t/chainlint/command-substitution.test
new file mode 100644
index 0000000..3bbb002
--- /dev/null
+++ b/t/chainlint/command-substitution.test
@@ -0,0 +1,11 @@
+(
+ foo &&
+# LINT: closing ")" of $(...) not misinterpreted as subshell-closing ")"
+ bar=$(gobble) &&
+ baz
+) &&
+(
+# LINT: missing "&&" on $(...)
+ bar=$(gobble blocks)
+ baz
+)
diff --git a/t/chainlint/comment.expect b/t/chainlint/comment.expect
new file mode 100644
index 0000000..3be939e
--- /dev/null
+++ b/t/chainlint/comment.expect
@@ -0,0 +1,4 @@
+(
+ nothing &&
+ something
+>)
diff --git a/t/chainlint/comment.test b/t/chainlint/comment.test
new file mode 100644
index 0000000..113c0c4
--- /dev/null
+++ b/t/chainlint/comment.test
@@ -0,0 +1,11 @@
+(
+# LINT: swallow comment lines
+ # comment 1
+ nothing &&
+ # comment 2
+ something
+# LINT: swallow comment lines since final _statement_ before subshell end is
+# LINT: significant to "&&"-check, not final _line_ (which might be comment)
+ # comment 3
+ # comment 4
+)
diff --git a/t/chainlint/complex-if-in-cuddled-loop.expect b/t/chainlint/complex-if-in-cuddled-loop.expect
new file mode 100644
index 0000000..9674b88
--- /dev/null
+++ b/t/chainlint/complex-if-in-cuddled-loop.expect
@@ -0,0 +1,10 @@
+(
+for i in a b c; do
+ if test "$(echo $(waffle bat))" = "eleventeen" &&
+ test "$x" = "$y"; then
+ :
+ else
+ echo >file
+ fi
+> done) &&
+test ! -f file
diff --git a/t/chainlint/complex-if-in-cuddled-loop.test b/t/chainlint/complex-if-in-cuddled-loop.test
new file mode 100644
index 0000000..571bbd8
--- /dev/null
+++ b/t/chainlint/complex-if-in-cuddled-loop.test
@@ -0,0 +1,11 @@
+# LINT: 'for' loop cuddled with "(" and ")" and nested 'if' with complex
+# LINT: multi-line condition; indented with spaces, not tabs
+(for i in a b c; do
+ if test "$(echo $(waffle bat))" = "eleventeen" &&
+ test "$x" = "$y"; then
+ :
+ else
+ echo >file
+ fi
+ done) &&
+test ! -f file
diff --git a/t/chainlint/cuddled-if-then-else.expect b/t/chainlint/cuddled-if-then-else.expect
new file mode 100644
index 0000000..ab2a026
--- /dev/null
+++ b/t/chainlint/cuddled-if-then-else.expect
@@ -0,0 +1,7 @@
+(
+if test -z ""; then
+ echo empty
+ else
+ echo bizzy
+> fi) &&
+echo foobar
diff --git a/t/chainlint/cuddled-if-then-else.test b/t/chainlint/cuddled-if-then-else.test
new file mode 100644
index 0000000..eed774a
--- /dev/null
+++ b/t/chainlint/cuddled-if-then-else.test
@@ -0,0 +1,7 @@
+# LINT: 'if' cuddled with "(" and ")"; indented with spaces, not tabs
+(if test -z ""; then
+ echo empty
+ else
+ echo bizzy
+ fi) &&
+echo foobar
diff --git a/t/chainlint/cuddled-loop.expect b/t/chainlint/cuddled-loop.expect
new file mode 100644
index 0000000..8c0260d
--- /dev/null
+++ b/t/chainlint/cuddled-loop.expect
@@ -0,0 +1,5 @@
+(
+ while read x
+ do foobar bop || exit 1
+> done <file ) &&
+outside subshell
diff --git a/t/chainlint/cuddled-loop.test b/t/chainlint/cuddled-loop.test
new file mode 100644
index 0000000..a841d78
--- /dev/null
+++ b/t/chainlint/cuddled-loop.test
@@ -0,0 +1,7 @@
+# LINT: 'while' loop cuddled with "(" and ")", with embedded (allowed)
+# LINT: "|| exit {n}" to exit loop early, and using redirection "<" to feed
+# LINT: loop; indented with spaces, not tabs
+( while read x
+ do foobar bop || exit 1
+ done <file ) &&
+outside subshell
diff --git a/t/chainlint/cuddled.expect b/t/chainlint/cuddled.expect
new file mode 100644
index 0000000..b506d46
--- /dev/null
+++ b/t/chainlint/cuddled.expect
@@ -0,0 +1,21 @@
+(
+cd foo &&
+ bar
+>) &&
+
+(
+?!AMP?!cd foo
+ bar
+>) &&
+
+(
+ cd foo &&
+> bar) &&
+
+(
+cd foo &&
+> bar) &&
+
+(
+?!AMP?!cd foo
+> bar)
diff --git a/t/chainlint/cuddled.test b/t/chainlint/cuddled.test
new file mode 100644
index 0000000..0499fa4
--- /dev/null
+++ b/t/chainlint/cuddled.test
@@ -0,0 +1,23 @@
+# LINT: first subshell statement cuddled with opening "("; for implementation
+# LINT: simplicity, "(..." is split into two lines, "(" and "..."
+(cd foo &&
+ bar
+) &&
+
+# LINT: same with missing "&&"
+(cd foo
+ bar
+) &&
+
+# LINT: closing ")" cuddled with final subshell statement
+(
+ cd foo &&
+ bar) &&
+
+# LINT: "(" and ")" cuddled with first and final subshell statements
+(cd foo &&
+ bar) &&
+
+# LINT: same with missing "&&"
+(cd foo
+ bar)
diff --git a/t/chainlint/exit-loop.expect b/t/chainlint/exit-loop.expect
new file mode 100644
index 0000000..84d8bde
--- /dev/null
+++ b/t/chainlint/exit-loop.expect
@@ -0,0 +1,24 @@
+(
+ for i in a b c
+ do
+ foo || exit 1
+ bar &&
+ baz
+ done
+>) &&
+(
+ while true
+ do
+ foo || exit 1
+ bar &&
+ baz
+ done
+>) &&
+(
+ i=0 &&
+ while test $i -lt 10
+ do
+ echo $i || exit
+ i=$(($i + 1))
+ done
+>)
diff --git a/t/chainlint/exit-loop.test b/t/chainlint/exit-loop.test
new file mode 100644
index 0000000..2f03820
--- /dev/null
+++ b/t/chainlint/exit-loop.test
@@ -0,0 +1,27 @@
+(
+ for i in a b c
+ do
+# LINT: "|| exit {n}" valid for-loop escape in subshell; no "&&" needed
+ foo || exit 1
+ bar &&
+ baz
+ done
+) &&
+(
+ while true
+ do
+# LINT: "|| exit {n}" valid while-loop escape in subshell; no "&&" needed
+ foo || exit 1
+ bar &&
+ baz
+ done
+) &&
+(
+ i=0 &&
+ while test $i -lt 10
+ do
+# LINT: "|| exit" (sans exit code) valid escape in subshell; no "&&" needed
+ echo $i || exit
+ i=$(($i + 1))
+ done
+)
diff --git a/t/chainlint/exit-subshell.expect b/t/chainlint/exit-subshell.expect
new file mode 100644
index 0000000..bf78454
--- /dev/null
+++ b/t/chainlint/exit-subshell.expect
@@ -0,0 +1,5 @@
+(
+ foo || exit 1
+ bar &&
+ baz
+>)
diff --git a/t/chainlint/exit-subshell.test b/t/chainlint/exit-subshell.test
new file mode 100644
index 0000000..4e6ab69
--- /dev/null
+++ b/t/chainlint/exit-subshell.test
@@ -0,0 +1,6 @@
+(
+# LINT: "|| exit {n}" valid subshell escape without hurting &&-chain
+ foo || exit 1
+ bar &&
+ baz
+)
diff --git a/t/chainlint/for-loop.expect b/t/chainlint/for-loop.expect
new file mode 100644
index 0000000..c33cf56
--- /dev/null
+++ b/t/chainlint/for-loop.expect
@@ -0,0 +1,11 @@
+(
+ for i in a b c
+ do
+?!AMP?! echo $i
+ cat
+?!AMP?! done
+ for i in a b c; do
+ echo $i &&
+ cat $i
+ done
+>)
diff --git a/t/chainlint/for-loop.test b/t/chainlint/for-loop.test
new file mode 100644
index 0000000..7db7626
--- /dev/null
+++ b/t/chainlint/for-loop.test
@@ -0,0 +1,19 @@
+(
+# LINT: 'for', 'do', 'done' do not need "&&"
+ for i in a b c
+ do
+# LINT: missing "&&" on 'echo'
+ echo $i
+# LINT: last statement of loop does not need "&&"
+ cat <<-\EOF
+ bar
+ EOF
+# LINT: missing "&&" on 'done'
+ done
+
+# LINT: 'do' on same line as 'for'
+ for i in a b c; do
+ echo $i &&
+ cat $i
+ done
+)
diff --git a/t/chainlint/here-doc.expect b/t/chainlint/here-doc.expect
new file mode 100644
index 0000000..2328fe7
--- /dev/null
+++ b/t/chainlint/here-doc.expect
@@ -0,0 +1,3 @@
+boodle wobba gorgo snoot wafta snurb &&
+
+horticulture
diff --git a/t/chainlint/here-doc.test b/t/chainlint/here-doc.test
new file mode 100644
index 0000000..bd36f6e
--- /dev/null
+++ b/t/chainlint/here-doc.test
@@ -0,0 +1,16 @@
+# LINT: stitch together incomplete \-ending lines
+# LINT: swallow here-doc to avoid false positives in content
+boodle wobba \
+ gorgo snoot \
+ wafta snurb <<EOF &&
+quoth the raven,
+nevermore...
+EOF
+
+# LINT: swallow here-doc (EOF is last line of test)
+horticulture <<\EOF
+gomez
+morticia
+wednesday
+pugsly
+EOF
diff --git a/t/chainlint/if-in-loop.expect b/t/chainlint/if-in-loop.expect
new file mode 100644
index 0000000..03d3ceb
--- /dev/null
+++ b/t/chainlint/if-in-loop.expect
@@ -0,0 +1,12 @@
+(
+ for i in a b c
+ do
+ if false
+ then
+?!AMP?! echo "err"
+ exit 1
+?!AMP?! fi
+ foo
+?!AMP?! done
+ bar
+>)
diff --git a/t/chainlint/if-in-loop.test b/t/chainlint/if-in-loop.test
new file mode 100644
index 0000000..daf22da
--- /dev/null
+++ b/t/chainlint/if-in-loop.test
@@ -0,0 +1,15 @@
+(
+ for i in a b c
+ do
+ if false
+ then
+# LINT: missing "&&" on 'echo'
+ echo "err"
+ exit 1
+# LINT: missing "&&" on 'fi'
+ fi
+ foo
+# LINT: missing "&&" on 'done'
+ done
+ bar
+)
diff --git a/t/chainlint/if-then-else.expect b/t/chainlint/if-then-else.expect
new file mode 100644
index 0000000..5953c7b
--- /dev/null
+++ b/t/chainlint/if-then-else.expect
@@ -0,0 +1,19 @@
+(
+ if test -n ""
+ then
+?!AMP?! echo very
+ echo empty
+ elif test -z ""
+ echo foo
+ else
+ echo foo &&
+ cat
+?!AMP?! fi
+ echo poodle
+>) &&
+(
+ if test -n ""; then
+ echo very &&
+?!AMP?! echo empty
+ if
+>)
diff --git a/t/chainlint/if-then-else.test b/t/chainlint/if-then-else.test
new file mode 100644
index 0000000..9bd8e9a
--- /dev/null
+++ b/t/chainlint/if-then-else.test
@@ -0,0 +1,28 @@
+(
+# LINT: 'if', 'then', 'elif', 'else', 'fi' do not need "&&"
+ if test -n ""
+ then
+# LINT: missing "&&" on 'echo'
+ echo very
+# LINT: last statement before 'elif' does not need "&&"
+ echo empty
+ elif test -z ""
+# LINT: last statement before 'else' does not need "&&"
+ echo foo
+ else
+ echo foo &&
+# LINT: last statement before 'fi' does not need "&&"
+ cat <<-\EOF
+ bar
+ EOF
+# LINT: missing "&&" on 'fi'
+ fi
+ echo poodle
+) &&
+(
+# LINT: 'then' on same line as 'if'
+ if test -n ""; then
+ echo very &&
+ echo empty
+ if
+)
diff --git a/t/chainlint/incomplete-line.expect b/t/chainlint/incomplete-line.expect
new file mode 100644
index 0000000..2f3ebab
--- /dev/null
+++ b/t/chainlint/incomplete-line.expect
@@ -0,0 +1,4 @@
+line 1 line 2 line 3 line 4 &&
+(
+ line 5 line 6 line 7 line 8
+>)
diff --git a/t/chainlint/incomplete-line.test b/t/chainlint/incomplete-line.test
new file mode 100644
index 0000000..d856658
--- /dev/null
+++ b/t/chainlint/incomplete-line.test
@@ -0,0 +1,12 @@
+# LINT: stitch together all incomplete \-ending lines
+line 1 \
+line 2 \
+line 3 \
+line 4 &&
+(
+# LINT: stitch together all incomplete \-ending lines (subshell)
+ line 5 \
+ line 6 \
+ line 7 \
+ line 8
+)
diff --git a/t/chainlint/inline-comment.expect b/t/chainlint/inline-comment.expect
new file mode 100644
index 0000000..fc9f250
--- /dev/null
+++ b/t/chainlint/inline-comment.expect
@@ -0,0 +1,9 @@
+(
+ foobar &&
+?!AMP?! barfoo
+ flibble "not a # comment"
+>) &&
+
+(
+cd foo &&
+> flibble "not a # comment")
diff --git a/t/chainlint/inline-comment.test b/t/chainlint/inline-comment.test
new file mode 100644
index 0000000..8f26856
--- /dev/null
+++ b/t/chainlint/inline-comment.test
@@ -0,0 +1,12 @@
+(
+# LINT: swallow inline comment (leaving command intact)
+ foobar && # comment 1
+# LINT: mispositioned "&&" (correctly) swallowed with comment
+ barfoo # wrong position for &&
+# LINT: "#" in string not misinterpreted as comment
+ flibble "not a # comment"
+) &&
+
+# LINT: "#" in string in cuddled subshell not misinterpreted as comment
+(cd foo &&
+ flibble "not a # comment")
diff --git a/t/chainlint/loop-in-if.expect b/t/chainlint/loop-in-if.expect
new file mode 100644
index 0000000..088e622
--- /dev/null
+++ b/t/chainlint/loop-in-if.expect
@@ -0,0 +1,12 @@
+(
+ if true
+ then
+ while true
+ do
+?!AMP?! echo "pop"
+ echo "glup"
+?!AMP?! done
+ foo
+?!AMP?! fi
+ bar
+>)
diff --git a/t/chainlint/loop-in-if.test b/t/chainlint/loop-in-if.test
new file mode 100644
index 0000000..93e8ba8
--- /dev/null
+++ b/t/chainlint/loop-in-if.test
@@ -0,0 +1,15 @@
+(
+ if true
+ then
+ while true
+ do
+# LINT: missing "&&" on 'echo'
+ echo "pop"
+ echo "glup"
+# LINT: missing "&&" on 'done'
+ done
+ foo
+# LINT: missing "&&" on 'fi'
+ fi
+ bar
+)
diff --git a/t/chainlint/multi-line-nested-command-substitution.expect b/t/chainlint/multi-line-nested-command-substitution.expect
new file mode 100644
index 0000000..19c023b
--- /dev/null
+++ b/t/chainlint/multi-line-nested-command-substitution.expect
@@ -0,0 +1,9 @@
+(
+ foo &&
+ x=$(
+ echo bar |
+ cat
+>> ) &&
+ echo ok
+>) |
+sort
diff --git a/t/chainlint/multi-line-nested-command-substitution.test b/t/chainlint/multi-line-nested-command-substitution.test
new file mode 100644
index 0000000..ca0620a
--- /dev/null
+++ b/t/chainlint/multi-line-nested-command-substitution.test
@@ -0,0 +1,9 @@
+(
+ foo &&
+ x=$(
+ echo bar |
+ cat
+ ) &&
+ echo ok
+) |
+sort
diff --git a/t/chainlint/multi-line-string.expect b/t/chainlint/multi-line-string.expect
new file mode 100644
index 0000000..8334c4c
--- /dev/null
+++ b/t/chainlint/multi-line-string.expect
@@ -0,0 +1,9 @@
+(
+ x=line 1 line 2 line 3" &&
+?!AMP?! y=line 1 line2'
+ foobar
+>) &&
+(
+ echo "there's nothing to see here" &&
+ exit
+>)
diff --git a/t/chainlint/multi-line-string.test b/t/chainlint/multi-line-string.test
new file mode 100644
index 0000000..14cb44d
--- /dev/null
+++ b/t/chainlint/multi-line-string.test
@@ -0,0 +1,15 @@
+(
+ x="line 1
+ line 2
+ line 3" &&
+# LINT: missing "&&" on assignment
+ y='line 1
+ line2'
+ foobar
+) &&
+(
+# LINT: apostrophe (in a contraction) within string not misinterpreted as
+# LINT: starting multi-line single-quoted string
+ echo "there's nothing to see here" &&
+ exit
+)
diff --git a/t/chainlint/negated-one-liner.expect b/t/chainlint/negated-one-liner.expect
new file mode 100644
index 0000000..cf18429
--- /dev/null
+++ b/t/chainlint/negated-one-liner.expect
@@ -0,0 +1,5 @@
+! (foo && bar) &&
+! (foo && bar) >baz &&
+
+?!SEMI?!! (foo; bar) &&
+?!SEMI?!! (foo; bar) >baz
diff --git a/t/chainlint/negated-one-liner.test b/t/chainlint/negated-one-liner.test
new file mode 100644
index 0000000..c9598e9
--- /dev/null
+++ b/t/chainlint/negated-one-liner.test
@@ -0,0 +1,7 @@
+# LINT: top-level one-liner subshell
+! (foo && bar) &&
+! (foo && bar) >baz &&
+
+# LINT: top-level one-liner subshell missing internal "&&"
+! (foo; bar) &&
+! (foo; bar) >baz
diff --git a/t/chainlint/nested-cuddled-subshell.expect b/t/chainlint/nested-cuddled-subshell.expect
new file mode 100644
index 0000000..c2a59ff
--- /dev/null
+++ b/t/chainlint/nested-cuddled-subshell.expect
@@ -0,0 +1,19 @@
+(
+ (cd foo &&
+ bar
+>> ) &&
+ (cd foo &&
+ bar
+?!AMP?!>> )
+ (
+ cd foo &&
+>> bar) &&
+ (
+ cd foo &&
+?!AMP?!>> bar)
+ (cd foo &&
+>> bar) &&
+ (cd foo &&
+?!AMP?!>> bar)
+ foobar
+>)
diff --git a/t/chainlint/nested-cuddled-subshell.test b/t/chainlint/nested-cuddled-subshell.test
new file mode 100644
index 0000000..8fd656c
--- /dev/null
+++ b/t/chainlint/nested-cuddled-subshell.test
@@ -0,0 +1,31 @@
+(
+# LINT: opening "(" cuddled with first nested subshell statement
+ (cd foo &&
+ bar
+ ) &&
+
+# LINT: same but "&&" missing
+ (cd foo &&
+ bar
+ )
+
+# LINT: closing ")" cuddled with final nested subshell statement
+ (
+ cd foo &&
+ bar) &&
+
+# LINT: same but "&&" missing
+ (
+ cd foo &&
+ bar)
+
+# LINT: "(" and ")" cuddled with first and final subshell statements
+ (cd foo &&
+ bar) &&
+
+# LINT: same but "&&" missing
+ (cd foo &&
+ bar)
+
+ foobar
+)
diff --git a/t/chainlint/nested-here-doc.expect b/t/chainlint/nested-here-doc.expect
new file mode 100644
index 0000000..559301e
--- /dev/null
+++ b/t/chainlint/nested-here-doc.expect
@@ -0,0 +1,5 @@
+(
+ cat &&
+?!AMP?! cat
+ foobar
+>)
diff --git a/t/chainlint/nested-here-doc.test b/t/chainlint/nested-here-doc.test
new file mode 100644
index 0000000..027e0bb
--- /dev/null
+++ b/t/chainlint/nested-here-doc.test
@@ -0,0 +1,23 @@
+(
+# LINT: inner "EOF" not misinterpreted as closing INPUT_END here-doc
+ cat <<-\INPUT_END &&
+ fish are mice
+ but geese go slow
+ data <<EOF
+ perl is lerp
+ and nothing else
+ EOF
+ toink
+ INPUT_END
+
+# LINT: same but missing "&&"
+ cat <<-\EOT
+ text goes here
+ data <<EOF
+ data goes here
+ EOF
+ more text here
+ EOT
+
+ foobar
+)
diff --git a/t/chainlint/nested-subshell-comment.expect b/t/chainlint/nested-subshell-comment.expect
new file mode 100644
index 0000000..15b68d4
--- /dev/null
+++ b/t/chainlint/nested-subshell-comment.expect
@@ -0,0 +1,11 @@
+(
+ foo &&
+ (
+ bar &&
+ # bottles wobble while fiddles gobble
+ # minor numbers of cows (or do they?)
+ baz &&
+ snaff
+?!AMP?!>> )
+ fuzzy
+>)
diff --git a/t/chainlint/nested-subshell-comment.test b/t/chainlint/nested-subshell-comment.test
new file mode 100644
index 0000000..0ff136a
--- /dev/null
+++ b/t/chainlint/nested-subshell-comment.test
@@ -0,0 +1,13 @@
+(
+ foo &&
+ (
+ bar &&
+# LINT: ")" in comment in nested subshell not misinterpreted as closing ")"
+ # bottles wobble while fiddles gobble
+ # minor numbers of cows (or do they?)
+ baz &&
+ snaff
+# LINT: missing "&&" on ')'
+ )
+ fuzzy
+)
diff --git a/t/chainlint/nested-subshell.expect b/t/chainlint/nested-subshell.expect
new file mode 100644
index 0000000..c8165ad
--- /dev/null
+++ b/t/chainlint/nested-subshell.expect
@@ -0,0 +1,12 @@
+(
+ cd foo &&
+ (
+ echo a &&
+ echo b
+>> ) >file &&
+ cd foo &&
+ (
+ echo a
+ echo b
+>> ) >file
+>)
diff --git a/t/chainlint/nested-subshell.test b/t/chainlint/nested-subshell.test
new file mode 100644
index 0000000..998b05a
--- /dev/null
+++ b/t/chainlint/nested-subshell.test
@@ -0,0 +1,14 @@
+(
+ cd foo &&
+ (
+ echo a &&
+ echo b
+ ) >file &&
+
+ cd foo &&
+ (
+# LINT: nested multi-line subshell not presently checked for missing "&&"
+ echo a
+ echo b
+ ) >file
+)
diff --git a/t/chainlint/one-liner.expect b/t/chainlint/one-liner.expect
new file mode 100644
index 0000000..237f227
--- /dev/null
+++ b/t/chainlint/one-liner.expect
@@ -0,0 +1,9 @@
+(foo && bar) &&
+(foo && bar) |
+(foo && bar) >baz &&
+
+?!SEMI?!(foo; bar) &&
+?!SEMI?!(foo; bar) |
+?!SEMI?!(foo; bar) >baz
+
+(foo "bar; baz")
diff --git a/t/chainlint/one-liner.test b/t/chainlint/one-liner.test
new file mode 100644
index 0000000..ec9acb9
--- /dev/null
+++ b/t/chainlint/one-liner.test
@@ -0,0 +1,12 @@
+# LINT: top-level one-liner subshell
+(foo && bar) &&
+(foo && bar) |
+(foo && bar) >baz &&
+
+# LINT: top-level one-liner subshell missing internal "&&"
+(foo; bar) &&
+(foo; bar) |
+(foo; bar) >baz
+
+# LINT: ";" in string not misinterpreted as broken &&-chain
+(foo "bar; baz")
diff --git a/t/chainlint/p4-filespec.expect b/t/chainlint/p4-filespec.expect
new file mode 100644
index 0000000..98b3d88
--- /dev/null
+++ b/t/chainlint/p4-filespec.expect
@@ -0,0 +1,4 @@
+(
+ p4 print -1 //depot/fiddle#42 >file &&
+ foobar
+>)
diff --git a/t/chainlint/p4-filespec.test b/t/chainlint/p4-filespec.test
new file mode 100644
index 0000000..4fd2d6e
--- /dev/null
+++ b/t/chainlint/p4-filespec.test
@@ -0,0 +1,5 @@
+(
+# LINT: Perforce revspec in filespec not misinterpreted as in-line comment
+ p4 print -1 //depot/fiddle#42 >file &&
+ foobar
+)
diff --git a/t/chainlint/pipe.expect b/t/chainlint/pipe.expect
new file mode 100644
index 0000000..211b901
--- /dev/null
+++ b/t/chainlint/pipe.expect
@@ -0,0 +1,8 @@
+(
+ foo |
+ bar |
+ baz &&
+ fish |
+?!AMP?! cow
+ sunder
+>)
diff --git a/t/chainlint/pipe.test b/t/chainlint/pipe.test
new file mode 100644
index 0000000..e6af4de
--- /dev/null
+++ b/t/chainlint/pipe.test
@@ -0,0 +1,12 @@
+(
+# LINT: no "&&" needed on line ending with "|"
+ foo |
+ bar |
+ baz &&
+
+# LINT: final line of pipe sequence ('cow') lacking "&&"
+ fish |
+ cow
+
+ sunder
+)
diff --git a/t/chainlint/semicolon.expect b/t/chainlint/semicolon.expect
new file mode 100644
index 0000000..1d79384
--- /dev/null
+++ b/t/chainlint/semicolon.expect
@@ -0,0 +1,20 @@
+(
+?!AMP?!?!SEMI?! cat foo ; echo bar
+?!SEMI?! cat foo ; echo bar
+>) &&
+(
+?!SEMI?! cat foo ; echo bar &&
+?!SEMI?! cat foo ; echo bar
+>) &&
+(
+ echo "foo; bar" &&
+?!SEMI?! cat foo; echo bar
+>) &&
+(
+?!SEMI?! foo;
+>) &&
+(
+cd foo &&
+ for i in a b c; do
+?!SEMI?! echo;
+> done)
diff --git a/t/chainlint/semicolon.test b/t/chainlint/semicolon.test
new file mode 100644
index 0000000..d82c8eb
--- /dev/null
+++ b/t/chainlint/semicolon.test
@@ -0,0 +1,25 @@
+(
+# LINT: missing internal "&&" and ending "&&"
+ cat foo ; echo bar
+# LINT: final statement before ")" only missing internal "&&"
+ cat foo ; echo bar
+) &&
+(
+# LINT: missing internal "&&"
+ cat foo ; echo bar &&
+ cat foo ; echo bar
+) &&
+(
+# LINT: not fooled by semicolon in string
+ echo "foo; bar" &&
+ cat foo; echo bar
+) &&
+(
+# LINT: unnecessary terminating semicolon
+ foo;
+) &&
+(cd foo &&
+ for i in a b c; do
+# LINT: unnecessary terminating semicolon
+ echo;
+ done)
diff --git a/t/chainlint/subshell-here-doc.expect b/t/chainlint/subshell-here-doc.expect
new file mode 100644
index 0000000..19d5aff
--- /dev/null
+++ b/t/chainlint/subshell-here-doc.expect
@@ -0,0 +1,5 @@
+(
+ echo wobba gorgo snoot wafta snurb &&
+?!AMP?! cat >bip
+ echo >bop
+>)
diff --git a/t/chainlint/subshell-here-doc.test b/t/chainlint/subshell-here-doc.test
new file mode 100644
index 0000000..9c3564c
--- /dev/null
+++ b/t/chainlint/subshell-here-doc.test
@@ -0,0 +1,23 @@
+(
+# LINT: stitch together incomplete \-ending lines
+# LINT: swallow here-doc to avoid false positives in content
+ echo wobba \
+ gorgo snoot \
+ wafta snurb <<-EOF &&
+ quoth the raven,
+ nevermore...
+ EOF
+
+# LINT: missing "&&" on 'cat'
+ cat <<EOF >bip
+ fish fly high
+ EOF
+
+# LINT: swallow here-doc (EOF is last line of subshell)
+ echo <<-\EOF >bop
+ gomez
+ morticia
+ wednesday
+ pugsly
+ EOF
+)
diff --git a/t/chainlint/subshell-one-liner.expect b/t/chainlint/subshell-one-liner.expect
new file mode 100644
index 0000000..5116282
--- /dev/null
+++ b/t/chainlint/subshell-one-liner.expect
@@ -0,0 +1,14 @@
+(
+ (foo && bar) &&
+ (foo && bar) |
+ (foo && bar) >baz &&
+?!SEMI?! (foo; bar) &&
+?!SEMI?! (foo; bar) |
+?!SEMI?! (foo; bar) >baz &&
+ (foo || exit 1) &&
+ (foo || exit 1) |
+ (foo || exit 1) >baz &&
+?!AMP?! (foo && bar)
+?!AMP?!?!SEMI?! (foo && bar; baz)
+ foobar
+>)
diff --git a/t/chainlint/subshell-one-liner.test b/t/chainlint/subshell-one-liner.test
new file mode 100644
index 0000000..37fa643
--- /dev/null
+++ b/t/chainlint/subshell-one-liner.test
@@ -0,0 +1,24 @@
+(
+# LINT: nested one-liner subshell
+ (foo && bar) &&
+ (foo && bar) |
+ (foo && bar) >baz &&
+
+# LINT: nested one-liner subshell missing internal "&&"
+ (foo; bar) &&
+ (foo; bar) |
+ (foo; bar) >baz &&
+
+# LINT: nested one-liner subshell with "|| exit"
+ (foo || exit 1) &&
+ (foo || exit 1) |
+ (foo || exit 1) >baz &&
+
+# LINT: nested one-liner subshell lacking ending "&&"
+ (foo && bar)
+
+# LINT: nested one-liner subshell missing internal "&&" and lacking ending "&&"
+ (foo && bar; baz)
+
+ foobar
+)
diff --git a/t/chainlint/while-loop.expect b/t/chainlint/while-loop.expect
new file mode 100644
index 0000000..13cff2c
--- /dev/null
+++ b/t/chainlint/while-loop.expect
@@ -0,0 +1,11 @@
+(
+ while true
+ do
+?!AMP?! echo foo
+ cat
+?!AMP?! done
+ while true; do
+ echo foo &&
+ cat bar
+ done
+>)
diff --git a/t/chainlint/while-loop.test b/t/chainlint/while-loop.test
new file mode 100644
index 0000000..f1df085
--- /dev/null
+++ b/t/chainlint/while-loop.test
@@ -0,0 +1,19 @@
+(
+# LINT: 'while', 'do', 'done' do not need "&&"
+ while true
+ do
+# LINT: missing "&&" on 'echo'
+ echo foo
+# LINT: last statement of while does not need "&&"
+ cat <<-\EOF
+ bar
+ EOF
+# LINT: missing "&&" on 'done'
+ done
+
+# LINT: 'do' on same line as 'while'
+ while true; do
+ echo foo &&
+ cat bar
+ done
+)
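
Each chainlint/*.test fixture above is fed through the &&-chain linter and its annotated output is compared with the matching *.expect file: "?!AMP?!" marks a line that is missing "&&", and "?!SEMI?!" marks a ";" used where "&&" belongs. For contrast, here is a minimal sketch of the shape the linter accepts; the command names foo, bar and baz are placeholders, not part of these fixtures:

	test_expect_success 'properly chained subshell' '
		(
			cd foo &&
			bar &&
			baz
		)
	'

With every command chained by "&&", a failure anywhere inside the subshell fails the whole test, which is exactly the property the linter enforces.
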
diff --git a/t/helper/test-drop-caches.c b/t/helper/test-drop-caches.c
index d6bcfdd..f65e301 100644
--- a/t/helper/test-drop-caches.c
+++ b/t/helper/test-drop-caches.c
@@ -16,8 +16,8 @@ static int cmd_sync(void)
if ((0 == dwRet) || (dwRet > MAX_PATH))
return error("Error getting current directory");
- if ((Buffer[0] < 'A') || (Buffer[0] > 'Z'))
- return error("Invalid drive letter '%c'", Buffer[0]);
+ if (!has_dos_drive_prefix(Buffer))
+ return error("'%s': invalid drive letter", Buffer);
szVolumeAccessPath[4] = Buffer[0];
hVolWrite = CreateFile(szVolumeAccessPath, GENERIC_READ | GENERIC_WRITE,
diff --git a/t/helper/test-json-writer.c b/t/helper/test-json-writer.c
new file mode 100644
index 0000000..37c4525
--- /dev/null
+++ b/t/helper/test-json-writer.c
@@ -0,0 +1,565 @@
+#include "test-tool.h"
+#include "cache.h"
+#include "json-writer.h"
+
+static const char *expect_obj1 = "{\"a\":\"abc\",\"b\":42,\"c\":true}";
+static const char *expect_obj2 = "{\"a\":-1,\"b\":2147483647,\"c\":0}";
+static const char *expect_obj3 = "{\"a\":0,\"b\":4294967295,\"c\":9223372036854775807}";
+static const char *expect_obj4 = "{\"t\":true,\"f\":false,\"n\":null}";
+static const char *expect_obj5 = "{\"abc\\tdef\":\"abc\\\\def\"}";
+static const char *expect_obj6 = "{\"a\":3.14}";
+
+static const char *pretty_obj1 = ("{\n"
+ " \"a\": \"abc\",\n"
+ " \"b\": 42,\n"
+ " \"c\": true\n"
+ "}");
+static const char *pretty_obj2 = ("{\n"
+ " \"a\": -1,\n"
+ " \"b\": 2147483647,\n"
+ " \"c\": 0\n"
+ "}");
+static const char *pretty_obj3 = ("{\n"
+ " \"a\": 0,\n"
+ " \"b\": 4294967295,\n"
+ " \"c\": 9223372036854775807\n"
+ "}");
+static const char *pretty_obj4 = ("{\n"
+ " \"t\": true,\n"
+ " \"f\": false,\n"
+ " \"n\": null\n"
+ "}");
+
+static struct json_writer obj1 = JSON_WRITER_INIT;
+static struct json_writer obj2 = JSON_WRITER_INIT;
+static struct json_writer obj3 = JSON_WRITER_INIT;
+static struct json_writer obj4 = JSON_WRITER_INIT;
+static struct json_writer obj5 = JSON_WRITER_INIT;
+static struct json_writer obj6 = JSON_WRITER_INIT;
+
+static void make_obj1(int pretty)
+{
+ jw_object_begin(&obj1, pretty);
+ {
+ jw_object_string(&obj1, "a", "abc");
+ jw_object_intmax(&obj1, "b", 42);
+ jw_object_true(&obj1, "c");
+ }
+ jw_end(&obj1);
+}
+
+static void make_obj2(int pretty)
+{
+ jw_object_begin(&obj2, pretty);
+ {
+ jw_object_intmax(&obj2, "a", -1);
+ jw_object_intmax(&obj2, "b", 0x7fffffff);
+ jw_object_intmax(&obj2, "c", 0);
+ }
+ jw_end(&obj2);
+}
+
+static void make_obj3(int pretty)
+{
+ jw_object_begin(&obj3, pretty);
+ {
+ jw_object_intmax(&obj3, "a", 0);
+ jw_object_intmax(&obj3, "b", 0xffffffff);
+ jw_object_intmax(&obj3, "c", 0x7fffffffffffffffULL);
+ }
+ jw_end(&obj3);
+}
+
+static void make_obj4(int pretty)
+{
+ jw_object_begin(&obj4, pretty);
+ {
+ jw_object_true(&obj4, "t");
+ jw_object_false(&obj4, "f");
+ jw_object_null(&obj4, "n");
+ }
+ jw_end(&obj4);
+}
+
+static void make_obj5(int pretty)
+{
+ jw_object_begin(&obj5, pretty);
+ {
+ jw_object_string(&obj5, "abc" "\x09" "def", "abc" "\\" "def");
+ }
+ jw_end(&obj5);
+}
+
+static void make_obj6(int pretty)
+{
+ jw_object_begin(&obj6, pretty);
+ {
+ jw_object_double(&obj6, "a", 2, 3.14159);
+ }
+ jw_end(&obj6);
+}
+
+static const char *expect_arr1 = "[\"abc\",42,true]";
+static const char *expect_arr2 = "[-1,2147483647,0]";
+static const char *expect_arr3 = "[0,4294967295,9223372036854775807]";
+static const char *expect_arr4 = "[true,false,null]";
+
+static const char *pretty_arr1 = ("[\n"
+ " \"abc\",\n"
+ " 42,\n"
+ " true\n"
+ "]");
+static const char *pretty_arr2 = ("[\n"
+ " -1,\n"
+ " 2147483647,\n"
+ " 0\n"
+ "]");
+static const char *pretty_arr3 = ("[\n"
+ " 0,\n"
+ " 4294967295,\n"
+ " 9223372036854775807\n"
+ "]");
+static const char *pretty_arr4 = ("[\n"
+ " true,\n"
+ " false,\n"
+ " null\n"
+ "]");
+
+static struct json_writer arr1 = JSON_WRITER_INIT;
+static struct json_writer arr2 = JSON_WRITER_INIT;
+static struct json_writer arr3 = JSON_WRITER_INIT;
+static struct json_writer arr4 = JSON_WRITER_INIT;
+
+static void make_arr1(int pretty)
+{
+ jw_array_begin(&arr1, pretty);
+ {
+ jw_array_string(&arr1, "abc");
+ jw_array_intmax(&arr1, 42);
+ jw_array_true(&arr1);
+ }
+ jw_end(&arr1);
+}
+
+static void make_arr2(int pretty)
+{
+ jw_array_begin(&arr2, pretty);
+ {
+ jw_array_intmax(&arr2, -1);
+ jw_array_intmax(&arr2, 0x7fffffff);
+ jw_array_intmax(&arr2, 0);
+ }
+ jw_end(&arr2);
+}
+
+static void make_arr3(int pretty)
+{
+ jw_array_begin(&arr3, pretty);
+ {
+ jw_array_intmax(&arr3, 0);
+ jw_array_intmax(&arr3, 0xffffffff);
+ jw_array_intmax(&arr3, 0x7fffffffffffffffULL);
+ }
+ jw_end(&arr3);
+}
+
+static void make_arr4(int pretty)
+{
+ jw_array_begin(&arr4, pretty);
+ {
+ jw_array_true(&arr4);
+ jw_array_false(&arr4);
+ jw_array_null(&arr4);
+ }
+ jw_end(&arr4);
+}
+
+static char *expect_nest1 =
+ "{\"obj1\":{\"a\":\"abc\",\"b\":42,\"c\":true},\"arr1\":[\"abc\",42,true]}";
+
+static struct json_writer nest1 = JSON_WRITER_INIT;
+
+static void make_nest1(int pretty)
+{
+ jw_object_begin(&nest1, pretty);
+ {
+ jw_object_sub_jw(&nest1, "obj1", &obj1);
+ jw_object_sub_jw(&nest1, "arr1", &arr1);
+ }
+ jw_end(&nest1);
+}
+
+static char *expect_inline1 =
+ "{\"obj1\":{\"a\":\"abc\",\"b\":42,\"c\":true},\"arr1\":[\"abc\",42,true]}";
+
+static char *pretty_inline1 =
+ ("{\n"
+ " \"obj1\": {\n"
+ " \"a\": \"abc\",\n"
+ " \"b\": 42,\n"
+ " \"c\": true\n"
+ " },\n"
+ " \"arr1\": [\n"
+ " \"abc\",\n"
+ " 42,\n"
+ " true\n"
+ " ]\n"
+ "}");
+
+static struct json_writer inline1 = JSON_WRITER_INIT;
+
+static void make_inline1(int pretty)
+{
+ jw_object_begin(&inline1, pretty);
+ {
+ jw_object_inline_begin_object(&inline1, "obj1");
+ {
+ jw_object_string(&inline1, "a", "abc");
+ jw_object_intmax(&inline1, "b", 42);
+ jw_object_true(&inline1, "c");
+ }
+ jw_end(&inline1);
+ jw_object_inline_begin_array(&inline1, "arr1");
+ {
+ jw_array_string(&inline1, "abc");
+ jw_array_intmax(&inline1, 42);
+ jw_array_true(&inline1);
+ }
+ jw_end(&inline1);
+ }
+ jw_end(&inline1);
+}
+
+static char *expect_inline2 =
+ "[[1,2],[3,4],{\"a\":\"abc\"}]";
+
+static char *pretty_inline2 =
+ ("[\n"
+ " [\n"
+ " 1,\n"
+ " 2\n"
+ " ],\n"
+ " [\n"
+ " 3,\n"
+ " 4\n"
+ " ],\n"
+ " {\n"
+ " \"a\": \"abc\"\n"
+ " }\n"
+ "]");
+
+static struct json_writer inline2 = JSON_WRITER_INIT;
+
+static void make_inline2(int pretty)
+{
+ jw_array_begin(&inline2, pretty);
+ {
+ jw_array_inline_begin_array(&inline2);
+ {
+ jw_array_intmax(&inline2, 1);
+ jw_array_intmax(&inline2, 2);
+ }
+ jw_end(&inline2);
+ jw_array_inline_begin_array(&inline2);
+ {
+ jw_array_intmax(&inline2, 3);
+ jw_array_intmax(&inline2, 4);
+ }
+ jw_end(&inline2);
+ jw_array_inline_begin_object(&inline2);
+ {
+ jw_object_string(&inline2, "a", "abc");
+ }
+ jw_end(&inline2);
+ }
+ jw_end(&inline2);
+}
+
+/*
+ * When super is compact, we expect subs to be compacted (even if originally
+ * pretty).
+ */
+static const char *expect_mixed1 =
+ ("{\"obj1\":{\"a\":\"abc\",\"b\":42,\"c\":true},"
+ "\"arr1\":[\"abc\",42,true]}");
+
+/*
+ * When super is pretty, a compact sub (obj1) is kept compact and a pretty
+ * sub (arr1) is re-indented.
+ */
+static const char *pretty_mixed1 =
+ ("{\n"
+ " \"obj1\": {\"a\":\"abc\",\"b\":42,\"c\":true},\n"
+ " \"arr1\": [\n"
+ " \"abc\",\n"
+ " 42,\n"
+ " true\n"
+ " ]\n"
+ "}");
+
+static struct json_writer mixed1 = JSON_WRITER_INIT;
+
+static void make_mixed1(int pretty)
+{
+ jw_init(&obj1);
+ jw_init(&arr1);
+
+ make_obj1(0); /* obj1 is compact */
+ make_arr1(1); /* arr1 is pretty */
+
+ jw_object_begin(&mixed1, pretty);
+ {
+ jw_object_sub_jw(&mixed1, "obj1", &obj1);
+ jw_object_sub_jw(&mixed1, "arr1", &arr1);
+ }
+ jw_end(&mixed1);
+}
+
+static void cmp(const char *test, const struct json_writer *jw, const char *exp)
+{
+ if (!strcmp(jw->json.buf, exp))
+ return;
+
+ printf("error[%s]: observed '%s' expected '%s'\n",
+ test, jw->json.buf, exp);
+ exit(1);
+}
+
+#define t(v) do { make_##v(0); cmp(#v, &v, expect_##v); } while (0)
+#define p(v) do { make_##v(1); cmp(#v, &v, pretty_##v); } while (0)
+
+/*
+ * Run some basic regression tests with some known patterns.
+ * These tests also demonstrate how to use the jw_ API.
+ */
+static int unit_tests(void)
+{
+ /* compact (canonical) forms */
+ t(obj1);
+ t(obj2);
+ t(obj3);
+ t(obj4);
+ t(obj5);
+ t(obj6);
+
+ t(arr1);
+ t(arr2);
+ t(arr3);
+ t(arr4);
+
+ t(nest1);
+
+ t(inline1);
+ t(inline2);
+
+ jw_init(&obj1);
+ jw_init(&obj2);
+ jw_init(&obj3);
+ jw_init(&obj4);
+
+ jw_init(&arr1);
+ jw_init(&arr2);
+ jw_init(&arr3);
+ jw_init(&arr4);
+
+ jw_init(&inline1);
+ jw_init(&inline2);
+
+ /* pretty forms */
+ p(obj1);
+ p(obj2);
+ p(obj3);
+ p(obj4);
+
+ p(arr1);
+ p(arr2);
+ p(arr3);
+ p(arr4);
+
+ p(inline1);
+ p(inline2);
+
+ /* mixed forms */
+ t(mixed1);
+ jw_init(&mixed1);
+ p(mixed1);
+
+ return 0;
+}
+
+static void get_s(int line_nr, char **s_in)
+{
+ *s_in = strtok(NULL, " ");
+ if (!*s_in)
+ die("line[%d]: expected: <s>", line_nr);
+}
+
+static void get_i(int line_nr, intmax_t *s_in)
+{
+ char *s;
+ char *endptr;
+
+ get_s(line_nr, &s);
+
+ *s_in = strtol(s, &endptr, 10);
+ if (*endptr || errno == ERANGE)
+ die("line[%d]: invalid integer value", line_nr);
+}
+
+static void get_d(int line_nr, double *s_in)
+{
+ char *s;
+ char *endptr;
+
+ get_s(line_nr, &s);
+
+ *s_in = strtod(s, &endptr);
+ if (*endptr || errno == ERANGE)
+ die("line[%d]: invalid float value", line_nr);
+}
+
+static int pretty;
+
+#define MAX_LINE_LENGTH (64 * 1024)
+
+static char *get_trimmed_line(char *buf, int buf_size)
+{
+ int len;
+
+ if (!fgets(buf, buf_size, stdin))
+ return NULL;
+
+ len = strlen(buf);
+ while (len > 0) {
+ char c = buf[len - 1];
+ if (c == '\n' || c == '\r' || c == ' ' || c == '\t')
+ buf[--len] = 0;
+ else
+ break;
+ }
+
+ while (*buf == ' ' || *buf == '\t')
+ buf++;
+
+ return buf;
+}
+
+static int scripted(void)
+{
+ struct json_writer jw = JSON_WRITER_INIT;
+ char buf[MAX_LINE_LENGTH];
+ char *line;
+ int line_nr = 0;
+
+ line = get_trimmed_line(buf, MAX_LINE_LENGTH);
+ if (!line)
+ return 0;
+
+ if (!strcmp(line, "object"))
+ jw_object_begin(&jw, pretty);
+ else if (!strcmp(line, "array"))
+ jw_array_begin(&jw, pretty);
+ else
+ die("expected first line to be 'object' or 'array'");
+
+ while ((line = get_trimmed_line(buf, MAX_LINE_LENGTH)) != NULL) {
+ char *verb;
+ char *key;
+ char *s_value;
+ intmax_t i_value;
+ double d_value;
+
+ line_nr++;
+
+ verb = strtok(line, " ");
+
+ if (!strcmp(verb, "end")) {
+ jw_end(&jw);
+ }
+ else if (!strcmp(verb, "object-string")) {
+ get_s(line_nr, &key);
+ get_s(line_nr, &s_value);
+ jw_object_string(&jw, key, s_value);
+ }
+ else if (!strcmp(verb, "object-int")) {
+ get_s(line_nr, &key);
+ get_i(line_nr, &i_value);
+ jw_object_intmax(&jw, key, i_value);
+ }
+ else if (!strcmp(verb, "object-double")) {
+ get_s(line_nr, &key);
+ get_i(line_nr, &i_value);
+ get_d(line_nr, &d_value);
+ jw_object_double(&jw, key, i_value, d_value);
+ }
+ else if (!strcmp(verb, "object-true")) {
+ get_s(line_nr, &key);
+ jw_object_true(&jw, key);
+ }
+ else if (!strcmp(verb, "object-false")) {
+ get_s(line_nr, &key);
+ jw_object_false(&jw, key);
+ }
+ else if (!strcmp(verb, "object-null")) {
+ get_s(line_nr, &key);
+ jw_object_null(&jw, key);
+ }
+ else if (!strcmp(verb, "object-object")) {
+ get_s(line_nr, &key);
+ jw_object_inline_begin_object(&jw, key);
+ }
+ else if (!strcmp(verb, "object-array")) {
+ get_s(line_nr, &key);
+ jw_object_inline_begin_array(&jw, key);
+ }
+ else if (!strcmp(verb, "array-string")) {
+ get_s(line_nr, &s_value);
+ jw_array_string(&jw, s_value);
+ }
+ else if (!strcmp(verb, "array-int")) {
+ get_i(line_nr, &i_value);
+ jw_array_intmax(&jw, i_value);
+ }
+ else if (!strcmp(verb, "array-double")) {
+ get_i(line_nr, &i_value);
+ get_d(line_nr, &d_value);
+ jw_array_double(&jw, i_value, d_value);
+ }
+ else if (!strcmp(verb, "array-true"))
+ jw_array_true(&jw);
+ else if (!strcmp(verb, "array-false"))
+ jw_array_false(&jw);
+ else if (!strcmp(verb, "array-null"))
+ jw_array_null(&jw);
+ else if (!strcmp(verb, "array-object"))
+ jw_array_inline_begin_object(&jw);
+ else if (!strcmp(verb, "array-array"))
+ jw_array_inline_begin_array(&jw);
+ else
+ die("unrecognized token: '%s'", verb);
+ }
+
+ if (!jw_is_terminated(&jw))
+ die("json not terminated: '%s'", jw.json.buf);
+
+ printf("%s\n", jw.json.buf);
+
+ strbuf_release(&jw.json);
+ return 0;
+}
+
+int cmd__json_writer(int argc, const char **argv)
+{
+ argc--; /* skip over "json-writer" arg */
+ argv++;
+
+ if (argc > 0 && argv[0][0] == '-') {
+ if (!strcmp(argv[0], "-u") || !strcmp(argv[0], "--unit"))
+ return unit_tests();
+
+ if (!strcmp(argv[0], "-p") || !strcmp(argv[0], "--pretty"))
+ pretty = 1;
+ }
+
+ return scripted();
+}
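
The new json-writer helper has two modes: "-u"/"--unit" runs the built-in regression tests above, while the default scripted mode reads a small verb language from stdin (object, array, object-string, object-int, object-double, array-*, end) and prints the generated JSON. A minimal sketch of driving the scripted mode the way t0019 does; the key names and values are illustrative only:

	cat >expect <<-\EOF &&
	{"name":"widget","count":3}
	EOF
	cat >input <<-\EOF &&
	object
	object-string name widget
	object-int count 3
	end
	EOF
	test-tool json-writer <input >actual &&
	test_cmp expect actual

Passing "-p"/"--pretty" before the stdin input switches the same script to the indented form.
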
diff --git a/t/helper/test-repository.c b/t/helper/test-repository.c
new file mode 100644
index 0000000..2762ca6
--- /dev/null
+++ b/t/helper/test-repository.c
@@ -0,0 +1,82 @@
+#include "test-tool.h"
+#include "cache.h"
+#include "commit-graph.h"
+#include "commit.h"
+#include "config.h"
+#include "object-store.h"
+#include "object.h"
+#include "repository.h"
+#include "tree.h"
+
+static void test_parse_commit_in_graph(const char *gitdir, const char *worktree,
+ const struct object_id *commit_oid)
+{
+ struct repository r;
+ struct commit *c;
+ struct commit_list *parent;
+
+ repo_init(&r, gitdir, worktree);
+
+ c = lookup_commit(&r, commit_oid);
+
+ if (!parse_commit_in_graph(&r, c))
+ die("Couldn't parse commit");
+
+ printf("%"PRItime, c->date);
+ for (parent = c->parents; parent; parent = parent->next)
+ printf(" %s", oid_to_hex(&parent->item->object.oid));
+ printf("\n");
+
+ repo_clear(&r);
+}
+
+static void test_get_commit_tree_in_graph(const char *gitdir,
+ const char *worktree,
+ const struct object_id *commit_oid)
+{
+ struct repository r;
+ struct commit *c;
+ struct tree *tree;
+
+ repo_init(&r, gitdir, worktree);
+
+ c = lookup_commit(&r, commit_oid);
+
+ /*
+ * get_commit_tree_in_graph does not automatically parse the commit, so
+ * parse it first.
+ */
+ if (!parse_commit_in_graph(&r, c))
+ die("Couldn't parse commit");
+ tree = get_commit_tree_in_graph(&r, c);
+ if (!tree)
+ die("Couldn't get commit tree");
+
+ printf("%s\n", oid_to_hex(&tree->object.oid));
+
+ repo_clear(&r);
+}
+
+int cmd__repository(int argc, const char **argv)
+{
+ if (argc < 2)
+ die("must have at least 2 arguments");
+ if (!strcmp(argv[1], "parse_commit_in_graph")) {
+ struct object_id oid;
+ if (argc < 5)
+ die("not enough arguments");
+ if (parse_oid_hex(argv[4], &oid, &argv[4]))
+ die("cannot parse oid '%s'", argv[4]);
+ test_parse_commit_in_graph(argv[2], argv[3], &oid);
+ } else if (!strcmp(argv[1], "get_commit_tree_in_graph")) {
+ struct object_id oid;
+ if (argc < 5)
+ die("not enough arguments");
+ if (parse_oid_hex(argv[4], &oid, &argv[4]))
+ die("cannot parse oid '%s'", argv[4]);
+ test_get_commit_tree_in_graph(argv[2], argv[3], &oid);
+ } else {
+ die("unrecognized '%s'", argv[1]);
+ }
+ return 0;
+}
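
The repository helper takes a subcommand, a gitdir, a worktree and a commit OID, and runs parse_commit_in_graph() or get_commit_tree_in_graph() against a repository opened with repo_init() rather than the_repository. A hedged sketch of an invocation from a test; the "repo" path is illustrative, and a commit-graph file must already exist for the lookups to succeed:

	git -C repo commit-graph write &&
	oid=$(git -C repo rev-parse HEAD) &&
	test-tool repository parse_commit_in_graph repo/.git repo "$oid" &&
	test-tool repository get_commit_tree_in_graph repo/.git repo "$oid"

The first call prints the commit date followed by its parent OIDs; the second prints the commit's tree OID, both read from the commit-graph file.
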
diff --git a/t/helper/test-tool.c b/t/helper/test-tool.c
index 805a45d..0edafcf 100644
--- a/t/helper/test-tool.c
+++ b/t/helper/test-tool.c
@@ -19,6 +19,7 @@ static struct test_cmd cmds[] = {
{ "genrandom", cmd__genrandom },
{ "hashmap", cmd__hashmap },
{ "index-version", cmd__index_version },
+ { "json-writer", cmd__json_writer },
{ "lazy-init-name-hash", cmd__lazy_init_name_hash },
{ "match-trees", cmd__match_trees },
{ "mergesort", cmd__mergesort },
@@ -29,6 +30,7 @@ static struct test_cmd cmds[] = {
{ "read-cache", cmd__read_cache },
{ "ref-store", cmd__ref_store },
{ "regex", cmd__regex },
+ { "repository", cmd__repository },
{ "revision-walking", cmd__revision_walking },
{ "run-command", cmd__run_command },
{ "scrap-cache-tree", cmd__scrap_cache_tree },
diff --git a/t/helper/test-tool.h b/t/helper/test-tool.h
index 7116ddf..e926c41 100644
--- a/t/helper/test-tool.h
+++ b/t/helper/test-tool.h
@@ -13,6 +13,7 @@ int cmd__example_decorate(int argc, const char **argv);
int cmd__genrandom(int argc, const char **argv);
int cmd__hashmap(int argc, const char **argv);
int cmd__index_version(int argc, const char **argv);
+int cmd__json_writer(int argc, const char **argv);
int cmd__lazy_init_name_hash(int argc, const char **argv);
int cmd__match_trees(int argc, const char **argv);
int cmd__mergesort(int argc, const char **argv);
@@ -23,6 +24,7 @@ int cmd__prio_queue(int argc, const char **argv);
int cmd__read_cache(int argc, const char **argv);
int cmd__ref_store(int argc, const char **argv);
int cmd__regex(int argc, const char **argv);
+int cmd__repository(int argc, const char **argv);
int cmd__revision_walking(int argc, const char **argv);
int cmd__run_command(int argc, const char **argv);
int cmd__scrap_cache_tree(int argc, const char **argv);
diff --git a/t/lib-gpg.sh b/t/lib-gpg.sh
index a5d3b2c..3fe0287 100755
--- a/t/lib-gpg.sh
+++ b/t/lib-gpg.sh
@@ -38,7 +38,33 @@ then
"$TEST_DIRECTORY"/lib-gpg/ownertrust &&
gpg --homedir "${GNUPGHOME}" </dev/null >/dev/null 2>&1 \
--sign -u committer@example.com &&
- test_set_prereq GPG
+ test_set_prereq GPG &&
+ # Available key info:
+ # * see t/lib-gpg/gpgsm-gen-key.in
+ # To generate new certificate:
+ # * no passphrase
+ # gpgsm --homedir /tmp/gpghome/ \
+ # -o /tmp/gpgsm.crt.user \
+ # --generate-key \
+ # --batch t/lib-gpg/gpgsm-gen-key.in
+ # To import certificate:
+ # gpgsm --homedir /tmp/gpghome/ \
+ # --import /tmp/gpgsm.crt.user
+ # To export into a .p12 we can later import:
+ # gpgsm --homedir /tmp/gpghome/ \
+ # -o t/lib-gpg/gpgsm_cert.p12 \
+ # --export-secret-key-p12 "committer@example.com"
+ echo | gpgsm --homedir "${GNUPGHOME}" 2>/dev/null \
+ --passphrase-fd 0 --pinentry-mode loopback \
+ --import "$TEST_DIRECTORY"/lib-gpg/gpgsm_cert.p12 &&
+ gpgsm --homedir "${GNUPGHOME}" 2>/dev/null -K \
+ | grep fingerprint: | cut -d" " -f4 | tr -d '\n' > \
+ ${GNUPGHOME}/trustlist.txt &&
+ echo " S relax" >> ${GNUPGHOME}/trustlist.txt &&
+ (gpgconf --kill gpg-agent >/dev/null 2>&1 || : ) &&
+ echo hello | gpgsm --homedir "${GNUPGHOME}" >/dev/null \
+ -u committer@example.com -o /dev/null --sign - 2>&1 &&
+ test_set_prereq GPGSM
;;
esac
fi
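
lib-gpg.sh now also imports an X.509 test certificate into gpgsm, marks it trusted, verifies that signing works, and sets the GPGSM prerequisite, so tests can exercise x509 signing alongside the existing OpenPGP GPG prerequisite. A hedged sketch of a test guarded by the new prerequisite; the configuration keys follow the gpg.format=x509 support in this release, but the body is illustrative rather than copied from any test here:

	test_expect_success GPGSM 'create x509-signed commit' '
		test_config gpg.format x509 &&
		test_config user.signingkey committer@example.com &&
		git commit -S --allow-empty -m "x509-signed" &&
		git verify-commit HEAD
	'
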
diff --git a/t/lib-gpg/gpgsm-gen-key.in b/t/lib-gpg/gpgsm-gen-key.in
new file mode 100644
index 0000000..a7fd87c
--- /dev/null
+++ b/t/lib-gpg/gpgsm-gen-key.in
@@ -0,0 +1,8 @@
+Key-Type: RSA
+Key-Length: 2048
+Key-Usage: sign
+Serial: random
+Name-DN: CN=C O Mitter, O=Example, SN=C O, GN=Mitter
+Name-Email: committer@example.com
+Not-Before: 1970-01-01 00:00:00
+Not-After: 3000-01-01 00:00:00
diff --git a/t/lib-gpg/gpgsm_cert.p12 b/t/lib-gpg/gpgsm_cert.p12
new file mode 100644
index 0000000..94ffad0
--- /dev/null
+++ b/t/lib-gpg/gpgsm_cert.p12
Binary files differ
diff --git a/t/lib-httpd.sh b/t/lib-httpd.sh
index ed41b15..a8729f8 100644
--- a/t/lib-httpd.sh
+++ b/t/lib-httpd.sh
@@ -288,3 +288,24 @@ expect_askpass() {
test_cmp "$TRASH_DIRECTORY/askpass-expect" \
"$TRASH_DIRECTORY/askpass-query"
}
+
+strip_access_log() {
+ sed -e "
+ s/^.* \"//
+ s/\"//
+ s/ [1-9][0-9]*\$//
+ s/^GET /GET /
+ " "$HTTPD_ROOT_PATH"/access.log
+}
+
+# Requires one argument: the name of a file containing the expected stripped
+# access log entries.
+check_access_log() {
+ sort "$1" >"$1".sorted &&
+ strip_access_log >access.log.stripped &&
+ sort access.log.stripped >access.log.sorted &&
+ if ! test_cmp "$1".sorted access.log.sorted
+ then
+ test_cmp "$1" access.log.stripped
+ fi
+}
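
strip_access_log reduces each Apache access-log entry to "METHOD PATH PROTOCOL STATUS", and check_access_log compares the stripped log with an expected file: it first compares the sorted versions (so request ordering does not matter) and, only if that fails, re-compares the unsorted files to produce a readable diff. A minimal sketch of the intended use; the request lines are illustrative, not taken from a specific test:

	cat >expect <<-\EOF &&
	GET /smart/repo.git/info/refs?service=git-upload-pack HTTP/1.1 200
	POST /smart/repo.git/git-upload-pack HTTP/1.1 200
	EOF
	check_access_log expect
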
diff --git a/t/lib-submodule-update.sh b/t/lib-submodule-update.sh
index be78cdc..5b56b23 100755
--- a/t/lib-submodule-update.sh
+++ b/t/lib-submodule-update.sh
@@ -756,7 +756,7 @@ test_submodule_recursing_with_args_common() {
: >sub1/untrackedfile &&
test_must_fail $command replace_sub1_with_file &&
test_superproject_content origin/add_sub1 &&
- test_submodule_content sub1 origin/add_sub1
+ test_submodule_content sub1 origin/add_sub1 &&
test -f sub1/untracked_file
)
'
@@ -844,7 +844,7 @@ test_submodule_switch_recursing_with_args () {
cd submodule_update &&
git branch -t add_sub1 origin/add_sub1 &&
: >sub1 &&
- echo sub1 >.git/info/exclude
+ echo sub1 >.git/info/exclude &&
$command add_sub1 &&
test_superproject_content origin/add_sub1 &&
test_submodule_content sub1 origin/add_sub1
@@ -971,7 +971,6 @@ test_submodule_forced_switch_recursing_with_args () {
rm -rf .git/modules/sub1 &&
$command replace_sub1_with_directory &&
test_superproject_content origin/replace_sub1_with_directory &&
- test_submodule_content sub1 origin/modify_sub1
test_git_directory_exists sub1
)
'
diff --git a/t/t0000-basic.sh b/t/t0000-basic.sh
index af61d08..34859fe 100755
--- a/t/t0000-basic.sh
+++ b/t/t0000-basic.sh
@@ -1081,7 +1081,7 @@ test_expect_success 'very long name in the index handled sanely' '
(
git ls-files -s path4 |
sed -e "s/ .*/ /" |
- tr -d "\012"
+ tr -d "\012" &&
echo "$a"
) | git update-index --index-info &&
len=$(git ls-files "a*" | wc -c) &&
diff --git a/t/t0001-init.sh b/t/t0001-init.sh
index 4c86505..ca85aae 100755
--- a/t/t0001-init.sh
+++ b/t/t0001-init.sh
@@ -408,7 +408,7 @@ is_hidden () {
test_expect_success MINGW '.git hidden' '
rm -rf newdir &&
(
- unset GIT_DIR GIT_WORK_TREE
+ sane_unset GIT_DIR GIT_WORK_TREE &&
mkdir newdir &&
cd newdir &&
git init &&
@@ -420,7 +420,7 @@ test_expect_success MINGW '.git hidden' '
test_expect_success MINGW 'bare git dir not hidden' '
rm -rf newdir &&
(
- unset GIT_DIR GIT_WORK_TREE GIT_CONFIG
+ sane_unset GIT_DIR GIT_WORK_TREE GIT_CONFIG &&
mkdir newdir &&
cd newdir &&
git --bare init
diff --git a/t/t0003-attributes.sh b/t/t0003-attributes.sh
index f19ae4f..5c37c2e 100755
--- a/t/t0003-attributes.sh
+++ b/t/t0003-attributes.sh
@@ -34,15 +34,15 @@ test_expect_success 'open-quoted pathname' '
test_expect_success 'setup' '
mkdir -p a/b/d a/c b &&
(
- echo "[attr]notest !test"
- echo "\" d \" test=d"
- echo " e test=e"
- echo " e\" test=e"
- echo "f test=f"
- echo "a/i test=a/i"
- echo "onoff test -test"
- echo "offon -test test"
- echo "no notest"
+ echo "[attr]notest !test" &&
+ echo "\" d \" test=d" &&
+ echo " e test=e" &&
+ echo " e\" test=e" &&
+ echo "f test=f" &&
+ echo "a/i test=a/i" &&
+ echo "onoff test -test" &&
+ echo "offon -test test" &&
+ echo "no notest" &&
echo "A/e/F test=A/e/F"
) >.gitattributes &&
(
@@ -51,7 +51,7 @@ test_expect_success 'setup' '
) >a/.gitattributes &&
(
echo "h test=a/b/h" &&
- echo "d/* test=a/b/d/*"
+ echo "d/* test=a/b/d/*" &&
echo "d/yes notest"
) >a/b/.gitattributes &&
(
@@ -287,7 +287,7 @@ test_expect_success 'bare repository: check that .gitattribute is ignored' '
(
cd bare.git &&
(
- echo "f test=f"
+ echo "f test=f" &&
echo "a/i test=a/i"
) >.gitattributes &&
attr_check f unspecified &&
@@ -312,7 +312,7 @@ test_expect_success 'bare repository: test info/attributes' '
(
cd bare.git &&
(
- echo "f test=f"
+ echo "f test=f" &&
echo "a/i test=a/i"
) >info/attributes &&
attr_check f f &&
diff --git a/t/t0019-json-writer.sh b/t/t0019-json-writer.sh
new file mode 100755
index 0000000..3b0c336
--- /dev/null
+++ b/t/t0019-json-writer.sh
@@ -0,0 +1,331 @@
+#!/bin/sh
+
+test_description='test json-writer JSON generation'
+. ./test-lib.sh
+
+test_expect_success 'unit test of json-writer routines' '
+ test-tool json-writer -u
+'
+
+test_expect_success 'trivial object' '
+ cat >expect <<-\EOF &&
+ {}
+ EOF
+ cat >input <<-\EOF &&
+ object
+ end
+ EOF
+ test-tool json-writer <input >actual &&
+ test_cmp expect actual
+'
+
+test_expect_success 'trivial array' '
+ cat >expect <<-\EOF &&
+ []
+ EOF
+ cat >input <<-\EOF &&
+ array
+ end
+ EOF
+ test-tool json-writer <input >actual &&
+ test_cmp expect actual
+'
+
+test_expect_success 'simple object' '
+ cat >expect <<-\EOF &&
+ {"a":"abc","b":42,"c":3.14,"d":true,"e":false,"f":null}
+ EOF
+ cat >input <<-\EOF &&
+ object
+ object-string a abc
+ object-int b 42
+ object-double c 2 3.140
+ object-true d
+ object-false e
+ object-null f
+ end
+ EOF
+ test-tool json-writer <input >actual &&
+ test_cmp expect actual
+'
+
+test_expect_success 'simple array' '
+ cat >expect <<-\EOF &&
+ ["abc",42,3.14,true,false,null]
+ EOF
+ cat >input <<-\EOF &&
+ array
+ array-string abc
+ array-int 42
+ array-double 2 3.140
+ array-true
+ array-false
+ array-null
+ end
+ EOF
+ test-tool json-writer <input >actual &&
+ test_cmp expect actual
+'
+
+test_expect_success 'escape quoting string' '
+ cat >expect <<-\EOF &&
+ {"a":"abc\\def"}
+ EOF
+ cat >input <<-\EOF &&
+ object
+ object-string a abc\def
+ end
+ EOF
+ test-tool json-writer <input >actual &&
+ test_cmp expect actual
+'
+
+test_expect_success 'escape quoting string 2' '
+ cat >expect <<-\EOF &&
+ {"a":"abc\"def"}
+ EOF
+ cat >input <<-\EOF &&
+ object
+ object-string a abc"def
+ end
+ EOF
+ test-tool json-writer <input >actual &&
+ test_cmp expect actual
+'
+
+test_expect_success 'nested inline object' '
+ cat >expect <<-\EOF &&
+ {"a":"abc","b":42,"sub1":{"c":3.14,"d":true,"sub2":{"e":false,"f":null}}}
+ EOF
+ cat >input <<-\EOF &&
+ object
+ object-string a abc
+ object-int b 42
+ object-object sub1
+ object-double c 2 3.140
+ object-true d
+ object-object sub2
+ object-false e
+ object-null f
+ end
+ end
+ end
+ EOF
+ test-tool json-writer <input >actual &&
+ test_cmp expect actual
+'
+
+test_expect_success 'nested inline array' '
+ cat >expect <<-\EOF &&
+ ["abc",42,[3.14,true,[false,null]]]
+ EOF
+ cat >input <<-\EOF &&
+ array
+ array-string abc
+ array-int 42
+ array-array
+ array-double 2 3.140
+ array-true
+ array-array
+ array-false
+ array-null
+ end
+ end
+ end
+ EOF
+ test-tool json-writer <input >actual &&
+ test_cmp expect actual
+'
+
+test_expect_success 'nested inline object and array' '
+ cat >expect <<-\EOF &&
+ {"a":"abc","b":42,"sub1":{"c":3.14,"d":true,"sub2":[false,null]}}
+ EOF
+ cat >input <<-\EOF &&
+ object
+ object-string a abc
+ object-int b 42
+ object-object sub1
+ object-double c 2 3.140
+ object-true d
+ object-array sub2
+ array-false
+ array-null
+ end
+ end
+ end
+ EOF
+ test-tool json-writer <input >actual &&
+ test_cmp expect actual
+'
+
+test_expect_success 'nested inline object and array 2' '
+ cat >expect <<-\EOF &&
+ {"a":"abc","b":42,"sub1":{"c":3.14,"d":true,"sub2":[false,{"g":0,"h":1},null]}}
+ EOF
+ cat >input <<-\EOF &&
+ object
+ object-string a abc
+ object-int b 42
+ object-object sub1
+ object-double c 2 3.140
+ object-true d
+ object-array sub2
+ array-false
+ array-object
+ object-int g 0
+ object-int h 1
+ end
+ array-null
+ end
+ end
+ end
+ EOF
+ test-tool json-writer <input >actual &&
+ test_cmp expect actual
+'
+
+test_expect_success 'pretty nested inline object and array 2' '
+ sed -e "s/^|//" >expect <<-\EOF &&
+ |{
+ | "a": "abc",
+ | "b": 42,
+ | "sub1": {
+ | "c": 3.14,
+ | "d": true,
+ | "sub2": [
+ | false,
+ | {
+ | "g": 0,
+ | "h": 1
+ | },
+ | null
+ | ]
+ | }
+ |}
+ EOF
+ cat >input <<-\EOF &&
+ object
+ object-string a abc
+ object-int b 42
+ object-object sub1
+ object-double c 2 3.140
+ object-true d
+ object-array sub2
+ array-false
+ array-object
+ object-int g 0
+ object-int h 1
+ end
+ array-null
+ end
+ end
+ end
+ EOF
+ test-tool json-writer -p <input >actual &&
+ test_cmp expect actual
+'
+
+test_expect_success 'inline object with no members' '
+ cat >expect <<-\EOF &&
+ {"a":"abc","empty":{},"b":42}
+ EOF
+ cat >input <<-\EOF &&
+ object
+ object-string a abc
+ object-object empty
+ end
+ object-int b 42
+ end
+ EOF
+ test-tool json-writer <input >actual &&
+ test_cmp expect actual
+'
+
+test_expect_success 'inline array with no members' '
+ cat >expect <<-\EOF &&
+ {"a":"abc","empty":[],"b":42}
+ EOF
+ cat >input <<-\EOF &&
+ object
+ object-string a abc
+ object-array empty
+ end
+ object-int b 42
+ end
+ EOF
+ test-tool json-writer <input >actual &&
+ test_cmp expect actual
+'
+
+test_expect_success 'larger empty example' '
+ cat >expect <<-\EOF &&
+ {"a":"abc","empty":[{},{},{},[],{}],"b":42}
+ EOF
+ cat >input <<-\EOF &&
+ object
+ object-string a abc
+ object-array empty
+ array-object
+ end
+ array-object
+ end
+ array-object
+ end
+ array-array
+ end
+ array-object
+ end
+ end
+ object-int b 42
+ end
+ EOF
+ test-tool json-writer <input >actual &&
+ test_cmp expect actual
+'
+
+test_lazy_prereq PERLJSON '
+ perl -MJSON -e "exit 0"
+'
+
+# As a sanity check, ask Perl to parse our generated JSON and recursively
+# dump the resulting data in sorted order. Confirm that it matches our
+# expectations.
+test_expect_success PERLJSON 'parse JSON using Perl' '
+ cat >expect <<-\EOF &&
+ row[0].a abc
+ row[0].b 42
+ row[0].sub1 hash
+ row[0].sub1.c 3.14
+ row[0].sub1.d 1
+ row[0].sub1.sub2 array
+ row[0].sub1.sub2[0] 0
+ row[0].sub1.sub2[1] hash
+ row[0].sub1.sub2[1].g 0
+ row[0].sub1.sub2[1].h 1
+ row[0].sub1.sub2[2] null
+ EOF
+ cat >input <<-\EOF &&
+ object
+ object-string a abc
+ object-int b 42
+ object-object sub1
+ object-double c 2 3.140
+ object-true d
+ object-array sub2
+ array-false
+ array-object
+ object-int g 0
+ object-int h 1
+ end
+ array-null
+ end
+ end
+ end
+ EOF
+ test-tool json-writer <input >output.json &&
+ perl "$TEST_DIRECTORY"/t0019/parse_json.perl <output.json >actual &&
+ test_cmp expect actual
+'
+
+test_done
diff --git a/t/t0019/parse_json.perl b/t/t0019/parse_json.perl
new file mode 100644
index 0000000..ca4e5bf
--- /dev/null
+++ b/t/t0019/parse_json.perl
@@ -0,0 +1,52 @@
+#!/usr/bin/perl
+use strict;
+use warnings;
+use JSON;
+
+sub dump_array {
+ my ($label_in, $ary_ref) = @_;
+ my @ary = @$ary_ref;
+
+ for ( my $i = 0; $i <= $#{ $ary_ref }; $i++ )
+ {
+ my $label = "$label_in\[$i\]";
+ dump_item($label, $ary[$i]);
+ }
+}
+
+sub dump_hash {
+ my ($label_in, $obj_ref) = @_;
+ my %obj = %$obj_ref;
+
+ foreach my $k (sort keys %obj) {
+ my $label = (length($label_in) > 0) ? "$label_in.$k" : "$k";
+ my $value = $obj{$k};
+
+ dump_item($label, $value);
+ }
+}
+
+sub dump_item {
+ my ($label_in, $value) = @_;
+ if (ref($value) eq 'ARRAY') {
+ print "$label_in array\n";
+ dump_array($label_in, $value);
+ } elsif (ref($value) eq 'HASH') {
+ print "$label_in hash\n";
+ dump_hash($label_in, $value);
+ } elsif (defined $value) {
+ print "$label_in $value\n";
+ } else {
+ print "$label_in null\n";
+ }
+}
+
+my $row = 0;
+while (<>) {
+ my $data = decode_json( $_ );
+ my $label = "row[$row]";
+
+ dump_hash($label, $data);
+ $row++;
+}
+
diff --git a/t/t0021-conversion.sh b/t/t0021-conversion.sh
index 9479a4a..308cd28 100755
--- a/t/t0021-conversion.sh
+++ b/t/t0021-conversion.sh
@@ -583,7 +583,7 @@ test_expect_success PERL 'process filter should restart after unexpected write f
git checkout --quiet --no-progress . 2>git-stderr.log &&
grep "smudge write error at" git-stderr.log &&
- grep "error: external filter" git-stderr.log &&
+ test_i18ngrep "error: external filter" git-stderr.log &&
cat >expected.log <<-EOF &&
START
@@ -785,7 +785,7 @@ test_expect_success PERL 'missing file in delayed checkout' '
cd repo &&
git init &&
echo "*.a filter=bug" >.gitattributes &&
- cp "$TEST_ROOT/test.o" missing-delay.a
+ cp "$TEST_ROOT/test.o" missing-delay.a &&
git add . &&
git commit -m "test commit"
) &&
@@ -807,7 +807,7 @@ test_expect_success PERL 'invalid file in delayed checkout' '
git init &&
echo "*.a filter=bug" >.gitattributes &&
cp "$TEST_ROOT/test.o" invalid-delay.a &&
- cp "$TEST_ROOT/test.o" unfiltered
+ cp "$TEST_ROOT/test.o" unfiltered &&
git add . &&
git commit -m "test commit"
) &&
diff --git a/t/t0090-cache-tree.sh b/t/t0090-cache-tree.sh
index 0c61268..28ea93f 100755
--- a/t/t0090-cache-tree.sh
+++ b/t/t0090-cache-tree.sh
@@ -156,7 +156,7 @@ test_expect_success PERL 'commit --interactive gives cache-tree on partial commi
return 44;
}
EOT
- (echo p; echo 1; echo; echo s; echo n; echo y; echo q) |
+ test_write_lines p 1 "" s n y q |
git commit --interactive -m foo &&
test_cache_tree
'
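
This and several later conversions replace subshells of the form "(echo p; echo 1; ...)", which the chain linter would flag, with test_write_lines, which prints each argument on its own line (an empty "" argument yields a blank line) without needing a subshell or an "&&" chain. A small sketch mirroring the hunk above, with placeholder input values:

	# equivalent to: (echo p; echo 1; echo; echo s; echo n; echo y; echo q)
	test_write_lines p 1 "" s n y q | git commit --interactive -m foo
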
diff --git a/t/t1004-read-tree-m-u-wf.sh b/t/t1004-read-tree-m-u-wf.sh
index 826cd32..c13578a 100755
--- a/t/t1004-read-tree-m-u-wf.sh
+++ b/t/t1004-read-tree-m-u-wf.sh
@@ -210,10 +210,10 @@ test_expect_success 'D/F' '
read_tree_u_must_succeed -m -u branch-point side-b side-a &&
git ls-files -u >actual &&
(
- a=$(git rev-parse branch-point:subdir/file2)
- b=$(git rev-parse side-a:subdir/file2/another)
- echo "100644 $a 1 subdir/file2"
- echo "100644 $a 2 subdir/file2"
+ a=$(git rev-parse branch-point:subdir/file2) &&
+ b=$(git rev-parse side-a:subdir/file2/another) &&
+ echo "100644 $a 1 subdir/file2" &&
+ echo "100644 $a 2 subdir/file2" &&
echo "100644 $b 3 subdir/file2/another"
) >expect &&
test_cmp expect actual
diff --git a/t/t1005-read-tree-reset.sh b/t/t1005-read-tree-reset.sh
index 0745685..83b09e1 100755
--- a/t/t1005-read-tree-reset.sh
+++ b/t/t1005-read-tree-reset.sh
@@ -33,7 +33,7 @@ test_expect_success 'reset should remove remnants from a failed merge' '
git ls-files -s >expect &&
sha1=$(git rev-parse :new) &&
(
- echo "100644 $sha1 1 old"
+ echo "100644 $sha1 1 old" &&
echo "100644 $sha1 3 old"
) | git update-index --index-info &&
>old &&
@@ -48,7 +48,7 @@ test_expect_success 'two-way reset should remove remnants too' '
git ls-files -s >expect &&
sha1=$(git rev-parse :new) &&
(
- echo "100644 $sha1 1 old"
+ echo "100644 $sha1 1 old" &&
echo "100644 $sha1 3 old"
) | git update-index --index-info &&
>old &&
@@ -63,7 +63,7 @@ test_expect_success 'Porcelain reset should remove remnants too' '
git ls-files -s >expect &&
sha1=$(git rev-parse :new) &&
(
- echo "100644 $sha1 1 old"
+ echo "100644 $sha1 1 old" &&
echo "100644 $sha1 3 old"
) | git update-index --index-info &&
>old &&
@@ -78,7 +78,7 @@ test_expect_success 'Porcelain checkout -f should remove remnants too' '
git ls-files -s >expect &&
sha1=$(git rev-parse :new) &&
(
- echo "100644 $sha1 1 old"
+ echo "100644 $sha1 1 old" &&
echo "100644 $sha1 3 old"
) | git update-index --index-info &&
>old &&
@@ -93,7 +93,7 @@ test_expect_success 'Porcelain checkout -f HEAD should remove remnants too' '
git ls-files -s >expect &&
sha1=$(git rev-parse :new) &&
(
- echo "100644 $sha1 1 old"
+ echo "100644 $sha1 1 old" &&
echo "100644 $sha1 3 old"
) | git update-index --index-info &&
>old &&
diff --git a/t/t1008-read-tree-overlay.sh b/t/t1008-read-tree-overlay.sh
index 4c50ed9..cf96016 100755
--- a/t/t1008-read-tree-overlay.sh
+++ b/t/t1008-read-tree-overlay.sh
@@ -23,7 +23,7 @@ test_expect_success setup '
test_expect_success 'multi-read' '
read_tree_must_succeed initial master side &&
- (echo a; echo b/c) >expect &&
+ test_write_lines a b/c >expect &&
git ls-files >actual &&
test_cmp expect actual
'
diff --git a/t/t1015-read-index-unmerged.sh b/t/t1015-read-index-unmerged.sh
new file mode 100755
index 0000000..55d22da
--- /dev/null
+++ b/t/t1015-read-index-unmerged.sh
@@ -0,0 +1,123 @@
+#!/bin/sh
+
+test_description='Test various callers of read_index_unmerged'
+. ./test-lib.sh
+
+test_expect_success 'setup modify/delete + directory/file conflict' '
+ test_create_repo df_plus_modify_delete &&
+ (
+ cd df_plus_modify_delete &&
+
+ test_write_lines a b c d e f g h >letters &&
+ git add letters &&
+ git commit -m initial &&
+
+ git checkout -b modify &&
+ # Throw in letters.txt for sorting order fun
+ # ("letters.txt" sorts between "letters" and "letters/file")
+ echo i >>letters &&
+ echo "version 2" >letters.txt &&
+ git add letters letters.txt &&
+ git commit -m modified &&
+
+ git checkout -b delete HEAD^ &&
+ git rm letters &&
+ mkdir letters &&
+ >letters/file &&
+ echo "version 1" >letters.txt &&
+ git add letters letters.txt &&
+ git commit -m deleted
+ )
+'
+
+test_expect_success 'read-tree --reset cleans unmerged entries' '
+ test_when_finished "git -C df_plus_modify_delete clean -f" &&
+ test_when_finished "git -C df_plus_modify_delete reset --hard" &&
+ (
+ cd df_plus_modify_delete &&
+
+ git checkout delete^0 &&
+ test_must_fail git merge modify &&
+
+ git read-tree --reset HEAD &&
+ git ls-files -u >conflicts &&
+ test_must_be_empty conflicts
+ )
+'
+
+test_expect_success 'One reset --hard cleans unmerged entries' '
+ test_when_finished "git -C df_plus_modify_delete clean -f" &&
+ test_when_finished "git -C df_plus_modify_delete reset --hard" &&
+ (
+ cd df_plus_modify_delete &&
+
+ git checkout delete^0 &&
+ test_must_fail git merge modify &&
+
+ git reset --hard &&
+ test_path_is_missing .git/MERGE_HEAD &&
+ git ls-files -u >conflicts &&
+ test_must_be_empty conflicts
+ )
+'
+
+test_expect_success 'setup directory/file conflict + simple edit/edit' '
+ test_create_repo df_plus_edit_edit &&
+ (
+ cd df_plus_edit_edit &&
+
+ test_seq 1 10 >numbers &&
+ git add numbers &&
+ git commit -m initial &&
+
+ git checkout -b d-edit &&
+ mkdir foo &&
+ echo content >foo/bar &&
+ git add foo &&
+ echo 11 >>numbers &&
+ git add numbers &&
+ git commit -m "directory and edit" &&
+
+ git checkout -b f-edit d-edit^1 &&
+ echo content >foo &&
+ git add foo &&
+ echo eleven >>numbers &&
+ git add numbers &&
+ git commit -m "file and edit"
+ )
+'
+
+test_expect_success 'git merge --abort succeeds despite D/F conflict' '
+ test_when_finished "git -C df_plus_edit_edit clean -f" &&
+ test_when_finished "git -C df_plus_edit_edit reset --hard" &&
+ (
+ cd df_plus_edit_edit &&
+
+ git checkout f-edit^0 &&
+ test_must_fail git merge d-edit^0 &&
+
+ git merge --abort &&
+ test_path_is_missing .git/MERGE_HEAD &&
+ git ls-files -u >conflicts &&
+ test_must_be_empty conflicts
+ )
+'
+
+test_expect_success 'git am --skip succeeds despite D/F conflict' '
+ test_when_finished "git -C df_plus_edit_edit clean -f" &&
+ test_when_finished "git -C df_plus_edit_edit reset --hard" &&
+ (
+ cd df_plus_edit_edit &&
+
+ git checkout f-edit^0 &&
+ git format-patch -1 d-edit &&
+ test_must_fail git am -3 0001*.patch &&
+
+ git am --skip &&
+ test_path_is_missing .git/rebase-apply &&
+ git ls-files -u >conflicts &&
+ test_must_be_empty conflicts
+ )
+'
+
+test_done
diff --git a/t/t1020-subdirectory.sh b/t/t1020-subdirectory.sh
index df3183e..c2df75e 100755
--- a/t/t1020-subdirectory.sh
+++ b/t/t1020-subdirectory.sh
@@ -148,7 +148,7 @@ test_expect_success 'GIT_PREFIX for built-ins' '
(
cd dir &&
echo "change" >two &&
- GIT_EXTERNAL_DIFF=./diff git diff >../actual
+ GIT_EXTERNAL_DIFF=./diff git diff >../actual &&
git checkout -- two
) &&
test_cmp expect actual
diff --git a/t/t1050-large.sh b/t/t1050-large.sh
index f9eb143..1a9b21b 100755
--- a/t/t1050-large.sh
+++ b/t/t1050-large.sh
@@ -108,7 +108,7 @@ test_expect_success 'packsize limit' '
test-tool genrandom "c" $(( 128 * 1024 )) >mid3 &&
git add mid1 mid2 mid3 &&
- count=0
+ count=0 &&
for pi in .git/objects/pack/pack-*.idx
do
test -f "$pi" && count=$(( $count + 1 ))
@@ -116,8 +116,8 @@ test_expect_success 'packsize limit' '
test $count = 2 &&
(
- git hash-object --stdin <mid1
- git hash-object --stdin <mid2
+ git hash-object --stdin <mid1 &&
+ git hash-object --stdin <mid2 &&
git hash-object --stdin <mid3
) |
sort >expect &&
diff --git a/t/t1300-config.sh b/t/t1300-config.sh
index 03c2237..24706ba 100755
--- a/t/t1300-config.sh
+++ b/t/t1300-config.sh
@@ -888,7 +888,7 @@ EOF
test_expect_success !MINGW 'get --path copes with unset $HOME' '
(
- unset HOME;
+ sane_unset HOME &&
test_must_fail git config --get --path path.home \
>result 2>msg &&
git config --get --path path.normal >>result &&
diff --git a/t/t1305-config-include.sh b/t/t1305-config-include.sh
index f035ee4..6359185 100755
--- a/t/t1305-config-include.sh
+++ b/t/t1305-config-include.sh
@@ -310,7 +310,7 @@ test_expect_success 'include cycles are detected' '
cycle
EOF
test_must_fail git config --get-all test.value 2>stderr &&
- grep "exceeded maximum include depth" stderr
+ test_i18ngrep "exceeded maximum include depth" stderr
'
test_done
diff --git a/t/t1308-config-set.sh b/t/t1308-config-set.sh
index 3e00d1a..d0a2727 100755
--- a/t/t1308-config-set.sh
+++ b/t/t1308-config-set.sh
@@ -233,7 +233,7 @@ test_expect_success 'check line errors for malformed values' '
test_expect_success 'error on modifying repo config without repo' '
nongit test_must_fail git config a.b c 2>err &&
- grep "not in a git directory" err
+ test_i18ngrep "not in a git directory" err
'
cmdline_config="'foo.bar=from-cmdline'"
diff --git a/t/t1400-update-ref.sh b/t/t1400-update-ref.sh
index e1fd0f0..7c8df20 100755
--- a/t/t1400-update-ref.sh
+++ b/t/t1400-update-ref.sh
@@ -390,7 +390,7 @@ test_expect_success 'Query "master@{2005-05-26 23:33:01}" (middle of history wit
test_when_finished "rm -f o e" &&
git rev-parse --verify "master@{2005-05-26 23:33:01}" >o 2>e &&
test $B = $(cat o) &&
- test "warning: Log for ref $m has gap after $gd." = "$(cat e)"
+ test_i18ngrep -F "warning: log for ref $m has gap after $gd" e
'
test_expect_success 'Query "master@{2005-05-26 23:38:00}" (middle of history)' '
test_when_finished "rm -f o e" &&
@@ -408,7 +408,7 @@ test_expect_success 'Query "master@{2005-05-28}" (past end of history)' '
test_when_finished "rm -f o e" &&
git rev-parse --verify "master@{2005-05-28}" >o 2>e &&
test $D = $(cat o) &&
- test "warning: Log for ref $m unexpectedly ended on $ld." = "$(cat e)"
+ test_i18ngrep -F "warning: log for ref $m unexpectedly ended on $ld" e
'
rm -f .git/$m .git/logs/$m expect
@@ -462,7 +462,7 @@ test_expect_success 'git cat-file blob master@{2005-05-26 23:42}:F (expect OTHER
test_expect_success 'given old value for missing pseudoref, do not create' '
test_must_fail git update-ref PSEUDOREF $A $B 2>err &&
test_path_is_missing .git/PSEUDOREF &&
- grep "could not read ref" err
+ test_i18ngrep "could not read ref" err
'
test_expect_success 'create pseudoref' '
@@ -483,7 +483,7 @@ test_expect_success 'overwrite pseudoref with correct old value' '
test_expect_success 'do not overwrite pseudoref with wrong old value' '
test_must_fail git update-ref PSEUDOREF $D $E 2>err &&
test $C = $(cat .git/PSEUDOREF) &&
- grep "unexpected object ID" err
+ test_i18ngrep "unexpected object ID" err
'
test_expect_success 'delete pseudoref' '
@@ -495,7 +495,7 @@ test_expect_success 'do not delete pseudoref with wrong old value' '
git update-ref PSEUDOREF $A &&
test_must_fail git update-ref -d PSEUDOREF $B 2>err &&
test $A = $(cat .git/PSEUDOREF) &&
- grep "unexpected object ID" err
+ test_i18ngrep "unexpected object ID" err
'
test_expect_success 'delete pseudoref with correct old value' '
@@ -512,7 +512,7 @@ test_expect_success 'do not overwrite pseudoref with old OID zero' '
test_when_finished git update-ref -d PSEUDOREF &&
test_must_fail git update-ref PSEUDOREF $B $Z 2>err &&
test $A = $(cat .git/PSEUDOREF) &&
- grep "already exists" err
+ test_i18ngrep "already exists" err
'
# Test --stdin
@@ -650,7 +650,7 @@ test_expect_success 'stdin fails with duplicate refs' '
create $a $m
EOF
test_must_fail git update-ref --stdin <stdin 2>err &&
- grep "fatal: multiple updates for ref '"'"'$a'"'"' not allowed." err
+ test_i18ngrep "fatal: multiple updates for ref '"'"'$a'"'"' not allowed" err
'
test_expect_success 'stdin create ref works' '
@@ -1052,7 +1052,7 @@ test_expect_success 'stdin -z fails option with unknown name' '
test_expect_success 'stdin -z fails with duplicate refs' '
printf $F "create $a" "$m" "create $b" "$m" "create $a" "$m" >stdin &&
test_must_fail git update-ref -z --stdin <stdin 2>err &&
- grep "fatal: multiple updates for ref '"'"'$a'"'"' not allowed." err
+ test_i18ngrep "fatal: multiple updates for ref '"'"'$a'"'"' not allowed" err
'
test_expect_success 'stdin -z create ref works' '
@@ -1283,7 +1283,7 @@ test_expect_success 'fails with duplicate HEAD update' '
update HEAD $B
EOF
test_must_fail git update-ref --stdin <stdin 2>err &&
- grep "fatal: multiple updates for '\''HEAD'\'' (including one via its referent .refs/heads/target1.) are not allowed" err &&
+ test_i18ngrep "fatal: multiple updates for '\''HEAD'\'' (including one via its referent .refs/heads/target1.) are not allowed" err &&
echo "refs/heads/target1" >expect &&
git symbolic-ref HEAD >actual &&
test_cmp expect actual &&
@@ -1300,7 +1300,7 @@ test_expect_success 'fails with duplicate ref update via symref' '
update refs/heads/symref2 $B
EOF
test_must_fail git update-ref --stdin <stdin 2>err &&
- grep "fatal: multiple updates for '\''refs/heads/target2'\'' (including one via symref .refs/heads/symref2.) are not allowed" err &&
+ test_i18ngrep "fatal: multiple updates for '\''refs/heads/target2'\'' (including one via symref .refs/heads/symref2.) are not allowed" err &&
echo "refs/heads/target2" >expect &&
git symbolic-ref refs/heads/symref2 >actual &&
test_cmp expect actual &&
diff --git a/t/t1404-update-ref-errors.sh b/t/t1404-update-ref-errors.sh
index 3a887b5..2a42a58 100755
--- a/t/t1404-update-ref-errors.sh
+++ b/t/t1404-update-ref-errors.sh
@@ -27,7 +27,7 @@ test_update_rejected () {
fi &&
printf "create $prefix/%s $C\n" $create >input &&
test_must_fail git update-ref --stdin <input 2>output.err &&
- grep -F "$error" output.err &&
+ test_i18ngrep -F "$error" output.err &&
git for-each-ref $prefix >actual &&
test_cmp unchanged actual
}
@@ -103,7 +103,7 @@ df_test() {
printf "%s\n" "delete $delname" "create $addname $D"
fi >commands &&
test_must_fail git update-ref --stdin <commands 2>output.err &&
- test_cmp expected-err output.err &&
+ test_i18ncmp expected-err output.err &&
printf "%s\n" "$C $delref" >expected-refs &&
git for-each-ref --format="%(objectname) %(refname)" $prefix/r >actual-refs &&
test_cmp expected-refs actual-refs
diff --git a/t/t1411-reflog-show.sh b/t/t1411-reflog-show.sh
index 5969077..4d62cee 100755
--- a/t/t1411-reflog-show.sh
+++ b/t/t1411-reflog-show.sh
@@ -159,9 +159,9 @@ test_expect_success 'git log -g -p shows diffs vs. parents' '
git log -1 -p HEAD^ >log.one &&
git log -1 -p HEAD >log.two &&
(
- cat log.one; echo
- cat log.two; echo
- cat log.one; echo
+ cat log.one && echo &&
+ cat log.two && echo &&
+ cat log.one && echo &&
cat log.two
) >expect &&
test_cmp expect actual
diff --git a/t/t1450-fsck.sh b/t/t1450-fsck.sh
index 91fd714..7b7602d 100755
--- a/t/t1450-fsck.sh
+++ b/t/t1450-fsck.sh
@@ -372,7 +372,7 @@ test_expect_success 'rev-list --verify-objects with bad sha1' '
test_might_fail git rev-list --verify-objects refs/heads/bogus >/dev/null 2>out &&
cat out &&
- grep -q "error: sha1 mismatch 63ffffffffffffffffffffffffffffffffffffff" out
+ test_i18ngrep -q "error: sha1 mismatch 63ffffffffffffffffffffffffffffffffffffff" out
'
test_expect_success 'force fsck to ignore double author' '
diff --git a/t/t1507-rev-parse-upstream.sh b/t/t1507-rev-parse-upstream.sh
index f121890..fa3e499 100755
--- a/t/t1507-rev-parse-upstream.sh
+++ b/t/t1507-rev-parse-upstream.sh
@@ -123,9 +123,9 @@ test_expect_success 'checkout -b new my-side@{u} forks from the same' '
test_expect_success 'merge my-side@{u} records the correct name' '
(
- cd clone || exit
- git checkout master || exit
- git branch -D new ;# can fail but is ok
+ cd clone &&
+ git checkout master &&
+ test_might_fail git branch -D new &&
git branch -t new my-side@{u} &&
git merge -s ours new@{u} &&
git show -s --pretty=tformat:%s >actual &&
diff --git a/t/t1512-rev-parse-disambiguation.sh b/t/t1512-rev-parse-disambiguation.sh
index 96fe375..e4d5b56 100755
--- a/t/t1512-rev-parse-disambiguation.sh
+++ b/t/t1512-rev-parse-disambiguation.sh
@@ -34,8 +34,8 @@ test_expect_success 'blob and tree' '
for i in 0 1 2 3 4 5 6 7 8 9
do
echo $i
- done
- echo
+ done &&
+ echo &&
echo b1rwzyc3
) >a0blgqsjc &&
@@ -222,7 +222,7 @@ test_expect_success 'more history' '
test_might_fail git rm -f a0blgqsjc &&
(
- git cat-file blob $side:f5518nwu
+ git cat-file blob $side:f5518nwu &&
echo j3l0i9s6
) >ab2gs879 &&
git add ab2gs879 &&
diff --git a/t/t1700-split-index.sh b/t/t1700-split-index.sh
index 1e81b33..39133bc 100755
--- a/t/t1700-split-index.sh
+++ b/t/t1700-split-index.sh
@@ -435,7 +435,7 @@ test_expect_success 'writing split index with null sha1 does not write cache tre
commit=$(git commit-tree $tree -p HEAD <msg) &&
git update-ref HEAD "$commit" &&
GIT_ALLOW_NULL_SHA1=1 git reset --hard &&
- (test-tool dump-cache-tree >cache-tree.out || true) &&
+ test_might_fail test-tool dump-cache-tree >cache-tree.out &&
test_line_count = 0 cache-tree.out
'
diff --git a/t/t2016-checkout-patch.sh b/t/t2016-checkout-patch.sh
index 9cd0ac4..47aeb0b 100755
--- a/t/t2016-checkout-patch.sh
+++ b/t/t2016-checkout-patch.sh
@@ -20,33 +20,33 @@ test_expect_success PERL 'setup' '
test_expect_success PERL 'saying "n" does nothing' '
set_and_save_state dir/foo work head &&
- (echo n; echo n) | git checkout -p &&
+ test_write_lines n n | git checkout -p &&
verify_saved_state bar &&
verify_saved_state dir/foo
'
test_expect_success PERL 'git checkout -p' '
- (echo n; echo y) | git checkout -p &&
+ test_write_lines n y | git checkout -p &&
verify_saved_state bar &&
verify_state dir/foo head head
'
test_expect_success PERL 'git checkout -p with staged changes' '
set_state dir/foo work index &&
- (echo n; echo y) | git checkout -p &&
+ test_write_lines n y | git checkout -p &&
verify_saved_state bar &&
verify_state dir/foo index index
'
test_expect_success PERL 'git checkout -p HEAD with NO staged changes: abort' '
set_and_save_state dir/foo work head &&
- (echo n; echo y; echo n) | git checkout -p HEAD &&
+ test_write_lines n y n | git checkout -p HEAD &&
verify_saved_state bar &&
verify_saved_state dir/foo
'
test_expect_success PERL 'git checkout -p HEAD with NO staged changes: apply' '
- (echo n; echo y; echo y) | git checkout -p HEAD &&
+ test_write_lines n y y | git checkout -p HEAD &&
verify_saved_state bar &&
verify_state dir/foo head head
'
@@ -54,14 +54,14 @@ test_expect_success PERL 'git checkout -p HEAD with NO staged changes: apply' '
test_expect_success PERL 'git checkout -p HEAD with change already staged' '
set_state dir/foo index index &&
# the third n is to get out in case it mistakenly does not apply
- (echo n; echo y; echo n) | git checkout -p HEAD &&
+ test_write_lines n y n | git checkout -p HEAD &&
verify_saved_state bar &&
verify_state dir/foo head head
'
test_expect_success PERL 'git checkout -p HEAD^' '
# the third n is to get out in case it mistakenly does not apply
- (echo n; echo y; echo n) | git checkout -p HEAD^ &&
+ test_write_lines n y n | git checkout -p HEAD^ &&
verify_saved_state bar &&
verify_state dir/foo parent parent
'
@@ -69,7 +69,7 @@ test_expect_success PERL 'git checkout -p HEAD^' '
test_expect_success PERL 'git checkout -p handles deletion' '
set_state dir/foo work index &&
rm dir/foo &&
- (echo n; echo y) | git checkout -p &&
+ test_write_lines n y | git checkout -p &&
verify_saved_state bar &&
verify_state dir/foo index index
'
@@ -81,21 +81,21 @@ test_expect_success PERL 'git checkout -p handles deletion' '
test_expect_success PERL 'path limiting works: dir' '
set_state dir/foo work head &&
- (echo y; echo n) | git checkout -p dir &&
+ test_write_lines y n | git checkout -p dir &&
verify_saved_state bar &&
verify_state dir/foo head head
'
test_expect_success PERL 'path limiting works: -- dir' '
set_state dir/foo work head &&
- (echo y; echo n) | git checkout -p -- dir &&
+ test_write_lines y n | git checkout -p -- dir &&
verify_saved_state bar &&
verify_state dir/foo head head
'
test_expect_success PERL 'path limiting works: HEAD^ -- dir' '
# the third n is to get out in case it mistakenly does not apply
- (echo y; echo n; echo n) | git checkout -p HEAD^ -- dir &&
+ test_write_lines y n n | git checkout -p HEAD^ -- dir &&
verify_saved_state bar &&
verify_state dir/foo parent parent
'
@@ -103,7 +103,7 @@ test_expect_success PERL 'path limiting works: HEAD^ -- dir' '
test_expect_success PERL 'path limiting works: foo inside dir' '
set_state dir/foo work head &&
# the third n is to get out in case it mistakenly does not apply
- (echo y; echo n; echo n) | (cd dir && git checkout -p foo) &&
+ test_write_lines y n n | (cd dir && git checkout -p foo) &&
verify_saved_state bar &&
verify_state dir/foo head head
'
diff --git a/t/t2024-checkout-dwim.sh b/t/t2024-checkout-dwim.sh
index 3e5ac81..f79dfbb 100755
--- a/t/t2024-checkout-dwim.sh
+++ b/t/t2024-checkout-dwim.sh
@@ -23,6 +23,11 @@ test_branch_upstream () {
test_cmp expect.upstream actual.upstream
}
+status_uno_is_clean () {
+ git status -uno --porcelain >status.actual &&
+ test_must_be_empty status.actual
+}
+
test_expect_success 'setup' '
test_commit my_master &&
git init repo_a &&
@@ -55,6 +60,7 @@ test_expect_success 'checkout of non-existing branch fails' '
test_might_fail git branch -D xyzzy &&
test_must_fail git checkout xyzzy &&
+ status_uno_is_clean &&
test_must_fail git rev-parse --verify refs/heads/xyzzy &&
test_branch master
'
@@ -64,15 +70,47 @@ test_expect_success 'checkout of branch from multiple remotes fails #1' '
test_might_fail git branch -D foo &&
test_must_fail git checkout foo &&
+ status_uno_is_clean &&
test_must_fail git rev-parse --verify refs/heads/foo &&
test_branch master
'
+test_expect_success 'checkout of branch from multiple remotes fails with advice' '
+ git checkout -B master &&
+ test_might_fail git branch -D foo &&
+ test_must_fail git checkout foo 2>stderr &&
+ test_branch master &&
+ status_uno_is_clean &&
+ test_i18ngrep "^hint: " stderr &&
+ test_must_fail git -c advice.checkoutAmbiguousRemoteBranchName=false \
+ checkout foo 2>stderr &&
+ test_branch master &&
+ status_uno_is_clean &&
+ test_i18ngrep ! "^hint: " stderr &&
+ # Make sure the likes of checkout -p do not print this hint
+ git checkout -p foo 2>stderr &&
+ test_i18ngrep ! "^hint: " stderr &&
+ status_uno_is_clean
+'
+
+test_expect_success 'checkout of branch from multiple remotes succeeds with checkout.defaultRemote #1' '
+ git checkout -B master &&
+ status_uno_is_clean &&
+ test_might_fail git branch -D foo &&
+
+ git -c checkout.defaultRemote=repo_a checkout foo &&
+ status_uno_is_clean &&
+ test_branch foo &&
+ test_cmp_rev remotes/repo_a/foo HEAD &&
+ test_branch_upstream foo repo_a foo
+'
+
test_expect_success 'checkout of branch from a single remote succeeds #1' '
git checkout -B master &&
test_might_fail git branch -D bar &&
git checkout bar &&
+ status_uno_is_clean &&
test_branch bar &&
test_cmp_rev remotes/repo_a/bar HEAD &&
test_branch_upstream bar repo_a bar
@@ -83,6 +121,7 @@ test_expect_success 'checkout of branch from a single remote succeeds #2' '
test_might_fail git branch -D baz &&
git checkout baz &&
+ status_uno_is_clean &&
test_branch baz &&
test_cmp_rev remotes/other_b/baz HEAD &&
test_branch_upstream baz repo_b baz
@@ -90,6 +129,7 @@ test_expect_success 'checkout of branch from a single remote succeeds #2' '
test_expect_success '--no-guess suppresses branch auto-vivification' '
git checkout -B master &&
+ status_uno_is_clean &&
test_might_fail git branch -D bar &&
test_must_fail git checkout --no-guess bar &&
@@ -99,6 +139,7 @@ test_expect_success '--no-guess suppresses branch auto-vivification' '
test_expect_success 'setup more remotes with unconventional refspecs' '
git checkout -B master &&
+ status_uno_is_clean &&
git init repo_c &&
(
cd repo_c &&
@@ -128,27 +169,33 @@ test_expect_success 'setup more remotes with unconventional refspecs' '
test_expect_success 'checkout of branch from multiple remotes fails #2' '
git checkout -B master &&
+ status_uno_is_clean &&
test_might_fail git branch -D bar &&
test_must_fail git checkout bar &&
+ status_uno_is_clean &&
test_must_fail git rev-parse --verify refs/heads/bar &&
test_branch master
'
test_expect_success 'checkout of branch from multiple remotes fails #3' '
git checkout -B master &&
+ status_uno_is_clean &&
test_might_fail git branch -D baz &&
test_must_fail git checkout baz &&
+ status_uno_is_clean &&
test_must_fail git rev-parse --verify refs/heads/baz &&
test_branch master
'
test_expect_success 'checkout of branch from a single remote succeeds #3' '
git checkout -B master &&
+ status_uno_is_clean &&
test_might_fail git branch -D spam &&
git checkout spam &&
+ status_uno_is_clean &&
test_branch spam &&
test_cmp_rev refs/remotes/extra_dir/repo_c/extra_dir/spam HEAD &&
test_branch_upstream spam repo_c spam
@@ -156,9 +203,11 @@ test_expect_success 'checkout of branch from a single remote succeeds #3' '
test_expect_success 'checkout of branch from a single remote succeeds #4' '
git checkout -B master &&
+ status_uno_is_clean &&
test_might_fail git branch -D eggs &&
git checkout eggs &&
+ status_uno_is_clean &&
test_branch eggs &&
test_cmp_rev refs/repo_d/eggs HEAD &&
test_branch_upstream eggs repo_d eggs
@@ -166,32 +215,38 @@ test_expect_success 'checkout of branch from a single remote succeeds #4' '
test_expect_success 'checkout of branch with a file having the same name fails' '
git checkout -B master &&
+ status_uno_is_clean &&
test_might_fail git branch -D spam &&
>spam &&
test_must_fail git checkout spam &&
+ status_uno_is_clean &&
test_must_fail git rev-parse --verify refs/heads/spam &&
test_branch master
'
test_expect_success 'checkout of branch with a file in subdir having the same name fails' '
git checkout -B master &&
+ status_uno_is_clean &&
test_might_fail git branch -D spam &&
>spam &&
mkdir sub &&
mv spam sub/spam &&
test_must_fail git -C sub checkout spam &&
+ status_uno_is_clean &&
test_must_fail git rev-parse --verify refs/heads/spam &&
test_branch master
'
test_expect_success 'checkout <branch> -- succeeds, even if a file with the same name exists' '
git checkout -B master &&
+ status_uno_is_clean &&
test_might_fail git branch -D spam &&
>spam &&
git checkout spam -- &&
+ status_uno_is_clean &&
test_branch spam &&
test_cmp_rev refs/remotes/extra_dir/repo_c/extra_dir/spam HEAD &&
test_branch_upstream spam repo_c spam
@@ -200,6 +255,7 @@ test_expect_success 'checkout <branch> -- succeeds, even if a file with the same
test_expect_success 'loosely defined local base branch is reported correctly' '
git checkout master &&
+ status_uno_is_clean &&
git branch strict &&
git branch loose &&
git commit --allow-empty -m "a bit more" &&
@@ -210,7 +266,9 @@ test_expect_success 'loosely defined local base branch is reported correctly' '
test_config branch.loose.merge master &&
git checkout strict | sed -e "s/strict/BRANCHNAME/g" >expect &&
+ status_uno_is_clean &&
git checkout loose | sed -e "s/loose/BRANCHNAME/g" >actual &&
+ status_uno_is_clean &&
test_cmp expect actual
'
diff --git a/t/t2025-worktree-add.sh b/t/t2025-worktree-add.sh
index d2e49f7..166942c 100755
--- a/t/t2025-worktree-add.sh
+++ b/t/t2025-worktree-add.sh
@@ -402,6 +402,26 @@ test_expect_success '"add" <path> <branch> dwims' '
)
'
+test_expect_success '"add" <path> <branch> dwims with checkout.defaultRemote' '
+ test_when_finished rm -rf repo_upstream repo_dwim foo &&
+ setup_remote_repo repo_upstream repo_dwim &&
+ git init repo_dwim &&
+ (
+ cd repo_dwim &&
+ git remote add repo_upstream2 ../repo_upstream &&
+ git fetch repo_upstream2 &&
+ test_must_fail git worktree add ../foo foo &&
+ git -c checkout.defaultRemote=repo_upstream worktree add ../foo foo &&
+ git status -uno --porcelain >status.actual &&
+ test_must_be_empty status.actual
+ ) &&
+ (
+ cd foo &&
+ test_branch_upstream foo repo_upstream foo &&
+ test_cmp_rev refs/remotes/repo_upstream/foo refs/heads/foo
+ )
+'
+
test_expect_success 'git worktree add does not match remote' '
test_when_finished rm -rf repo_a repo_b foo &&
setup_remote_repo repo_a repo_b &&
diff --git a/t/t2103-update-index-ignore-missing.sh b/t/t2103-update-index-ignore-missing.sh
index 332694e..0114f05 100755
--- a/t/t2103-update-index-ignore-missing.sh
+++ b/t/t2103-update-index-ignore-missing.sh
@@ -32,7 +32,7 @@ test_expect_success basics '
test_create_repo xyzzy &&
cd xyzzy &&
>file &&
- git add file
+ git add file &&
git commit -m "sub initial"
) &&
git add xyzzy &&
diff --git a/t/t2202-add-addremove.sh b/t/t2202-add-addremove.sh
index e84079d..9ee6590 100755
--- a/t/t2202-add-addremove.sh
+++ b/t/t2202-add-addremove.sh
@@ -6,12 +6,12 @@ test_description='git add --all'
test_expect_success setup '
(
- echo .gitignore
+ echo .gitignore &&
echo will-remove
) >expect &&
(
- echo actual
- echo expect
+ echo actual &&
+ echo expect &&
echo ignored
) >.gitignore &&
git --literal-pathspecs add --all &&
@@ -25,10 +25,10 @@ test_expect_success setup '
test_expect_success 'git add --all' '
(
- echo .gitignore
- echo not-ignored
- echo "M .gitignore"
- echo "A not-ignored"
+ echo .gitignore &&
+ echo not-ignored &&
+ echo "M .gitignore" &&
+ echo "A not-ignored" &&
echo "D will-remove"
) >expect &&
>ignored &&
diff --git a/t/t3000-ls-files-others.sh b/t/t3000-ls-files-others.sh
index c525656..afd4756 100755
--- a/t/t3000-ls-files-others.sh
+++ b/t/t3000-ls-files-others.sh
@@ -84,7 +84,7 @@ test_expect_success SYMLINKS 'ls-files --others with symlinked submodule' '
) &&
(
cd super &&
- "$SHELL_PATH" "$TEST_DIRECTORY/../contrib/workdir/git-new-workdir" ../sub sub
+ "$SHELL_PATH" "$TEST_DIRECTORY/../contrib/workdir/git-new-workdir" ../sub sub &&
git ls-files --others --exclude-standard >../actual
) &&
echo sub/ >expect &&
diff --git a/t/t3005-ls-files-relative.sh b/t/t3005-ls-files-relative.sh
index 3778694..209b4c7 100755
--- a/t/t3005-ls-files-relative.sh
+++ b/t/t3005-ls-files-relative.sh
@@ -44,13 +44,13 @@ test_expect_success 'ls-files -c' '
cd top/sub &&
for f in ../y*
do
- echo "error: pathspec $sq$f$sq did not match any file(s) known to git."
+ echo "error: pathspec $sq$f$sq did not match any file(s) known to git"
done >expect.err &&
echo "Did you forget to ${sq}git add${sq}?" >>expect.err &&
ls ../x* >expect.out &&
test_must_fail git ls-files -c --error-unmatch ../[xy]* >actual.out 2>actual.err &&
test_cmp expect.out actual.out &&
- test_cmp expect.err actual.err
+ test_i18ncmp expect.err actual.err
)
'
@@ -59,13 +59,13 @@ test_expect_success 'ls-files -o' '
cd top/sub &&
for f in ../x*
do
- echo "error: pathspec $sq$f$sq did not match any file(s) known to git."
+ echo "error: pathspec $sq$f$sq did not match any file(s) known to git"
done >expect.err &&
echo "Did you forget to ${sq}git add${sq}?" >>expect.err &&
ls ../y* >expect.out &&
test_must_fail git ls-files -o --error-unmatch ../[xy]* >actual.out 2>actual.err &&
test_cmp expect.out actual.out &&
- test_cmp expect.err actual.err
+ test_i18ncmp expect.err actual.err
)
'
diff --git a/t/t3006-ls-files-long.sh b/t/t3006-ls-files-long.sh
index 202ad65..e109c3f 100755
--- a/t/t3006-ls-files-long.sh
+++ b/t/t3006-ls-files-long.sh
@@ -29,7 +29,7 @@ test_expect_success 'overly-long path does not replace another by mistake' '
printf "$pat" "$blob_a" "$path_a" "$blob_z" "$path_z" |
git update-index --add --index-info &&
(
- echo "$path_a"
+ echo "$path_a" &&
echo "$path_z"
) >expect &&
git ls-files >actual &&
diff --git a/t/t3008-ls-files-lazy-init-name-hash.sh b/t/t3008-ls-files-lazy-init-name-hash.sh
index 08af596..64f0473 100755
--- a/t/t3008-ls-files-lazy-init-name-hash.sh
+++ b/t/t3008-ls-files-lazy-init-name-hash.sh
@@ -14,10 +14,10 @@ LAZY_THREAD_COST=2000
test_expect_success 'no buffer overflow in lazy_init_name_hash' '
(
- test_seq $LAZY_THREAD_COST | sed "s/^/a_/"
- echo b/b/b
- test_seq $LAZY_THREAD_COST | sed "s/^/c_/"
- test_seq 50 | sed "s/^/d_/" | tr "\n" "/"; echo d
+ test_seq $LAZY_THREAD_COST | sed "s/^/a_/" &&
+ echo b/b/b &&
+ test_seq $LAZY_THREAD_COST | sed "s/^/c_/" &&
+ test_seq 50 | sed "s/^/d_/" | tr "\n" "/" && echo d
) |
sed "s/^/100644 $EMPTY_BLOB /" |
git update-index --index-info &&
diff --git a/t/t3030-merge-recursive.sh b/t/t3030-merge-recursive.sh
index 3563e77..ff641b3 100755
--- a/t/t3030-merge-recursive.sh
+++ b/t/t3030-merge-recursive.sh
@@ -36,15 +36,15 @@ test_expect_success 'setup 1' '
test_tick &&
git commit -m "master modifies a and d/e" &&
c1=$(git rev-parse --verify HEAD) &&
- ( git ls-tree -r HEAD ; git ls-files -s ) >actual &&
+ ( git ls-tree -r HEAD && git ls-files -s ) >actual &&
(
- echo "100644 blob $o1 a"
- echo "100644 blob $o0 b"
- echo "100644 blob $o0 c"
- echo "100644 blob $o1 d/e"
- echo "100644 $o1 0 a"
- echo "100644 $o0 0 b"
- echo "100644 $o0 0 c"
+ echo "100644 blob $o1 a" &&
+ echo "100644 blob $o0 b" &&
+ echo "100644 blob $o0 c" &&
+ echo "100644 blob $o1 d/e" &&
+ echo "100644 $o1 0 a" &&
+ echo "100644 $o0 0 b" &&
+ echo "100644 $o0 0 c" &&
echo "100644 $o1 0 d/e"
) >expected &&
test_cmp expected actual
@@ -54,15 +54,15 @@ test_expect_success 'setup 2' '
rm -rf [abcd] &&
git checkout side &&
- ( git ls-tree -r HEAD ; git ls-files -s ) >actual &&
+ ( git ls-tree -r HEAD && git ls-files -s ) >actual &&
(
- echo "100644 blob $o0 a"
- echo "100644 blob $o0 b"
- echo "100644 blob $o0 c"
- echo "100644 blob $o0 d/e"
- echo "100644 $o0 0 a"
- echo "100644 $o0 0 b"
- echo "100644 $o0 0 c"
+ echo "100644 blob $o0 a" &&
+ echo "100644 blob $o0 b" &&
+ echo "100644 blob $o0 c" &&
+ echo "100644 blob $o0 d/e" &&
+ echo "100644 $o0 0 a" &&
+ echo "100644 $o0 0 b" &&
+ echo "100644 $o0 0 c" &&
echo "100644 $o0 0 d/e"
) >expected &&
test_cmp expected actual &&
@@ -75,15 +75,15 @@ test_expect_success 'setup 2' '
test_tick &&
git commit -m "side modifies a" &&
c2=$(git rev-parse --verify HEAD) &&
- ( git ls-tree -r HEAD ; git ls-files -s ) >actual &&
+ ( git ls-tree -r HEAD && git ls-files -s ) >actual &&
(
- echo "100644 blob $o2 a"
- echo "100644 blob $o0 b"
- echo "100644 blob $o0 c"
- echo "100644 blob $o0 d/e"
- echo "100644 $o2 0 a"
- echo "100644 $o0 0 b"
- echo "100644 $o0 0 c"
+ echo "100644 blob $o2 a" &&
+ echo "100644 blob $o0 b" &&
+ echo "100644 blob $o0 c" &&
+ echo "100644 blob $o0 d/e" &&
+ echo "100644 $o2 0 a" &&
+ echo "100644 $o0 0 b" &&
+ echo "100644 $o0 0 c" &&
echo "100644 $o0 0 d/e"
) >expected &&
test_cmp expected actual
@@ -93,15 +93,15 @@ test_expect_success 'setup 3' '
rm -rf [abcd] &&
git checkout df-1 &&
- ( git ls-tree -r HEAD ; git ls-files -s ) >actual &&
+ ( git ls-tree -r HEAD && git ls-files -s ) >actual &&
(
- echo "100644 blob $o0 a"
- echo "100644 blob $o0 b"
- echo "100644 blob $o0 c"
- echo "100644 blob $o0 d/e"
- echo "100644 $o0 0 a"
- echo "100644 $o0 0 b"
- echo "100644 $o0 0 c"
+ echo "100644 blob $o0 a" &&
+ echo "100644 blob $o0 b" &&
+ echo "100644 blob $o0 c" &&
+ echo "100644 blob $o0 d/e" &&
+ echo "100644 $o0 0 a" &&
+ echo "100644 $o0 0 b" &&
+ echo "100644 $o0 0 c" &&
echo "100644 $o0 0 d/e"
) >expected &&
test_cmp expected actual &&
@@ -112,15 +112,15 @@ test_expect_success 'setup 3' '
test_tick &&
git commit -m "df-1 makes b/c" &&
c3=$(git rev-parse --verify HEAD) &&
- ( git ls-tree -r HEAD ; git ls-files -s ) >actual &&
+ ( git ls-tree -r HEAD && git ls-files -s ) >actual &&
(
- echo "100644 blob $o0 a"
- echo "100644 blob $o3 b/c"
- echo "100644 blob $o0 c"
- echo "100644 blob $o0 d/e"
- echo "100644 $o0 0 a"
- echo "100644 $o3 0 b/c"
- echo "100644 $o0 0 c"
+ echo "100644 blob $o0 a" &&
+ echo "100644 blob $o3 b/c" &&
+ echo "100644 blob $o0 c" &&
+ echo "100644 blob $o0 d/e" &&
+ echo "100644 $o0 0 a" &&
+ echo "100644 $o3 0 b/c" &&
+ echo "100644 $o0 0 c" &&
echo "100644 $o0 0 d/e"
) >expected &&
test_cmp expected actual
@@ -130,15 +130,15 @@ test_expect_success 'setup 4' '
rm -rf [abcd] &&
git checkout df-2 &&
- ( git ls-tree -r HEAD ; git ls-files -s ) >actual &&
+ ( git ls-tree -r HEAD && git ls-files -s ) >actual &&
(
- echo "100644 blob $o0 a"
- echo "100644 blob $o0 b"
- echo "100644 blob $o0 c"
- echo "100644 blob $o0 d/e"
- echo "100644 $o0 0 a"
- echo "100644 $o0 0 b"
- echo "100644 $o0 0 c"
+ echo "100644 blob $o0 a" &&
+ echo "100644 blob $o0 b" &&
+ echo "100644 blob $o0 c" &&
+ echo "100644 blob $o0 d/e" &&
+ echo "100644 $o0 0 a" &&
+ echo "100644 $o0 0 b" &&
+ echo "100644 $o0 0 c" &&
echo "100644 $o0 0 d/e"
) >expected &&
test_cmp expected actual &&
@@ -149,15 +149,15 @@ test_expect_success 'setup 4' '
test_tick &&
git commit -m "df-2 makes a/c" &&
c4=$(git rev-parse --verify HEAD) &&
- ( git ls-tree -r HEAD ; git ls-files -s ) >actual &&
+ ( git ls-tree -r HEAD && git ls-files -s ) >actual &&
(
- echo "100644 blob $o4 a/c"
- echo "100644 blob $o0 b"
- echo "100644 blob $o0 c"
- echo "100644 blob $o0 d/e"
- echo "100644 $o4 0 a/c"
- echo "100644 $o0 0 b"
- echo "100644 $o0 0 c"
+ echo "100644 blob $o4 a/c" &&
+ echo "100644 blob $o0 b" &&
+ echo "100644 blob $o0 c" &&
+ echo "100644 blob $o0 d/e" &&
+ echo "100644 $o4 0 a/c" &&
+ echo "100644 $o0 0 b" &&
+ echo "100644 $o0 0 c" &&
echo "100644 $o0 0 d/e"
) >expected &&
test_cmp expected actual
@@ -167,15 +167,15 @@ test_expect_success 'setup 5' '
rm -rf [abcd] &&
git checkout remove &&
- ( git ls-tree -r HEAD ; git ls-files -s ) >actual &&
+ ( git ls-tree -r HEAD && git ls-files -s ) >actual &&
(
- echo "100644 blob $o0 a"
- echo "100644 blob $o0 b"
- echo "100644 blob $o0 c"
- echo "100644 blob $o0 d/e"
- echo "100644 $o0 0 a"
- echo "100644 $o0 0 b"
- echo "100644 $o0 0 c"
+ echo "100644 blob $o0 a" &&
+ echo "100644 blob $o0 b" &&
+ echo "100644 blob $o0 c" &&
+ echo "100644 blob $o0 d/e" &&
+ echo "100644 $o0 0 a" &&
+ echo "100644 $o0 0 b" &&
+ echo "100644 $o0 0 c" &&
echo "100644 $o0 0 d/e"
) >expected &&
test_cmp expected actual &&
@@ -190,13 +190,13 @@ test_expect_success 'setup 5' '
test_tick &&
git commit -m "remove removes b and modifies a" &&
c5=$(git rev-parse --verify HEAD) &&
- ( git ls-tree -r HEAD ; git ls-files -s ) >actual &&
+ ( git ls-tree -r HEAD && git ls-files -s ) >actual &&
(
- echo "100644 blob $o5 a"
- echo "100644 blob $o0 c"
- echo "100644 blob $o0 d/e"
- echo "100644 $o5 0 a"
- echo "100644 $o0 0 c"
+ echo "100644 blob $o5 a" &&
+ echo "100644 blob $o0 c" &&
+ echo "100644 blob $o0 d/e" &&
+ echo "100644 $o5 0 a" &&
+ echo "100644 $o0 0 c" &&
echo "100644 $o0 0 d/e"
) >expected &&
test_cmp expected actual
@@ -207,15 +207,15 @@ test_expect_success 'setup 6' '
rm -rf [abcd] &&
git checkout df-3 &&
- ( git ls-tree -r HEAD ; git ls-files -s ) >actual &&
+ ( git ls-tree -r HEAD && git ls-files -s ) >actual &&
(
- echo "100644 blob $o0 a"
- echo "100644 blob $o0 b"
- echo "100644 blob $o0 c"
- echo "100644 blob $o0 d/e"
- echo "100644 $o0 0 a"
- echo "100644 $o0 0 b"
- echo "100644 $o0 0 c"
+ echo "100644 blob $o0 a" &&
+ echo "100644 blob $o0 b" &&
+ echo "100644 blob $o0 c" &&
+ echo "100644 blob $o0 d/e" &&
+ echo "100644 $o0 0 a" &&
+ echo "100644 $o0 0 b" &&
+ echo "100644 $o0 0 c" &&
echo "100644 $o0 0 d/e"
) >expected &&
test_cmp expected actual &&
@@ -226,15 +226,15 @@ test_expect_success 'setup 6' '
test_tick &&
git commit -m "df-3 makes d" &&
c6=$(git rev-parse --verify HEAD) &&
- ( git ls-tree -r HEAD ; git ls-files -s ) >actual &&
+ ( git ls-tree -r HEAD && git ls-files -s ) >actual &&
(
- echo "100644 blob $o0 a"
- echo "100644 blob $o0 b"
- echo "100644 blob $o0 c"
- echo "100644 blob $o6 d"
- echo "100644 $o0 0 a"
- echo "100644 $o0 0 b"
- echo "100644 $o0 0 c"
+ echo "100644 blob $o0 a" &&
+ echo "100644 blob $o0 b" &&
+ echo "100644 blob $o0 c" &&
+ echo "100644 blob $o6 d" &&
+ echo "100644 $o0 0 a" &&
+ echo "100644 $o0 0 b" &&
+ echo "100644 $o0 0 c" &&
echo "100644 $o6 0 d"
) >expected &&
test_cmp expected actual
@@ -286,11 +286,11 @@ test_expect_success 'merge-recursive result' '
git ls-files -s >actual &&
(
- echo "100644 $o0 1 a"
- echo "100644 $o2 2 a"
- echo "100644 $o1 3 a"
- echo "100644 $o0 0 b"
- echo "100644 $o0 0 c"
+ echo "100644 $o0 1 a" &&
+ echo "100644 $o2 2 a" &&
+ echo "100644 $o1 3 a" &&
+ echo "100644 $o0 0 b" &&
+ echo "100644 $o0 0 c" &&
echo "100644 $o1 0 d/e"
) >expected &&
test_cmp expected actual
@@ -325,10 +325,10 @@ test_expect_success 'merge-recursive remove conflict' '
git ls-files -s >actual &&
(
- echo "100644 $o0 1 a"
- echo "100644 $o1 2 a"
- echo "100644 $o5 3 a"
- echo "100644 $o0 0 c"
+ echo "100644 $o0 1 a" &&
+ echo "100644 $o1 2 a" &&
+ echo "100644 $o5 3 a" &&
+ echo "100644 $o0 0 c" &&
echo "100644 $o1 0 d/e"
) >expected &&
test_cmp expected actual
@@ -347,9 +347,9 @@ test_expect_success 'merge-recursive result' '
git ls-files -s >actual &&
(
- echo "100644 $o1 0 a"
- echo "100644 $o3 0 b/c"
- echo "100644 $o0 0 c"
+ echo "100644 $o1 0 a" &&
+ echo "100644 $o3 0 b/c" &&
+ echo "100644 $o0 0 c" &&
echo "100644 $o1 0 d/e"
) >expected &&
test_cmp expected actual
@@ -369,11 +369,11 @@ test_expect_success 'merge-recursive d/f conflict result' '
git ls-files -s >actual &&
(
- echo "100644 $o0 1 a"
- echo "100644 $o1 2 a"
- echo "100644 $o4 0 a/c"
- echo "100644 $o0 0 b"
- echo "100644 $o0 0 c"
+ echo "100644 $o0 1 a" &&
+ echo "100644 $o1 2 a" &&
+ echo "100644 $o4 0 a/c" &&
+ echo "100644 $o0 0 b" &&
+ echo "100644 $o0 0 c" &&
echo "100644 $o1 0 d/e"
) >expected &&
test_cmp expected actual
@@ -393,11 +393,11 @@ test_expect_success 'merge-recursive d/f conflict result the other way' '
git ls-files -s >actual &&
(
- echo "100644 $o0 1 a"
- echo "100644 $o1 3 a"
- echo "100644 $o4 0 a/c"
- echo "100644 $o0 0 b"
- echo "100644 $o0 0 c"
+ echo "100644 $o0 1 a" &&
+ echo "100644 $o1 3 a" &&
+ echo "100644 $o4 0 a/c" &&
+ echo "100644 $o0 0 b" &&
+ echo "100644 $o0 0 c" &&
echo "100644 $o1 0 d/e"
) >expected &&
test_cmp expected actual
@@ -417,11 +417,11 @@ test_expect_success 'merge-recursive d/f conflict result' '
git ls-files -s >actual &&
(
- echo "100644 $o1 0 a"
- echo "100644 $o0 0 b"
- echo "100644 $o0 0 c"
- echo "100644 $o6 3 d"
- echo "100644 $o0 1 d/e"
+ echo "100644 $o1 0 a" &&
+ echo "100644 $o0 0 b" &&
+ echo "100644 $o0 0 c" &&
+ echo "100644 $o6 3 d" &&
+ echo "100644 $o0 1 d/e" &&
echo "100644 $o1 2 d/e"
) >expected &&
test_cmp expected actual
@@ -441,11 +441,11 @@ test_expect_success 'merge-recursive d/f conflict result' '
git ls-files -s >actual &&
(
- echo "100644 $o1 0 a"
- echo "100644 $o0 0 b"
- echo "100644 $o0 0 c"
- echo "100644 $o6 2 d"
- echo "100644 $o0 1 d/e"
+ echo "100644 $o1 0 a" &&
+ echo "100644 $o0 0 b" &&
+ echo "100644 $o0 0 c" &&
+ echo "100644 $o6 2 d" &&
+ echo "100644 $o0 1 d/e" &&
echo "100644 $o1 3 d/e"
) >expected &&
test_cmp expected actual
@@ -465,13 +465,13 @@ test_expect_success 'reset and bind merge' '
git read-tree --prefix=M/ master &&
git ls-files -s >actual &&
(
- echo "100644 $o1 0 M/a"
- echo "100644 $o0 0 M/b"
- echo "100644 $o0 0 M/c"
- echo "100644 $o1 0 M/d/e"
- echo "100644 $o1 0 a"
- echo "100644 $o0 0 b"
- echo "100644 $o0 0 c"
+ echo "100644 $o1 0 M/a" &&
+ echo "100644 $o0 0 M/b" &&
+ echo "100644 $o0 0 M/c" &&
+ echo "100644 $o1 0 M/d/e" &&
+ echo "100644 $o1 0 a" &&
+ echo "100644 $o0 0 b" &&
+ echo "100644 $o0 0 c" &&
echo "100644 $o1 0 d/e"
) >expected &&
test_cmp expected actual &&
@@ -479,17 +479,17 @@ test_expect_success 'reset and bind merge' '
git read-tree --prefix=a1/ master &&
git ls-files -s >actual &&
(
- echo "100644 $o1 0 M/a"
- echo "100644 $o0 0 M/b"
- echo "100644 $o0 0 M/c"
- echo "100644 $o1 0 M/d/e"
- echo "100644 $o1 0 a"
- echo "100644 $o1 0 a1/a"
- echo "100644 $o0 0 a1/b"
- echo "100644 $o0 0 a1/c"
- echo "100644 $o1 0 a1/d/e"
- echo "100644 $o0 0 b"
- echo "100644 $o0 0 c"
+ echo "100644 $o1 0 M/a" &&
+ echo "100644 $o0 0 M/b" &&
+ echo "100644 $o0 0 M/c" &&
+ echo "100644 $o1 0 M/d/e" &&
+ echo "100644 $o1 0 a" &&
+ echo "100644 $o1 0 a1/a" &&
+ echo "100644 $o0 0 a1/b" &&
+ echo "100644 $o0 0 a1/c" &&
+ echo "100644 $o1 0 a1/d/e" &&
+ echo "100644 $o0 0 b" &&
+ echo "100644 $o0 0 c" &&
echo "100644 $o1 0 d/e"
) >expected &&
test_cmp expected actual &&
@@ -497,21 +497,21 @@ test_expect_success 'reset and bind merge' '
git read-tree --prefix=z/ master &&
git ls-files -s >actual &&
(
- echo "100644 $o1 0 M/a"
- echo "100644 $o0 0 M/b"
- echo "100644 $o0 0 M/c"
- echo "100644 $o1 0 M/d/e"
- echo "100644 $o1 0 a"
- echo "100644 $o1 0 a1/a"
- echo "100644 $o0 0 a1/b"
- echo "100644 $o0 0 a1/c"
- echo "100644 $o1 0 a1/d/e"
- echo "100644 $o0 0 b"
- echo "100644 $o0 0 c"
- echo "100644 $o1 0 d/e"
- echo "100644 $o1 0 z/a"
- echo "100644 $o0 0 z/b"
- echo "100644 $o0 0 z/c"
+ echo "100644 $o1 0 M/a" &&
+ echo "100644 $o0 0 M/b" &&
+ echo "100644 $o0 0 M/c" &&
+ echo "100644 $o1 0 M/d/e" &&
+ echo "100644 $o1 0 a" &&
+ echo "100644 $o1 0 a1/a" &&
+ echo "100644 $o0 0 a1/b" &&
+ echo "100644 $o0 0 a1/c" &&
+ echo "100644 $o1 0 a1/d/e" &&
+ echo "100644 $o0 0 b" &&
+ echo "100644 $o0 0 c" &&
+ echo "100644 $o1 0 d/e" &&
+ echo "100644 $o1 0 z/a" &&
+ echo "100644 $o0 0 z/b" &&
+ echo "100644 $o0 0 z/c" &&
echo "100644 $o1 0 z/d/e"
) >expected &&
test_cmp expected actual
@@ -589,8 +589,8 @@ test_expect_success 'merge-recursive simple w/submodule result' '
git ls-files -s >actual &&
(
- echo "100644 $o5 0 a"
- echo "100644 $o0 0 c"
+ echo "100644 $o5 0 a" &&
+ echo "100644 $o0 0 c" &&
echo "160000 $c1 0 d"
) >expected &&
test_cmp expected actual
@@ -601,13 +601,13 @@ test_expect_success 'merge-recursive copy vs. rename' '
git merge rename &&
( git ls-tree -r HEAD && git ls-files -s ) >actual &&
(
- echo "100644 blob $o0 b"
- echo "100644 blob $o0 c"
- echo "100644 blob $o0 d/e"
- echo "100644 blob $o0 e"
- echo "100644 $o0 0 b"
- echo "100644 $o0 0 c"
- echo "100644 $o0 0 d/e"
+ echo "100644 blob $o0 b" &&
+ echo "100644 blob $o0 c" &&
+ echo "100644 blob $o0 d/e" &&
+ echo "100644 blob $o0 e" &&
+ echo "100644 $o0 0 b" &&
+ echo "100644 $o0 0 c" &&
+ echo "100644 $o0 0 d/e" &&
echo "100644 $o0 0 e"
) >expected &&
test_cmp expected actual
@@ -617,17 +617,17 @@ test_expect_failure 'merge-recursive rename vs. rename/symlink' '
git checkout -f rename &&
git merge rename-ln &&
- ( git ls-tree -r HEAD ; git ls-files -s ) >actual &&
+ ( git ls-tree -r HEAD && git ls-files -s ) >actual &&
(
- echo "120000 blob $oln a"
- echo "100644 blob $o0 b"
- echo "100644 blob $o0 c"
- echo "100644 blob $o0 d/e"
- echo "100644 blob $o0 e"
- echo "120000 $oln 0 a"
- echo "100644 $o0 0 b"
- echo "100644 $o0 0 c"
- echo "100644 $o0 0 d/e"
+ echo "120000 blob $oln a" &&
+ echo "100644 blob $o0 b" &&
+ echo "100644 blob $o0 c" &&
+ echo "100644 blob $o0 d/e" &&
+ echo "100644 blob $o0 e" &&
+ echo "120000 $oln 0 a" &&
+ echo "100644 $o0 0 b" &&
+ echo "100644 $o0 0 c" &&
+ echo "100644 $o0 0 d/e" &&
echo "100644 $o0 0 e"
) >expected &&
test_cmp expected actual
diff --git a/t/t3031-merge-criscross.sh b/t/t3031-merge-criscross.sh
index e59b0a3..3824756 100755
--- a/t/t3031-merge-criscross.sh
+++ b/t/t3031-merge-criscross.sh
@@ -88,7 +88,7 @@ test_expect_success 'setup repo with criss-cross history' '
git branch G
'
-test_expect_success 'recursive merge between F and G, causes segfault' '
+test_expect_success 'recursive merge between F and G does not cause segfault' '
git merge F
'
diff --git a/t/t3050-subprojects-fetch.sh b/t/t3050-subprojects-fetch.sh
index 2f5f41a..f1f09ab 100755
--- a/t/t3050-subprojects-fetch.sh
+++ b/t/t3050-subprojects-fetch.sh
@@ -21,10 +21,10 @@ test_expect_success setup '
test_expect_success clone '
git clone "file://$(pwd)/.git" cloned &&
- (git rev-parse HEAD; git ls-files -s) >expected &&
+ (git rev-parse HEAD && git ls-files -s) >expected &&
(
cd cloned &&
- (git rev-parse HEAD; git ls-files -s) >../actual
+ (git rev-parse HEAD && git ls-files -s) >../actual
) &&
test_cmp expected actual
'
@@ -40,11 +40,11 @@ test_expect_success advance '
'
test_expect_success fetch '
- (git rev-parse HEAD; git ls-files -s) >expected &&
+ (git rev-parse HEAD && git ls-files -s) >expected &&
(
cd cloned &&
git pull &&
- (git rev-parse HEAD; git ls-files -s) >../actual
+ (git rev-parse HEAD && git ls-files -s) >../actual
) &&
test_cmp expected actual
'
diff --git a/t/t3102-ls-tree-wildcards.sh b/t/t3102-ls-tree-wildcards.sh
index e804377..1e16c6b 100755
--- a/t/t3102-ls-tree-wildcards.sh
+++ b/t/t3102-ls-tree-wildcards.sh
@@ -23,7 +23,7 @@ test_expect_success 'ls-tree outside prefix' '
cat >expect <<-EOF &&
100644 blob $EMPTY_BLOB ../a[a]/three
EOF
- ( cd aa && git ls-tree -r HEAD "../a[a]"; ) >actual &&
+ ( cd aa && git ls-tree -r HEAD "../a[a]" ) >actual &&
test_cmp expect actual
'
diff --git a/t/t3210-pack-refs.sh b/t/t3210-pack-refs.sh
index afa27ff..7333d7d 100755
--- a/t/t3210-pack-refs.sh
+++ b/t/t3210-pack-refs.sh
@@ -186,7 +186,7 @@ test_expect_success 'notice d/f conflict with existing directory' '
test_expect_success 'existing directory reports concrete ref' '
test_must_fail git branch foo 2>stderr &&
- grep refs/heads/foo/bar/baz stderr
+ test_i18ngrep refs/heads/foo/bar/baz stderr
'
test_expect_success 'notice d/f conflict with existing ref' '
@@ -231,9 +231,9 @@ test_expect_success 'timeout if packed-refs.lock exists' '
test_expect_success 'retry acquiring packed-refs.lock' '
LOCK=.git/packed-refs.lock &&
>"$LOCK" &&
- test_when_finished "wait; rm -f $LOCK" &&
+ test_when_finished "wait && rm -f $LOCK" &&
{
- ( sleep 1 ; rm -f $LOCK ) &
+ ( sleep 1 && rm -f $LOCK ) &
} &&
git -c core.packedrefstimeout=3000 pack-refs --all --prune
'
diff --git a/t/t3301-notes.sh b/t/t3301-notes.sh
index 2d200fd..ac62dc0 100755
--- a/t/t3301-notes.sh
+++ b/t/t3301-notes.sh
@@ -914,7 +914,7 @@ test_expect_success 'git notes copy --stdin' '
${indent}
${indent}yet another note
EOF
- (echo $(git rev-parse HEAD~3) $(git rev-parse HEAD^); \
+ (echo $(git rev-parse HEAD~3) $(git rev-parse HEAD^) &&
echo $(git rev-parse HEAD~2) $(git rev-parse HEAD)) |
git notes copy --stdin &&
git log -2 >actual &&
@@ -939,7 +939,7 @@ test_expect_success 'git notes copy --for-rewrite (unconfigured)' '
EOF
test_commit 14th &&
test_commit 15th &&
- (echo $(git rev-parse HEAD~3) $(git rev-parse HEAD^); \
+ (echo $(git rev-parse HEAD~3) $(git rev-parse HEAD^) &&
echo $(git rev-parse HEAD~2) $(git rev-parse HEAD)) |
git notes copy --for-rewrite=foo &&
git log -2 >actual &&
@@ -972,7 +972,7 @@ test_expect_success 'git notes copy --for-rewrite (enabled)' '
EOF
test_config notes.rewriteMode overwrite &&
test_config notes.rewriteRef "refs/notes/*" &&
- (echo $(git rev-parse HEAD~3) $(git rev-parse HEAD^); \
+ (echo $(git rev-parse HEAD~3) $(git rev-parse HEAD^) &&
echo $(git rev-parse HEAD~2) $(git rev-parse HEAD)) |
git notes copy --for-rewrite=foo &&
git log -2 >actual &&
@@ -1059,7 +1059,7 @@ test_expect_success 'git notes copy --for-rewrite (append two to one)' '
git notes add -f -m"append 2" HEAD^^ &&
test_config notes.rewriteMode concatenate &&
test_config notes.rewriteRef "refs/notes/*" &&
- (echo $(git rev-parse HEAD^) $(git rev-parse HEAD);
+ (echo $(git rev-parse HEAD^) $(git rev-parse HEAD) &&
echo $(git rev-parse HEAD^^) $(git rev-parse HEAD)) |
git notes copy --for-rewrite=foo &&
git log -1 >actual &&
diff --git a/t/t3310-notes-merge-manual-resolve.sh b/t/t3310-notes-merge-manual-resolve.sh
index 9c1bf6e..68436ee 100755
--- a/t/t3310-notes-merge-manual-resolve.sh
+++ b/t/t3310-notes-merge-manual-resolve.sh
@@ -541,9 +541,9 @@ EOF
test "$(git rev-parse refs/notes/y)" = "$(git rev-parse NOTES_MERGE_PARTIAL^1)" &&
test "$(git rev-parse refs/notes/m)" != "$(git rev-parse NOTES_MERGE_PARTIAL^1)" &&
# Mention refs/notes/m, and its current and expected value in output
- grep -q "refs/notes/m" output &&
- grep -q "$(git rev-parse refs/notes/m)" output &&
- grep -q "$(git rev-parse NOTES_MERGE_PARTIAL^1)" output &&
+ test_i18ngrep -q "refs/notes/m" output &&
+ test_i18ngrep -q "$(git rev-parse refs/notes/m)" output &&
+ test_i18ngrep -q "$(git rev-parse NOTES_MERGE_PARTIAL^1)" output &&
# Verify that other notes refs has not changed (w, x, y and z)
verify_notes w &&
verify_notes x &&
diff --git a/t/t3400-rebase.sh b/t/t3400-rebase.sh
index 72d9564..3996ee0 100755
--- a/t/t3400-rebase.sh
+++ b/t/t3400-rebase.sh
@@ -200,10 +200,10 @@ test_expect_success 'rebase -q is quiet' '
test_expect_success 'Rebase a commit that sprinkles CRs in' '
(
- echo "One"
- echo "TwoQ"
- echo "Three"
- echo "FQur"
+ echo "One" &&
+ echo "TwoQ" &&
+ echo "Three" &&
+ echo "FQur" &&
echo "Five"
) | q_to_cr >CR &&
git add CR &&
diff --git a/t/t3402-rebase-merge.sh b/t/t3402-rebase-merge.sh
index 488945e..a1ec501 100755
--- a/t/t3402-rebase-merge.sh
+++ b/t/t3402-rebase-merge.sh
@@ -25,7 +25,7 @@ test_expect_success setup '
git commit -a -m"master updates a bit more." &&
git checkout side &&
- (echo "0 $T" ; cat original) >renamed &&
+ (echo "0 $T" && cat original) >renamed &&
git add renamed &&
git update-index --force-remove original &&
git commit -a -m"side renames and edits." &&
@@ -143,7 +143,7 @@ test_expect_success 'rebase -s funny -Xopt' '
git checkout -b test-funny master^ &&
test_commit funny &&
(
- PATH=./test-bin:$PATH
+ PATH=./test-bin:$PATH &&
git rebase -s funny -Xopt master
) &&
test -f funny.was.run
diff --git a/t/t3404-rebase-interactive.sh b/t/t3404-rebase-interactive.sh
index 0161690..4c7b1ea 100755
--- a/t/t3404-rebase-interactive.sh
+++ b/t/t3404-rebase-interactive.sh
@@ -119,6 +119,15 @@ test_expect_success 'rebase -i with exec allows git commands in subdirs' '
)
'
+test_expect_success 'rebase -i sets work tree properly' '
+ test_when_finished "rm -rf subdir" &&
+ test_when_finished "test_might_fail git rebase --abort" &&
+ mkdir subdir &&
+ git rebase -x "(cd subdir && git rev-parse --show-toplevel)" HEAD^ \
+ >actual &&
+ ! grep "/subdir$" actual
+'
+
test_expect_success 'rebase -i with the exec command checks tree cleanness' '
git checkout master &&
set_fake_editor &&
@@ -516,7 +525,7 @@ test_expect_success 'interrupted squash works as expected' '
one=$(git rev-parse HEAD~3) &&
set_fake_editor &&
test_must_fail env FAKE_LINES="1 squash 3 2" git rebase -i HEAD~3 &&
- (echo one; echo two; echo four) > conflict &&
+ test_write_lines one two four > conflict &&
git add conflict &&
test_must_fail git rebase --continue &&
echo resolved > conflict &&
@@ -530,10 +539,10 @@ test_expect_success 'interrupted squash works as expected (case 2)' '
one=$(git rev-parse HEAD~3) &&
set_fake_editor &&
test_must_fail env FAKE_LINES="3 squash 1 2" git rebase -i HEAD~3 &&
- (echo one; echo four) > conflict &&
+ test_write_lines one four > conflict &&
git add conflict &&
test_must_fail git rebase --continue &&
- (echo one; echo two; echo four) > conflict &&
+ test_write_lines one two four > conflict &&
git add conflict &&
test_must_fail git rebase --continue &&
echo resolved > conflict &&
@@ -1238,7 +1247,7 @@ rebase_setup_and_clean () {
test_might_fail git branch -D $1 &&
test_might_fail git rebase --abort
" &&
- git checkout -b $1 master
+ git checkout -b $1 ${2:-master}
}
test_expect_success 'drop' '
@@ -1415,4 +1424,12 @@ test_expect_success 'rebase -i --gpg-sign=<key-id> overrides commit.gpgSign' '
test_i18ngrep "$SQ-S\"S I Gner\"$SQ" err
'
+test_expect_success 'valid author header after --root swap' '
+ rebase_setup_and_clean author-header no-conflict-branch &&
+ set_fake_editor &&
+ FAKE_LINES="2 1" git rebase -i --root &&
+ git cat-file commit HEAD^ >out &&
+ grep "^author ..*> [0-9][0-9]* [-+][0-9][0-9][0-9][0-9]$" out
+'
+
test_done
diff --git a/t/t3418-rebase-continue.sh b/t/t3418-rebase-continue.sh
index c145dba..65ed7db 100755
--- a/t/t3418-rebase-continue.sh
+++ b/t/t3418-rebase-continue.sh
@@ -60,7 +60,7 @@ test_expect_success 'rebase --continue remembers merge strategy and options' '
EOF
chmod +x test-bin/git-merge-funny &&
(
- PATH=./test-bin:$PATH
+ PATH=./test-bin:$PATH &&
test_must_fail git rebase -s funny -Xopt master topic
) &&
test -f funny.was.run &&
@@ -68,7 +68,7 @@ test_expect_success 'rebase --continue remembers merge strategy and options' '
echo "Resolved" >F2 &&
git add F2 &&
(
- PATH=./test-bin:$PATH
+ PATH=./test-bin:$PATH &&
git rebase --continue
) &&
test -f funny.was.run
diff --git a/t/t3430-rebase-merges.sh b/t/t3430-rebase-merges.sh
index 78f7c99..9e62297 100755
--- a/t/t3430-rebase-merges.sh
+++ b/t/t3430-rebase-merges.sh
@@ -329,4 +329,38 @@ test_expect_success 'labels that are object IDs are rewritten' '
! grep "^label $third$" .git/ORIGINAL-TODO
'
+test_expect_success 'octopus merges' '
+ git checkout -b three &&
+ test_commit before-octopus &&
+ test_commit three &&
+ git checkout -b two HEAD^ &&
+ test_commit two &&
+ git checkout -b one HEAD^ &&
+ test_commit one &&
+ test_tick &&
+ (GIT_AUTHOR_NAME="Hank" GIT_AUTHOR_EMAIL="hank@sea.world" \
+ git merge -m "Tüntenfüsch" two three) &&
+
+ : fast forward if possible &&
+ before="$(git rev-parse --verify HEAD)" &&
+ test_tick &&
+ git rebase -i -r HEAD^^ &&
+ test_cmp_rev HEAD $before &&
+
+ test_tick &&
+ git rebase -i --force -r HEAD^^ &&
+ test "Hank" = "$(git show -s --format=%an HEAD)" &&
+ test "$before" != $(git rev-parse HEAD) &&
+ test_cmp_graph HEAD^^.. <<-\EOF
+ *-. Tüntenfüsch
+ |\ \
+ | | * three
+ | * | two
+ | |/
+ * | one
+ |/
+ o before-octopus
+ EOF
+'
+
test_done
diff --git a/t/t3507-cherry-pick-conflict.sh b/t/t3507-cherry-pick-conflict.sh
index 7c5ad08..0db1661 100755
--- a/t/t3507-cherry-pick-conflict.sh
+++ b/t/t3507-cherry-pick-conflict.sh
@@ -392,4 +392,17 @@ test_expect_success 'commit --amend -s places the sign-off at the right place' '
test_cmp expect actual
'
+test_expect_success 'cherry-pick preserves sparse-checkout' '
+ pristine_detach initial &&
+ test_config core.sparseCheckout true &&
+ test_when_finished "
+ echo \"/*\" >.git/info/sparse-checkout
+ git read-tree --reset -u HEAD
+ rm .git/info/sparse-checkout" &&
+ echo /unrelated >.git/info/sparse-checkout &&
+ git read-tree --reset -u HEAD &&
+ test_must_fail git cherry-pick -Xours picked>actual &&
+ test_i18ngrep ! "Changes not staged for commit:" actual
+'
+
test_done
diff --git a/t/t3700-add.sh b/t/t3700-add.sh
index 0dc8759..37729ba 100755
--- a/t/t3700-add.sh
+++ b/t/t3700-add.sh
@@ -156,9 +156,9 @@ test_expect_success 'git add with filemode=0, symlinks=0, and unmerged entries'
test_expect_success 'git add with filemode=0, symlinks=0 prefers stage 2 over stage 1' '
git rm --cached -f file symlink &&
(
- echo "100644 $(git hash-object -w stage1) 1 file"
- echo "100755 $(git hash-object -w stage2) 2 file"
- echo "100644 $(printf 1 | git hash-object -w -t blob --stdin) 1 symlink"
+ echo "100644 $(git hash-object -w stage1) 1 file" &&
+ echo "100755 $(git hash-object -w stage2) 2 file" &&
+ echo "100644 $(printf 1 | git hash-object -w -t blob --stdin) 1 symlink" &&
echo "120000 $(printf 2 | git hash-object -w -t blob --stdin) 2 symlink"
) | git update-index --index-info &&
git config core.filemode 0 &&
@@ -264,7 +264,7 @@ test_expect_success 'git add to resolve conflicts on otherwise ignored path' '
git reset --hard &&
H=$(git rev-parse :1/2/a) &&
(
- echo "100644 $H 1 track-this"
+ echo "100644 $H 1 track-this" &&
echo "100644 $H 3 track-this"
) | git update-index --index-info &&
echo track-this >>.gitignore &&
diff --git a/t/t3701-add-interactive.sh b/t/t3701-add-interactive.sh
index 3e9139d..609fbfd 100755
--- a/t/t3701-add-interactive.sh
+++ b/t/t3701-add-interactive.sh
@@ -46,13 +46,13 @@ test_expect_success 'setup expected' '
'
test_expect_success 'diff works (initial)' '
- (echo d; echo 1) | git add -i >output &&
+ test_write_lines d 1 | git add -i >output &&
sed -ne "/new file/,/content/p" <output >diff &&
diff_cmp expected diff
'
test_expect_success 'revert works (initial)' '
git add file &&
- (echo r; echo 1) | git add -i &&
+ test_write_lines r 1 | git add -i &&
git ls-files >output &&
! grep . output
'
@@ -83,13 +83,13 @@ test_expect_success 'setup expected' '
'
test_expect_success 'diff works (commit)' '
- (echo d; echo 1) | git add -i >output &&
+ test_write_lines d 1 | git add -i >output &&
sed -ne "/^index/,/content/p" <output >diff &&
diff_cmp expected diff
'
test_expect_success 'revert works (commit)' '
git add file &&
- (echo r; echo 1) | git add -i &&
+ test_write_lines r 1 | git add -i &&
git add -i </dev/null >output &&
grep "unchanged *+3/-0 file" output
'
@@ -102,7 +102,7 @@ test_expect_success 'setup expected' '
test_expect_success 'dummy edit works' '
test_set_editor : &&
- (echo e; echo a) | git add -p &&
+ test_write_lines e a | git add -p &&
git diff > diff &&
diff_cmp expected diff
'
@@ -127,7 +127,7 @@ test_expect_success 'setup fake editor' '
test_expect_success 'bad edit rejected' '
git reset &&
- (echo e; echo n; echo d) | git add -p >output &&
+ test_write_lines e n d | git add -p >output &&
grep "hunk does not apply" output
'
@@ -140,7 +140,7 @@ test_expect_success 'setup patch' '
test_expect_success 'garbage edit rejected' '
git reset &&
- (echo e; echo n; echo d) | git add -p >output &&
+ test_write_lines e n d | git add -p >output &&
grep "hunk does not apply" output
'
@@ -170,7 +170,7 @@ test_expect_success 'setup expected' '
'
test_expect_success 'real edit works' '
- (echo e; echo n; echo d) | git add -p &&
+ test_write_lines e n d | git add -p &&
git diff >output &&
diff_cmp expected output
'
diff --git a/t/t3904-stash-patch.sh b/t/t3904-stash-patch.sh
index 83744f8..9546b6f 100755
--- a/t/t3904-stash-patch.sh
+++ b/t/t3904-stash-patch.sh
@@ -29,14 +29,14 @@ test_expect_success 'setup' '
test_expect_success 'saying "n" does nothing' '
set_state HEAD HEADfile_work HEADfile_index &&
set_state dir/foo work index &&
- (echo n; echo n; echo n) | test_must_fail git stash save -p &&
+ test_write_lines n n n | test_must_fail git stash save -p &&
verify_state HEAD HEADfile_work HEADfile_index &&
verify_saved_state bar &&
verify_state dir/foo work index
'
test_expect_success 'git stash -p' '
- (echo y; echo n; echo y) | git stash save -p &&
+ test_write_lines y n y | git stash save -p &&
verify_state HEAD committed HEADfile_index &&
verify_saved_state bar &&
verify_state dir/foo head index &&
@@ -51,7 +51,7 @@ test_expect_success 'git stash -p --no-keep-index' '
set_state HEAD HEADfile_work HEADfile_index &&
set_state bar bar_work bar_index &&
set_state dir/foo work index &&
- (echo y; echo n; echo y) | git stash save -p --no-keep-index &&
+ test_write_lines y n y | git stash save -p --no-keep-index &&
verify_state HEAD committed committed &&
verify_state bar bar_work dummy &&
verify_state dir/foo head head &&
@@ -66,7 +66,7 @@ test_expect_success 'git stash --no-keep-index -p' '
set_state HEAD HEADfile_work HEADfile_index &&
set_state bar bar_work bar_index &&
set_state dir/foo work index &&
- (echo y; echo n; echo y) | git stash save --no-keep-index -p &&
+ test_write_lines y n y | git stash save --no-keep-index -p &&
verify_state HEAD committed committed &&
verify_state dir/foo head head &&
verify_state bar bar_work dummy &&
diff --git a/t/t4001-diff-rename.sh b/t/t4001-diff-rename.sh
index bf40303..c16486a 100755
--- a/t/t4001-diff-rename.sh
+++ b/t/t4001-diff-rename.sh
@@ -180,7 +180,7 @@ test_expect_success 'setup for many rename source candidates' '
git add "path??" &&
test_tick &&
git commit -m "hundred" &&
- (cat path1; echo new) >new-path &&
+ (cat path1 && echo new) >new-path &&
echo old >>path1 &&
git add new-path path1 &&
git diff -l 4 -C -C --cached --name-status >actual 2>actual.err &&
diff --git a/t/t4010-diff-pathspec.sh b/t/t4010-diff-pathspec.sh
index b95cc91..281f8fa 100755
--- a/t/t4010-diff-pathspec.sh
+++ b/t/t4010-diff-pathspec.sh
@@ -110,10 +110,10 @@ test_expect_success 'diff-tree -r with wildcard' '
test_expect_success 'setup submodules' '
test_tick &&
git init submod &&
- ( cd submod && test_commit first; ) &&
+ ( cd submod && test_commit first ) &&
git add submod &&
git commit -m first &&
- ( cd submod && test_commit second; ) &&
+ ( cd submod && test_commit second ) &&
git add submod &&
git commit -m second
'
diff --git a/t/t4012-diff-binary.sh b/t/t4012-diff-binary.sh
index 0a8af76..6579c81 100755
--- a/t/t4012-diff-binary.sh
+++ b/t/t4012-diff-binary.sh
@@ -102,10 +102,8 @@ test_expect_success 'apply binary patch' '
test_expect_success 'diff --no-index with binary creation' '
echo Q | q_to_nul >binary &&
- (: hide error code from diff, which just indicates differences
- git diff --binary --no-index /dev/null binary >current ||
- true
- ) &&
+ # hide error code from diff, which just indicates differences
+ test_might_fail git diff --binary --no-index /dev/null binary >current &&
rm binary &&
git apply --binary <current &&
echo Q >expected &&
diff --git a/t/t4015-diff-whitespace.sh b/t/t4015-diff-whitespace.sh
index 5659c26..35fc8b5 100755
--- a/t/t4015-diff-whitespace.sh
+++ b/t/t4015-diff-whitespace.sh
@@ -1220,7 +1220,7 @@ test_expect_success 'plain moved code, inside file' '
test_cmp expected actual
'
-test_expect_success 'detect permutations inside moved code -- dimmed_zebra' '
+test_expect_success 'detect blocks of moved code' '
git reset --hard &&
cat <<-\EOF >lines.txt &&
long line 1
@@ -1268,9 +1268,52 @@ test_expect_success 'detect permutations inside moved code -- dimmed_zebra' '
test_config color.diff.newMovedDimmed "normal cyan" &&
test_config color.diff.oldMovedAlternativeDimmed "normal blue" &&
test_config color.diff.newMovedAlternativeDimmed "normal yellow" &&
- git diff HEAD --no-renames --color-moved=dimmed_zebra --color |
- grep -v "index" |
- test_decode_color >actual &&
+ git diff HEAD --no-renames --color-moved=blocks --color >actual.raw &&
+ grep -v "index" actual.raw | test_decode_color >actual &&
+ cat <<-\EOF >expected &&
+ <BOLD>diff --git a/lines.txt b/lines.txt<RESET>
+ <BOLD>--- a/lines.txt<RESET>
+ <BOLD>+++ b/lines.txt<RESET>
+ <CYAN>@@ -1,16 +1,16 @@<RESET>
+ <MAGENTA>-long line 1<RESET>
+ <MAGENTA>-long line 2<RESET>
+ <MAGENTA>-long line 3<RESET>
+ line 4<RESET>
+ line 5<RESET>
+ line 6<RESET>
+ line 7<RESET>
+ line 8<RESET>
+ line 9<RESET>
+ <CYAN>+<RESET><CYAN>long line 1<RESET>
+ <CYAN>+<RESET><CYAN>long line 2<RESET>
+ <CYAN>+<RESET><CYAN>long line 3<RESET>
+ <CYAN>+<RESET><CYAN>long line 14<RESET>
+ <CYAN>+<RESET><CYAN>long line 15<RESET>
+ <CYAN>+<RESET><CYAN>long line 16<RESET>
+ line 10<RESET>
+ line 11<RESET>
+ line 12<RESET>
+ line 13<RESET>
+ <MAGENTA>-long line 14<RESET>
+ <MAGENTA>-long line 15<RESET>
+ <MAGENTA>-long line 16<RESET>
+ EOF
+ test_cmp expected actual
+
+'
+
+test_expect_success 'detect permutations inside moved code -- dimmed-zebra' '
+ # reuse setup from test before!
+ test_config color.diff.oldMoved "magenta" &&
+ test_config color.diff.newMoved "cyan" &&
+ test_config color.diff.oldMovedAlternative "blue" &&
+ test_config color.diff.newMovedAlternative "yellow" &&
+ test_config color.diff.oldMovedDimmed "normal magenta" &&
+ test_config color.diff.newMovedDimmed "normal cyan" &&
+ test_config color.diff.oldMovedAlternativeDimmed "normal blue" &&
+ test_config color.diff.newMovedAlternativeDimmed "normal yellow" &&
+ git diff HEAD --no-renames --color-moved=dimmed-zebra --color >actual.raw &&
+ grep -v "index" actual.raw | test_decode_color >actual &&
cat <<-\EOF >expected &&
<BOLD>diff --git a/lines.txt b/lines.txt<RESET>
<BOLD>--- a/lines.txt<RESET>
@@ -1312,9 +1355,8 @@ test_expect_success 'cmd option assumes configured colored-moved' '
test_config color.diff.oldMovedAlternativeDimmed "normal blue" &&
test_config color.diff.newMovedAlternativeDimmed "normal yellow" &&
test_config diff.colorMoved zebra &&
- git diff HEAD --no-renames --color-moved --color |
- grep -v "index" |
- test_decode_color >actual &&
+ git diff HEAD --no-renames --color-moved --color >actual.raw &&
+ grep -v "index" actual.raw | test_decode_color >actual &&
cat <<-\EOF >expected &&
<BOLD>diff --git a/lines.txt b/lines.txt<RESET>
<BOLD>--- a/lines.txt<RESET>
@@ -1392,9 +1434,8 @@ test_expect_success 'move detection ignoring whitespace ' '
line 4
line 5
EOF
- git diff HEAD --no-renames --color-moved --color |
- grep -v "index" |
- test_decode_color >actual &&
+ git diff HEAD --no-renames --color-moved --color >actual.raw &&
+ grep -v "index" actual.raw | test_decode_color >actual &&
cat <<-\EOF >expected &&
<BOLD>diff --git a/lines.txt b/lines.txt<RESET>
<BOLD>--- a/lines.txt<RESET>
@@ -1416,9 +1457,9 @@ test_expect_success 'move detection ignoring whitespace ' '
EOF
test_cmp expected actual &&
- git diff HEAD --no-renames -w --color-moved --color |
- grep -v "index" |
- test_decode_color >actual &&
+ git diff HEAD --no-renames --color-moved --color \
+ --color-moved-ws=ignore-all-space >actual.raw &&
+ grep -v "index" actual.raw | test_decode_color >actual &&
cat <<-\EOF >expected &&
<BOLD>diff --git a/lines.txt b/lines.txt<RESET>
<BOLD>--- a/lines.txt<RESET>
@@ -1456,9 +1497,8 @@ test_expect_success 'move detection ignoring whitespace changes' '
line 5
EOF
- git diff HEAD --no-renames --color-moved --color |
- grep -v "index" |
- test_decode_color >actual &&
+ git diff HEAD --no-renames --color-moved --color >actual.raw &&
+ grep -v "index" actual.raw | test_decode_color >actual &&
cat <<-\EOF >expected &&
<BOLD>diff --git a/lines.txt b/lines.txt<RESET>
<BOLD>--- a/lines.txt<RESET>
@@ -1480,9 +1520,9 @@ test_expect_success 'move detection ignoring whitespace changes' '
EOF
test_cmp expected actual &&
- git diff HEAD --no-renames -b --color-moved --color |
- grep -v "index" |
- test_decode_color >actual &&
+ git diff HEAD --no-renames --color-moved --color \
+ --color-moved-ws=ignore-space-change >actual.raw &&
+ grep -v "index" actual.raw | test_decode_color >actual &&
cat <<-\EOF >expected &&
<BOLD>diff --git a/lines.txt b/lines.txt<RESET>
<BOLD>--- a/lines.txt<RESET>
@@ -1523,9 +1563,8 @@ test_expect_success 'move detection ignoring whitespace at eol' '
# avoid cluttering the output with complaints about our eol whitespace
test_config core.whitespace -blank-at-eol &&
- git diff HEAD --no-renames --color-moved --color |
- grep -v "index" |
- test_decode_color >actual &&
+ git diff HEAD --no-renames --color-moved --color >actual.raw &&
+ grep -v "index" actual.raw | test_decode_color >actual &&
cat <<-\EOF >expected &&
<BOLD>diff --git a/lines.txt b/lines.txt<RESET>
<BOLD>--- a/lines.txt<RESET>
@@ -1547,9 +1586,9 @@ test_expect_success 'move detection ignoring whitespace at eol' '
EOF
test_cmp expected actual &&
- git diff HEAD --no-renames --ignore-space-at-eol --color-moved --color |
- grep -v "index" |
- test_decode_color >actual &&
+ git diff HEAD --no-renames --color-moved --color \
+ --color-moved-ws=ignore-space-at-eol >actual.raw &&
+ grep -v "index" actual.raw | test_decode_color >actual &&
cat <<-\EOF >expected &&
<BOLD>diff --git a/lines.txt b/lines.txt<RESET>
<BOLD>--- a/lines.txt<RESET>
@@ -1594,9 +1633,8 @@ test_expect_success '--color-moved block at end of diff output respects MIN_ALNU
irrelevant_line
EOF
- git diff HEAD --color-moved=zebra --color --no-renames |
- grep -v "index" |
- test_decode_color >actual &&
+ git diff HEAD --color-moved=zebra --color --no-renames >actual.raw &&
+ grep -v "index" actual.raw | test_decode_color >actual &&
cat >expected <<-\EOF &&
<BOLD>diff --git a/bar b/bar<RESET>
<BOLD>--- a/bar<RESET>
@@ -1633,9 +1671,8 @@ test_expect_success '--color-moved respects MIN_ALNUM_COUNT' '
nineteen chars 456789
EOF
- git diff HEAD --color-moved=zebra --color --no-renames |
- grep -v "index" |
- test_decode_color >actual &&
+ git diff HEAD --color-moved=zebra --color --no-renames >actual.raw &&
+ grep -v "index" actual.raw | test_decode_color >actual &&
cat >expected <<-\EOF &&
<BOLD>diff --git a/bar b/bar<RESET>
<BOLD>--- a/bar<RESET>
@@ -1676,7 +1713,8 @@ test_expect_success '--color-moved treats adjacent blocks as separate for MIN_AL
7charsA
EOF
- git diff HEAD --color-moved=zebra --color --no-renames | grep -v "index" | test_decode_color >actual &&
+ git diff HEAD --color-moved=zebra --color --no-renames >actual.raw &&
+ grep -v "index" actual.raw | test_decode_color >actual &&
cat >expected <<-\EOF &&
<BOLD>diff --git a/bar b/bar<RESET>
<BOLD>--- a/bar<RESET>
@@ -1719,7 +1757,146 @@ test_expect_success 'move detection with submodules' '
# nor did we mess with it another way
git diff --submodule=diff --color | test_decode_color >expect &&
- test_cmp expect decoded_actual
+ test_cmp expect decoded_actual &&
+ rm -rf bananas &&
+ git submodule deinit bananas
+'
+
+test_expect_success 'only move detection ignores white spaces' '
+ git reset --hard &&
+ q_to_tab <<-\EOF >text.txt &&
+ a long line to exceed per-line minimum
+ another long line to exceed per-line minimum
+ original file
+ EOF
+ git add text.txt &&
+ git commit -m "add text" &&
+ q_to_tab <<-\EOF >text.txt &&
+ Qa long line to exceed per-line minimum
+ Qanother long line to exceed per-line minimum
+ new file
+ EOF
+
+ # Make sure we get a different diff using -w
+ git diff --color --color-moved -w >actual.raw &&
+ grep -v "index" actual.raw | test_decode_color >actual &&
+ q_to_tab <<-\EOF >expected &&
+ <BOLD>diff --git a/text.txt b/text.txt<RESET>
+ <BOLD>--- a/text.txt<RESET>
+ <BOLD>+++ b/text.txt<RESET>
+ <CYAN>@@ -1,3 +1,3 @@<RESET>
+ Qa long line to exceed per-line minimum<RESET>
+ Qanother long line to exceed per-line minimum<RESET>
+ <RED>-original file<RESET>
+ <GREEN>+<RESET><GREEN>new file<RESET>
+ EOF
+ test_cmp expected actual &&
+
+ # And now ignoring white space only in the move detection
+ git diff --color --color-moved \
+ --color-moved-ws=ignore-all-space,ignore-space-change,ignore-space-at-eol >actual.raw &&
+ grep -v "index" actual.raw | test_decode_color >actual &&
+ q_to_tab <<-\EOF >expected &&
+ <BOLD>diff --git a/text.txt b/text.txt<RESET>
+ <BOLD>--- a/text.txt<RESET>
+ <BOLD>+++ b/text.txt<RESET>
+ <CYAN>@@ -1,3 +1,3 @@<RESET>
+ <BOLD;MAGENTA>-a long line to exceed per-line minimum<RESET>
+ <BOLD;MAGENTA>-another long line to exceed per-line minimum<RESET>
+ <RED>-original file<RESET>
+ <BOLD;YELLOW>+<RESET>Q<BOLD;YELLOW>a long line to exceed per-line minimum<RESET>
+ <BOLD;YELLOW>+<RESET>Q<BOLD;YELLOW>another long line to exceed per-line minimum<RESET>
+ <GREEN>+<RESET><GREEN>new file<RESET>
+ EOF
+ test_cmp expected actual
+'
+
+test_expect_success 'compare whitespace delta across moved blocks' '
+
+ git reset --hard &&
+ q_to_tab <<-\EOF >text.txt &&
+ QIndented
+ QText across
+ Qsome lines
+ QBut! <- this stands out
+ QAdjusting with
+ QQdifferent starting
+ Qwhite spaces
+ QAnother outlier
+ QQQIndented
+ QQQText across
+ QQQfive lines
+ QQQthat has similar lines
+ QQQto previous blocks, but with different indent
+ QQQYetQAnotherQoutlierQ
+ EOF
+
+ git add text.txt &&
+ git commit -m "add text.txt" &&
+
+ q_to_tab <<-\EOF >text.txt &&
+ QQIndented
+ QQText across
+ QQsome lines
+ QQQBut! <- this stands out
+ Adjusting with
+ Qdifferent starting
+ white spaces
+ AnotherQoutlier
+ QQIndented
+ QQText across
+ QQfive lines
+ QQthat has similar lines
+ QQto previous blocks, but with different indent
+ QQYetQAnotherQoutlier
+ EOF
+
+ git diff --color --color-moved --color-moved-ws=allow-indentation-change >actual.raw &&
+ grep -v "index" actual.raw | test_decode_color >actual &&
+
+ q_to_tab <<-\EOF >expected &&
+ <BOLD>diff --git a/text.txt b/text.txt<RESET>
+ <BOLD>--- a/text.txt<RESET>
+ <BOLD>+++ b/text.txt<RESET>
+ <CYAN>@@ -1,14 +1,14 @@<RESET>
+ <BOLD;MAGENTA>-QIndented<RESET>
+ <BOLD;MAGENTA>-QText across<RESET>
+ <BOLD;MAGENTA>-Qsome lines<RESET>
+ <RED>-QBut! <- this stands out<RESET>
+ <BOLD;MAGENTA>-QAdjusting with<RESET>
+ <BOLD;MAGENTA>-QQdifferent starting<RESET>
+ <BOLD;MAGENTA>-Qwhite spaces<RESET>
+ <RED>-QAnother outlier<RESET>
+ <BOLD;MAGENTA>-QQQIndented<RESET>
+ <BOLD;MAGENTA>-QQQText across<RESET>
+ <BOLD;MAGENTA>-QQQfive lines<RESET>
+ <BOLD;MAGENTA>-QQQthat has similar lines<RESET>
+ <BOLD;MAGENTA>-QQQto previous blocks, but with different indent<RESET>
+ <RED>-QQQYetQAnotherQoutlierQ<RESET>
+ <BOLD;CYAN>+<RESET>QQ<BOLD;CYAN>Indented<RESET>
+ <BOLD;CYAN>+<RESET>QQ<BOLD;CYAN>Text across<RESET>
+ <BOLD;CYAN>+<RESET>QQ<BOLD;CYAN>some lines<RESET>
+ <GREEN>+<RESET>QQQ<GREEN>But! <- this stands out<RESET>
+ <BOLD;CYAN>+<RESET><BOLD;CYAN>Adjusting with<RESET>
+ <BOLD;CYAN>+<RESET>Q<BOLD;CYAN>different starting<RESET>
+ <BOLD;CYAN>+<RESET><BOLD;CYAN>white spaces<RESET>
+ <GREEN>+<RESET><GREEN>AnotherQoutlier<RESET>
+ <BOLD;CYAN>+<RESET>QQ<BOLD;CYAN>Indented<RESET>
+ <BOLD;CYAN>+<RESET>QQ<BOLD;CYAN>Text across<RESET>
+ <BOLD;CYAN>+<RESET>QQ<BOLD;CYAN>five lines<RESET>
+ <BOLD;CYAN>+<RESET>QQ<BOLD;CYAN>that has similar lines<RESET>
+ <BOLD;CYAN>+<RESET>QQ<BOLD;CYAN>to previous blocks, but with different indent<RESET>
+ <GREEN>+<RESET>QQ<GREEN>YetQAnotherQoutlier<RESET>
+ EOF
+
+ test_cmp expected actual
+'
+
+test_expect_success 'compare whitespace delta incompatible with other space options' '
+ test_must_fail git diff \
+ --color-moved-ws=allow-indentation-change,ignore-all-space \
+ 2>err &&
+ test_i18ngrep allow-indentation-change err
'
test_done
diff --git a/t/t4024-diff-optimize-common.sh b/t/t4024-diff-optimize-common.sh
index 7e76018..6b44ce1 100755
--- a/t/t4024-diff-optimize-common.sh
+++ b/t/t4024-diff-optimize-common.sh
@@ -127,17 +127,17 @@ test_expect_success setup '
for n in $sample
do
- ( zs $n ; echo a ) >file-a$n &&
- ( echo b; zs $n; echo ) >file-b$n &&
- ( printf c; zs $n ) >file-c$n &&
- ( echo d; zs $n ) >file-d$n &&
+ ( zs $n && echo a ) >file-a$n &&
+ ( echo b && zs $n && echo ) >file-b$n &&
+ ( printf c && zs $n ) >file-c$n &&
+ ( echo d && zs $n ) >file-d$n &&
git add file-a$n file-b$n file-c$n file-d$n &&
- ( zs $n ; echo A ) >file-a$n &&
- ( echo B; zs $n; echo ) >file-b$n &&
- ( printf C; zs $n ) >file-c$n &&
- ( echo D; zs $n ) >file-d$n &&
+ ( zs $n && echo A ) >file-a$n &&
+ ( echo B && zs $n && echo ) >file-b$n &&
+ ( printf C && zs $n ) >file-c$n &&
+ ( echo D && zs $n ) >file-d$n &&
expect_pattern $n || return 1
diff --git a/t/t4025-hunk-header.sh b/t/t4025-hunk-header.sh
index 7a3dbc1..fa44e78 100755
--- a/t/t4025-hunk-header.sh
+++ b/t/t4025-hunk-header.sh
@@ -12,12 +12,12 @@ NS="$N$N$N$N$N$N$N$N$N$N$N$N$N"
test_expect_success setup '
(
- echo "A $NS"
+ echo "A $NS" &&
for c in B C D E F G H I J K
do
echo " $c"
- done
- echo "L $NS"
+ done &&
+ echo "L $NS" &&
for c in M N O P Q R S T U V
do
echo " $c"
@@ -34,7 +34,7 @@ test_expect_success 'hunk header truncation with an overly long line' '
git diff | sed -n -e "s/^.*@@//p" >actual &&
(
- echo " A $N$N$N$N$N$N$N$N$N2"
+ echo " A $N$N$N$N$N$N$N$N$N2" &&
echo " L $N$N$N$N$N$N$N$N$N1"
) >expected &&
test_cmp actual expected
diff --git a/t/t4041-diff-submodule-option.sh b/t/t4041-diff-submodule-option.sh
index 058ee08..4e3499e 100755
--- a/t/t4041-diff-submodule-option.sh
+++ b/t/t4041-diff-submodule-option.sh
@@ -498,7 +498,7 @@ test_expect_success 'given commit --submodule=short' '
test_expect_success 'setup .git file for sm2' '
(cd sm2 &&
REAL="$(pwd)/../.real" &&
- mv .git "$REAL"
+ mv .git "$REAL" &&
echo "gitdir: $REAL" >.git)
'
@@ -527,7 +527,7 @@ test_expect_success 'diff --submodule with objects referenced by alternates' '
git commit -m "sub a"
) &&
(cd sub_alt &&
- sha1_before=$(git rev-parse --short HEAD)
+ sha1_before=$(git rev-parse --short HEAD) &&
echo b >b &&
git add b &&
git commit -m b &&
diff --git a/t/t4060-diff-submodule-option-diff-format.sh b/t/t4060-diff-submodule-option-diff-format.sh
index 4b168d0..0eba462 100755
--- a/t/t4060-diff-submodule-option-diff-format.sh
+++ b/t/t4060-diff-submodule-option-diff-format.sh
@@ -721,7 +721,7 @@ test_expect_success 'given commit' '
test_expect_success 'setup .git file for sm2' '
(cd sm2 &&
REAL="$(pwd)/../.real" &&
- mv .git "$REAL"
+ mv .git "$REAL" &&
echo "gitdir: $REAL" >.git)
'
diff --git a/t/t4121-apply-diffs.sh b/t/t4121-apply-diffs.sh
index aff551a..66368ef 100755
--- a/t/t4121-apply-diffs.sh
+++ b/t/t4121-apply-diffs.sh
@@ -27,6 +27,6 @@ test_expect_success 'setup' \
test_expect_success \
'check if contextually independent diffs for the same file apply' \
- '( git diff test~2 test~1; git diff test~1 test~0 )| git apply'
+ '( git diff test~2 test~1 && git diff test~1 test~0 )| git apply'
test_done
diff --git a/t/t4150-am.sh b/t/t4150-am.sh
index 1ebc587..01867a9 100755
--- a/t/t4150-am.sh
+++ b/t/t4150-am.sh
@@ -69,13 +69,15 @@ test_expect_success 'setup: messages' '
EOF
- cat >scissors-msg <<-\EOF &&
- Test git-am with scissors line
+ cat >msg-without-scissors-line <<-\EOF &&
+ Test that git-am --scissors cuts at the scissors line
This line should be included in the commit message.
EOF
- cat - scissors-msg >no-scissors-msg <<-\EOF &&
+ printf "Subject: " >subject-prefix &&
+
+ cat - subject-prefix msg-without-scissors-line >msg-with-scissors-line <<-\EOF &&
This line should not be included in the commit message with --scissors enabled.
- - >8 - - remove everything above this line - - >8 - -
@@ -148,18 +150,17 @@ test_expect_success setup '
} >patch1-hg.eml &&
- echo scissors-file >scissors-file &&
- git add scissors-file &&
- git commit -F scissors-msg &&
- git tag scissors &&
- git format-patch --stdout scissors^ >scissors-patch.eml &&
+ echo file >file &&
+ git add file &&
+ git commit -F msg-without-scissors-line &&
+ git tag expected-for-scissors &&
git reset --hard HEAD^ &&
- echo no-scissors-file >no-scissors-file &&
- git add no-scissors-file &&
- git commit -F no-scissors-msg &&
- git tag no-scissors &&
- git format-patch --stdout no-scissors^ >no-scissors-patch.eml &&
+ echo file >file &&
+ git add file &&
+ git commit -F msg-with-scissors-line &&
+ git tag expected-for-no-scissors &&
+ git format-patch --stdout expected-for-no-scissors^ >patch-with-scissors-line.eml &&
git reset --hard HEAD^ &&
sed -n -e "3,\$p" msg >file &&
@@ -416,10 +417,10 @@ test_expect_success 'am --scissors cuts the message at the scissors line' '
rm -fr .git/rebase-apply &&
git reset --hard &&
git checkout second &&
- git am --scissors scissors-patch.eml &&
+ git am --scissors patch-with-scissors-line.eml &&
test_path_is_missing .git/rebase-apply &&
- git diff --exit-code scissors &&
- test_cmp_rev scissors HEAD
+ git diff --exit-code expected-for-scissors &&
+ test_cmp_rev expected-for-scissors HEAD
'
test_expect_success 'am --no-scissors overrides mailinfo.scissors' '
@@ -427,10 +428,10 @@ test_expect_success 'am --no-scissors overrides mailinfo.scissors' '
git reset --hard &&
git checkout second &&
test_config mailinfo.scissors true &&
- git am --no-scissors no-scissors-patch.eml &&
+ git am --no-scissors patch-with-scissors-line.eml &&
test_path_is_missing .git/rebase-apply &&
- git diff --exit-code no-scissors &&
- test_cmp_rev no-scissors HEAD
+ git diff --exit-code expected-for-no-scissors &&
+ test_cmp_rev expected-for-no-scissors HEAD
'
test_expect_success 'setup: new author and committer' '
diff --git a/t/t4202-log.sh b/t/t4202-log.sh
index 0b1cd34..153a506 100755
--- a/t/t4202-log.sh
+++ b/t/t4202-log.sh
@@ -1555,12 +1555,28 @@ test_expect_success GPG 'setup signed branch' '
git commit -S -m signed_commit
'
+test_expect_success GPGSM 'setup signed branch x509' '
+ test_when_finished "git reset --hard && git checkout master" &&
+ git checkout -b signed-x509 master &&
+ echo foo >foo &&
+ git add foo &&
+ test_config gpg.format x509 &&
+ test_config user.signingkey $GIT_COMMITTER_EMAIL &&
+ git commit -S -m signed_commit
+'
+
test_expect_success GPG 'log --graph --show-signature' '
git log --graph --show-signature -n1 signed >actual &&
grep "^| gpg: Signature made" actual &&
grep "^| gpg: Good signature" actual
'
+test_expect_success GPGSM 'log --graph --show-signature x509' '
+ git log --graph --show-signature -n1 signed-x509 >actual &&
+ grep "^| gpgsm: Signature made" actual &&
+ grep "^| gpgsm: Good signature" actual
+'
+
test_expect_success GPG 'log --graph --show-signature for merged tag' '
test_when_finished "git reset --hard && git checkout master" &&
git checkout -b plain master &&
@@ -1580,6 +1596,27 @@ test_expect_success GPG 'log --graph --show-signature for merged tag' '
grep "^| | gpg: Good signature" actual
'
+test_expect_success GPGSM 'log --graph --show-signature for merged tag x509' '
+ test_when_finished "git reset --hard && git checkout master" &&
+ test_config gpg.format x509 &&
+ test_config user.signingkey $GIT_COMMITTER_EMAIL &&
+ git checkout -b plain-x509 master &&
+ echo aaa >bar &&
+ git add bar &&
+ git commit -m bar_commit &&
+ git checkout -b tagged-x509 master &&
+ echo bbb >baz &&
+ git add baz &&
+ git commit -m baz_commit &&
+ git tag -s -m signed_tag_msg signed_tag_x509 &&
+ git checkout plain-x509 &&
+ git merge --no-ff -m msg signed_tag_x509 &&
+ git log --graph --show-signature -n1 plain-x509 >actual &&
+ grep "^|\\\ merged tag" actual &&
+ grep "^| | gpgsm: Signature made" actual &&
+ grep "^| | gpgsm: Good signature" actual
+'
+
test_expect_success GPG '--no-show-signature overrides --show-signature' '
git log -1 --show-signature --no-show-signature signed >actual &&
! grep "^gpg:" actual
diff --git a/t/t4211-line-log.sh b/t/t4211-line-log.sh
index d0377fa..436b13a 100755
--- a/t/t4211-line-log.sh
+++ b/t/t4211-line-log.sh
@@ -60,7 +60,6 @@ test_bad_opts "-L 1:nonexistent" "There is no path"
test_bad_opts "-L 1:simple" "There is no path"
test_bad_opts "-L '/foo:b.c'" "argument not .start,end:file"
test_bad_opts "-L 1000:b.c" "has only.*lines"
-test_bad_opts "-L 1,1000:b.c" "has only.*lines"
test_bad_opts "-L :b.c" "argument not .start,end:file"
test_bad_opts "-L :foo:b.c" "no match"
@@ -86,12 +85,12 @@ test_expect_success '-L ,Y (Y == nlines)' '
test_expect_success '-L ,Y (Y == nlines + 1)' '
n=$(expr $(wc -l <b.c) + 1) &&
- test_must_fail git log -L ,$n:b.c
+ git log -L ,$n:b.c
'
test_expect_success '-L ,Y (Y == nlines + 2)' '
n=$(expr $(wc -l <b.c) + 2) &&
- test_must_fail git log -L ,$n:b.c
+ git log -L ,$n:b.c
'
test_expect_success '-L with --first-parent and a merge' '
diff --git a/t/t5300-pack-object.sh b/t/t5300-pack-object.sh
index 2336d09..6c620cd 100755
--- a/t/t5300-pack-object.sh
+++ b/t/t5300-pack-object.sh
@@ -191,7 +191,7 @@ test_expect_success 'survive missing objects/pack directory' '
mkdir missing-pack &&
cd missing-pack &&
git init &&
- GOP=.git/objects/pack
+ GOP=.git/objects/pack &&
rm -fr $GOP &&
git index-pack --stdin --keep=test <../test-3-${packname_3}.pack &&
test -f $GOP/pack-${packname_3}.pack &&
diff --git a/t/t5302-pack-index.sh b/t/t5302-pack-index.sh
index bb9b8bb..91d51b3 100755
--- a/t/t5302-pack-index.sh
+++ b/t/t5302-pack-index.sh
@@ -237,7 +237,7 @@ test_expect_success 'running index-pack in the object store' '
rm -f .git/objects/pack/* &&
cp test-1-${pack1}.pack .git/objects/pack/pack-${pack1}.pack &&
(
- cd .git/objects/pack
+ cd .git/objects/pack &&
git index-pack pack-${pack1}.pack
) &&
test -f .git/objects/pack/pack-${pack1}.idx
diff --git a/t/t5318-commit-graph.sh b/t/t5318-commit-graph.sh
index 77d85ae..4f17d77 100755
--- a/t/t5318-commit-graph.sh
+++ b/t/t5318-commit-graph.sh
@@ -11,6 +11,11 @@ test_expect_success 'setup full repo' '
objdir=".git/objects"
'
+test_expect_success 'verify graph with no graph file' '
+ cd "$TRASH_DIRECTORY/full" &&
+ git commit-graph verify
+'
+
test_expect_success 'write graph with no packs' '
cd "$TRASH_DIRECTORY/full" &&
git commit-graph write --object-dir . &&
@@ -28,8 +33,8 @@ test_expect_success 'create commits and repack' '
'
graph_git_two_modes() {
- git -c core.graph=true $1 >output
- git -c core.graph=false $1 >expect
+ git -c core.commitGraph=true $1 >output
+ git -c core.commitGraph=false $1 >expect
test_cmp output expect
}
@@ -200,6 +205,16 @@ test_expect_success 'build graph from commits with append' '
graph_git_behavior 'append graph, commit 8 vs merge 1' full commits/8 merge/1
graph_git_behavior 'append graph, commit 8 vs merge 2' full commits/8 merge/2
+test_expect_success 'build graph using --reachable' '
+ cd "$TRASH_DIRECTORY/full" &&
+ git commit-graph write --reachable &&
+ test_path_is_file $objdir/info/commit-graph &&
+ graph_read_expect "11" "large_edges"
+'
+
+graph_git_behavior 'append graph, commit 8 vs merge 1' full commits/8 merge/1
+graph_git_behavior 'append graph, commit 8 vs merge 2' full commits/8 merge/2
+
test_expect_success 'setup bare repo' '
cd "$TRASH_DIRECTORY" &&
git clone --bare --no-local full bare &&
@@ -230,4 +245,225 @@ test_expect_success 'perform fast-forward merge in full repo' '
test_cmp expect output
'
+test_expect_success 'check that gc computes commit-graph' '
+ cd "$TRASH_DIRECTORY/full" &&
+ git commit --allow-empty -m "blank" &&
+ git commit-graph write --reachable &&
+ cp $objdir/info/commit-graph commit-graph-before-gc &&
+ git reset --hard HEAD~1 &&
+ git config gc.writeCommitGraph true &&
+ git gc &&
+ cp $objdir/info/commit-graph commit-graph-after-gc &&
+ ! test_cmp commit-graph-before-gc commit-graph-after-gc &&
+ git commit-graph write --reachable &&
+ test_cmp commit-graph-after-gc $objdir/info/commit-graph
+'
+
+# the verify tests below expect the commit-graph to contain
+# exactly the commits reachable from the commits/8 branch.
+# If the file changes the set of commits in the list, then the
+# offsets into the binary file will result in different edits
+# and the tests will likely break.
+
+test_expect_success 'git commit-graph verify' '
+ cd "$TRASH_DIRECTORY/full" &&
+ git rev-parse commits/8 | git commit-graph write --stdin-commits &&
+ git commit-graph verify >output
+'
+
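+# Byte offsets into the commit-graph file, computed from its on-disk layout:
+# an 8-byte header, a table of 12-byte chunk-lookup rows, then the OID fanout,
+# OID lookup, commit data and octopus edge chunks (20-byte SHA-1 OIDs).
+# The corruption tests below overwrite single bytes at these offsets.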
+NUM_COMMITS=9
+NUM_OCTOPUS_EDGES=2
+HASH_LEN=20
+GRAPH_BYTE_VERSION=4
+GRAPH_BYTE_HASH=5
+GRAPH_BYTE_CHUNK_COUNT=6
+GRAPH_CHUNK_LOOKUP_OFFSET=8
+GRAPH_CHUNK_LOOKUP_WIDTH=12
+GRAPH_CHUNK_LOOKUP_ROWS=5
+GRAPH_BYTE_OID_FANOUT_ID=$GRAPH_CHUNK_LOOKUP_OFFSET
+GRAPH_BYTE_OID_LOOKUP_ID=$(($GRAPH_CHUNK_LOOKUP_OFFSET + \
+ 1 * $GRAPH_CHUNK_LOOKUP_WIDTH))
+GRAPH_BYTE_COMMIT_DATA_ID=$(($GRAPH_CHUNK_LOOKUP_OFFSET + \
+ 2 * $GRAPH_CHUNK_LOOKUP_WIDTH))
+GRAPH_FANOUT_OFFSET=$(($GRAPH_CHUNK_LOOKUP_OFFSET + \
+ $GRAPH_CHUNK_LOOKUP_WIDTH * $GRAPH_CHUNK_LOOKUP_ROWS))
+GRAPH_BYTE_FANOUT1=$(($GRAPH_FANOUT_OFFSET + 4 * 4))
+GRAPH_BYTE_FANOUT2=$(($GRAPH_FANOUT_OFFSET + 4 * 255))
+GRAPH_OID_LOOKUP_OFFSET=$(($GRAPH_FANOUT_OFFSET + 4 * 256))
+GRAPH_BYTE_OID_LOOKUP_ORDER=$(($GRAPH_OID_LOOKUP_OFFSET + $HASH_LEN * 8))
+GRAPH_BYTE_OID_LOOKUP_MISSING=$(($GRAPH_OID_LOOKUP_OFFSET + $HASH_LEN * 4 + 10))
+GRAPH_COMMIT_DATA_OFFSET=$(($GRAPH_OID_LOOKUP_OFFSET + $HASH_LEN * $NUM_COMMITS))
+GRAPH_BYTE_COMMIT_TREE=$GRAPH_COMMIT_DATA_OFFSET
+GRAPH_BYTE_COMMIT_PARENT=$(($GRAPH_COMMIT_DATA_OFFSET + $HASH_LEN))
+GRAPH_BYTE_COMMIT_EXTRA_PARENT=$(($GRAPH_COMMIT_DATA_OFFSET + $HASH_LEN + 4))
+GRAPH_BYTE_COMMIT_WRONG_PARENT=$(($GRAPH_COMMIT_DATA_OFFSET + $HASH_LEN + 3))
+GRAPH_BYTE_COMMIT_GENERATION=$(($GRAPH_COMMIT_DATA_OFFSET + $HASH_LEN + 11))
+GRAPH_BYTE_COMMIT_DATE=$(($GRAPH_COMMIT_DATA_OFFSET + $HASH_LEN + 12))
+GRAPH_COMMIT_DATA_WIDTH=$(($HASH_LEN + 16))
+GRAPH_OCTOPUS_DATA_OFFSET=$(($GRAPH_COMMIT_DATA_OFFSET + \
+ $GRAPH_COMMIT_DATA_WIDTH * $NUM_COMMITS))
+GRAPH_BYTE_OCTOPUS=$(($GRAPH_OCTOPUS_DATA_OFFSET + 4))
+GRAPH_BYTE_FOOTER=$(($GRAPH_OCTOPUS_DATA_OFFSET + 4 * $NUM_OCTOPUS_EDGES))
+
+# usage: corrupt_graph_and_verify <position> <data> <string>
+# Manipulates the commit-graph file at the position
+# by overwriting it with the data, then runs 'git commit-graph verify'
+# and places the output in the file 'err'. Test 'err' for
+# the given string.
+corrupt_graph_and_verify() {
+ pos=$1
+ data="${2:-\0}"
+ grepstr=$3
+ cd "$TRASH_DIRECTORY/full" &&
+ test_when_finished mv commit-graph-backup $objdir/info/commit-graph &&
+ cp $objdir/info/commit-graph commit-graph-backup &&
+ printf "$data" | dd of="$objdir/info/commit-graph" bs=1 seek="$pos" conv=notrunc &&
+ test_must_fail git commit-graph verify 2>test_err &&
+ grep -v "^+" test_err >err &&
+ test_i18ngrep "$grepstr" err
+}
+
+test_expect_success 'detect bad signature' '
+ corrupt_graph_and_verify 0 "\0" \
+ "graph signature"
+'
+
+test_expect_success 'detect bad version' '
+ corrupt_graph_and_verify $GRAPH_BYTE_VERSION "\02" \
+ "graph version"
+'
+
+test_expect_success 'detect bad hash version' '
+ corrupt_graph_and_verify $GRAPH_BYTE_HASH "\02" \
+ "hash version"
+'
+
+test_expect_success 'detect low chunk count' '
+ corrupt_graph_and_verify $GRAPH_BYTE_CHUNK_COUNT "\02" \
+ "missing the .* chunk"
+'
+
+test_expect_success 'detect missing OID fanout chunk' '
+ corrupt_graph_and_verify $GRAPH_BYTE_OID_FANOUT_ID "\0" \
+ "missing the OID Fanout chunk"
+'
+
+test_expect_success 'detect missing OID lookup chunk' '
+ corrupt_graph_and_verify $GRAPH_BYTE_OID_LOOKUP_ID "\0" \
+ "missing the OID Lookup chunk"
+'
+
+test_expect_success 'detect missing commit data chunk' '
+ corrupt_graph_and_verify $GRAPH_BYTE_COMMIT_DATA_ID "\0" \
+ "missing the Commit Data chunk"
+'
+
+test_expect_success 'detect incorrect fanout' '
+ corrupt_graph_and_verify $GRAPH_BYTE_FANOUT1 "\01" \
+ "fanout value"
+'
+
+test_expect_success 'detect incorrect fanout final value' '
+ corrupt_graph_and_verify $GRAPH_BYTE_FANOUT2 "\01" \
+ "fanout value"
+'
+
+test_expect_success 'detect incorrect OID order' '
+ corrupt_graph_and_verify $GRAPH_BYTE_OID_LOOKUP_ORDER "\01" \
+ "incorrect OID order"
+'
+
+test_expect_success 'detect OID not in object database' '
+ corrupt_graph_and_verify $GRAPH_BYTE_OID_LOOKUP_MISSING "\01" \
+ "from object database"
+'
+
+test_expect_success 'detect incorrect tree OID' '
+ corrupt_graph_and_verify $GRAPH_BYTE_COMMIT_TREE "\01" \
+ "root tree OID for commit"
+'
+
+test_expect_success 'detect incorrect parent int-id' '
+ corrupt_graph_and_verify $GRAPH_BYTE_COMMIT_PARENT "\01" \
+ "invalid parent"
+'
+
+test_expect_success 'detect extra parent int-id' '
+ corrupt_graph_and_verify $GRAPH_BYTE_COMMIT_EXTRA_PARENT "\00" \
+ "is too long"
+'
+
+test_expect_success 'detect wrong parent' '
+ corrupt_graph_and_verify $GRAPH_BYTE_COMMIT_WRONG_PARENT "\01" \
+ "commit-graph parent for"
+'
+
+test_expect_success 'detect incorrect generation number' '
+ corrupt_graph_and_verify $GRAPH_BYTE_COMMIT_GENERATION "\070" \
+ "generation for commit"
+'
+
+test_expect_success 'detect incorrect generation number' '
+ corrupt_graph_and_verify $GRAPH_BYTE_COMMIT_GENERATION "\01" \
+ "non-zero generation number"
+'
+
+test_expect_success 'detect incorrect commit date' '
+ corrupt_graph_and_verify $GRAPH_BYTE_COMMIT_DATE "\01" \
+ "commit date"
+'
+
+test_expect_success 'detect incorrect parent for octopus merge' '
+ corrupt_graph_and_verify $GRAPH_BYTE_OCTOPUS "\01" \
+ "invalid parent"
+'
+
+test_expect_success 'detect invalid checksum hash' '
+ corrupt_graph_and_verify $GRAPH_BYTE_FOOTER "\00" \
+ "incorrect checksum"
+'
+
+test_expect_success 'git fsck (checks commit-graph)' '
+ cd "$TRASH_DIRECTORY/full" &&
+ git fsck &&
+ corrupt_graph_and_verify $GRAPH_BYTE_FOOTER "\00" \
+ "incorrect checksum" &&
+ test_must_fail git fsck
+'
+
+test_expect_success 'setup non-the_repository tests' '
+ rm -rf repo &&
+ git init repo &&
+ test_commit -C repo one &&
+ test_commit -C repo two &&
+ git -C repo config core.commitGraph true &&
+ git -C repo rev-parse two | \
+ git -C repo commit-graph write --stdin-commits
+'
+
+test_expect_success 'parse_commit_in_graph works for non-the_repository' '
+ test-tool repository parse_commit_in_graph \
+ repo/.git repo "$(git -C repo rev-parse two)" >actual &&
+ echo $(git -C repo log --pretty="%ct" -1) \
+ $(git -C repo rev-parse one) >expect &&
+ test_cmp expect actual &&
+
+ test-tool repository parse_commit_in_graph \
+ repo/.git repo "$(git -C repo rev-parse one)" >actual &&
+ echo $(git -C repo log --pretty="%ct" -1 one) >expect &&
+ test_cmp expect actual
+'
+
+test_expect_success 'get_commit_tree_in_graph works for non-the_repository' '
+ test-tool repository get_commit_tree_in_graph \
+ repo/.git repo "$(git -C repo rev-parse two)" >actual &&
+ echo $(git -C repo rev-parse two^{tree}) >expect &&
+ test_cmp expect actual &&
+
+ test-tool repository get_commit_tree_in_graph \
+ repo/.git repo "$(git -C repo rev-parse one)" >actual &&
+ echo $(git -C repo rev-parse one^{tree}) >expect &&
+ test_cmp expect actual
+'
+
test_done
diff --git a/t/t5400-send-pack.sh b/t/t5400-send-pack.sh
index 911eae1..f1932ea 100755
--- a/t/t5400-send-pack.sh
+++ b/t/t5400-send-pack.sh
@@ -86,7 +86,7 @@ test_expect_success 'push can be used to delete a ref' '
test_expect_success 'refuse deleting push with denyDeletes' '
(
cd victim &&
- ( git branch -D extra || : ) &&
+ test_might_fail git branch -D extra &&
git config receive.denyDeletes true &&
git branch extra master
) &&
@@ -119,7 +119,7 @@ test_expect_success 'override denyDeletes with git -c receive-pack' '
test_expect_success 'denyNonFastforwards trumps --force' '
(
cd victim &&
- ( git branch -D extra || : ) &&
+ test_might_fail git branch -D extra &&
git config receive.denyNonFastforwards true
) &&
victim_orig=$(cd victim && git rev-parse --verify master) &&
diff --git a/t/t5401-update-hooks.sh b/t/t5401-update-hooks.sh
index 7f278d8c..b5f886a 100755
--- a/t/t5401-update-hooks.sh
+++ b/t/t5401-update-hooks.sh
@@ -82,13 +82,13 @@ test_expect_success 'hooks ran' '
'
test_expect_success 'pre-receive hook input' '
- (echo $commit0 $commit1 refs/heads/master;
+ (echo $commit0 $commit1 refs/heads/master &&
echo $commit1 $commit0 refs/heads/tofail
) | test_cmp - victim.git/pre-receive.stdin
'
test_expect_success 'update hook arguments' '
- (echo refs/heads/master $commit0 $commit1;
+ (echo refs/heads/master $commit0 $commit1 &&
echo refs/heads/tofail $commit1 $commit0
) | test_cmp - victim.git/update.args
'
diff --git a/t/t5405-send-pack-rewind.sh b/t/t5405-send-pack-rewind.sh
index 4bda18a..235fb76 100755
--- a/t/t5405-send-pack-rewind.sh
+++ b/t/t5405-send-pack-rewind.sh
@@ -25,8 +25,7 @@ test_expect_success 'non forced push should die not segfault' '
(
cd another &&
- git push .. master:master
- test $? = 1
+ test_must_fail git push .. master:master
)
'
diff --git a/t/t5406-remote-rejects.sh b/t/t5406-remote-rejects.sh
index 59e80a5..ff06f99 100755
--- a/t/t5406-remote-rejects.sh
+++ b/t/t5406-remote-rejects.sh
@@ -6,8 +6,9 @@ test_description='remote push rejects are reported by client'
test_expect_success 'setup' '
mkdir .git/hooks &&
- (echo "#!/bin/sh" ; echo "exit 1") >.git/hooks/update &&
- chmod +x .git/hooks/update &&
+ write_script .git/hooks/update <<-\EOF &&
+ exit 1
+ EOF
echo 1 >file &&
git add file &&
git commit -m 1 &&
diff --git a/t/t5500-fetch-pack.sh b/t/t5500-fetch-pack.sh
index 3d33ab3..8f94523 100755
--- a/t/t5500-fetch-pack.sh
+++ b/t/t5500-fetch-pack.sh
@@ -259,7 +259,7 @@ test_expect_success 'clone shallow object count' '
test_expect_success 'pull in shallow repo with missing merge base' '
(
cd shallow &&
- git fetch --depth 4 .. A
+ git fetch --depth 4 .. A &&
test_must_fail git merge --allow-unrelated-histories FETCH_HEAD
)
'
@@ -403,7 +403,7 @@ test_expect_success 'fetch creating new shallow root' '
git fetch --depth=1 --progress 2>actual &&
# This should fetch only the empty commit, no tree or
# blob objects
- grep "remote: Total 1" actual
+ test_i18ngrep "remote: Total 1" actual
)
'
@@ -814,6 +814,39 @@ test_expect_success 'fetching deepen' '
)
'
+test_expect_success 'use ref advertisement to prune "have" lines sent' '
+ rm -rf server client &&
+ git init server &&
+ test_commit -C server both_have_1 &&
+ git -C server tag -d both_have_1 &&
+ test_commit -C server both_have_2 &&
+
+ git clone server client &&
+ test_commit -C server server_has &&
+ test_commit -C client client_has &&
+
+ # In both protocol v0 and v2, ensure that the parent of both_have_2 is
+ # not sent as a "have" line. The client should know that the server has
+ # both_have_2, so it only needs to inform the server that it has
+ # both_have_2, and the server can infer the rest.
+
+ rm -f trace &&
+ cp -r client clientv0 &&
+ GIT_TRACE_PACKET="$(pwd)/trace" git -C clientv0 \
+ fetch origin server_has both_have_2 &&
+ grep "have $(git -C client rev-parse client_has)" trace &&
+ grep "have $(git -C client rev-parse both_have_2)" trace &&
+ ! grep "have $(git -C client rev-parse both_have_2^)" trace &&
+
+ rm -f trace &&
+ cp -r client clientv2 &&
+ GIT_TRACE_PACKET="$(pwd)/trace" git -C clientv2 -c protocol.version=2 \
+ fetch origin server_has both_have_2 &&
+ grep "have $(git -C client rev-parse client_has)" trace &&
+ grep "have $(git -C client rev-parse both_have_2)" trace &&
+ ! grep "have $(git -C client rev-parse both_have_2^)" trace
+'
+
test_expect_success 'filtering by size' '
rm -rf server client &&
test_create_repo server &&
diff --git a/t/t5504-fetch-receive-strict.sh b/t/t5504-fetch-receive-strict.sh
index 49d3621..62f3569 100755
--- a/t/t5504-fetch-receive-strict.sh
+++ b/t/t5504-fetch-receive-strict.sh
@@ -3,13 +3,16 @@
test_description='fetch/receive strict mode'
. ./test-lib.sh
-test_expect_success setup '
+test_expect_success 'setup and inject "corrupt or missing" object' '
echo hello >greetings &&
git add greetings &&
git commit -m greetings &&
S=$(git rev-parse :greetings | sed -e "s|^..|&/|") &&
X=$(echo bye | git hash-object -w --stdin | sed -e "s|^..|&/|") &&
+ echo $S >S &&
+ echo $X >X &&
+ cp .git/objects/$S .git/objects/$S.back &&
mv -f .git/objects/$X .git/objects/$S &&
test_must_fail git fsck
@@ -115,6 +118,13 @@ test_expect_success 'push with transfer.fsckobjects' '
test_cmp exp act
'
+test_expect_success 'repair the "corrupt or missing" object' '
+ mv -f .git/objects/$(cat S) .git/objects/$(cat X) &&
+ mv .git/objects/$(cat S).back .git/objects/$(cat S) &&
+ rm -rf .git/objects/$(cat X) &&
+ git fsck
+'
+
cat >bogus-commit <<EOF
tree $EMPTY_TREE
author Bugs Bunny 1234567890 +0000
@@ -123,6 +133,14 @@ committer Bugs Bunny <bugs@bun.ni> 1234567890 +0000
This commit object intentionally broken
EOF
+test_expect_success 'fsck with invalid or bogus skipList input' '
+ git -c fsck.skipList=/dev/null -c fsck.missingEmail=ignore fsck &&
+ test_must_fail git -c fsck.skipList=does-not-exist -c fsck.missingEmail=ignore fsck 2>err &&
+ test_i18ngrep "Could not open skip list: does-not-exist" err &&
+ test_must_fail git -c fsck.skipList=.git/config -c fsck.missingEmail=ignore fsck 2>err &&
+ test_i18ngrep "Invalid SHA-1: \[core\]" err
+'
+
test_expect_success 'push with receive.fsck.skipList' '
commit="$(git hash-object -t commit -w --stdin <bogus-commit)" &&
git push . $commit:refs/heads/bogus &&
@@ -130,11 +148,61 @@ test_expect_success 'push with receive.fsck.skipList' '
git init dst &&
git --git-dir=dst/.git config receive.fsckObjects true &&
test_must_fail git push --porcelain dst bogus &&
- git --git-dir=dst/.git config receive.fsck.skipList SKIP &&
echo $commit >dst/.git/SKIP &&
+
+ # receive.fsck.* does not fall back on fsck.*
+ git --git-dir=dst/.git config fsck.skipList SKIP &&
+ test_must_fail git push --porcelain dst bogus &&
+
+ # Invalid and/or bogus skipList input
+ git --git-dir=dst/.git config receive.fsck.skipList /dev/null &&
+ test_must_fail git push --porcelain dst bogus &&
+ git --git-dir=dst/.git config receive.fsck.skipList does-not-exist &&
+ test_must_fail git push --porcelain dst bogus 2>err &&
+ test_i18ngrep "Could not open skip list: does-not-exist" err &&
+ git --git-dir=dst/.git config receive.fsck.skipList config &&
+ test_must_fail git push --porcelain dst bogus 2>err &&
+ test_i18ngrep "Invalid SHA-1: \[core\]" err &&
+
+ git --git-dir=dst/.git config receive.fsck.skipList SKIP &&
git push --porcelain dst bogus
'
+test_expect_success 'fetch with fetch.fsck.skipList' '
+ commit="$(git hash-object -t commit -w --stdin <bogus-commit)" &&
+ refspec=refs/heads/bogus:refs/heads/bogus &&
+ git push . $commit:refs/heads/bogus &&
+ rm -rf dst &&
+ git init dst &&
+ git --git-dir=dst/.git config fetch.fsckObjects true &&
+ test_must_fail git --git-dir=dst/.git fetch "file://$(pwd)" $refspec &&
+ git --git-dir=dst/.git config fetch.fsck.skipList /dev/null &&
+ test_must_fail git --git-dir=dst/.git fetch "file://$(pwd)" $refspec &&
+ echo $commit >dst/.git/SKIP &&
+
+ # fetch.fsck.* does not fall back on fsck.*
+ git --git-dir=dst/.git config fsck.skipList dst/.git/SKIP &&
+ test_must_fail git --git-dir=dst/.git fetch "file://$(pwd)" $refspec &&
+
+ # Invalid and/or bogus skipList input
+ git --git-dir=dst/.git config fetch.fsck.skipList /dev/null &&
+ test_must_fail git --git-dir=dst/.git fetch "file://$(pwd)" $refspec &&
+ git --git-dir=dst/.git config fetch.fsck.skipList does-not-exist &&
+ test_must_fail git --git-dir=dst/.git fetch "file://$(pwd)" $refspec 2>err &&
+ test_i18ngrep "Could not open skip list: does-not-exist" err &&
+ git --git-dir=dst/.git config fetch.fsck.skipList dst/.git/config &&
+ test_must_fail git --git-dir=dst/.git fetch "file://$(pwd)" $refspec 2>err &&
+ test_i18ngrep "Invalid SHA-1: \[core\]" err &&
+
+ git --git-dir=dst/.git config fetch.fsck.skipList dst/.git/SKIP &&
+ git --git-dir=dst/.git fetch "file://$(pwd)" $refspec
+'
+
+test_expect_success 'fsck.<unknownmsg-id> dies' '
+ test_must_fail git -c fsck.whatEver=ignore fsck 2>err &&
+ test_i18ngrep "Unhandled message id: whatever" err
+'
+
test_expect_success 'push with receive.fsck.missingEmail=warn' '
commit="$(git hash-object -t commit -w --stdin <bogus-commit)" &&
git push . $commit:refs/heads/bogus &&
@@ -142,19 +210,58 @@ test_expect_success 'push with receive.fsck.missingEmail=warn' '
git init dst &&
git --git-dir=dst/.git config receive.fsckobjects true &&
test_must_fail git push --porcelain dst bogus &&
+
+ # receive.fsck.<msg-id> does not fall back on fsck.<msg-id>
+ git --git-dir=dst/.git config fsck.missingEmail warn &&
+ test_must_fail git push --porcelain dst bogus &&
+
+ # receive.fsck.<unknownmsg-id> warns
+ git --git-dir=dst/.git config \
+ receive.fsck.whatEver error &&
+
git --git-dir=dst/.git config \
receive.fsck.missingEmail warn &&
git push --porcelain dst bogus >act 2>&1 &&
grep "missingEmail" act &&
+ test_i18ngrep "Skipping unknown msg id.*whatever" act &&
git --git-dir=dst/.git branch -D bogus &&
git --git-dir=dst/.git config --add \
receive.fsck.missingEmail ignore &&
- git --git-dir=dst/.git config --add \
- receive.fsck.badDate warn &&
git push --porcelain dst bogus >act 2>&1 &&
! grep "missingEmail" act
'
+test_expect_success 'fetch with fetch.fsck.missingEmail=warn' '
+ commit="$(git hash-object -t commit -w --stdin <bogus-commit)" &&
+ refspec=refs/heads/bogus:refs/heads/bogus &&
+ git push . $commit:refs/heads/bogus &&
+ rm -rf dst &&
+ git init dst &&
+ git --git-dir=dst/.git config fetch.fsckobjects true &&
+ test_must_fail git --git-dir=dst/.git fetch "file://$(pwd)" $refspec &&
+
+ # fetch.fsck.<msg-id> does not fall back on fsck.<msg-id>
+ git --git-dir=dst/.git config fsck.missingEmail warn &&
+ test_must_fail git --git-dir=dst/.git fetch "file://$(pwd)" $refspec &&
+
+ # fetch.fsck.<unknownmsg-id> warns
+ git --git-dir=dst/.git config \
+ fetch.fsck.whatEver error &&
+
+ git --git-dir=dst/.git config \
+ fetch.fsck.missingEmail warn &&
+ git --git-dir=dst/.git fetch "file://$(pwd)" $refspec >act 2>&1 &&
+ grep "missingEmail" act &&
+ test_i18ngrep "Skipping unknown msg id.*whatever" act &&
+ rm -rf dst &&
+ git init dst &&
+ git --git-dir=dst/.git config fetch.fsckobjects true &&
+ git --git-dir=dst/.git config \
+ fetch.fsck.missingEmail ignore &&
+ git --git-dir=dst/.git fetch "file://$(pwd)" $refspec >act 2>&1 &&
+ ! grep "missingEmail" act
+'
+
test_expect_success \
'receive.fsck.unterminatedHeader=warn triggers error' '
rm -rf dst &&
@@ -166,4 +273,15 @@ test_expect_success \
grep "Cannot demote unterminatedheader" act
'
+test_expect_success \
+ 'fetch.fsck.unterminatedHeader=warn triggers error' '
+ rm -rf dst &&
+ git init dst &&
+ git --git-dir=dst/.git config fetch.fsckobjects true &&
+ git --git-dir=dst/.git config \
+ fetch.fsck.unterminatedheader warn &&
+ test_must_fail git --git-dir=dst/.git fetch "file://$(pwd)" HEAD >act 2>&1 &&
+ grep "Cannot demote unterminatedheader" act
+'
+
test_done
diff --git a/t/t5505-remote.sh b/t/t5505-remote.sh
index 6f8c244..241e6a3 100755
--- a/t/t5505-remote.sh
+++ b/t/t5505-remote.sh
@@ -346,17 +346,13 @@ URL: $(pwd)/one
EOF
test_expect_success 'prune --dry-run' '
- (
- cd one &&
- git branch -m side2 side) &&
+ git -C one branch -m side2 side &&
+ test_when_finished "git -C one branch -m side side2" &&
(
cd test &&
git remote prune --dry-run origin >output &&
git rev-parse refs/remotes/origin/side2 &&
test_must_fail git rev-parse refs/remotes/origin/side &&
- (
- cd ../one &&
- git branch -m side side2) &&
test_i18ncmp expect output
)
'
@@ -846,7 +842,7 @@ test_expect_success 'migrate a remote from named file in $GIT_DIR/branches (2)'
git remote rename origin origin &&
test_path_is_missing .git/branches/origin &&
test "$(git config remote.origin.url)" = "quux" &&
- test "$(git config remote.origin.fetch)" = "refs/heads/foom:refs/heads/origin"
+ test "$(git config remote.origin.fetch)" = "refs/heads/foom:refs/heads/origin" &&
test "$(git config remote.origin.push)" = "HEAD:refs/heads/foom"
)
'
@@ -874,7 +870,7 @@ test_expect_success 'remote prune to cause a dangling symref' '
cd eight &&
test_must_fail git branch nomore origin
) 2>err &&
- grep "dangling symref" err
+ test_i18ngrep "dangling symref" err
'
test_expect_success 'show empty remote' '
diff --git a/t/t5510-fetch.sh b/t/t5510-fetch.sh
index e402aee..5e810f4 100755
--- a/t/t5510-fetch.sh
+++ b/t/t5510-fetch.sh
@@ -535,6 +535,41 @@ test_expect_success "should be able to fetch with duplicate refspecs" '
)
'
+test_expect_success 'LHS of refspec follows ref disambiguation rules' '
+ mkdir lhs-ambiguous &&
+ (
+ cd lhs-ambiguous &&
+ git init server &&
+ test_commit -C server unwanted &&
+ test_commit -C server wanted &&
+
+ git init client &&
+
+ # Check a name coming after "refs" alphabetically ...
+ git -C server update-ref refs/heads/s wanted &&
+ git -C server update-ref refs/heads/refs/heads/s unwanted &&
+ git -C client fetch ../server +refs/heads/s:refs/heads/checkthis &&
+ git -C server rev-parse wanted >expect &&
+ git -C client rev-parse checkthis >actual &&
+ test_cmp expect actual &&
+
+ # ... and one before.
+ git -C server update-ref refs/heads/q wanted &&
+ git -C server update-ref refs/heads/refs/heads/q unwanted &&
+ git -C client fetch ../server +refs/heads/q:refs/heads/checkthis &&
+ git -C server rev-parse wanted >expect &&
+ git -C client rev-parse checkthis >actual &&
+ test_cmp expect actual &&
+
+ # Tags are preferred over branches like refs/{heads,tags}/*
+ git -C server update-ref refs/tags/t wanted &&
+ git -C server update-ref refs/heads/t unwanted &&
+ git -C client fetch ../server +t:refs/heads/checkthis &&
+ git -C server rev-parse wanted >expect &&
+ git -C client rev-parse checkthis >actual &&
+ test_cmp expect actual
+ )
+'
+
# configured prune tests
set_config_tristate () {
@@ -828,9 +863,11 @@ test_expect_success 'fetching with auto-gc does not lock up' '
test_commit test2 &&
(
cd auto-gc &&
+ git config fetch.unpackLimit 1 &&
git config gc.autoPackLimit 1 &&
git config gc.autoDetach false &&
GIT_ASK_YESNO="$D/askyesno" git fetch >fetch.out 2>&1 &&
+ test_i18ngrep "Auto packing the repository" fetch.out &&
! grep "Should I try again" fetch.out
)
'
@@ -865,4 +902,82 @@ test_expect_success C_LOCALE_OUTPUT 'fetch compact output' '
test_cmp expect actual
'
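+# setup_negotiation_tip builds a server with two branches (master and beta)
+# whose *_1 and *_2 commits the client already has from an earlier clone, then
+# adds server-only commits alpha_s and beta_s; the --negotiation-tip tests
+# below fetch those and inspect the packet trace for the "have" lines sent.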
+setup_negotiation_tip () {
+ SERVER="$1"
+ URL="$2"
+ USE_PROTOCOL_V2="$3"
+
+ rm -rf "$SERVER" client trace &&
+ git init "$SERVER" &&
+ test_commit -C "$SERVER" alpha_1 &&
+ test_commit -C "$SERVER" alpha_2 &&
+ git -C "$SERVER" checkout --orphan beta &&
+ test_commit -C "$SERVER" beta_1 &&
+ test_commit -C "$SERVER" beta_2 &&
+
+ git clone "$URL" client &&
+
+ if test "$USE_PROTOCOL_V2" -eq 1
+ then
+ git -C "$SERVER" config protocol.version 2 &&
+ git -C client config protocol.version 2
+ fi &&
+
+ test_commit -C "$SERVER" beta_s &&
+ git -C "$SERVER" checkout master &&
+ test_commit -C "$SERVER" alpha_s &&
+ git -C "$SERVER" tag -d alpha_1 alpha_2 beta_1 beta_2
+}
+
+check_negotiation_tip () {
+ # Ensure that {alpha,beta}_1 are sent as "have", but not {alpha,beta}_2
+ ALPHA_1=$(git -C client rev-parse alpha_1) &&
+ grep "fetch> have $ALPHA_1" trace &&
+ BETA_1=$(git -C client rev-parse beta_1) &&
+ grep "fetch> have $BETA_1" trace &&
+ ALPHA_2=$(git -C client rev-parse alpha_2) &&
+ ! grep "fetch> have $ALPHA_2" trace &&
+ BETA_2=$(git -C client rev-parse beta_2) &&
+ ! grep "fetch> have $BETA_2" trace
+}
+
+test_expect_success '--negotiation-tip limits "have" lines sent' '
+ setup_negotiation_tip server server 0 &&
+ GIT_TRACE_PACKET="$(pwd)/trace" git -C client fetch \
+ --negotiation-tip=alpha_1 --negotiation-tip=beta_1 \
+ origin alpha_s beta_s &&
+ check_negotiation_tip
+'
+
+test_expect_success '--negotiation-tip understands globs' '
+ setup_negotiation_tip server server 0 &&
+ GIT_TRACE_PACKET="$(pwd)/trace" git -C client fetch \
+ --negotiation-tip=*_1 \
+ origin alpha_s beta_s &&
+ check_negotiation_tip
+'
+
+test_expect_success '--negotiation-tip understands abbreviated SHA-1' '
+ setup_negotiation_tip server server 0 &&
+ GIT_TRACE_PACKET="$(pwd)/trace" git -C client fetch \
+ --negotiation-tip=$(git -C client rev-parse --short alpha_1) \
+ --negotiation-tip=$(git -C client rev-parse --short beta_1) \
+ origin alpha_s beta_s &&
+ check_negotiation_tip
+'
+
+. "$TEST_DIRECTORY"/lib-httpd.sh
+start_httpd
+
+test_expect_success '--negotiation-tip limits "have" lines sent with HTTP protocol v2' '
+ setup_negotiation_tip "$HTTPD_DOCUMENT_ROOT_PATH/server" \
+ "$HTTPD_URL/smart/server" 1 &&
+ GIT_TRACE_PACKET="$(pwd)/trace" git -C client fetch \
+ --negotiation-tip=alpha_1 --negotiation-tip=beta_1 \
+ origin alpha_s beta_s &&
+ check_negotiation_tip
+'
+
+stop_httpd
+
test_done
diff --git a/t/t5512-ls-remote.sh b/t/t5512-ls-remote.sh
index f2c05ae..bc5703f 100755
--- a/t/t5512-ls-remote.sh
+++ b/t/t5512-ls-remote.sh
@@ -15,7 +15,7 @@ test_expect_success setup '
git tag mark1.10 &&
git show-ref --tags -d | sed -e "s/ / /" >expected.tag &&
(
- echo "$(git rev-parse HEAD) HEAD"
+ echo "$(git rev-parse HEAD) HEAD" &&
git show-ref -d | sed -e "s/ / /"
) >expected.all &&
@@ -105,7 +105,7 @@ test_expect_success 'use branch.<name>.remote if possible' '
git clone . other.git &&
(
cd other.git &&
- echo "$(git rev-parse HEAD) HEAD"
+ echo "$(git rev-parse HEAD) HEAD" &&
git show-ref | sed -e "s/ / /"
) >exp &&
diff --git a/t/t5516-fetch-push.sh b/t/t5516-fetch-push.sh
index a5077d8..bd8f23e 100755
--- a/t/t5516-fetch-push.sh
+++ b/t/t5516-fetch-push.sh
@@ -923,7 +923,7 @@ test_expect_success 'push into aliased refs (consistent)' '
(
cd child1 &&
git branch foo &&
- git symbolic-ref refs/heads/bar refs/heads/foo
+ git symbolic-ref refs/heads/bar refs/heads/foo &&
git config receive.denyCurrentBranch false
) &&
(
@@ -945,7 +945,7 @@ test_expect_success 'push into aliased refs (inconsistent)' '
(
cd child1 &&
git branch foo &&
- git symbolic-ref refs/heads/bar refs/heads/foo
+ git symbolic-ref refs/heads/bar refs/heads/foo &&
git config receive.denyCurrentBranch false
) &&
(
@@ -1011,7 +1011,7 @@ test_expect_success 'push --porcelain rejected' '
mk_empty testrepo &&
git push testrepo refs/heads/master:refs/remotes/origin/master &&
(cd testrepo &&
- git reset --hard origin/master^
+ git reset --hard origin/master^ &&
git config receive.denyCurrentBranch true) &&
echo >.git/foo "To testrepo" &&
@@ -1025,7 +1025,7 @@ test_expect_success 'push --porcelain --dry-run rejected' '
mk_empty testrepo &&
git push testrepo refs/heads/master:refs/remotes/origin/master &&
(cd testrepo &&
- git reset --hard origin/master
+ git reset --hard origin/master &&
git config receive.denyCurrentBranch true) &&
echo >.git/foo "To testrepo" &&
@@ -1333,7 +1333,7 @@ test_expect_success 'push --follow-tag only pushes relevant tags' '
git commit --allow-empty -m "future commit" &&
git tag -m "future" future &&
git checkout master &&
- git for-each-ref refs/heads/master refs/tags/tag >../expect
+ git for-each-ref refs/heads/master refs/tags/tag >../expect &&
git push --follow-tag ../dst master
) &&
(
diff --git a/t/t5517-push-mirror.sh b/t/t5517-push-mirror.sh
index 02f160a..c05a661 100755
--- a/t/t5517-push-mirror.sh
+++ b/t/t5517-push-mirror.sh
@@ -71,7 +71,7 @@ test_expect_success 'push mirror force updates existing branches' '
git push --mirror up &&
echo two >foo && git add foo && git commit -m two &&
git push --mirror up &&
- git reset --hard HEAD^
+ git reset --hard HEAD^ &&
git push --mirror up
) &&
master_master=$(cd master && git show-ref -s --verify refs/heads/master) &&
@@ -88,7 +88,7 @@ test_expect_success 'push mirror removes branches' '
echo one >foo && git add foo && git commit -m one &&
git branch remove master &&
git push --mirror up &&
- git branch -D remove
+ git branch -D remove &&
git push --mirror up
) &&
(
@@ -170,7 +170,7 @@ test_expect_success 'push mirror force updates existing tags' '
echo two >foo && git add foo && git commit -m two &&
git tag -f tmaster master &&
git push --mirror up &&
- git reset --hard HEAD^
+ git reset --hard HEAD^ &&
git tag -f tmaster master &&
git push --mirror up
) &&
@@ -188,7 +188,7 @@ test_expect_success 'push mirror removes tags' '
echo one >foo && git add foo && git commit -m one &&
git tag -f tremove master &&
git push --mirror up &&
- git tag -d tremove
+ git tag -d tremove &&
git push --mirror up
) &&
(
@@ -235,7 +235,7 @@ test_expect_success 'remote.foo.mirror adds and removes branches' '
git branch keep master &&
git branch remove master &&
git push up &&
- git branch -D remove
+ git branch -D remove &&
git push up
) &&
(
diff --git a/t/t5520-pull.sh b/t/t5520-pull.sh
index 59c4b77..5e501c8 100755
--- a/t/t5520-pull.sh
+++ b/t/t5520-pull.sh
@@ -475,10 +475,22 @@ test_expect_success 'pull.rebase=interactive' '
false
EOF
test_set_editor "$TRASH_DIRECTORY/fake-editor" &&
+ test_when_finished "test_might_fail git rebase --abort" &&
test_must_fail git pull --rebase=interactive . copy &&
test "I was here" = "$(cat fake.out)"
'
+test_expect_success 'pull --rebase=i' '
+ write_script "$TRASH_DIRECTORY/fake-editor" <<-\EOF &&
+ echo I was here, too >fake.out &&
+ false
+ EOF
+ test_set_editor "$TRASH_DIRECTORY/fake-editor" &&
+ test_when_finished "test_might_fail git rebase --abort" &&
+ test_must_fail git pull --rebase=i . copy &&
+ test "I was here, too" = "$(cat fake.out)"
+'
+
test_expect_success 'pull.rebase=invalid fails' '
git reset --hard before-preserve-rebase &&
test_config pull.rebase invalid &&
@@ -618,6 +630,18 @@ test_expect_success 'pull --rebase fails on unborn branch with staged changes' '
)
'
+test_expect_success 'pull --rebase fails on corrupt HEAD' '
+ test_when_finished "rm -rf corrupt" &&
+ git init corrupt &&
+ (
+ cd corrupt &&
+ test_commit one &&
+ obj=$(git rev-parse --verify HEAD | sed "s#^..#&/#") &&
+ rm -f .git/objects/$obj &&
+ test_must_fail git pull --rebase
+ )
+'
+
test_expect_success 'setup for detecting upstreamed changes' '
mkdir src &&
(cd src &&
diff --git a/t/t5526-fetch-submodules.sh b/t/t5526-fetch-submodules.sh
index 359e03f..0f730d7 100755
--- a/t/t5526-fetch-submodules.sh
+++ b/t/t5526-fetch-submodules.sh
@@ -379,7 +379,7 @@ test_expect_success "'--recurse-submodules=on-demand' recurses as deep as necess
git config -f .gitmodules submodule.subdir/deepsubmodule.fetchRecursive false
) &&
git fetch --recurse-submodules=on-demand >../actual.out 2>../actual.err &&
- git config --unset fetch.recurseSubmodules
+ git config --unset fetch.recurseSubmodules &&
(
cd submodule &&
git config --unset -f .gitmodules submodule.subdir/deepsubmodule.fetchRecursive
diff --git a/t/t5531-deep-submodule-push.sh b/t/t5531-deep-submodule-push.sh
index 39cb2c1..e2c37fd 100755
--- a/t/t5531-deep-submodule-push.sh
+++ b/t/t5531-deep-submodule-push.sh
@@ -354,7 +354,7 @@ test_expect_success 'push succeeds if submodule has no remote and is on the firs
git clone a a1 &&
(
cd a1 &&
- git init b
+ git init b &&
(
cd b &&
>junk &&
diff --git a/t/t5534-push-signed.sh b/t/t5534-push-signed.sh
index 1cea758..030331f 100755
--- a/t/t5534-push-signed.sh
+++ b/t/t5534-push-signed.sh
@@ -194,10 +194,12 @@ test_expect_success GPG 'fail without key and heed user.signingkey' '
EOF
- unset GIT_COMMITTER_EMAIL &&
- git config user.email hasnokey@nowhere.com &&
- test_must_fail git push --signed dst noop ff +noff &&
- git config user.signingkey committer@example.com &&
+ test_config user.email hasnokey@nowhere.com &&
+ (
+ sane_unset GIT_COMMITTER_EMAIL &&
+ test_must_fail git push --signed dst noop ff +noff
+ ) &&
+ test_config user.signingkey $GIT_COMMITTER_EMAIL &&
git push --signed dst noop ff +noff &&
(
@@ -218,4 +220,57 @@ test_expect_success GPG 'fail without key and heed user.signingkey' '
test_cmp expect dst/push-cert-status
'
+test_expect_success GPGSM 'fail without key and heed user.signingkey x509' '
+ test_config gpg.format x509 &&
+ prepare_dst &&
+ mkdir -p dst/.git/hooks &&
+ git -C dst config receive.certnonceseed sekrit &&
+ write_script dst/.git/hooks/post-receive <<-\EOF &&
+ # discard the update list
+ cat >/dev/null
+ # record the push certificate
+ if test -n "${GIT_PUSH_CERT-}"
+ then
+ git cat-file blob $GIT_PUSH_CERT >../push-cert
+ fi &&
+
+ cat >../push-cert-status <<E_O_F
+ SIGNER=${GIT_PUSH_CERT_SIGNER-nobody}
+ KEY=${GIT_PUSH_CERT_KEY-nokey}
+ STATUS=${GIT_PUSH_CERT_STATUS-nostatus}
+ NONCE_STATUS=${GIT_PUSH_CERT_NONCE_STATUS-nononcestatus}
+ NONCE=${GIT_PUSH_CERT_NONCE-nononce}
+ E_O_F
+
+ EOF
+
+ test_config user.email hasnokey@nowhere.com &&
+ test_config user.signingkey "" &&
+ (
+ sane_unset GIT_COMMITTER_EMAIL &&
+ test_must_fail git push --signed dst noop ff +noff
+ ) &&
+ test_config user.signingkey $GIT_COMMITTER_EMAIL &&
+ git push --signed dst noop ff +noff &&
+
+ (
+ cat <<-\EOF &&
+ SIGNER=/CN=C O Mitter/O=Example/SN=C O/GN=Mitter
+ KEY=
+ STATUS=G
+ NONCE_STATUS=OK
+ EOF
+ sed -n -e "s/^nonce /NONCE=/p" -e "/^$/q" dst/push-cert
+ ) >expect.in &&
+ key=$(cat "${GNUPGHOME}/trustlist.txt" | cut -d" " -f1 | tr -d ":") &&
+ sed -e "s/^KEY=/KEY=${key}/" expect.in >expect &&
+
+ noop=$(git rev-parse noop) &&
+ ff=$(git rev-parse ff) &&
+ noff=$(git rev-parse noff) &&
+ grep "$noop $ff refs/heads/ff" dst/push-cert &&
+ grep "$noop $noff refs/heads/noff" dst/push-cert &&
+ test_cmp expect dst/push-cert-status
+'
+
test_done
diff --git a/t/t5541-http-push-smart.sh b/t/t5541-http-push-smart.sh
index a2af693..a0fc400 100755
--- a/t/t5541-http-push-smart.sh
+++ b/t/t5541-http-push-smart.sh
@@ -38,25 +38,16 @@ GET /smart/test_repo.git/info/refs?service=git-upload-pack HTTP/1.1 200
POST /smart/test_repo.git/git-upload-pack HTTP/1.1 200
EOF
test_expect_success 'no empty path components' '
+ # Clear the log, so that it does not affect the "used receive-pack
+ # service" test which reads the log too.
+ test_when_finished ">\"\$HTTPD_ROOT_PATH\"/access.log" &&
+
# In the URL, add a trailing slash, and see if git appends yet another
# slash.
cd "$ROOT_PATH" &&
git clone $HTTPD_URL/smart/test_repo.git/ test_repo_clone &&
- sed -e "
- s/^.* \"//
- s/\"//
- s/ [1-9][0-9]*\$//
- s/^GET /GET /
- " >act <"$HTTPD_ROOT_PATH"/access.log &&
-
- # Clear the log, so that it does not affect the "used receive-pack
- # service" test which reads the log too.
- #
- # We do this before the actual comparison to ensure the log is cleared.
- echo > "$HTTPD_ROOT_PATH"/access.log &&
-
- test_cmp exp act
+ check_access_log exp
'
test_expect_success 'clone remote repository' '
@@ -124,7 +115,6 @@ test_expect_success 'rejected update prints status' '
rm -f "$HTTPD_DOCUMENT_ROOT_PATH/test_repo.git/hooks/update"
cat >exp <<EOF
-
GET /smart/test_repo.git/info/refs?service=git-upload-pack HTTP/1.1 200
POST /smart/test_repo.git/git-upload-pack HTTP/1.1 200
GET /smart/test_repo.git/info/refs?service=git-receive-pack HTTP/1.1 200
@@ -138,13 +128,7 @@ GET /smart/test_repo.git/info/refs?service=git-receive-pack HTTP/1.1 200
POST /smart/test_repo.git/git-receive-pack HTTP/1.1 200
EOF
test_expect_success 'used receive-pack service' '
- sed -e "
- s/^.* \"//
- s/\"//
- s/ [1-9][0-9]*\$//
- s/^GET /GET /
- " >act <"$HTTPD_ROOT_PATH"/access.log &&
- test_cmp exp act
+ check_access_log exp
'
test_http_push_nonff "$HTTPD_DOCUMENT_ROOT_PATH"/test_repo.git \
diff --git a/t/t5543-atomic-push.sh b/t/t5543-atomic-push.sh
index 3480b33..7079bcf 100755
--- a/t/t5543-atomic-push.sh
+++ b/t/t5543-atomic-push.sh
@@ -178,7 +178,7 @@ test_expect_success 'atomic push obeys update hook preventing a branch to be pus
test_expect_success 'atomic push is not advertised if configured' '
mk_repo_pair &&
(
- cd upstream
+ cd upstream &&
git config receive.advertiseatomic 0
) &&
(
diff --git a/t/t5551-http-fetch-smart.sh b/t/t5551-http-fetch-smart.sh
index 913089b..771f36f 100755
--- a/t/t5551-http-fetch-smart.sh
+++ b/t/t5551-http-fetch-smart.sh
@@ -103,13 +103,7 @@ GET /smart/repo.git/info/refs?service=git-upload-pack HTTP/1.1 200
POST /smart/repo.git/git-upload-pack HTTP/1.1 200
EOF
test_expect_success 'used upload-pack service' '
- sed -e "
- s/^.* \"//
- s/\"//
- s/ [1-9][0-9]*\$//
- s/^GET /GET /
- " >act <"$HTTPD_ROOT_PATH"/access.log &&
- test_cmp exp act
+ check_access_log exp
'
test_expect_success 'follow redirects (301)' '
@@ -369,6 +363,24 @@ test_expect_success 'custom http headers' '
submodule update sub
'
+test_expect_success 'using fetch command in remote-curl updates refs' '
+ SERVER="$HTTPD_DOCUMENT_ROOT_PATH/twobranch" &&
+ rm -rf "$SERVER" client &&
+
+ git init "$SERVER" &&
+ test_commit -C "$SERVER" foo &&
+ git -C "$SERVER" update-ref refs/heads/anotherbranch foo &&
+
+ git clone $HTTPD_URL/smart/twobranch client &&
+
+ test_commit -C "$SERVER" bar &&
+ git -C client -c protocol.version=0 fetch &&
+
+ git -C "$SERVER" rev-parse master >expect &&
+ git -C client rev-parse origin/master >actual &&
+ test_cmp expect actual
+'
+
test_expect_success 'GIT_REDACT_COOKIES redacts cookies' '
rm -rf clone &&
echo "Set-Cookie: Foo=1" >cookies &&
diff --git a/t/t5552-skipping-fetch-negotiator.sh b/t/t5552-skipping-fetch-negotiator.sh
new file mode 100755
index 0000000..3b60bd4
--- /dev/null
+++ b/t/t5552-skipping-fetch-negotiator.sh
@@ -0,0 +1,202 @@
+#!/bin/sh
+
+test_description='test skipping fetch negotiator'
+. ./test-lib.sh
+
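+# have_sent/have_not_sent scan the packet trace written by GIT_TRACE_PACKET
+# for "have <oid>" lines matching the named commits in the client repository.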
+have_sent () {
+ while test "$#" -ne 0
+ do
+ grep "fetch> have $(git -C client rev-parse $1)" trace
+ if test $? -ne 0
+ then
+ echo "No have $(git -C client rev-parse $1) ($1)"
+ return 1
+ fi
+ shift
+ done
+}
+
+have_not_sent () {
+ while test "$#" -ne 0
+ do
+ grep "fetch> have $(git -C client rev-parse $1)" trace
+ if test $? -eq 0
+ then
+ return 1
+ fi
+ shift
+ done
+}
+
+test_expect_success 'commits with no parents are sent regardless of skip distance' '
+ git init server &&
+ test_commit -C server to_fetch &&
+
+ git init client &&
+ for i in $(seq 7)
+ do
+ test_commit -C client c$i
+ done &&
+
+ # We send: "c7" (skip 1) "c5" (skip 2) "c2" (skip 4). After that, since
+ # "c1" has no parent, it is still sent as "have" even though it would
+ # normally be skipped.
+ test_config -C client fetch.negotiationalgorithm skipping &&
+ GIT_TRACE_PACKET="$(pwd)/trace" git -C client fetch "$(pwd)/server" &&
+ have_sent c7 c5 c2 c1 &&
+ have_not_sent c6 c4 c3
+'
+
+test_expect_success 'unknown fetch.negotiationAlgorithm values error out' '
+ rm -rf server client trace &&
+ git init server &&
+ test_commit -C server to_fetch &&
+
+ git init client &&
+ test_commit -C client on_client &&
+ git -C client checkout on_client &&
+
+ test_config -C client fetch.negotiationAlgorithm invalid &&
+ test_must_fail git -C client fetch "$(pwd)/server" 2>err &&
+ test_i18ngrep "unknown fetch negotiation algorithm" err &&
+
+ # Explicit "default" value
+ test_config -C client fetch.negotiationAlgorithm default &&
+ git -C client -c fetch.negotiationAlgorithm=default fetch "$(pwd)/server" &&
+
+ # Implementation detail: If there is nothing to fetch, we will not error out
+ test_config -C client fetch.negotiationAlgorithm invalid &&
+ git -C client fetch "$(pwd)/server" 2>err &&
+ test_i18ngrep ! "unknown fetch negotiation algorithm" err
+'
+
+test_expect_success 'when two skips collide, favor the larger one' '
+ rm -rf server client trace &&
+ git init server &&
+ test_commit -C server to_fetch &&
+
+ git init client &&
+ for i in $(seq 11)
+ do
+ test_commit -C client c$i
+ done &&
+ git -C client checkout c5 &&
+ test_commit -C client c5side &&
+
+ # Before reaching c5, we send "c5side" (skip 1) and "c11" (skip 1) "c9"
+ # (skip 2) "c6" (skip 4). The larger skip (skip 4) takes precedence, so
+ # the next "have" sent will be "c1" (from "c6" skip 4) and not "c4"
+ # (from "c5side" skip 1).
+ test_config -C client fetch.negotiationalgorithm skipping &&
+ GIT_TRACE_PACKET="$(pwd)/trace" git -C client fetch "$(pwd)/server" &&
+ have_sent c5side c11 c9 c6 c1 &&
+ have_not_sent c10 c8 c7 c5 c4 c3 c2
+'
+
+test_expect_success 'use ref advertisement to filter out commits' '
+ rm -rf server client trace &&
+ git init server &&
+ test_commit -C server c1 &&
+ test_commit -C server c2 &&
+ test_commit -C server c3 &&
+ git -C server tag -d c1 c2 c3 &&
+
+ git clone server client &&
+ test_commit -C client c4 &&
+ test_commit -C client c5 &&
+ git -C client checkout c4^^ &&
+ test_commit -C client c2side &&
+
+ git -C server checkout --orphan anotherbranch &&
+ test_commit -C server to_fetch &&
+
+ # The server advertising "c3" (as "refs/heads/master") means that we do
+ # not need to send any ancestors of "c3", but we still need to send "c3"
+ # itself.
+ test_config -C client fetch.negotiationalgorithm skipping &&
+ GIT_TRACE_PACKET="$(pwd)/trace" git -C client fetch origin to_fetch &&
+ have_sent c5 c4^ c2side &&
+ have_not_sent c4 c4^^ c4^^^
+'
+
+test_expect_success 'handle clock skew' '
+ rm -rf server client trace &&
+ git init server &&
+ test_commit -C server to_fetch &&
+
+ git init client &&
+
+ # 2 regular commits
+ test_tick=2000000000 &&
+ test_commit -C client c1 &&
+ test_commit -C client c2 &&
+
+ # 4 old commits
+ test_tick=1000000000 &&
+ git -C client checkout c1 &&
+ test_commit -C client old1 &&
+ test_commit -C client old2 &&
+ test_commit -C client old3 &&
+ test_commit -C client old4 &&
+
+ # "c2" and "c1" are popped first, then "old4" to "old1". "old1" would
+ # normally be skipped, but is treated as a commit without a parent here
+ # and sent, because (due to clock skew) its only parent has already been
+ # popped off the priority queue.
+ test_config -C client fetch.negotiationalgorithm skipping &&
+ GIT_TRACE_PACKET="$(pwd)/trace" git -C client fetch "$(pwd)/server" &&
+ have_sent c2 c1 old4 old2 old1 &&
+ have_not_sent old3
+'
+
+test_expect_success 'do not send "have" with ancestors of commits that server ACKed' '
+ rm -rf server client trace &&
+ git init server &&
+ test_commit -C server to_fetch &&
+
+ git init client &&
+ for i in $(seq 8)
+ do
+ git -C client checkout --orphan b$i &&
+ test_commit -C client b$i.c0
+ done &&
+ for j in $(seq 19)
+ do
+ for i in $(seq 8)
+ do
+ git -C client checkout b$i &&
+ test_commit -C client b$i.c$j
+ done
+ done &&
+
+ # Copy this branch over to the server and add a commit on it so that it
+ # is reachable but not advertised.
+ git -C server fetch --no-tags "$(pwd)/client" b1:refs/heads/b1 &&
+ git -C server checkout b1 &&
+ test_commit -C server commit-on-b1 &&
+
+ test_config -C client fetch.negotiationalgorithm skipping &&
+ GIT_TRACE_PACKET="$(pwd)/trace" git -C client fetch "$(pwd)/server" to_fetch &&
+ grep " fetch" trace &&
+
+ # fetch-pack sends 2 requests each containing 16 "have" lines before
+ # processing the first response. In these 2 requests, 4 commits from
+ # each branch are sent. Just check the first branch.
+ have_sent b1.c19 b1.c17 b1.c14 b1.c9 &&
+ have_not_sent b1.c18 b1.c16 b1.c15 b1.c13 b1.c12 b1.c11 b1.c10 &&
+
+ # While fetch-pack is processing the first response, it should read that
+ # the server ACKs b1.c19 and b1.c17.
+ grep "fetch< ACK $(git -C client rev-parse b1.c19) common" trace &&
+ grep "fetch< ACK $(git -C client rev-parse b1.c17) common" trace &&
+
+ # fetch-pack should thus not send any more commits in the b1 branch, but
+ # should still send the others (in this test, just check b2).
+ for i in $(seq 0 8)
+ do
+ have_not_sent b1.c$i
+ done &&
+ have_sent b2.c1 b2.c0
+'
+
+test_done
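
The have_sent/have_not_sent helpers used throughout these tests are defined earlier in this test script and are not part of this hunk. The same information can be pulled out of the packet trace by hand; a rough sketch, assuming the usual GIT_TRACE_PACKET line format ("... fetch> have <oid>"):

    GIT_TRACE_PACKET="$(pwd)/trace" \
        git -C client -c fetch.negotiationAlgorithm=skipping \
        fetch "$(pwd)/server" &&
    # extract the object names the client offered as "have" lines, in order
    sed -n 's/.*fetch> have \([0-9a-f]\{40\}\).*/\1/p' trace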
diff --git a/t/t5561-http-backend.sh b/t/t5561-http-backend.sh
index 84a9557..1c49054 100755
--- a/t/t5561-http-backend.sh
+++ b/t/t5561-http-backend.sh
@@ -129,13 +129,7 @@ GET /smart/repo.git/info/refs?service=git-receive-pack HTTP/1.1 403 -
POST /smart/repo.git/git-receive-pack HTTP/1.1 403 -
EOF
test_expect_success 'server request log matches test results' '
- sed -e "
- s/^.* \"//
- s/\"//
- s/ [1-9][0-9]*\$//
- s/^GET /GET /
- " >act <"$HTTPD_ROOT_PATH"/access.log &&
- test_cmp exp act
+ check_access_log exp
'
stop_httpd
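
The inline sed pipeline is replaced by the shared check_access_log helper, whose definition lives in the httpd test library rather than in this hunk. Judging from the sed being removed, a minimal version would look roughly like this (a sketch under that assumption, not the actual helper definition):

    check_access_log () {
        sed -e "
            s/^.* \"//
            s/\"//
            s/ [1-9][0-9]*\$//
            s/^GET /GET /
        " <"$HTTPD_ROOT_PATH"/access.log >act &&
        test_cmp "$1" act
    }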
diff --git a/t/t5562-http-backend-content-length.sh b/t/t5562-http-backend-content-length.sh
new file mode 100755
index 0000000..43570ce
--- /dev/null
+++ b/t/t5562-http-backend-content-length.sh
@@ -0,0 +1,156 @@
+#!/bin/sh
+
+test_description='test git-http-backend respects CONTENT_LENGTH'
+. ./test-lib.sh
+
+test_lazy_prereq GZIP 'gzip --version'
+
+verify_http_result() {
+ # Some fatal errors still produce status 200, so also check
+ # stderr for an error message.
+ if grep 'fatal:' act.err
+ then
+ return 1
+ fi
+
+ if ! grep "Status" act.out >act
+ then
+ printf "Status: 200 OK\r\n" >act
+ fi
+ printf "Status: $1\r\n" >exp &&
+ test_cmp exp act
+}
+
+test_http_env() {
+ handler_type="$1"
+ request_body="$2"
+ shift
+ env \
+ CONTENT_TYPE="application/x-git-$handler_type-pack-request" \
+ QUERY_STRING="/repo.git/git-$handler_type-pack" \
+ PATH_TRANSLATED="$PWD/.git/git-$handler_type-pack" \
+ GIT_HTTP_EXPORT_ALL=TRUE \
+ REQUEST_METHOD=POST \
+ "$TEST_DIRECTORY"/t5562/invoke-with-content-length.pl \
+ "$request_body" git http-backend >act.out 2>act.err
+}
+
+ssize_b100dots() {
+ # hardcoded ((size_t) SSIZE_MAX) + 1
+ case "$(build_option sizeof-size_t)" in
+ 8) echo 9223372036854775808;;
+ 4) echo 2147483648;;
+ *) die "Unexpected ssize_t size: $(build_option sizeof-size_t)";;
+ esac
+}
+
+test_expect_success 'setup' '
+ HTTP_CONTENT_ENCODING="identity" &&
+ export HTTP_CONTENT_ENCODING &&
+ git config http.receivepack true &&
+ test_commit c0 &&
+ test_commit c1 &&
+ hash_head=$(git rev-parse HEAD) &&
+ hash_prev=$(git rev-parse HEAD~1) &&
+ printf "want %s" "$hash_head" | packetize >fetch_body &&
+ printf 0000 >>fetch_body &&
+ printf "have %s" "$hash_prev" | packetize >>fetch_body &&
+ printf done | packetize >>fetch_body &&
+ test_copy_bytes 10 <fetch_body >fetch_body.trunc &&
+ hash_next=$(git commit-tree -p HEAD -m next HEAD^{tree}) &&
+ printf "%s %s refs/heads/newbranch\\0report-status\\n" "$_z40" "$hash_next" | packetize >push_body &&
+ printf 0000 >>push_body &&
+ echo "$hash_next" | git pack-objects --stdout >>push_body &&
+ test_copy_bytes 10 <push_body >push_body.trunc &&
+ : >empty_body
+'
+
+test_expect_success GZIP 'setup, compression related' '
+ gzip -c fetch_body >fetch_body.gz &&
+ test_copy_bytes 10 <fetch_body.gz >fetch_body.gz.trunc &&
+ gzip -c push_body >push_body.gz &&
+ test_copy_bytes 10 <push_body.gz >push_body.gz.trunc
+'
+
+test_expect_success 'fetch plain' '
+ test_http_env upload fetch_body &&
+ verify_http_result "200 OK"
+'
+
+test_expect_success 'fetch plain truncated' '
+ test_http_env upload fetch_body.trunc &&
+ ! verify_http_result "200 OK"
+'
+
+test_expect_success 'fetch plain empty' '
+ test_http_env upload empty_body &&
+ ! verify_http_result "200 OK"
+'
+
+test_expect_success GZIP 'fetch gzipped' '
+ test_env HTTP_CONTENT_ENCODING="gzip" test_http_env upload fetch_body.gz &&
+ verify_http_result "200 OK"
+'
+
+test_expect_success GZIP 'fetch gzipped truncated' '
+ test_env HTTP_CONTENT_ENCODING="gzip" test_http_env upload fetch_body.gz.trunc &&
+ ! verify_http_result "200 OK"
+'
+
+test_expect_success GZIP 'fetch gzipped empty' '
+ test_env HTTP_CONTENT_ENCODING="gzip" test_http_env upload empty_body &&
+ ! verify_http_result "200 OK"
+'
+
+test_expect_success GZIP 'push plain' '
+ test_when_finished "git branch -D newbranch" &&
+ test_http_env receive push_body &&
+ verify_http_result "200 OK" &&
+ git rev-parse newbranch >act.head &&
+ echo "$hash_next" >exp.head &&
+ test_cmp act.head exp.head
+'
+
+test_expect_success 'push plain truncated' '
+ test_http_env receive push_body.trunc &&
+ ! verify_http_result "200 OK"
+'
+
+test_expect_success 'push plain empty' '
+ test_http_env receive empty_body &&
+ ! verify_http_result "200 OK"
+'
+
+test_expect_success GZIP 'push gzipped' '
+ test_when_finished "git branch -D newbranch" &&
+ test_env HTTP_CONTENT_ENCODING="gzip" test_http_env receive push_body.gz &&
+ verify_http_result "200 OK" &&
+ git rev-parse newbranch >act.head &&
+ echo "$hash_next" >exp.head &&
+ test_cmp act.head exp.head
+'
+
+test_expect_success GZIP 'push gzipped truncated' '
+ test_env HTTP_CONTENT_ENCODING="gzip" test_http_env receive push_body.gz.trunc &&
+ ! verify_http_result "200 OK"
+'
+
+test_expect_success GZIP 'push gzipped empty' '
+ test_env HTTP_CONTENT_ENCODING="gzip" test_http_env receive empty_body &&
+ ! verify_http_result "200 OK"
+'
+
+test_expect_success 'CONTENT_LENGTH overflow ssize_t' '
+ NOT_FIT_IN_SSIZE=$(ssize_b100dots) &&
+ env \
+ CONTENT_TYPE=application/x-git-upload-pack-request \
+ QUERY_STRING=/repo.git/git-upload-pack \
+ PATH_TRANSLATED="$PWD"/.git/git-upload-pack \
+ GIT_HTTP_EXPORT_ALL=TRUE \
+ REQUEST_METHOD=POST \
+ CONTENT_LENGTH="$NOT_FIT_IN_SSIZE" \
+ git http-backend </dev/zero >/dev/null 2>err &&
+ grep "fatal:.*CONTENT_LENGTH" err
+'
+
+test_done
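
The hardcoded values returned by ssize_b100dots are ((size_t) SSIZE_MAX) + 1, i.e. 2^63 for an 8-byte size_t and 2^31 for a 4-byte one. A quick sanity check of that arithmetic (bc is used here purely for illustration and is not required by the test):

    echo "2^63" | bc    # 9223372036854775808
    echo "2^31" | bc    # 2147483648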
diff --git a/t/t5562/invoke-with-content-length.pl b/t/t5562/invoke-with-content-length.pl
new file mode 100755
index 0000000..6c2aae7
--- /dev/null
+++ b/t/t5562/invoke-with-content-length.pl
@@ -0,0 +1,37 @@
+#!/usr/bin/perl
+use 5.008;
+use strict;
+use warnings;
+
+my $body_filename = $ARGV[0];
+my @command = @ARGV[1 .. $#ARGV];
+
+# read data
+my $body_size = -s $body_filename;
+$ENV{"CONTENT_LENGTH"} = $body_size;
+open(my $body_fh, "<", $body_filename) or die "Cannot open $body_filename: $!";
+my $body_data;
+defined read($body_fh, $body_data, $body_size) or die "Cannot read $body_filename: $!";
+close($body_fh);
+
+my $exited = 0;
+$SIG{"CHLD"} = sub {
+ $exited = 1;
+};
+
+# write data
+my $pid = open(my $out, "|-", @command);
+{
+ # disable buffering at $out
+ my $old_selected = select;
+ select $out;
+ $| = 1;
+ select $old_selected;
+}
+print $out $body_data or die "Cannot write data: $!";
+
+sleep 60; # is interrupted by SIGCHLD
+if (!$exited) {
+ close($out);
+ die "Command did not exit after reading whole body";
+}
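
The helper sets CONTENT_LENGTH from the size of the body file, writes the body to the command's stdin, and fails if the command does not exit on its own after consuming it. A typical invocation, mirroring test_http_env above (the body file name is only an example):

    CONTENT_TYPE=application/x-git-upload-pack-request \
    QUERY_STRING=/repo.git/git-upload-pack \
    PATH_TRANSLATED="$PWD/.git/git-upload-pack" \
    GIT_HTTP_EXPORT_ALL=TRUE \
    REQUEST_METHOD=POST \
    "$TEST_DIRECTORY"/t5562/invoke-with-content-length.pl \
        fetch_body git http-backend >act.out 2>act.err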
diff --git a/t/t5570-git-daemon.sh b/t/t5570-git-daemon.sh
index 0d4c520..a571f22 100755
--- a/t/t5570-git-daemon.sh
+++ b/t/t5570-git-daemon.sh
@@ -7,9 +7,9 @@ test_description='test fetching over git protocol'
start_git_daemon
check_verbose_connect () {
- grep -F "Looking up 127.0.0.1 ..." stderr &&
- grep -F "Connecting to 127.0.0.1 (port " stderr &&
- grep -F "done." stderr
+ test_i18ngrep -F "Looking up 127.0.0.1 ..." stderr &&
+ test_i18ngrep -F "Connecting to 127.0.0.1 (port " stderr &&
+ test_i18ngrep -F "done." stderr
}
test_expect_success 'setup repository' '
diff --git a/t/t5601-clone.sh b/t/t5601-clone.sh
index 0b62037..ddaa96a 100755
--- a/t/t5601-clone.sh
+++ b/t/t5601-clone.sh
@@ -618,7 +618,7 @@ hex2oct () {
test_expect_success 'clone on case-insensitive fs' '
git init icasefs &&
(
- cd icasefs
+ cd icasefs &&
o=$(git hash-object -w --stdin </dev/null | hex2oct) &&
t=$(printf "100644 X\0${o}100644 x\0${o}" |
git hash-object -w -t tree --stdin) &&
diff --git a/t/t5605-clone-local.sh b/t/t5605-clone-local.sh
index 3c087e9..af23419 100755
--- a/t/t5605-clone-local.sh
+++ b/t/t5605-clone-local.sh
@@ -94,7 +94,7 @@ test_expect_success 'clone empty repository' '
git config receive.denyCurrentBranch warn) &&
git clone empty empty-clone &&
test_tick &&
- (cd empty-clone
+ (cd empty-clone &&
echo "content" >> foo &&
git add foo &&
git commit -m "Initial commit" &&
diff --git a/t/t5608-clone-2gb.sh b/t/t5608-clone-2gb.sh
index df822d9..2c6bc07 100755
--- a/t/t5608-clone-2gb.sh
+++ b/t/t5608-clone-2gb.sh
@@ -23,7 +23,7 @@ test_expect_success CLONE_2GB 'setup' '
printf "blob\nmark :$i\ndata $blobsize\n" &&
#test-tool genrandom $i $blobsize &&
printf "%-${blobsize}s" $i &&
- echo "M 100644 :$i $i" >> commit
+ echo "M 100644 :$i $i" >> commit &&
i=$(($i+1)) ||
echo $? > exit-status
done &&
diff --git a/t/t5616-partial-clone.sh b/t/t5616-partial-clone.sh
index 44d8e80..bbbe753 100755
--- a/t/t5616-partial-clone.sh
+++ b/t/t5616-partial-clone.sh
@@ -216,6 +216,50 @@ test_expect_success 'upon cloning, check that all refs point to objects' '
! test -e "$HTTPD_ROOT_PATH/one-time-sed"
'
+test_expect_success 'when partial cloning, tolerate server not sending target of tag' '
+ SERVER="$HTTPD_DOCUMENT_ROOT_PATH/server" &&
+ rm -rf "$SERVER" repo &&
+ test_create_repo "$SERVER" &&
+ test_commit -C "$SERVER" foo &&
+ test_config -C "$SERVER" uploadpack.allowfilter 1 &&
+ test_config -C "$SERVER" uploadpack.allowanysha1inwant 1 &&
+
+ # Create an annotated tag pointing to a blob.
+ BLOB=$(echo blob-contents | git -C "$SERVER" hash-object --stdin -w) &&
+ git -C "$SERVER" tag -m message -a myblob "$BLOB" &&
+
+ # Craft a packfile including the tag, but not the blob it points to.
+ # Also, omit objects referenced from HEAD in order to force a second
+ # fetch (to fetch missing objects) upon the automatic checkout that
+ # happens after a clone.
+ printf "%s\n%s\n--not\n%s\n%s\n" \
+ $(git -C "$SERVER" rev-parse HEAD) \
+ $(git -C "$SERVER" rev-parse myblob) \
+ $(git -C "$SERVER" rev-parse HEAD^{tree}) \
+ $(git -C "$SERVER" rev-parse myblob^{blob}) |
+ git -C "$SERVER" pack-objects --thin --stdout >incomplete.pack &&
+
+ # Replace the existing packfile with the crafted one. The protocol
+ # requires that the packfile be sent in sideband 1, hence the extra
+ # \x01 byte at the beginning.
+ printf "1,/packfile/!c %04x\\\\x01%s0000" \
+ "$(($(wc -c <incomplete.pack) + 5))" \
+ "$(sed_escape <incomplete.pack)" \
+ >"$HTTPD_ROOT_PATH/one-time-sed" &&
+
+ # Use protocol v2 because the sed command looks for the "packfile"
+ # section header.
+ test_config -C "$SERVER" protocol.version 2 &&
+
+ # Exercise to make sure it works.
+ git -c protocol.version=2 clone \
+ --filter=blob:none $HTTPD_URL/one_time_sed/server repo 2> err &&
+ ! grep "missing object referenced by" err &&
+
+ # Ensure that the one-time-sed script was used.
+ ! test -e "$HTTPD_ROOT_PATH/one-time-sed"
+'
+
stop_httpd
test_done
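
The "+ 5" in the crafted pkt-line above accounts for the 4-byte length prefix (which counts itself) plus the 1-byte sideband indicator in front of the pack data. For a hypothetical 100-byte pack the prefix would therefore be:

    printf "%04x" $((4 + 1 + 100))    # prints 0069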
diff --git a/t/t5702-protocol-v2.sh b/t/t5702-protocol-v2.sh
index a4fe650..3beeed4 100755
--- a/t/t5702-protocol-v2.sh
+++ b/t/t5702-protocol-v2.sh
@@ -181,7 +181,12 @@ test_expect_success 'clone with file:// using protocol v2' '
test_cmp expect actual &&
# Server responded using protocol v2
- grep "clone< version 2" log
+ grep "clone< version 2" log &&
+
+ # Client sent ref-prefixes to filter the ref-advertisement
+ grep "ref-prefix HEAD" log &&
+ grep "ref-prefix refs/heads/" log &&
+ grep "ref-prefix refs/tags/" log
'
test_expect_success 'fetch with file:// using protocol v2' '
@@ -204,6 +209,7 @@ test_expect_success 'ref advertisment is filtered during fetch using protocol v2
test_when_finished "rm -f log" &&
test_commit -C file_parent three &&
+ git -C file_parent branch unwanted-branch three &&
GIT_TRACE_PACKET="$(pwd)/log" git -C file_child -c protocol.version=2 \
fetch origin master &&
@@ -212,9 +218,8 @@ test_expect_success 'ref advertisment is filtered during fetch using protocol v2
git -C file_parent log -1 --format=%s >expect &&
test_cmp expect actual &&
- ! grep "refs/tags/one" log &&
- ! grep "refs/tags/two" log &&
- ! grep "refs/tags/three" log
+ grep "refs/heads/master" log &&
+ ! grep "refs/heads/unwanted-branch" log
'
test_expect_success 'server-options are sent when fetching' '
@@ -359,6 +364,71 @@ test_expect_success 'default refspec is used to filter ref when fetchcing' '
grep "ref-prefix refs/tags/" log
'
+test_expect_success 'fetch supports various ways of have lines' '
+ rm -rf server client trace &&
+ git init server &&
+ test_commit -C server dwim &&
+ TREE=$(git -C server rev-parse HEAD^{tree}) &&
+ git -C server tag exact \
+ $(git -C server commit-tree -m a "$TREE") &&
+ git -C server tag dwim-unwanted \
+ $(git -C server commit-tree -m b "$TREE") &&
+ git -C server tag exact-unwanted \
+ $(git -C server commit-tree -m c "$TREE") &&
+ git -C server tag prefix1 \
+ $(git -C server commit-tree -m d "$TREE") &&
+ git -C server tag prefix2 \
+ $(git -C server commit-tree -m e "$TREE") &&
+ git -C server tag fetch-by-sha1 \
+ $(git -C server commit-tree -m f "$TREE") &&
+ git -C server tag completely-unrelated \
+ $(git -C server commit-tree -m g "$TREE") &&
+
+ git init client &&
+ GIT_TRACE_PACKET="$(pwd)/trace" git -C client -c protocol.version=2 \
+ fetch "file://$(pwd)/server" \
+ dwim \
+ refs/tags/exact \
+ refs/tags/prefix*:refs/tags/prefix* \
+ "$(git -C server rev-parse fetch-by-sha1)" &&
+
+ # Ensure that the appropriate prefixes are sent (using a sample)
+ grep "fetch> ref-prefix dwim" trace &&
+ grep "fetch> ref-prefix refs/heads/dwim" trace &&
+ grep "fetch> ref-prefix refs/tags/prefix" trace &&
+
+ # Ensure that the correct objects are returned
+ git -C client cat-file -e $(git -C server rev-parse dwim) &&
+ git -C client cat-file -e $(git -C server rev-parse exact) &&
+ git -C client cat-file -e $(git -C server rev-parse prefix1) &&
+ git -C client cat-file -e $(git -C server rev-parse prefix2) &&
+ git -C client cat-file -e $(git -C server rev-parse fetch-by-sha1) &&
+ test_must_fail git -C client cat-file -e \
+ $(git -C server rev-parse dwim-unwanted) &&
+ test_must_fail git -C client cat-file -e \
+ $(git -C server rev-parse exact-unwanted) &&
+ test_must_fail git -C client cat-file -e \
+ $(git -C server rev-parse completely-unrelated)
+'
+
+test_expect_success 'fetch supports include-tag and tag following' '
+ rm -rf server client trace &&
+ git init server &&
+
+ test_commit -C server to_fetch &&
+ git -C server tag -a annotated_tag -m message &&
+
+ git init client &&
+ GIT_TRACE_PACKET="$(pwd)/trace" git -C client -c protocol.version=2 \
+ fetch "$(pwd)/server" to_fetch:to_fetch &&
+
+ grep "fetch> ref-prefix to_fetch" trace &&
+ grep "fetch> ref-prefix refs/tags/" trace &&
+ grep "fetch> include-tag" trace &&
+
+ git -C client cat-file -e $(git -C client rev-parse annotated_tag)
+'
+
# Test protocol v2 with 'http://' transport
#
. "$TEST_DIRECTORY"/lib-httpd.sh
diff --git a/t/t5801-remote-helpers.sh b/t/t5801-remote-helpers.sh
index 362b158..aaaa722 100755
--- a/t/t5801-remote-helpers.sh
+++ b/t/t5801-remote-helpers.sh
@@ -96,7 +96,7 @@ test_expect_success 'push new branch with old:new refspec' '
test_expect_success 'push new branch with HEAD:new refspec' '
(cd local &&
- git checkout new-name
+ git checkout new-name &&
git push origin HEAD:new-refspec-2
) &&
compare_refs local HEAD server refs/heads/new-refspec-2
@@ -126,7 +126,7 @@ test_expect_success 'forced push' '
test_expect_success 'cloning without refspec' '
GIT_REMOTE_TESTGIT_REFSPEC="" \
git clone "testgit::${PWD}/server" local2 2>error &&
- grep "This remote helper should implement refspec capability" error &&
+ test_i18ngrep "this remote helper should implement refspec capability" error &&
compare_refs local2 HEAD server HEAD
'
@@ -134,7 +134,7 @@ test_expect_success 'pulling without refspecs' '
(cd local2 &&
git reset --hard &&
GIT_REMOTE_TESTGIT_REFSPEC="" git pull 2>../error) &&
- grep "This remote helper should implement refspec capability" error &&
+ test_i18ngrep "this remote helper should implement refspec capability" error &&
compare_refs local2 HEAD server HEAD
'
@@ -146,7 +146,7 @@ test_expect_success 'pushing without refspecs' '
GIT_REMOTE_TESTGIT_REFSPEC="" &&
export GIT_REMOTE_TESTGIT_REFSPEC &&
test_must_fail git push 2>../error) &&
- grep "remote-helper doesn.t support push; refspec needed" error
+ test_i18ngrep "remote-helper doesn.t support push; refspec needed" error
'
test_expect_success 'pulling without marks' '
@@ -246,7 +246,7 @@ test_expect_success 'proper failure checks for fetching' '
(cd local &&
test_must_fail env GIT_REMOTE_TESTGIT_FAILURE=1 git fetch 2>error &&
cat error &&
- grep -q "Error while running fast-import" error
+ test_i18ngrep -q "error while running fast-import" error
)
'
diff --git a/t/t6010-merge-base.sh b/t/t6010-merge-base.sh
index aa2d360..44c726e 100755
--- a/t/t6010-merge-base.sh
+++ b/t/t6010-merge-base.sh
@@ -245,7 +245,7 @@ test_expect_success 'using reflog to find the fork point' '
git commit --allow-empty -m "Derived #$count" &&
git rev-parse HEAD >derived$count &&
git checkout -B base $E || exit 1
- done
+ done &&
for count in 1 2 3
do
diff --git a/t/t6020-merge-df.sh b/t/t6020-merge-df.sh
index 2af1bee..46b506b 100755
--- a/t/t6020-merge-df.sh
+++ b/t/t6020-merge-df.sh
@@ -89,9 +89,6 @@ test_expect_success 'modify/delete + directory/file conflict' '
'
test_expect_success 'modify/delete + directory/file conflict; other way' '
- # Yes, we really need the double reset since "letters" appears as
- # both a file and a directory.
- git reset --hard &&
git reset --hard &&
git clean -f &&
git checkout modify^0 &&
diff --git a/t/t6029-merge-subtree.sh b/t/t6029-merge-subtree.sh
index 3e69245..793f0c8 100755
--- a/t/t6029-merge-subtree.sh
+++ b/t/t6029-merge-subtree.sh
@@ -29,6 +29,34 @@ test_expect_success 'subtree available and works like recursive' '
'
+test_expect_success 'setup branch sub' '
+ git checkout --orphan sub &&
+ git rm -rf . &&
+ test_commit foo
+'
+
+test_expect_success 'setup branch main' '
+ git checkout -b main master &&
+ git merge -s ours --no-commit --allow-unrelated-histories sub &&
+ git read-tree --prefix=dir/ -u sub &&
+ git commit -m "initial merge of sub into main" &&
+ test_path_is_file dir/foo.t &&
+ test_path_is_file hello
+'
+
+test_expect_success 'update branch sub' '
+ git checkout sub &&
+ test_commit bar
+'
+
+test_expect_success 'update branch main' '
+ git checkout main &&
+ git merge -s subtree sub -m "second merge of sub into main" &&
+ test_path_is_file dir/bar.t &&
+ test_path_is_file dir/foo.t &&
+ test_path_is_file hello
+'
+
test_expect_success 'setup' '
mkdir git-gui &&
cd git-gui &&
@@ -55,7 +83,7 @@ test_expect_success 'initial merge' '
git checkout -b work &&
git ls-files -s >actual &&
(
- echo "100644 $o1 0 git-gui/git-gui.sh"
+ echo "100644 $o1 0 git-gui/git-gui.sh" &&
echo "100644 $o2 0 git.c"
) >expected &&
test_cmp expected actual
@@ -72,7 +100,7 @@ test_expect_success 'merge update' '
git pull -s subtree gui master2 &&
git ls-files -s >actual &&
(
- echo "100644 $o3 0 git-gui/git-gui.sh"
+ echo "100644 $o3 0 git-gui/git-gui.sh" &&
echo "100644 $o2 0 git.c"
) >expected &&
test_cmp expected actual
@@ -88,8 +116,8 @@ test_expect_success 'initial ambiguous subtree' '
git checkout -b work2 &&
git ls-files -s >actual &&
(
- echo "100644 $o1 0 git-gui/git-gui.sh"
- echo "100644 $o1 0 git-gui2/git-gui.sh"
+ echo "100644 $o1 0 git-gui/git-gui.sh" &&
+ echo "100644 $o1 0 git-gui2/git-gui.sh" &&
echo "100644 $o2 0 git.c"
) >expected &&
test_cmp expected actual
@@ -101,8 +129,8 @@ test_expect_success 'merge using explicit' '
git pull -Xsubtree=git-gui gui master2 &&
git ls-files -s >actual &&
(
- echo "100644 $o3 0 git-gui/git-gui.sh"
- echo "100644 $o1 0 git-gui2/git-gui.sh"
+ echo "100644 $o3 0 git-gui/git-gui.sh" &&
+ echo "100644 $o1 0 git-gui2/git-gui.sh" &&
echo "100644 $o2 0 git.c"
) >expected &&
test_cmp expected actual
@@ -114,8 +142,8 @@ test_expect_success 'merge2 using explicit' '
git pull -Xsubtree=git-gui2 gui master2 &&
git ls-files -s >actual &&
(
- echo "100644 $o1 0 git-gui/git-gui.sh"
- echo "100644 $o3 0 git-gui2/git-gui.sh"
+ echo "100644 $o1 0 git-gui/git-gui.sh" &&
+ echo "100644 $o3 0 git-gui2/git-gui.sh" &&
echo "100644 $o2 0 git.c"
) >expected &&
test_cmp expected actual
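
The tests above exercise both spellings of subtree merging: "-s subtree" lets git guess the prefix by matching trees, while "-Xsubtree=<prefix>" passes an explicit prefix to the default recursive strategy. In short (branch and prefix names here are only illustrative):

    git merge -s subtree sub                 # let git infer the dir/ prefix
    git merge -X subtree=git-gui gui/master  # name the prefix explicitly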
diff --git a/t/t6036-recursive-corner-cases.sh b/t/t6036-recursive-corner-cases.sh
index b562130..59e52c5 100755
--- a/t/t6036-recursive-corner-cases.sh
+++ b/t/t6036-recursive-corner-cases.sh
@@ -72,7 +72,7 @@ test_expect_success 'merge simple rename+criss-cross with no modifications' '
git rev-parse >actual \
:2:three :3:three &&
git hash-object >>actual \
- three~HEAD three~R2^0
+ three~HEAD three~R2^0 &&
test_cmp expect actual
)
'
@@ -148,7 +148,7 @@ test_expect_success 'merge criss-cross + rename merges with basic modification'
git rev-parse >actual \
:2:three :3:three &&
git hash-object >>actual \
- three~HEAD three~R2^0
+ three~HEAD three~R2^0 &&
test_cmp expect actual
)
'
@@ -228,7 +228,7 @@ test_expect_success 'git detects differently handled merges conflict' '
D:new_a E:new_a &&
git rev-parse >actual \
:2:new_a :3:new_a &&
- test_cmp expect actual
+ test_cmp expect actual &&
git cat-file -p B:new_a >ours &&
git cat-file -p C:new_a >theirs &&
@@ -345,40 +345,97 @@ test_expect_success 'git detects conflict merging criss-cross+modify/delete, rev
)
'
+# SORRY FOR THE SUPER LONG DESCRIPTION, BUT THIS NEXT ONE IS HAIRY
#
# criss-cross + d/f conflict via add/add:
# Commit A: Neither file 'a' nor directory 'a/' exists.
# Commit B: Introduce 'a'
# Commit C: Introduce 'a/file'
-# Commit D: Merge B & C, keeping 'a' and deleting 'a/'
-#
-# Two different later cases:
+# Commit D1: Merge B & C, keeping 'a' and deleting 'a/'
# Commit E1: Merge B & C, deleting 'a' but keeping 'a/file'
-# Commit E2: Merge B & C, deleting 'a' but keeping a slightly modified 'a/file'
#
-# B D
+# B D1 or D2
# o---o
# / \ / \
# A o X ? F
# \ / \ /
# o---o
-# C E1 or E2
+# C E1 or E2 or E3
+#
+# I'll describe D2, E2, & E3 (which are alternatives for D1 & E1) more below...
+#
+# Merging D1 & E1 requires we first create a virtual merge base X from
+# merging A & B in memory. There are several possibilities for the merge-base:
+# 1: Keep both 'a' and 'a/file' (assuming crazy filesystem allowing a tree
+# with a directory and file at same path): results in merge of D1 & E1
+# being clean with both files deleted. Bad (no conflict detected).
+# 2: Keep 'a' but not 'a/file': Merging D1 & E1 is clean and matches E1. Bad.
+# 3: Keep 'a/file' but not 'a': Merging D1 & E1 is clean and matches D1. Bad.
+# 4: Keep neither file: Merging D1 & E1 reports the D/F add/add conflict.
+#
+# So 4 sounds good for this case, but if we were to merge D1 & E3, where E3
+# is defined as:
+# Commit E3: Merge B & C, keeping modified a, and deleting a/
+# then we'd get an add/add conflict for 'a', which seems suboptimal. A little
+# creativity leads us to an alternate choice:
+# 5: Keep 'a' as 'a~$UNIQUE' and a/file; results:
+# Merge D1 & E1: rename/delete conflict for 'a'; a/file silently deleted
+# Merge D1 & E3 is clean, as expected.
#
-# Merging D & E1 requires we first create a virtual merge base X from
-# merging A & B in memory. Now, if X could keep both 'a' and 'a/file' in
-# the index, then the merge of D & E1 could be resolved cleanly with both
-# 'a' and 'a/file' removed. Since git does not currently allow creating
-# such a tree, the best we can do is have X contain both 'a~<unique>' and
-# 'a/file' resulting in the merge of D and E1 having a rename/delete
-# conflict for 'a'. (Although this merge appears to be unsolvable with git
-# currently, git could do a lot better than it currently does with these
-# d/f conflicts, which is the purpose of this test.)
+# So choice 5 at least provides some kind of conflict for the original case,
+# and can merge cleanly as expected with D1 and E3. It also made things just
+# slightly funny for merging D1 and E4, where E4 is defined as:
+# Commit E4: Merge B & C, modifying 'a' and renaming to 'a2', and deleting 'a/'
+# in this case, we'll get a rename/rename(1to2) conflict because a~$UNIQUE
+# gets renamed to 'a' in D1 and to 'a2' in E4. But that's better than having
+# two files (both 'a' and 'a2') sitting around without the user being notified
+# that we could detect they were related and need to be merged. Also, choice
+# 5 makes the handling of 'a/file' seem suboptimal. What if we were to merge
+# D2 and E4, where D2 is:
+# Commit D2: Merge B & C, renaming 'a'->'a2', keeping 'a/file'
+# This would result in a clean merge with 'a2' having three-way merged
+# contents (good), and deleting 'a/' (bad) -- it doesn't detect the
+# conflict in how the two sides treated a/file.
+# Continuing down the creative route:
+# 6: Keep 'a' as 'a~$UNIQUE1' and keep 'a/' as 'a~$UNIQUE2/'; results:
+# Merge D1 & E1: rename/delete conflict for 'a' and each path under 'a/'.
+# Merge D1 & E3: clean, as expected.
+# Merge D1 & E4: rename/rename(1to2) conflict on 'a' vs 'a2'.
+# Merge D2 & E4: clean for 'a2', rename/delete for a/file
#
-# Merge of D & E2 has similar issues for path 'a', but should always result
-# in a modify/delete conflict for path 'a/file'.
+# Choice 6 could cause rename detection to take longer (providing more targets
+# that need to be searched). Also, the conflict message for each path under
+# 'a/' might be annoying unless we can detect it at the directory level, print
+# it once, and then suppress it for individual filepaths underneath.
#
-# We run each merge in both directions, to check for directional issues
-# with D/F conflict handling.
+#
+# As of the time of writing, git uses choice 5. Directory rename detection and
+# rename detection performance improvements might make choice 6 a desirable
+# improvement. But we can at least document where we fall short for now...
+#
+#
+# Historically, this testcase also used:
+# Commit E2: Merge B & C, deleting 'a' but keeping slightly modified 'a/file'
+# The merge of D1 & E2 is very similar to D1 & E1 -- it has similar issues for
+# path 'a', but should always result in a modify/delete conflict for path
+# 'a/file'. These tests ran the two merges
+# D1 & E1
+# D1 & E2
+# in both directions, to check for directional issues with D/F conflict
+# handling. Later we added
+# D1 & E3
+# D1 & E4
+# D2 & E4
+# for good measure, though we only ran those one way because we had pretty
+# good confidence in merge-recursive's directional handling of D/F issues.
+#
+# Just to summarize all the intermediate merge commits:
+# Commit D1: Merge B & C, keeping a and deleting a/
+# Commit D2: Merge B & C, renaming a->a2, keeping a/file
+# Commit E1: Merge B & C, deleting a but keeping a/file
+# Commit E2: Merge B & C, deleting a but keeping slightly modified a/file
+# Commit E3: Merge B & C, keeping modified a, and deleting a/
+# Commit E4: Merge B & C, modifying 'a' and renaming to 'a2', and deleting 'a/'
#
test_expect_success 'setup differently handled merges of directory/file conflict' '
@@ -395,56 +452,70 @@ test_expect_success 'setup differently handled merges of directory/file conflict
git branch B &&
git checkout -b C &&
mkdir a &&
- echo 10 >a/file &&
+ test_write_lines a b c d e f g >a/file &&
git add a/file &&
test_tick &&
git commit -m C &&
git checkout B &&
- echo 5 >a &&
+ test_write_lines 1 2 3 4 5 6 7 >a &&
git add a &&
test_tick &&
git commit -m B &&
git checkout B^0 &&
- test_must_fail git merge C &&
- git clean -f &&
- rm -rf a/ &&
- echo 5 >a &&
- git add a &&
- test_tick &&
- git commit -m D &&
- git tag D &&
+ git merge -s ours -m D1 C^0 &&
+ git tag D1 &&
+
+ git checkout B^0 &&
+ test_must_fail git merge C^0 &&
+ git clean -fd &&
+ git rm -rf a/ &&
+ git rm a &&
+ git cat-file -p B:a >a2 &&
+ git add a2 &&
+ git commit -m D2 &&
+ git tag D2 &&
git checkout C^0 &&
- test_must_fail git merge B &&
- git clean -f &&
- git rm --cached a &&
- echo 10 >a/file &&
- git add a/file &&
- test_tick &&
- git commit -m E1 &&
+ git merge -s ours -m E1 B^0 &&
git tag E1 &&
git checkout C^0 &&
- test_must_fail git merge B &&
- git clean -f &&
- git rm --cached a &&
- printf "10\n11\n" >a/file &&
+ git merge -s ours -m E2 B^0 &&
+ test_write_lines a b c d e f g h >a/file &&
git add a/file &&
- test_tick &&
- git commit -m E2 &&
- git tag E2
+ git commit --amend -C HEAD &&
+ git tag E2 &&
+
+ git checkout C^0 &&
+ test_must_fail git merge B^0 &&
+ git clean -fd &&
+ git rm -rf a/ &&
+ test_write_lines 1 2 3 4 5 6 7 8 >a &&
+ git add a &&
+ git commit -m E3 &&
+ git tag E3 &&
+
+ git checkout C^0 &&
+ test_must_fail git merge B^0 &&
+ git clean -fd &&
+ git rm -rf a/ &&
+ git rm a &&
+ test_write_lines 1 2 3 4 5 6 7 8 >a2 &&
+ git add a2 &&
+ git commit -m E4 &&
+ git tag E4
)
'
-test_expect_success 'merge of D & E1 fails but has appropriate contents' '
+test_expect_success 'merge of D1 & E1 fails but has appropriate contents' '
test_when_finished "git -C directory-file reset --hard" &&
test_when_finished "git -C directory-file clean -fdqx" &&
(
cd directory-file &&
- git checkout D^0 &&
+ git checkout D1^0 &&
test_must_fail git merge -s recursive E1^0 &&
@@ -463,7 +534,7 @@ test_expect_success 'merge of D & E1 fails but has appropriate contents' '
)
'
-test_expect_success 'merge of E1 & D fails but has appropriate contents' '
+test_expect_success 'merge of E1 & D1 fails but has appropriate contents' '
test_when_finished "git -C directory-file reset --hard" &&
test_when_finished "git -C directory-file clean -fdqx" &&
(
@@ -471,7 +542,7 @@ test_expect_success 'merge of E1 & D fails but has appropriate contents' '
git checkout E1^0 &&
- test_must_fail git merge -s recursive D^0 &&
+ test_must_fail git merge -s recursive D1^0 &&
git ls-files -s >out &&
test_line_count = 2 out &&
@@ -488,13 +559,13 @@ test_expect_success 'merge of E1 & D fails but has appropriate contents' '
)
'
-test_expect_success 'merge of D & E2 fails but has appropriate contents' '
+test_expect_success 'merge of D1 & E2 fails but has appropriate contents' '
test_when_finished "git -C directory-file reset --hard" &&
test_when_finished "git -C directory-file clean -fdqx" &&
(
cd directory-file &&
- git checkout D^0 &&
+ git checkout D1^0 &&
test_must_fail git merge -s recursive E2^0 &&
@@ -506,16 +577,16 @@ test_expect_success 'merge of D & E2 fails but has appropriate contents' '
test_line_count = 2 out &&
git rev-parse >expect \
- B:a E2:a/file c:a/file A:ignore-me &&
+ B:a E2:a/file C:a/file A:ignore-me &&
git rev-parse >actual \
:2:a :3:a/file :1:a/file :0:ignore-me &&
- test_cmp expect actual
+ test_cmp expect actual &&
test_path_is_file a~HEAD
)
'
-test_expect_success 'merge of E2 & D fails but has appropriate contents' '
+test_expect_success 'merge of E2 & D1 fails but has appropriate contents' '
test_when_finished "git -C directory-file reset --hard" &&
test_when_finished "git -C directory-file clean -fdqx" &&
(
@@ -523,7 +594,7 @@ test_expect_success 'merge of E2 & D fails but has appropriate contents' '
git checkout E2^0 &&
- test_must_fail git merge -s recursive D^0 &&
+ test_must_fail git merge -s recursive D1^0 &&
git ls-files -s >out &&
test_line_count = 4 out &&
@@ -533,12 +604,87 @@ test_expect_success 'merge of E2 & D fails but has appropriate contents' '
test_line_count = 2 out &&
git rev-parse >expect \
- B:a E2:a/file c:a/file A:ignore-me &&
+ B:a E2:a/file C:a/file A:ignore-me &&
git rev-parse >actual \
:3:a :2:a/file :1:a/file :0:ignore-me &&
+ test_cmp expect actual &&
+
+ test_path_is_file a~D1^0
+ )
+'
+
+test_expect_success 'merge of D1 & E3 succeeds' '
+ test_when_finished "git -C directory-file reset --hard" &&
+ test_when_finished "git -C directory-file clean -fdqx" &&
+ (
+ cd directory-file &&
+
+ git checkout D1^0 &&
+
+ git merge -s recursive E3^0 &&
+
+ git ls-files -s >out &&
+ test_line_count = 2 out &&
+ git ls-files -u >out &&
+ test_line_count = 0 out &&
+ git ls-files -o >out &&
+ test_line_count = 1 out &&
+
+ git rev-parse >expect \
+ A:ignore-me E3:a &&
+ git rev-parse >actual \
+ :0:ignore-me :0:a &&
test_cmp expect actual
+ )
+'
- test_path_is_file a~D^0
+test_expect_success 'merge of D1 & E4 notifies user a and a2 are related' '
+ test_when_finished "git -C directory-file reset --hard" &&
+ test_when_finished "git -C directory-file clean -fdqx" &&
+ (
+ cd directory-file &&
+
+ git checkout D1^0 &&
+
+ test_must_fail git merge -s recursive E4^0 &&
+
+ git ls-files -s >out &&
+ test_line_count = 4 out &&
+ git ls-files -u >out &&
+ test_line_count = 3 out &&
+ git ls-files -o >out &&
+ test_line_count = 1 out &&
+
+ git rev-parse >expect \
+ A:ignore-me B:a D1:a E4:a2 &&
+ git rev-parse >actual \
+ :0:ignore-me :1:a~Temporary\ merge\ branch\ 2 :2:a :3:a2 &&
+ test_cmp expect actual
+ )
+'
+
+test_expect_failure 'merge of D2 & E4 merges a2s & reports conflict for a/file' '
+ test_when_finished "git -C directory-file reset --hard" &&
+ test_when_finished "git -C directory-file clean -fdqx" &&
+ (
+ cd directory-file &&
+
+ git checkout D2^0 &&
+
+ test_must_fail git merge -s recursive E4^0 &&
+
+ git ls-files -s >out &&
+ test_line_count = 3 out &&
+ git ls-files -u >out &&
+ test_line_count = 1 out &&
+ git ls-files -o >out &&
+ test_line_count = 1 out &&
+
+ git rev-parse >expect \
+ A:ignore-me E4:a2 D2:a/file &&
+ git rev-parse >actual \
+ :0:ignore-me :0:a2 :2:a/file &&
+ test_cmp expect actual
)
'
@@ -805,4 +951,455 @@ test_expect_success 'virtual merge base handles rename/rename(1to2)/add-dest' '
)
'
+#
+# criss-cross with modify/modify on a symlink:
+#
+# B D
+# o---o
+# / \ / \
+# A o X ? F
+# \ / \ /
+# o---o
+# C E
+#
+# Commit A: simple symlink fickle->lagoon
+# Commit B: redirect fickle->disneyland
+# Commit C: redirect fickle->home
+# Commit D: merge B&C, resolving in favor of B
+# Commit E: merge B&C, resolving in favor of C
+#
+# This is an obvious modify/modify conflict for the symlink 'fickle'. Can
+# git detect it?
+
+test_expect_success 'setup symlink modify/modify' '
+ test_create_repo symlink-modify-modify &&
+ (
+ cd symlink-modify-modify &&
+
+ test_ln_s_add lagoon fickle &&
+ git commit -m A &&
+ git tag A &&
+
+ git checkout -b B A &&
+ git rm fickle &&
+ test_ln_s_add disneyland fickle &&
+ git commit -m B &&
+
+ git checkout -b C A &&
+ git rm fickle &&
+ test_ln_s_add home fickle &&
+ git add fickle &&
+ git commit -m C &&
+
+ git checkout -q B^0 &&
+ git merge -s ours -m D C^0 &&
+ git tag D &&
+
+ git checkout -q C^0 &&
+ git merge -s ours -m E B^0 &&
+ git tag E
+ )
+'
+
+test_expect_failure 'check symlink modify/modify' '
+ (
+ cd symlink-modify-modify &&
+
+ git checkout D^0 &&
+
+ test_must_fail git merge -s recursive E^0 &&
+
+ git ls-files -s >out &&
+ test_line_count = 3 out &&
+ git ls-files -u >out &&
+ test_line_count = 3 out &&
+ git ls-files -o >out &&
+ test_line_count = 1 out
+ )
+'
+
+#
+# criss-cross with add/add of a symlink:
+#
+# B D
+# o---o
+# / \ / \
+# A o X ? F
+# \ / \ /
+# o---o
+# C E
+#
+# Commit A: No symlink or path exists yet
+# Commit B: set up symlink: fickle->disneyland
+# Commit C: set up symlink: fickle->home
+# Commit D: merge B&C, resolving in favor of B
+# Commit E: merge B&C, resolving in favor of C
+#
+# This is an obvious add/add conflict for the symlink 'fickle'. Can
+# git detect it?
+
+test_expect_success 'setup symlink add/add' '
+ test_create_repo symlink-add-add &&
+ (
+ cd symlink-add-add &&
+
+ touch ignoreme &&
+ git add ignoreme &&
+ git commit -m A &&
+ git tag A &&
+
+ git checkout -b B A &&
+ test_ln_s_add disneyland fickle &&
+ git commit -m B &&
+
+ git checkout -b C A &&
+ test_ln_s_add home fickle &&
+ git add fickle &&
+ git commit -m C &&
+
+ git checkout -q B^0 &&
+ git merge -s ours -m D C^0 &&
+ git tag D &&
+
+ git checkout -q C^0 &&
+ git merge -s ours -m E B^0 &&
+ git tag E
+ )
+'
+
+test_expect_failure 'check symlink add/add' '
+ (
+ cd symlink-add-add &&
+
+ git checkout D^0 &&
+
+ test_must_fail git merge -s recursive E^0 &&
+
+ git ls-files -s >out &&
+ test_line_count = 2 out &&
+ git ls-files -u >out &&
+ test_line_count = 2 out &&
+ git ls-files -o >out &&
+ test_line_count = 1 out
+ )
+'
+
+#
+# criss-cross with modify/modify on a submodule:
+#
+# B D
+# o---o
+# / \ / \
+# A o X ? F
+# \ / \ /
+# o---o
+# C E
+#
+# Commit A: simple submodule repo
+# Commit B: update repo
+# Commit C: update repo differently
+# Commit D: merge B&C, resolving in favor of B
+# Commit E: merge B&C, resolving in favor of C
+#
+# This is an obvious modify/modify conflict for the submodule 'repo'. Can
+# git detect it?
+
+test_expect_success 'setup submodule modify/modify' '
+ test_create_repo submodule-modify-modify &&
+ (
+ cd submodule-modify-modify &&
+
+ test_create_repo submod &&
+ (
+ cd submod &&
+ touch file-A &&
+ git add file-A &&
+ git commit -m A &&
+ git tag A &&
+
+ git checkout -b B A &&
+ touch file-B &&
+ git add file-B &&
+ git commit -m B &&
+ git tag B &&
+
+ git checkout -b C A &&
+ touch file-C &&
+ git add file-C &&
+ git commit -m C &&
+ git tag C
+ ) &&
+
+ git -C submod reset --hard A &&
+ git add submod &&
+ git commit -m A &&
+ git tag A &&
+
+ git checkout -b B A &&
+ git -C submod reset --hard B &&
+ git add submod &&
+ git commit -m B &&
+
+ git checkout -b C A &&
+ git -C submod reset --hard C &&
+ git add submod &&
+ git commit -m C &&
+
+ git checkout -q B^0 &&
+ git merge -s ours -m D C^0 &&
+ git tag D &&
+
+ git checkout -q C^0 &&
+ git merge -s ours -m E B^0 &&
+ git tag E
+ )
+'
+
+test_expect_failure 'check submodule modify/modify' '
+ (
+ cd submodule-modify-modify &&
+
+ git checkout D^0 &&
+
+ test_must_fail git merge -s recursive E^0 &&
+
+ git ls-files -s >out &&
+ test_line_count = 3 out &&
+ git ls-files -u >out &&
+ test_line_count = 3 out &&
+ git ls-files -o >out &&
+ test_line_count = 1 out
+ )
+'
+
+#
+# criss-cross with add/add on a submodule:
+#
+# B D
+# o---o
+# / \ / \
+# A o X ? F
+# \ / \ /
+# o---o
+# C E
+#
+# Commit A: nothing of note
+# Commit B: introduce submodule repo
+# Commit C: introduce submodule repo at different commit
+# Commit D: merge B&C, resolving in favor of B
+# Commit E: merge B&C, resolving in favor of C
+#
+# This is an obvious add/add conflict for the submodule 'repo'. Can
+# git detect it?
+
+test_expect_success 'setup submodule add/add' '
+ test_create_repo submodule-add-add &&
+ (
+ cd submodule-add-add &&
+
+ test_create_repo submod &&
+ (
+ cd submod &&
+ touch file-A &&
+ git add file-A &&
+ git commit -m A &&
+ git tag A &&
+
+ git checkout -b B A &&
+ touch file-B &&
+ git add file-B &&
+ git commit -m B &&
+ git tag B &&
+
+ git checkout -b C A &&
+ touch file-C &&
+ git add file-C &&
+ git commit -m C &&
+ git tag C
+ ) &&
+
+ touch irrelevant-file &&
+ git add irrelevant-file &&
+ git commit -m A &&
+ git tag A &&
+
+ git checkout -b B A &&
+ git -C submod reset --hard B &&
+ git add submod &&
+ git commit -m B &&
+
+ git checkout -b C A &&
+ git -C submod reset --hard C &&
+ git add submod &&
+ git commit -m C &&
+
+ git checkout -q B^0 &&
+ git merge -s ours -m D C^0 &&
+ git tag D &&
+
+ git checkout -q C^0 &&
+ git merge -s ours -m E B^0 &&
+ git tag E
+ )
+'
+
+test_expect_failure 'check submodule add/add' '
+ (
+ cd submodule-add-add &&
+
+ git checkout D^0 &&
+
+ test_must_fail git merge -s recursive E^0 &&
+
+ git ls-files -s >out &&
+ test_line_count = 3 out &&
+ git ls-files -u >out &&
+ test_line_count = 2 out &&
+ git ls-files -o >out &&
+ test_line_count = 1 out
+ )
+'
+
+#
+# criss-cross with conflicting entry types:
+#
+# B D
+# o---o
+# / \ / \
+# A o X ? F
+# \ / \ /
+# o---o
+# C E
+#
+# Commit A: nothing of note
+# Commit B: introduce submodule 'path'
+# Commit C: introduce symlink 'path'
+# Commit D: merge B&C, resolving in favor of B
+# Commit E: merge B&C, resolving in favor of C
+#
+# This is an obvious add/add conflict for 'path'. Can git detect it?
+
+test_expect_success 'setup conflicting entry types (submodule vs symlink)' '
+ test_create_repo submodule-symlink-add-add &&
+ (
+ cd submodule-symlink-add-add &&
+
+ test_create_repo path &&
+ (
+ cd path &&
+ touch file-B &&
+ git add file-B &&
+ git commit -m B &&
+ git tag B
+ ) &&
+
+ touch irrelevant-file &&
+ git add irrelevant-file &&
+ git commit -m A &&
+ git tag A &&
+
+ git checkout -b B A &&
+ git -C path reset --hard B &&
+ git add path &&
+ git commit -m B &&
+
+ git checkout -b C A &&
+ rm -rf path/ &&
+ test_ln_s_add irrelevant-file path &&
+ git commit -m C &&
+
+ git checkout -q B^0 &&
+ git merge -s ours -m D C^0 &&
+ git tag D &&
+
+ git checkout -q C^0 &&
+ git merge -s ours -m E B^0 &&
+ git tag E
+ )
+'
+
+test_expect_failure 'check conflicting entry types (submodule vs symlink)' '
+ (
+ cd submodule-symlink-add-add &&
+
+ git checkout D^0 &&
+
+ test_must_fail git merge -s recursive E^0 &&
+
+ git ls-files -s >out &&
+ test_line_count = 3 out &&
+ git ls-files -u >out &&
+ test_line_count = 2 out &&
+ git ls-files -o >out &&
+ test_line_count = 1 out
+ )
+'
+
+#
+# criss-cross with regular files that have conflicting modes:
+#
+# B D
+# o---o
+# / \ / \
+# A o X ? F
+# \ / \ /
+# o---o
+# C E
+#
+# Commit A: nothing of note
+# Commit B: introduce file source_me.bash, not executable
+# Commit C: introduce file source_me.bash, executable
+# Commit D: merge B&C, resolving in favor of B
+# Commit E: merge B&C, resolving in favor of C
+#
+# This is an obvious add/add mode conflict. Can git detect it?
+
+test_expect_success 'setup conflicting modes for regular file' '
+ test_create_repo regular-file-mode-conflict &&
+ (
+ cd regular-file-mode-conflict &&
+
+ touch irrelevant-file &&
+ git add irrelevant-file &&
+ git commit -m A &&
+ git tag A &&
+
+ git checkout -b B A &&
+ echo "command_to_run" >source_me.bash &&
+ git add source_me.bash &&
+ git commit -m B &&
+
+ git checkout -b C A &&
+ echo "command_to_run" >source_me.bash &&
+ git add source_me.bash &&
+ test_chmod +x source_me.bash &&
+ git commit -m C &&
+
+ git checkout -q B^0 &&
+ git merge -s ours -m D C^0 &&
+ git tag D &&
+
+ git checkout -q C^0 &&
+ git merge -s ours -m E B^0 &&
+ git tag E
+ )
+'
+
+test_expect_failure 'check conflicting modes for regular file' '
+ (
+ cd regular-file-mode-conflict &&
+
+ git checkout D^0 &&
+
+ test_must_fail git merge -s recursive E^0 &&
+
+ git ls-files -s >out &&
+ test_line_count = 3 out &&
+ git ls-files -u >out &&
+ test_line_count = 2 out &&
+ git ls-files -o >out &&
+ test_line_count = 1 out
+ )
+'
+
test_done
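
The checks above lean heavily on the ":<stage>:<path>" notation: stage 1 is the merge base, stage 2 is "ours", stage 3 is "theirs". When poking at one of these conflicted states by hand, the unmerged entries can be inspected directly, e.g.:

    git ls-files -u       # list unmerged index entries with their stages
    git rev-parse :2:a    # object name of "our" version of path a
    git cat-file -p :3:a  # contents of "their" version of path a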
diff --git a/t/t6042-merge-rename-corner-cases.sh b/t/t6042-merge-rename-corner-cases.sh
index 1cbd946..b97aca7 100755
--- a/t/t6042-merge-rename-corner-cases.sh
+++ b/t/t6042-merge-rename-corner-cases.sh
@@ -324,7 +324,6 @@ test_expect_success 'rename/directory conflict + content merge conflict' '
cd rename-directory-1 &&
git reset --hard &&
- git reset --hard &&
git clean -fdqx &&
git checkout left-conflict^0 &&
@@ -352,7 +351,7 @@ test_expect_success 'rename/directory conflict + content merge conflict' '
base:file left-conflict:newfile right:file &&
git rev-parse >actual \
:1:newfile :2:newfile :3:newfile &&
- test_cmp expect actual
+ test_cmp expect actual &&
test_path_is_file newfile/realfile &&
test_path_is_file newfile~HEAD
@@ -580,7 +579,7 @@ test_expect_failure 'detect conflict with rename/rename(1to2)/add-source merge'
C:a A:a B:b C:C &&
git rev-parse >actual \
:3:a :1:a :2:b :3:c &&
- test_cmp expect actual
+ test_cmp expect actual &&
test_path_is_file a &&
test_path_is_file b &&
@@ -680,17 +679,262 @@ test_expect_success 'rename/rename/add-dest merge still knows about conflicting
A:a C:b B:b C:c B:c &&
git rev-parse >actual \
:1:a :2:b :3:b :2:c :3:c &&
- test_cmp expect actual
+ test_cmp expect actual &&
git rev-parse >expect \
C:c B:c C:b B:b &&
git hash-object >actual \
c~HEAD c~B\^0 b~HEAD b~B\^0 &&
- test_cmp expect actual
+ test_cmp expect actual &&
test_path_is_missing b &&
test_path_is_missing c
)
'
+# Testcase rad, rename/add/delete
+# Commit O: foo
+# Commit A: rm foo, add different bar
+# Commit B: rename foo->bar
+# Expected: CONFLICT (rename/add/delete), two-way merged bar
+
+test_expect_success 'rad-setup: rename/add/delete conflict' '
+ test_create_repo rad &&
+ (
+ cd rad &&
+ echo "original file" >foo &&
+ git add foo &&
+ git commit -m "original" &&
+
+ git branch O &&
+ git branch A &&
+ git branch B &&
+
+ git checkout A &&
+ git rm foo &&
+ echo "different file" >bar &&
+ git add bar &&
+ git commit -m "Remove foo, add bar" &&
+
+ git checkout B &&
+ git mv foo bar &&
+ git commit -m "rename foo to bar"
+ )
+'
+
+test_expect_failure 'rad-check: rename/add/delete conflict' '
+ (
+ cd rad &&
+
+ git checkout B^0 &&
+ test_must_fail git merge -s recursive A^0 >out 2>err &&
+
+ # Not sure whether the output should contain just one
+ # "CONFLICT (rename/add/delete)" line, or if it should break
+ # it into a pair of "CONFLICT (rename/delete)" and
+ # "CONFLICT (rename/add)"; allow for either.
+ test_i18ngrep "CONFLICT (rename.*add)" out &&
+ test_i18ngrep "CONFLICT (rename.*delete)" out &&
+ test_must_be_empty err &&
+
+ git ls-files -s >file_count &&
+ test_line_count = 2 file_count &&
+ git ls-files -u >file_count &&
+ test_line_count = 2 file_count &&
+ git ls-files -o >file_count &&
+ test_line_count = 2 file_count &&
+
+ git rev-parse >actual \
+ :2:bar :3:bar &&
+ git rev-parse >expect \
+ B:bar A:bar &&
+ test_cmp expect actual &&
+ test_path_is_missing foo &&
+ # bar should have two-way merged contents of the different
+ # versions of bar; check that content from both sides is
+ # present.
+ grep original bar &&
+ grep different bar
+ )
+'
+
+# Testcase rrdd, rename/rename(2to1)/delete/delete
+# Commit O: foo, bar
+# Commit A: rename foo->baz, rm bar
+# Commit B: rename bar->baz, rm foo
+# Expected: CONFLICT (rename/rename/delete/delete), two-way merged baz
+
+test_expect_success 'rrdd-setup: rename/rename(2to1)/delete/delete conflict' '
+ test_create_repo rrdd &&
+ (
+ cd rrdd &&
+ echo foo >foo &&
+ echo bar >bar &&
+ git add foo bar &&
+ git commit -m O &&
+
+ git branch O &&
+ git branch A &&
+ git branch B &&
+
+ git checkout A &&
+ git mv foo baz &&
+ git rm bar &&
+ git commit -m "Rename foo, remove bar" &&
+
+ git checkout B &&
+ git mv bar baz &&
+ git rm foo &&
+ git commit -m "Rename bar, remove foo"
+ )
+'
+
+test_expect_failure 'rrdd-check: rename/rename(2to1)/delete/delete conflict' '
+ (
+ cd rrdd &&
+
+ git checkout A^0 &&
+ test_must_fail git merge -s recursive B^0 >out 2>err &&
+
+ # Not sure whether the output should contain just one
+ # "CONFLICT (rename/rename/delete/delete)" line, or if it
+ # should break it into three: "CONFLICT (rename/rename)" and
+ # two "CONFLICT (rename/delete)" lines; allow for either.
+ test_i18ngrep "CONFLICT (rename/rename)" out &&
+ test_i18ngrep "CONFLICT (rename.*delete)" out &&
+ test_must_be_empty err &&
+
+ git ls-files -s >file_count &&
+ test_line_count = 2 file_count &&
+ git ls-files -u >file_count &&
+ test_line_count = 2 file_count &&
+ git ls-files -o >file_count &&
+ test_line_count = 2 file_count &&
+
+ git rev-parse >actual \
+ :2:baz :3:baz &&
+ git rev-parse >expect \
+ O:foo O:bar &&
+ test_cmp expect actual &&
+ test_path_is_missing foo &&
+ test_path_is_missing bar &&
+ # baz should have two-way merged contents of the original
+ # contents of foo and bar; check that content from both sides
+ # is present.
+ grep foo baz &&
+ grep bar baz
+ )
+'
+
+# Testcase mod6, chains of rename/rename(1to2) and rename/rename(2to1)
+# Commit O: one, three, five
+# Commit A: one->two, three->four, five->six
+# Commit B: one->six, three->two, five->four
+# Expected: six CONFLICT(rename/rename) messages, each path in two of the
+# multi-way merged contents found in two, four, six
+
+test_expect_success 'mod6-setup: chains of rename/rename(1to2) and rename/rename(2to1)' '
+ test_create_repo mod6 &&
+ (
+ cd mod6 &&
+ test_seq 11 19 >one &&
+ test_seq 31 39 >three &&
+ test_seq 51 59 >five &&
+ git add . &&
+ test_tick &&
+ git commit -m "O" &&
+
+ git branch O &&
+ git branch A &&
+ git branch B &&
+
+ git checkout A &&
+ test_seq 10 19 >one &&
+ echo 40 >>three &&
+ git add one three &&
+ git mv one two &&
+ git mv three four &&
+ git mv five six &&
+ test_tick &&
+ git commit -m "A" &&
+
+ git checkout B &&
+ echo 20 >>one &&
+ echo forty >>three &&
+ echo 60 >>five &&
+ git add one three five &&
+ git mv one six &&
+ git mv three two &&
+ git mv five four &&
+ test_tick &&
+ git commit -m "B"
+ )
+'
+
+test_expect_failure 'mod6-check: chains of rename/rename(1to2) and rename/rename(2to1)' '
+ (
+ cd mod6 &&
+
+ git checkout A^0 &&
+
+ test_must_fail git merge -s recursive B^0 >out 2>err &&
+
+ test_i18ngrep "CONFLICT (rename/rename)" out &&
+ test_must_be_empty err &&
+
+ git ls-files -s >file_count &&
+ test_line_count = 6 file_count &&
+ git ls-files -u >file_count &&
+ test_line_count = 6 file_count &&
+ git ls-files -o >file_count &&
+ test_line_count = 3 file_count &&
+
+ test_seq 10 20 >merged-one &&
+ test_seq 51 60 >merged-five &&
+ # Determine what the merge of three would give us.
+ test_seq 31 40 >three-side-A &&
+ test_seq 31 39 >three-side-B &&
+ echo forty >>three-side-B &&
+ >empty &&
+ test_must_fail git merge-file \
+ -L "HEAD" \
+ -L "" \
+ -L "B^0" \
+ three-side-A empty three-side-B &&
+ sed -e "s/^\([<=>]\)/\1\1\1/" three-side-A >merged-three &&
+
+ # Verify the index is as expected
+ git rev-parse >actual \
+ :2:two :3:two \
+ :2:four :3:four \
+ :2:six :3:six &&
+ git hash-object >expect \
+ merged-one merged-three \
+ merged-three merged-five \
+ merged-five merged-one &&
+ test_cmp expect actual &&
+
+ git cat-file -p :2:two >expect &&
+ git cat-file -p :3:two >other &&
+ test_must_fail git merge-file \
+ -L "HEAD" -L "" -L "B^0" \
+ expect empty other &&
+ test_cmp expect two &&
+
+ git cat-file -p :2:four >expect &&
+ git cat-file -p :3:four >other &&
+ test_must_fail git merge-file \
+ -L "HEAD" -L "" -L "B^0" \
+ expect empty other &&
+ test_cmp expect four &&
+
+ git cat-file -p :2:six >expect &&
+ git cat-file -p :3:six >other &&
+ test_must_fail git merge-file \
+ -L "HEAD" -L "" -L "B^0" \
+ expect empty other &&
+ test_cmp expect six
+ )
+'
+
test_done
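
The mod6-check test reconstructs its expected conflicted contents with git merge-file, which performs a three-way file-level merge in place on its first argument and exits non-zero when conflicts remain (hence the test_must_fail wrappers). A minimal illustration with an empty base (file names made up for the example):

    printf "a\n" >ours &&
    : >base &&
    printf "b\n" >theirs &&
    git merge-file -L ours -L base -L theirs ours base theirs
    cat ours
    # <<<<<<< ours
    # a
    # =======
    # b
    # >>>>>>> theirs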
diff --git a/t/t6043-merge-rename-directories.sh b/t/t6043-merge-rename-directories.sh
index 2e28f29..4a71f17 100755
--- a/t/t6043-merge-rename-directories.sh
+++ b/t/t6043-merge-rename-directories.sh
@@ -3583,7 +3583,7 @@ test_expect_success '11d-check: Avoid losing not-uptodate with rename + D/F conf
grep -q stuff z/c &&
test_seq 1 10 >expected &&
echo stuff >>expected &&
- test_cmp expected z/c
+ test_cmp expected z/c &&
git ls-files -s >out &&
test_line_count = 4 out &&
diff --git a/t/t6044-merge-unrelated-index-changes.sh b/t/t6044-merge-unrelated-index-changes.sh
index 23b86fb..5e3779e 100755
--- a/t/t6044-merge-unrelated-index-changes.sh
+++ b/t/t6044-merge-unrelated-index-changes.sh
@@ -82,7 +82,8 @@ test_expect_success 'ff update, important file modified' '
touch subdir/e &&
git add subdir/e &&
- test_must_fail git merge E^0
+ test_must_fail git merge E^0 &&
+ test_path_is_missing .git/MERGE_HEAD
'
test_expect_success 'resolve, trivial' '
@@ -91,7 +92,8 @@ test_expect_success 'resolve, trivial' '
touch random_file && git add random_file &&
- test_must_fail git merge -s resolve C^0
+ test_must_fail git merge -s resolve C^0 &&
+ test_path_is_missing .git/MERGE_HEAD
'
test_expect_success 'resolve, non-trivial' '
@@ -100,7 +102,8 @@ test_expect_success 'resolve, non-trivial' '
touch random_file && git add random_file &&
- test_must_fail git merge -s resolve D^0
+ test_must_fail git merge -s resolve D^0 &&
+ test_path_is_missing .git/MERGE_HEAD
'
test_expect_success 'recursive' '
@@ -109,7 +112,8 @@ test_expect_success 'recursive' '
touch random_file && git add random_file &&
- test_must_fail git merge -s recursive C^0
+ test_must_fail git merge -s recursive C^0 &&
+ test_path_is_missing .git/MERGE_HEAD
'
test_expect_success 'recursive, when merge branch matches merge base' '
@@ -118,7 +122,45 @@ test_expect_success 'recursive, when merge branch matches merge base' '
touch random_file && git add random_file &&
- test_must_fail git merge -s recursive F^0
+ test_must_fail git merge -s recursive F^0 &&
+ test_path_is_missing .git/MERGE_HEAD
+'
+
+test_expect_success 'merge-recursive, when index==head but head!=HEAD' '
+ git reset --hard &&
+ git checkout C^0 &&
+
+ # Make index match B
+ git diff C B -- | git apply --cached &&
+ # Merge B & F, with B as "head"
+ git merge-recursive A -- B F > out &&
+ test_i18ngrep "Already up to date" out
+'
+
+test_expect_success 'recursive, when file has staged changes not matching HEAD nor what a merge would give' '
+ git reset --hard &&
+ git checkout B^0 &&
+
+ mkdir subdir &&
+ test_seq 1 10 >subdir/a &&
+ git add subdir/a &&
+
+ # We have staged changes; merge should error out
+ test_must_fail git merge -s recursive E^0 2>err &&
+ test_i18ngrep "changes to the following files would be overwritten" err
+'
+
+test_expect_success 'recursive, when file has staged changes matching what a merge would give' '
+ git reset --hard &&
+ git checkout B^0 &&
+
+ mkdir subdir &&
+ test_seq 1 11 >subdir/a &&
+ git add subdir/a &&
+
+ # We have staged changes; merge should error out
+ test_must_fail git merge -s recursive E^0 2>err &&
+ test_i18ngrep "changes to the following files would be overwritten" err
'
test_expect_success 'octopus, unrelated file touched' '
@@ -127,7 +169,8 @@ test_expect_success 'octopus, unrelated file touched' '
touch random_file && git add random_file &&
- test_must_fail git merge C^0 D^0
+ test_must_fail git merge C^0 D^0 &&
+ test_path_is_missing .git/MERGE_HEAD
'
test_expect_success 'octopus, related file removed' '
@@ -136,7 +179,8 @@ test_expect_success 'octopus, related file removed' '
git rm b &&
- test_must_fail git merge C^0 D^0
+ test_must_fail git merge C^0 D^0 &&
+ test_path_is_missing .git/MERGE_HEAD
'
test_expect_success 'octopus, related file modified' '
@@ -145,7 +189,8 @@ test_expect_success 'octopus, related file modified' '
echo 12 >>a && git add a &&
- test_must_fail git merge C^0 D^0
+ test_must_fail git merge C^0 D^0 &&
+ test_path_is_missing .git/MERGE_HEAD
'
test_expect_success 'ours' '
@@ -154,7 +199,8 @@ test_expect_success 'ours' '
touch random_file && git add random_file &&
- test_must_fail git merge -s ours C^0
+ test_must_fail git merge -s ours C^0 &&
+ test_path_is_missing .git/MERGE_HEAD
'
test_expect_success 'subtree' '
@@ -163,7 +209,8 @@ test_expect_success 'subtree' '
touch random_file && git add random_file &&
- test_must_fail git merge -s subtree E^0
+ test_must_fail git merge -s subtree E^0 &&
+ test_path_is_missing .git/MERGE_HEAD
'
test_done
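
The new test_path_is_missing checks assert that a refused merge leaves no merge state behind; .git/MERGE_HEAD is the same file that "git merge --abort" looks for. A quick manual check for the same condition (a sketch, not part of the test):

    if test -f "$(git rev-parse --git-dir)/MERGE_HEAD"
    then
        echo "merge in progress"
    else
        echo "no merge in progress"
    fi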
diff --git a/t/t6050-replace.sh b/t/t6050-replace.sh
index aa3e249..86374a9 100755
--- a/t/t6050-replace.sh
+++ b/t/t6050-replace.sh
@@ -113,6 +113,12 @@ test_expect_success 'test GIT_NO_REPLACE_OBJECTS env variable' '
GIT_NO_REPLACE_OBJECTS=1 git show $HASH2 | grep "A U Thor"
'
+test_expect_success 'test core.usereplacerefs config option' '
+ test_config core.usereplacerefs false &&
+ git cat-file commit $HASH2 | grep "author A U Thor" &&
+ git show $HASH2 | grep "A U Thor"
+'
+
cat >tag.sig <<EOF
object $HASH2
type commit
diff --git a/t/t7001-mv.sh b/t/t7001-mv.sh
index cc3fd2b..9e59e5a 100755
--- a/t/t7001-mv.sh
+++ b/t/t7001-mv.sh
@@ -509,7 +509,7 @@ test_expect_success 'moving nested submodules' '
touch nested_level1 &&
git init &&
git add . &&
- git commit -m "nested level 1"
+ git commit -m "nested level 1" &&
git submodule add ../sub_nested_nested &&
git commit -m "add nested level 2"
) &&
diff --git a/t/t7004-tag.sh b/t/t7004-tag.sh
index 93a6694..465eb4e 100755
--- a/t/t7004-tag.sh
+++ b/t/t7004-tag.sh
@@ -1352,6 +1352,19 @@ test_expect_success GPG \
'test_config gpg.program echo &&
test_must_fail git tag -s -m tail tag-gpg-failure'
+# try to sign with bad user.signingkey
+test_expect_success GPGSM \
+ 'git tag -s fails if gpgsm is misconfigured (bad key)' \
+ 'test_config user.signingkey BobTheMouse &&
+ test_config gpg.format x509 &&
+ test_must_fail git tag -s -m tail tag-gpg-failure'
+
+# try to produce invalid signature
+test_expect_success GPGSM \
+ 'git tag -s fails if gpgsm is misconfigured (bad signature format)' \
+ 'test_config gpg.x509.program echo &&
+ test_config gpg.format x509 &&
+ test_must_fail git tag -s -m tail tag-gpg-failure'
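Both new tag tests rely on the GPGSM prerequisite and the X.509 signing configuration introduced by this series. A minimal configuration sketch for signing with gpgsm; the signing key value is whatever certificate the local gpgsm instance knows, and the e-mail below is only a placeholder:

    git config gpg.format x509
    git config user.signingkey committer@example.com
    git config gpg.x509.program gpgsm    # gpgsm is also the default program for x509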
# try to verify without gpg:
diff --git a/t/t7030-verify-tag.sh b/t/t7030-verify-tag.sh
index 1f06871..041e319 100755
--- a/t/t7030-verify-tag.sh
+++ b/t/t7030-verify-tag.sh
@@ -41,6 +41,13 @@ test_expect_success GPG 'create signed tags' '
git tag -uB7227189 -m eighth eighth-signed-alt
'
+test_expect_success GPGSM 'create signed tags x509 ' '
+ test_config gpg.format x509 &&
+ test_config user.signingkey $GIT_COMMITTER_EMAIL &&
+ echo 9 >file && test_tick && git commit -a -m "nineth gpgsm-signed" &&
+ git tag -s -m nineth nineth-signed-x509
+'
+
test_expect_success GPG 'verify and show signatures' '
(
for tag in initial second merge fourth-signed sixth-signed seventh-signed
@@ -72,6 +79,13 @@ test_expect_success GPG 'verify and show signatures' '
)
'
+test_expect_success GPGSM 'verify and show signatures x509' '
+ git verify-tag nineth-signed-x509 2>actual &&
+ grep "Good signature from" actual &&
+ ! grep "BAD signature from" actual &&
+ echo nineth-signed-x509 OK
+'
+
test_expect_success GPG 'detect fudged signature' '
git cat-file tag seventh-signed >raw &&
sed -e "/^tag / s/seventh/7th forged/" raw >forged1 &&
@@ -112,6 +126,13 @@ test_expect_success GPG 'verify signatures with --raw' '
)
'
+test_expect_success GPGSM 'verify signatures with --raw x509' '
+ git verify-tag --raw nineth-signed-x509 2>actual &&
+ grep "GOODSIG" actual &&
+ ! grep "BADSIG" actual &&
+ echo nineth-signed-x509 OK
+'
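"verify-tag --raw" passes through the status-fd output of the signing backend instead of the human-readable report, which is why this test greps for GOODSIG/BADSIG while the earlier one greps for "Good signature from". Side by side, using the tag created above:

    git verify-tag nineth-signed-x509        # stderr: "Good signature from ..."
    git verify-tag --raw nineth-signed-x509  # stderr: "[GNUPG:] GOODSIG ..." status lines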
+
test_expect_success GPG 'verify multiple tags' '
tags="fourth-signed sixth-signed seventh-signed" &&
for i in $tags
@@ -125,6 +146,19 @@ test_expect_success GPG 'verify multiple tags' '
test_cmp expect.stderr actual.stderr
'
+test_expect_success GPGSM 'verify multiple tags x509' '
+ tags="seventh-signed nineth-signed-x509" &&
+ for i in $tags
+ do
+ git verify-tag -v --raw $i || return 1
+ done >expect.stdout 2>expect.stderr.1 &&
+ grep "^.GNUPG:." <expect.stderr.1 >expect.stderr &&
+ git verify-tag -v --raw $tags >actual.stdout 2>actual.stderr.1 &&
+ grep "^.GNUPG:." <actual.stderr.1 >actual.stderr &&
+ test_cmp expect.stdout actual.stdout &&
+ test_cmp expect.stderr actual.stderr
+'
+
test_expect_success GPG 'verifying tag with --format' '
cat >expect <<-\EOF &&
tagname : fourth-signed
diff --git a/t/t7063-status-untracked-cache.sh b/t/t7063-status-untracked-cache.sh
index 6d8256a..2da57fc 100755
--- a/t/t7063-status-untracked-cache.sh
+++ b/t/t7063-status-untracked-cache.sh
@@ -665,7 +665,7 @@ test_expect_success 'test ident field is working' '
mkdir ../other_worktree &&
cp -R done dthree dtwo four three ../other_worktree &&
GIT_WORK_TREE=../other_worktree git status 2>../err &&
- echo "warning: Untracked cache is disabled on this system or location." >../expect &&
+ echo "warning: untracked cache is disabled on this system or location" >../expect &&
test_i18ncmp ../expect ../err
'
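The expected warning is only reworded here (lower case, no trailing period) to match the usual message style; the condition that triggers it is unchanged. A hedged sketch of how the warning is provoked, mirroring the test and assuming the untracked cache has been enabled earlier in the script:

    git config core.untrackedCache true &&
    GIT_WORK_TREE=../other_worktree git status 2>err &&
    grep "untracked cache is disabled" err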
diff --git a/t/t7105-reset-patch.sh b/t/t7105-reset-patch.sh
index 98b7d7b..bd10a96 100755
--- a/t/t7105-reset-patch.sh
+++ b/t/t7105-reset-patch.sh
@@ -19,20 +19,20 @@ test_expect_success PERL 'setup' '
test_expect_success PERL 'saying "n" does nothing' '
set_and_save_state dir/foo work work &&
- (echo n; echo n) | git reset -p &&
+ test_write_lines n n | git reset -p &&
verify_saved_state dir/foo &&
verify_saved_state bar
'
test_expect_success PERL 'git reset -p' '
- (echo n; echo y) | git reset -p >output &&
+ test_write_lines n y | git reset -p >output &&
verify_state dir/foo work head &&
verify_saved_state bar &&
test_i18ngrep "Unstage" output
'
test_expect_success PERL 'git reset -p HEAD^' '
- (echo n; echo y) | git reset -p HEAD^ >output &&
+ test_write_lines n y | git reset -p HEAD^ >output &&
verify_state dir/foo work parent &&
verify_saved_state bar &&
test_i18ngrep "Apply" output
@@ -45,20 +45,20 @@ test_expect_success PERL 'git reset -p HEAD^' '
test_expect_success PERL 'git reset -p dir' '
set_state dir/foo work work &&
- (echo y; echo n) | git reset -p dir &&
+ test_write_lines y n | git reset -p dir &&
verify_state dir/foo work head &&
verify_saved_state bar
'
test_expect_success PERL 'git reset -p -- foo (inside dir)' '
set_state dir/foo work work &&
- (echo y; echo n) | (cd dir && git reset -p -- foo) &&
+ test_write_lines y n | (cd dir && git reset -p -- foo) &&
verify_state dir/foo work head &&
verify_saved_state bar
'
test_expect_success PERL 'git reset -p HEAD^ -- dir' '
- (echo y; echo n) | git reset -p HEAD^ -- dir &&
+ test_write_lines y n | git reset -p HEAD^ -- dir &&
verify_state dir/foo work parent &&
verify_saved_state bar
'
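The conversions in this file, and in several files below, replace "(echo a; echo b)" subshells with the test_write_lines helper, which prints each of its arguments on its own line and keeps the &&-chain intact. Equivalence sketch:

    # test_write_lines n y    produces the same input as    (echo n && echo y)
    test_write_lines n y | git reset -p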
diff --git a/t/t7201-co.sh b/t/t7201-co.sh
index ab9da61..94cb039 100755
--- a/t/t7201-co.sh
+++ b/t/t7201-co.sh
@@ -528,10 +528,10 @@ test_expect_success 'checkout with --merge' '
cat sample >filf &&
git checkout -m -- fild file filf &&
(
- echo "<<<<<<< ours"
- echo ourside
- echo "======="
- echo theirside
+ echo "<<<<<<< ours" &&
+ echo ourside &&
+ echo "=======" &&
+ echo theirside &&
echo ">>>>>>> theirs"
) >merged &&
test_cmp expect fild &&
@@ -549,12 +549,12 @@ test_expect_success 'checkout with --merge, in diff3 -m style' '
cat sample >filf &&
git checkout -m -- fild file filf &&
(
- echo "<<<<<<< ours"
- echo ourside
- echo "||||||| base"
- echo original
- echo "======="
- echo theirside
+ echo "<<<<<<< ours" &&
+ echo ourside &&
+ echo "||||||| base" &&
+ echo original &&
+ echo "=======" &&
+ echo theirside &&
echo ">>>>>>> theirs"
) >merged &&
test_cmp expect fild &&
@@ -572,10 +572,10 @@ test_expect_success 'checkout --conflict=merge, overriding config' '
cat sample >filf &&
git checkout --conflict=merge -- fild file filf &&
(
- echo "<<<<<<< ours"
- echo ourside
- echo "======="
- echo theirside
+ echo "<<<<<<< ours" &&
+ echo ourside &&
+ echo "=======" &&
+ echo theirside &&
echo ">>>>>>> theirs"
) >merged &&
test_cmp expect fild &&
@@ -593,12 +593,12 @@ test_expect_success 'checkout --conflict=diff3' '
cat sample >filf &&
git checkout --conflict=diff3 -- fild file filf &&
(
- echo "<<<<<<< ours"
- echo ourside
- echo "||||||| base"
- echo original
- echo "======="
- echo theirside
+ echo "<<<<<<< ours" &&
+ echo ourside &&
+ echo "||||||| base" &&
+ echo original &&
+ echo "=======" &&
+ echo theirside &&
echo ">>>>>>> theirs"
) >merged &&
test_cmp expect fild &&
@@ -673,7 +673,6 @@ test_expect_success 'custom merge driver with checkout -m' '
do
grep $t arm || exit 1
done
- exit 0
) &&
mv arm expect &&
diff --git a/t/t7301-clean-interactive.sh b/t/t7301-clean-interactive.sh
index 1bf9789..a07e8b8 100755
--- a/t/t7301-clean-interactive.sh
+++ b/t/t7301-clean-interactive.sh
@@ -107,7 +107,7 @@ test_expect_success 'git clean -id (filter all)' '
mkdir -p build docs &&
touch a.out src/part3.c src/part3.h src/part4.c src/part4.h \
docs/manual.txt obj.o build/lib.so &&
- (echo f; echo "*"; echo; echo c) | \
+ test_write_lines f "*" "" c |
git clean -id &&
test -f Makefile &&
test -f README &&
@@ -129,7 +129,7 @@ test_expect_success 'git clean -id (filter patterns)' '
mkdir -p build docs &&
touch a.out src/part3.c src/part3.h src/part4.c src/part4.h \
docs/manual.txt obj.o build/lib.so &&
- (echo f; echo "part3.* *.out"; echo; echo c) | \
+ test_write_lines f "part3.* *.out" "" c |
git clean -id &&
test -f Makefile &&
test -f README &&
@@ -151,7 +151,7 @@ test_expect_success 'git clean -id (filter patterns 2)' '
mkdir -p build docs &&
touch a.out src/part3.c src/part3.h src/part4.c src/part4.h \
docs/manual.txt obj.o build/lib.so &&
- (echo f; echo "* !*.out"; echo; echo c) | \
+ test_write_lines f "* !*.out" "" c |
git clean -id &&
test -f Makefile &&
test -f README &&
@@ -173,7 +173,7 @@ test_expect_success 'git clean -id (select - all)' '
mkdir -p build docs &&
touch a.out src/part3.c src/part3.h src/part4.c src/part4.h \
docs/manual.txt obj.o build/lib.so &&
- (echo s; echo "*"; echo; echo c) | \
+ test_write_lines s "*" "" c |
git clean -id &&
test -f Makefile &&
test -f README &&
@@ -195,7 +195,7 @@ test_expect_success 'git clean -id (select - none)' '
mkdir -p build docs &&
touch a.out src/part3.c src/part3.h src/part4.c src/part4.h \
docs/manual.txt obj.o build/lib.so &&
- (echo s; echo; echo c) | \
+ test_write_lines s "" c |
git clean -id &&
test -f Makefile &&
test -f README &&
@@ -217,7 +217,7 @@ test_expect_success 'git clean -id (select - number)' '
mkdir -p build docs &&
touch a.out src/part3.c src/part3.h src/part4.c src/part4.h \
docs/manual.txt obj.o build/lib.so &&
- (echo s; echo 3; echo; echo c) | \
+ test_write_lines s 3 "" c |
git clean -id &&
test -f Makefile &&
test -f README &&
@@ -239,7 +239,7 @@ test_expect_success 'git clean -id (select - number 2)' '
mkdir -p build docs &&
touch a.out src/part3.c src/part3.h src/part4.c src/part4.h \
docs/manual.txt obj.o build/lib.so &&
- (echo s; echo 2 3; echo 5; echo; echo c) | \
+ test_write_lines s "2 3" 5 "" c |
git clean -id &&
test -f Makefile &&
test -f README &&
@@ -261,7 +261,7 @@ test_expect_success 'git clean -id (select - number 3)' '
mkdir -p build docs &&
touch a.out src/part3.c src/part3.h src/part4.c src/part4.h \
docs/manual.txt obj.o build/lib.so &&
- (echo s; echo 3,4 5; echo; echo c) | \
+ test_write_lines s "3,4 5" "" c |
git clean -id &&
test -f Makefile &&
test -f README &&
@@ -282,7 +282,7 @@ test_expect_success 'git clean -id (select - filenames)' '
mkdir -p build docs &&
touch a.out foo.txt bar.txt baz.txt &&
- (echo s; echo a.out fo ba bar; echo; echo c) | \
+ test_write_lines s "a.out fo ba bar" "" c |
git clean -id &&
test -f Makefile &&
test ! -f a.out &&
@@ -298,7 +298,7 @@ test_expect_success 'git clean -id (select - range)' '
mkdir -p build docs &&
touch a.out src/part3.c src/part3.h src/part4.c src/part4.h \
docs/manual.txt obj.o build/lib.so &&
- (echo s; echo 1,3-4; echo 2; echo; echo c) | \
+ test_write_lines s "1,3-4" 2 "" c |
git clean -id &&
test -f Makefile &&
test -f README &&
@@ -320,7 +320,7 @@ test_expect_success 'git clean -id (select - range 2)' '
mkdir -p build docs &&
touch a.out src/part3.c src/part3.h src/part4.c src/part4.h \
docs/manual.txt obj.o build/lib.so &&
- (echo s; echo 4- 1; echo; echo c) | \
+ test_write_lines s "4- 1" "" c |
git clean -id &&
test -f Makefile &&
test -f README &&
@@ -342,7 +342,7 @@ test_expect_success 'git clean -id (inverse select)' '
mkdir -p build docs &&
touch a.out src/part3.c src/part3.h src/part4.c src/part4.h \
docs/manual.txt obj.o build/lib.so &&
- (echo s; echo "*"; echo -5- 1 -2; echo; echo c) | \
+ test_write_lines s "*" "-5- 1 -2" "" c |
git clean -id &&
test -f Makefile &&
test -f README &&
@@ -364,7 +364,7 @@ test_expect_success 'git clean -id (ask)' '
mkdir -p build docs &&
touch a.out src/part3.c src/part3.h src/part4.c src/part4.h \
docs/manual.txt obj.o build/lib.so &&
- (echo a; echo Y; echo y; echo no; echo yes; echo bad; echo) | \
+ test_write_lines a Y y no yes bad "" |
git clean -id &&
test -f Makefile &&
test -f README &&
@@ -386,7 +386,7 @@ test_expect_success 'git clean -id (ask - Ctrl+D)' '
mkdir -p build docs &&
touch a.out src/part3.c src/part3.h src/part4.c src/part4.h \
docs/manual.txt obj.o build/lib.so &&
- (echo a; echo Y; echo no; echo yes; echo "\04") | \
+ test_write_lines a Y no yes "\04" |
git clean -id &&
test -f Makefile &&
test -f README &&
@@ -408,8 +408,8 @@ test_expect_success 'git clean -id with prefix and path (filter)' '
mkdir -p build docs &&
touch a.out src/part3.c src/part3.h src/part4.c src/part4.h \
docs/manual.txt obj.o build/lib.so &&
- (cd build/ && \
- (echo f; echo "docs"; echo "*.h"; echo ; echo c) | \
+ (cd build/ &&
+ test_write_lines f docs "*.h" "" c |
git clean -id ..) &&
test -f Makefile &&
test -f README &&
@@ -431,9 +431,8 @@ test_expect_success 'git clean -id with prefix and path (select by name)' '
mkdir -p build docs &&
touch a.out src/part3.c src/part3.h src/part4.c src/part4.h \
docs/manual.txt obj.o build/lib.so &&
- (cd build/ && \
- (echo s; echo "../docs/"; echo "../src/part3.c"; \
- echo "../src/part4.c"; echo; echo c) | \
+ (cd build/ &&
+ test_write_lines s ../docs/ ../src/part3.c ../src/part4.c "" c |
git clean -id ..) &&
test -f Makefile &&
test -f README &&
@@ -455,8 +454,8 @@ test_expect_success 'git clean -id with prefix and path (ask)' '
mkdir -p build docs &&
touch a.out src/part3.c src/part3.h src/part4.c src/part4.h \
docs/manual.txt obj.o build/lib.so &&
- (cd build/ && \
- (echo a; echo Y; echo y; echo no; echo yes; echo bad; echo) | \
+ (cd build/ &&
+ test_write_lines a Y y no yes bad "" |
git clean -id ..) &&
test -f Makefile &&
test -f README &&
diff --git a/t/t7400-submodule-basic.sh b/t/t7400-submodule-basic.sh
index 48fd14f..2b71e62 100755
--- a/t/t7400-submodule-basic.sh
+++ b/t/t7400-submodule-basic.sh
@@ -171,12 +171,13 @@ test_expect_success 'submodule add to .gitignored path with --force' '
test_expect_success 'submodule add to reconfigure existing submodule with --force' '
(
cd addtest-ignore &&
- git submodule add --force bogus-url submod &&
- git submodule add -b initial "$submodurl" submod-branch &&
- test "bogus-url" = "$(git config -f .gitmodules submodule.submod.url)" &&
- test "bogus-url" = "$(git config submodule.submod.url)" &&
+ bogus_url="$(pwd)/bogus-url" &&
+ git submodule add --force "$bogus_url" submod &&
+ git submodule add --force -b initial "$submodurl" submod-branch &&
+ test "$bogus_url" = "$(git config -f .gitmodules submodule.submod.url)" &&
+ test "$bogus_url" = "$(git config submodule.submod.url)" &&
# Restore the url
- git submodule add --force "$submodurl" submod
+ git submodule add --force "$submodurl" submod &&
test "$submodurl" = "$(git config -f .gitmodules submodule.submod.url)" &&
test "$submodurl" = "$(git config submodule.submod.url)"
)
@@ -377,7 +378,7 @@ test_expect_success 'init should register submodule url in .git/config' '
test_failure_with_unknown_submodule () {
test_must_fail git submodule $1 no-such-submodule 2>output.err &&
- grep "^error: .*no-such-submodule" output.err
+ test_i18ngrep "^error: .*no-such-submodule" output.err
}
test_expect_success 'init should fail with unknown submodule' '
@@ -818,7 +819,7 @@ test_expect_success '../bar/a/b/c works with relative local path - ../foo/bar.gi
cp pristine-.git-config .git/config &&
cp pristine-.gitmodules .gitmodules &&
mkdir -p a/b/c &&
- (cd a/b/c; git init) &&
+ (cd a/b/c && git init) &&
git config remote.origin.url ../foo/bar.git &&
git submodule add ../bar/a/b/c ./a/b/c &&
git submodule init &&
diff --git a/t/t7405-submodule-merge.sh b/t/t7405-submodule-merge.sh
index 7bfb2f4..7855bd8 100755
--- a/t/t7405-submodule-merge.sh
+++ b/t/t7405-submodule-merge.sh
@@ -279,4 +279,177 @@ test_expect_success 'recursive merge with submodule' '
grep "$(cat expect3)" actual > /dev/null)
'
+# File/submodule conflict
+# Commit O: <empty>
+# Commit A: path (submodule)
+# Commit B: path
+# Expected: path/ is submodule and file contents for B's path are somewhere
+
+test_expect_success 'setup file/submodule conflict' '
+ test_create_repo file-submodule &&
+ (
+ cd file-submodule &&
+
+ git commit --allow-empty -m O &&
+
+ git branch A &&
+ git branch B &&
+
+ git checkout B &&
+ echo content >path &&
+ git add path &&
+ git commit -m B &&
+
+ git checkout A &&
+ test_create_repo path &&
+ test_commit -C path world &&
+ git submodule add ./path &&
+ git commit -m A
+ )
+'
+
+test_expect_failure 'file/submodule conflict' '
+ test_when_finished "git -C file-submodule reset --hard" &&
+ (
+ cd file-submodule &&
+
+ git checkout A^0 &&
+ test_must_fail git merge B^0 &&
+
+ git ls-files -s >out &&
+ test_line_count = 3 out &&
+ git ls-files -u >out &&
+ test_line_count = 2 out &&
+
+ # path/ is still a submodule
+ test_path_is_dir path/.git &&
+
+ # There is a submodule at "path", so B:path cannot be written
+ # there. We expect it to be written somewhere in the same
+ # directory, though, so just grep for its content in all
+ # files, and ignore "grep: path: Is a directory" message
+ echo Checking if contents from B:path showed up anywhere &&
+ grep -q content * 2>/dev/null
+ )
+'
+
+test_expect_success 'file/submodule conflict; merge --abort works afterward' '
+ test_when_finished "git -C file-submodule reset --hard" &&
+ (
+ cd file-submodule &&
+
+ git checkout A^0 &&
+ test_must_fail git merge B^0 >out 2>err &&
+
+ test_path_is_file .git/MERGE_HEAD &&
+ git merge --abort
+ )
+'
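The --abort test relies on the flip side of the MERGE_HEAD checks seen earlier: a merge that fails with conflicts, rather than being refused up front, must record MERGE_HEAD so it can be cleanly aborted. The pattern used here and in the directory/submodule tests below:

    test_must_fail git merge B^0 &&
    test_path_is_file .git/MERGE_HEAD &&
    git merge --abort &&
    test_path_is_missing .git/MERGE_HEAD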
+
+# Directory/submodule conflict
+# Commit O: <empty>
+# Commit A: path (submodule), with sole tracked file named 'world'
+# Commit B1: path/file
+# Commit B2: path/world
+#
+# Expected from merge of A & B1:
+# Contents under path/ from commit B1 are renamed elsewhere; we do not
+# want to write files from one of our tracked directories into a submodule
+#
+# Expected from merge of A & B2:
+# Similar to last merge, but with a slight twist: we don't want paths
+# under the submodule to be treated as untracked or in the way.
+
+test_expect_success 'setup directory/submodule conflict' '
+ test_create_repo directory-submodule &&
+ (
+ cd directory-submodule &&
+
+ git commit --allow-empty -m O &&
+
+ git branch A &&
+ git branch B1 &&
+ git branch B2 &&
+
+ git checkout B1 &&
+ mkdir path &&
+ echo contents >path/file &&
+ git add path/file &&
+ git commit -m B1 &&
+
+ git checkout B2 &&
+ mkdir path &&
+ echo contents >path/world &&
+ git add path/world &&
+ git commit -m B2 &&
+
+ git checkout A &&
+ test_create_repo path &&
+ test_commit -C path hello world &&
+ git submodule add ./path &&
+ git commit -m A
+ )
+'
+
+test_expect_failure 'directory/submodule conflict; keep submodule clean' '
+ test_when_finished "git -C directory-submodule reset --hard" &&
+ (
+ cd directory-submodule &&
+
+ git checkout A^0 &&
+ test_must_fail git merge B1^0 &&
+
+ git ls-files -s >out &&
+ test_line_count = 3 out &&
+ git ls-files -u >out &&
+ test_line_count = 1 out &&
+
+ # path/ is still a submodule
+ test_path_is_dir path/.git &&
+
+ echo Checking if contents from B1:path/file showed up &&
+ # Would rather use grep -r, but that is GNU extension...
+ git ls-files -co | xargs grep -q contents 2>/dev/null &&
+
+ # However, B1:path/file should NOT have shown up at path/file,
+ # because we should not write into the submodule
+ test_path_is_missing path/file
+ )
+'
+
+test_expect_failure 'directory/submodule conflict; should not treat submodule files as untracked or in the way' '
+ test_when_finished "git -C directory-submodule/path reset --hard" &&
+ test_when_finished "git -C directory-submodule reset --hard" &&
+ (
+ cd directory-submodule &&
+
+ git checkout A^0 &&
+ test_must_fail git merge B2^0 >out 2>err &&
+
+ # We do not want files within the submodule to prevent the
+ # merge from starting; we should not be writing to such paths
+ # anyway.
+ test_i18ngrep ! "refusing to lose untracked file at" err
+ )
+'
+
+test_expect_failure 'directory/submodule conflict; merge --abort works afterward' '
+ test_when_finished "git -C directory-submodule/path reset --hard" &&
+ test_when_finished "git -C directory-submodule reset --hard" &&
+ (
+ cd directory-submodule &&
+
+ git checkout A^0 &&
+ test_must_fail git merge B2^0 &&
+ test_path_is_file .git/MERGE_HEAD &&
+
+ # merge --abort should succeed, should clear .git/MERGE_HEAD,
+ # and should not leave behind any conflicted files
+ git merge --abort &&
+ test_path_is_missing .git/MERGE_HEAD &&
+ git ls-files -u >conflicts &&
+ test_must_be_empty conflicts
+ )
+'
+
test_done
diff --git a/t/t7406-submodule-update.sh b/t/t7406-submodule-update.sh
index 9e0d317..686e6a4 100755
--- a/t/t7406-submodule-update.sh
+++ b/t/t7406-submodule-update.sh
@@ -115,17 +115,17 @@ Submodule path '../super/submodule': checked out '$submodulesha1'
EOF
cat <<EOF >expect2
+Cloning into '$pwd/recursivesuper/super/merging'...
+Cloning into '$pwd/recursivesuper/super/none'...
+Cloning into '$pwd/recursivesuper/super/rebasing'...
+Cloning into '$pwd/recursivesuper/super/submodule'...
Submodule 'merging' ($pwd/merging) registered for path '../super/merging'
Submodule 'none' ($pwd/none) registered for path '../super/none'
Submodule 'rebasing' ($pwd/rebasing) registered for path '../super/rebasing'
Submodule 'submodule' ($pwd/submodule) registered for path '../super/submodule'
-Cloning into '$pwd/recursivesuper/super/merging'...
done.
-Cloning into '$pwd/recursivesuper/super/none'...
done.
-Cloning into '$pwd/recursivesuper/super/rebasing'...
done.
-Cloning into '$pwd/recursivesuper/super/submodule'...
done.
EOF
@@ -137,7 +137,8 @@ test_expect_success 'submodule update --init --recursive from subdirectory' '
git submodule update --init --recursive ../super >../../actual 2>../../actual2
) &&
test_i18ncmp expect actual &&
- test_i18ncmp expect2 actual2
+ sort actual2 >actual2.sorted &&
+ test_i18ncmp expect2 actual2.sorted
'
cat <<EOF >expect2
@@ -865,9 +866,9 @@ test_expect_success 'submodule update places git-dir in superprojects git-dir re
(cd submodule/subsubmodule &&
git log > ../../expected
) &&
- (cd .git/modules/submodule/modules/subsubmodule
+ (cd .git/modules/submodule/modules/subsubmodule &&
git log > ../../../../../actual
- )
+ ) &&
test_cmp actual expected
)
'
@@ -886,7 +887,7 @@ test_expect_success 'submodule update properly revives a moved submodule' '
git commit -am "pre move" &&
H2=$(git rev-parse --short HEAD) &&
git status | sed "s/$H/XXX/" >expect &&
- H=$(cd submodule2; git rev-parse HEAD) &&
+ H=$(cd submodule2 && git rev-parse HEAD) &&
git rm --cached submodule2 &&
rm -rf submodule2 &&
mkdir -p "moved/sub module" &&
diff --git a/t/t7408-submodule-reference.sh b/t/t7408-submodule-reference.sh
index 08d9add..34ac28c 100755
--- a/t/t7408-submodule-reference.sh
+++ b/t/t7408-submodule-reference.sh
@@ -148,7 +148,7 @@ test_expect_success 'preparing second superproject with a nested submodule plus
cd supersuper &&
echo "I am super super." >file &&
git add file &&
- git commit -m B-super-super-initial
+ git commit -m B-super-super-initial &&
git submodule add "file://$base_dir/super" subwithsub &&
git commit -m B-super-super-added &&
git submodule update --init --recursive &&
diff --git a/t/t7415-submodule-names.sh b/t/t7415-submodule-names.sh
index b68c5f5..293e2e1 100755
--- a/t/t7415-submodule-names.sh
+++ b/t/t7415-submodule-names.sh
@@ -176,4 +176,19 @@ test_expect_success 'fsck detects non-blob .gitmodules' '
)
'
+test_expect_success 'fsck detects corrupt .gitmodules' '
+ git init corrupt &&
+ (
+ cd corrupt &&
+
+ echo "[broken" >.gitmodules &&
+ git add .gitmodules &&
+ git commit -m "broken gitmodules" &&
+
+ git fsck 2>output &&
+ grep gitmodulesParse output &&
+ test_i18ngrep ! "bad config" output
+ )
+'
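fsck reports an unparseable .gitmodules blob under the gitmodulesParse message id, which is what the test greps for. Presumably the usual fsck.<msgid> knobs apply to it like to any other fsck message; a hedged sketch outside the test harness:

    echo "[broken" >.gitmodules &&
    git add .gitmodules &&
    git commit -m "broken gitmodules" &&
    git fsck 2>&1 | grep gitmodulesParse
    # assumption: downgradable like other message ids
    git -c fsck.gitmodulesParse=ignore fsck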
+
test_done
diff --git a/t/t7501-commit.sh b/t/t7501-commit.sh
index 9dbbd01..51646d8 100755
--- a/t/t7501-commit.sh
+++ b/t/t7501-commit.sh
@@ -47,7 +47,7 @@ test_expect_success 'paths and -a do not mix' '
test_expect_success PERL 'can use paths with --interactive' '
echo bong-o-bong >file &&
# 2: update, 1:st path, that is all, 7: quit
- ( echo 2; echo 1; echo; echo 7 ) |
+ test_write_lines 2 1 "" 7 |
git commit -m foo --interactive file &&
git reset --hard HEAD^
'
@@ -293,7 +293,7 @@ test_expect_success PERL 'interactive add' '
test_expect_success PERL "commit --interactive doesn't change index if editor aborts" '
echo zoo >file &&
test_must_fail git diff --exit-code >diff1 &&
- (echo u ; echo "*" ; echo q) |
+ test_write_lines u "*" q |
(
EDITOR=: &&
export EDITOR &&
@@ -411,8 +411,8 @@ test_expect_success 'sign off (1)' '
git commit -s -m "thank you" &&
git cat-file commit HEAD | sed -e "1,/^\$/d" >actual &&
(
- echo thank you
- echo
+ echo thank you &&
+ echo &&
git var GIT_COMMITTER_IDENT |
sed -e "s/>.*/>/" -e "s/^/Signed-off-by: /"
) >expected &&
@@ -430,9 +430,9 @@ test_expect_success 'sign off (2)' '
$existing" &&
git cat-file commit HEAD | sed -e "1,/^\$/d" >actual &&
(
- echo thank you
- echo
- echo $existing
+ echo thank you &&
+ echo &&
+ echo $existing &&
git var GIT_COMMITTER_IDENT |
sed -e "s/>.*/>/" -e "s/^/Signed-off-by: /"
) >expected &&
@@ -450,9 +450,9 @@ test_expect_success 'signoff gap' '
$alt" &&
git cat-file commit HEAD | sed -e "1,/^\$/d" > actual &&
(
- echo welcome
- echo
- echo $alt
+ echo welcome &&
+ echo &&
+ echo $alt &&
git var GIT_COMMITTER_IDENT |
sed -e "s/>.*/>/" -e "s/^/Signed-off-by: /"
) >expected &&
@@ -470,11 +470,11 @@ We have now
$alt" &&
git cat-file commit HEAD | sed -e "1,/^\$/d" > actual &&
(
- echo welcome
- echo
- echo We have now
- echo $alt
- echo
+ echo welcome &&
+ echo &&
+ echo We have now &&
+ echo $alt &&
+ echo &&
git var GIT_COMMITTER_IDENT |
sed -e "s/>.*/>/" -e "s/^/Signed-off-by: /"
) >expected &&
@@ -491,11 +491,11 @@ non-trailer line
Myfooter: x" &&
git cat-file commit HEAD | sed -e "1,/^\$/d" > actual &&
(
- echo subject
- echo
- echo non-trailer line
- echo Myfooter: x
- echo
+ echo subject &&
+ echo &&
+ echo non-trailer line &&
+ echo Myfooter: x &&
+ echo &&
echo "Signed-off-by: $GIT_COMMITTER_NAME <$GIT_COMMITTER_EMAIL>"
) >expected &&
test_cmp expected actual &&
@@ -508,10 +508,10 @@ non-trailer line
Myfooter: x" &&
git cat-file commit HEAD | sed -e "1,/^\$/d" > actual &&
(
- echo subject
- echo
- echo non-trailer line
- echo Myfooter: x
+ echo subject &&
+ echo &&
+ echo non-trailer line &&
+ echo Myfooter: x &&
echo "Signed-off-by: $GIT_COMMITTER_NAME <$GIT_COMMITTER_EMAIL>"
) >expected &&
test_cmp expected actual
@@ -524,10 +524,10 @@ test_expect_success 'multiple -m' '
git commit -m "one" -m "two" -m "three" &&
git cat-file commit HEAD | sed -e "1,/^\$/d" >actual &&
(
- echo one
- echo
- echo two
- echo
+ echo one &&
+ echo &&
+ echo two &&
+ echo &&
echo three
) >expected &&
test_cmp expected actual
diff --git a/t/t7504-commit-msg-hook.sh b/t/t7504-commit-msg-hook.sh
index 302a3a2..31b9c6a 100755
--- a/t/t7504-commit-msg-hook.sh
+++ b/t/t7504-commit-msg-hook.sh
@@ -157,6 +157,7 @@ test_expect_success 'merge bypasses failing hook with --no-verify' '
test_when_finished "git branch -D newbranch" &&
test_when_finished "git checkout -f master" &&
git checkout --orphan newbranch &&
+ git rm -f file &&
: >file2 &&
git add file2 &&
git commit --no-verify file2 -m in-side-branch &&
@@ -168,7 +169,7 @@ test_expect_success 'merge bypasses failing hook with --no-verify' '
chmod -x "$HOOK"
test_expect_success POSIXPERM 'with non-executable hook' '
- echo "content" >> file &&
+ echo "content" >file &&
git add file &&
git commit -m "content"
@@ -249,6 +250,7 @@ test_expect_success 'hook called in git-merge picks up commit message' '
test_when_finished "git branch -D newbranch" &&
test_when_finished "git checkout -f master" &&
git checkout --orphan newbranch &&
+ git rm -f file &&
: >file2 &&
git add file2 &&
git commit --no-verify file2 -m in-side-branch &&
diff --git a/t/t7506-status-submodule.sh b/t/t7506-status-submodule.sh
index b4b74db..943708f 100755
--- a/t/t7506-status-submodule.sh
+++ b/t/t7506-status-submodule.sh
@@ -193,9 +193,9 @@ test_expect_success 'status with added and untracked file in modified submodule
test_expect_success 'setup .git file for sub' '
(cd sub &&
- rm -f new-file
+ rm -f new-file &&
REAL="$(pwd)/../.real" &&
- mv .git "$REAL"
+ mv .git "$REAL" &&
echo "gitdir: $REAL" >.git) &&
echo .real >>.gitignore &&
git commit -m "added .real to .gitignore" .gitignore
@@ -209,12 +209,12 @@ test_expect_success 'status with added file in modified submodule with .git file
test_expect_success 'status with a lot of untracked files in the submodule' '
(
- cd sub
+ cd sub &&
i=0 &&
while test $i -lt 1024
do
- >some-file-$i
- i=$(( $i + 1 ))
+ >some-file-$i &&
+ i=$(( $i + 1 )) || exit 1
done
) &&
git status --porcelain sub 2>err.actual &&
diff --git a/t/t7510-signed-commit.sh b/t/t7510-signed-commit.sh
index 6e2015e..4e37ff8 100755
--- a/t/t7510-signed-commit.sh
+++ b/t/t7510-signed-commit.sh
@@ -227,4 +227,11 @@ test_expect_success GPG 'log.showsignature behaves like --show-signature' '
grep "gpg: Good signature" actual
'
+test_expect_success GPG 'check config gpg.format values' '
+ test_config gpg.format openpgp &&
+ git commit -S --amend -m "success" &&
+ test_config gpg.format OpEnPgP &&
+ test_must_fail git commit -S --amend -m "fail"
+'
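The test pins down that gpg.format accepts only the literal values "openpgp" and "x509"; the deliberately odd capitalization "OpEnPgP" shows the value is not normalized, so that commit fails. A hedged sketch (the x509 form additionally needs a working gpgsm setup):

    git -c gpg.format=openpgp commit -S --amend --no-edit   # accepted
    git -c gpg.format=x509 commit -S --amend --no-edit      # accepted, needs gpgsm
    git -c gpg.format=OpEnPgP commit -S --amend --no-edit   # rejected: unknown value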
+
test_done
diff --git a/t/t7610-mergetool.sh b/t/t7610-mergetool.sh
index 4d46c59..b18503d 100755
--- a/t/t7610-mergetool.sh
+++ b/t/t7610-mergetool.sh
@@ -57,18 +57,18 @@ test_expect_success 'setup' '
git checkout -b delete-base branch1 &&
mkdir -p a/a &&
- (echo one; echo two; echo 3; echo 4) >a/a/file.txt &&
+ test_write_lines one two 3 4 >a/a/file.txt &&
git add a/a/file.txt &&
git commit -m"base file" &&
git checkout -b move-to-b delete-base &&
mkdir -p b/b &&
git mv a/a/file.txt b/b/file.txt &&
- (echo one; echo two; echo 4) >b/b/file.txt &&
+ test_write_lines one two 4 >b/b/file.txt &&
git commit -a -m"move to b" &&
git checkout -b move-to-c delete-base &&
mkdir -p c/c &&
git mv a/a/file.txt c/c/file.txt &&
- (echo one; echo two; echo 3) >c/c/file.txt &&
+ test_write_lines one two 3 >c/c/file.txt &&
git commit -a -m"move to c" &&
git checkout -b stash1 master &&
@@ -349,7 +349,7 @@ test_expect_success 'mergetool keeps tempfiles when aborting delete/delete' '
git checkout -b test$test_count move-to-c &&
test_config mergetool.keepTemporaries true &&
test_must_fail git merge move-to-b &&
- ! (echo a; echo n) | git mergetool a/a/file.txt &&
+ ! test_write_lines a n | git mergetool a/a/file.txt &&
test -d a/a &&
cat >expect <<-\EOF &&
file_BASE_.txt
diff --git a/t/t7611-merge-abort.sh b/t/t7611-merge-abort.sh
index 7b4798e..7c84a51 100755
--- a/t/t7611-merge-abort.sh
+++ b/t/t7611-merge-abort.sh
@@ -187,31 +187,6 @@ test_expect_success 'Fail clean merge with matching dirty worktree' '
test_cmp expect actual
'
-test_expect_success 'Abort clean merge with matching dirty index' '
- git add bar &&
- git diff --staged > expect &&
- git merge --no-commit clean_branch &&
- test -f .git/MERGE_HEAD &&
- ### When aborting the merge, git will discard all staged changes,
- ### including those that were staged pre-merge. In other words,
- ### --abort will LOSE any staged changes (the staged changes that
- ### are lost must match the merge result, or the merge would not
- ### have been allowed to start). Change expectations accordingly:
- rm expect &&
- touch expect &&
- # Abort merge
- git merge --abort &&
- test ! -f .git/MERGE_HEAD &&
- test "$pre_merge_head" = "$(git rev-parse HEAD)" &&
- git diff --staged > actual &&
- test_cmp expect actual &&
- test -z "$(git diff)"
-'
-
-test_expect_success 'Reset worktree changes' '
- git reset --hard "$pre_merge_head"
-'
-
test_expect_success 'Fail conflicting merge with matching dirty worktree' '
echo barf > bar &&
git diff > expect &&
@@ -223,97 +198,4 @@ test_expect_success 'Fail conflicting merge with matching dirty worktree' '
test_cmp expect actual
'
-test_expect_success 'Abort conflicting merge with matching dirty index' '
- git add bar &&
- git diff --staged > expect &&
- test_must_fail git merge conflict_branch &&
- test -f .git/MERGE_HEAD &&
- ### When aborting the merge, git will discard all staged changes,
- ### including those that were staged pre-merge. In other words,
- ### --abort will LOSE any staged changes (the staged changes that
- ### are lost must match the merge result, or the merge would not
- ### have been allowed to start). Change expectations accordingly:
- rm expect &&
- touch expect &&
- # Abort merge
- git merge --abort &&
- test ! -f .git/MERGE_HEAD &&
- test "$pre_merge_head" = "$(git rev-parse HEAD)" &&
- git diff --staged > actual &&
- test_cmp expect actual &&
- test -z "$(git diff)"
-'
-
-test_expect_success 'Reset worktree changes' '
- git reset --hard "$pre_merge_head"
-'
-
-test_expect_success 'Abort merge with pre- and post-merge worktree changes' '
- # Pre-merge worktree changes
- echo xyzzy > foo &&
- echo barf > bar &&
- git add bar &&
- git diff > expect &&
- git diff --staged > expect-staged &&
- # Perform merge
- test_must_fail git merge conflict_branch &&
- test -f .git/MERGE_HEAD &&
- # Post-merge worktree changes
- echo yzxxz > foo &&
- echo blech > baz &&
- ### When aborting the merge, git will discard staged changes (bar)
- ### and unmerged changes (baz). Other changes that are neither
- ### staged nor marked as unmerged (foo), will be preserved. For
- ### these changed, git cannot tell pre-merge changes apart from
- ### post-merge changes, so the post-merge changes will be
- ### preserved. Change expectations accordingly:
- git diff -- foo > expect &&
- rm expect-staged &&
- touch expect-staged &&
- # Abort merge
- git merge --abort &&
- test ! -f .git/MERGE_HEAD &&
- test "$pre_merge_head" = "$(git rev-parse HEAD)" &&
- git diff > actual &&
- test_cmp expect actual &&
- git diff --staged > actual-staged &&
- test_cmp expect-staged actual-staged
-'
-
-test_expect_success 'Reset worktree changes' '
- git reset --hard "$pre_merge_head"
-'
-
-test_expect_success 'Abort merge with pre- and post-merge index changes' '
- # Pre-merge worktree changes
- echo xyzzy > foo &&
- echo barf > bar &&
- git add bar &&
- git diff > expect &&
- git diff --staged > expect-staged &&
- # Perform merge
- test_must_fail git merge conflict_branch &&
- test -f .git/MERGE_HEAD &&
- # Post-merge worktree changes
- echo yzxxz > foo &&
- echo blech > baz &&
- git add foo bar &&
- ### When aborting the merge, git will discard all staged changes
- ### (foo, bar and baz), and no changes will be preserved. Whether
- ### the changes were staged pre- or post-merge does not matter
- ### (except for not preventing starting the merge).
- ### Change expectations accordingly:
- rm expect expect-staged &&
- touch expect &&
- touch expect-staged &&
- # Abort merge
- git merge --abort &&
- test ! -f .git/MERGE_HEAD &&
- test "$pre_merge_head" = "$(git rev-parse HEAD)" &&
- git diff > actual &&
- test_cmp expect actual &&
- git diff --staged > actual-staged &&
- test_cmp expect-staged actual-staged
-'
-
test_done
diff --git a/t/t7810-grep.sh b/t/t7810-grep.sh
index fc44ec9..d826e24 100755
--- a/t/t7810-grep.sh
+++ b/t/t7810-grep.sh
@@ -262,6 +262,21 @@ do
fi
'
+ test_expect_success "grep $L (with --column, --only-matching)" '
+ {
+ echo ${HC}file:1:5:mmap
+ echo ${HC}file:2:5:mmap
+ echo ${HC}file:3:5:mmap
+ echo ${HC}file:3:13:mmap
+ echo ${HC}file:4:5:mmap
+ echo ${HC}file:4:13:mmap
+ echo ${HC}file:5:5:mmap
+ echo ${HC}file:5:13:mmap
+ } >expected &&
+ git grep --column -n -o -e mmap $H >actual &&
+ test_cmp expected actual
+ '
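--column reports the 1-based column of a match, and combined with -o/--only-matching every hit on a line becomes its own output record, which is why lines 3-5 of the sample file each appear twice in the expected output. Sketch of the invocation; the $H/${HC} tree prefix comes from the surrounding loop:

    git grep --column -n -o -e mmap $H
    # e.g. two records for line 3:  file:3:5:mmap  and  file:3:13:mmap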
+
test_expect_success "grep $L (t-1)" '
echo "${HC}t/t:1:test" >expected &&
git grep -n -e test $H >actual &&
@@ -939,10 +954,9 @@ test_expect_success 'grep from a subdirectory to search wider area (1)' '
test_expect_success 'grep from a subdirectory to search wider area (2)' '
mkdir -p s &&
(
- cd s || exit 1
- ( git grep xxyyzz .. >out ; echo $? >status )
- ! test -s out &&
- test 1 = $(cat status)
+ cd s &&
+ test_expect_code 1 git grep xxyyzz .. >out &&
+ ! test -s out
)
'
diff --git a/t/t8003-blame-corner-cases.sh b/t/t8003-blame-corner-cases.sh
index 661f9d4..c92a47b 100755
--- a/t/t8003-blame-corner-cases.sh
+++ b/t/t8003-blame-corner-cases.sh
@@ -216,14 +216,18 @@ test_expect_success 'blame -L with invalid start' '
'
test_expect_success 'blame -L with invalid end' '
- test_must_fail git blame -L1,5 tres 2>errors &&
- test_i18ngrep "has only 2 lines" errors
+ git blame -L1,5 tres >out &&
+ test_line_count = 2 out
'
test_expect_success 'blame parses <end> part of -L' '
git blame -L1,1 tres >out &&
- cat out &&
- test $(wc -l < out) -eq 1
+ test_line_count = 1 out
+'
+
+test_expect_success 'blame -Ln,-(n+1)' '
+ git blame -L3,-4 nine_lines >out &&
+ test_line_count = 3 out
'
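Two -L behaviors are pinned down here: an <end> past the last line is now clamped instead of being an error (so -L1,5 on a two-line file yields two lines), and a negative <end> is an offset counted back from <start> (so -L3,-4 means "four lines ending at line 3", clamped at the top of the file to lines 1-3). Sketch:

    git blame -L1,5 tres          # tres has 2 lines -> 2 lines of output
    git blame -L3,-4 nine_lines   # clamped to lines 1-3 -> 3 lines of output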
test_expect_success 'indent of line numbers, nine lines' '
diff --git a/t/t9001-send-email.sh b/t/t9001-send-email.sh
index b734224..1ef1a19 100755
--- a/t/t9001-send-email.sh
+++ b/t/t9001-send-email.sh
@@ -331,7 +331,7 @@ test_expect_success $PREREQ 'Show all headers' '
test_expect_success $PREREQ 'Prompting works' '
clean_fake_sendmail &&
- (echo "to@example.com"
+ (echo "to@example.com" &&
echo ""
) | GIT_SEND_EMAIL_NOTTY=1 git send-email \
--smtp-server="$(pwd)/fake.sendmail" \
@@ -508,8 +508,8 @@ test_expect_success $PREREQ 'Invalid In-Reply-To' '
test_expect_success $PREREQ 'Valid In-Reply-To when prompting' '
clean_fake_sendmail &&
- (echo "From Example <from@example.com>"
- echo "To Example <to@example.com>"
+ (echo "From Example <from@example.com>" &&
+ echo "To Example <to@example.com>" &&
echo ""
) | GIT_SEND_EMAIL_NOTTY=1 git send-email \
--smtp-server="$(pwd)/fake.sendmail" \
@@ -2022,11 +2022,11 @@ test_expect_success $PREREQ 'invoke hook' '
# Verify error message when a patch is rejected by the hook
sed -e "s/add master/x/" ../0001-add-master.patch >../another.patch &&
- git send-email \
+ test_must_fail git send-email \
--from="Example <nobody@example.com>" \
--to=nobody@example.com \
--smtp-server="$(pwd)/../fake.sendmail" \
- ../another.patch 2>err
+ ../another.patch 2>err &&
test_i18ngrep "rejected by sendemail-validate hook" err
)
'
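The fix makes the test assert the exit code as well: when the sendemail-validate hook rejects a patch, git send-email itself must fail, not merely print the message. The hook receives the name of the patch file to be sent as its single argument; a hedged sketch of a rejecting hook, with an illustrative grep pattern that is not taken from the test:

    cat >.git/hooks/sendemail-validate <<-\EOF &&
    #!/bin/sh
    grep -q "add master" "$1"   # any non-zero exit rejects the whole send
    EOF
    chmod +x .git/hooks/sendemail-validate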
diff --git a/t/t9100-git-svn-basic.sh b/t/t9100-git-svn-basic.sh
index c937330..9af6078 100755
--- a/t/t9100-git-svn-basic.sh
+++ b/t/t9100-git-svn-basic.sh
@@ -31,7 +31,7 @@ test_expect_success \
(
cd import &&
echo foo >foo &&
- ln -s foo foo.link
+ ln -s foo foo.link &&
mkdir -p dir/a/b/c/d/e &&
echo "deep dir" >dir/a/b/c/d/e/file &&
mkdir bar &&
diff --git a/t/t9101-git-svn-props.sh b/t/t9101-git-svn-props.sh
index 07bfb63..8a5c8dc 100755
--- a/t/t9101-git-svn-props.sh
+++ b/t/t9101-git-svn-props.sh
@@ -149,7 +149,7 @@ test_expect_success 'test show-ignore' "
svn_cmd up &&
svn_cmd propset -R svn:ignore '
no-such-file*
-' .
+' . &&
svn_cmd commit -m 'propset svn:ignore'
) &&
git svn show-ignore > show-ignore.got &&
diff --git a/t/t9119-git-svn-info.sh b/t/t9119-git-svn-info.sh
index 88241ba..8201c3e 100755
--- a/t/t9119-git-svn-info.sh
+++ b/t/t9119-git-svn-info.sh
@@ -22,8 +22,8 @@ esac
# same value as "svn info" (i.e. the commit timestamp that touched the
# path most recently); do not expect that field to match.
test_cmp_info () {
- sed -e '/^Text Last Updated:/d' "$1" >tmp.expect
- sed -e '/^Text Last Updated:/d' "$2" >tmp.actual
+ sed -e '/^Text Last Updated:/d' "$1" >tmp.expect &&
+ sed -e '/^Text Last Updated:/d' "$2" >tmp.actual &&
test_cmp tmp.expect tmp.actual &&
rm -f tmp.expect tmp.actual
}
@@ -59,24 +59,24 @@ test_expect_success 'setup repository and import' '
'
test_expect_success 'info' "
- (cd svnwc; svn info) > expected.info &&
- (cd gitwc; git svn info) > actual.info &&
+ (cd svnwc && svn info) > expected.info &&
+ (cd gitwc && git svn info) > actual.info &&
test_cmp_info expected.info actual.info
"
test_expect_success 'info --url' '
- test "$(cd gitwc; git svn info --url)" = "$quoted_svnrepo"
+ test "$(cd gitwc && git svn info --url)" = "$quoted_svnrepo"
'
test_expect_success 'info .' "
- (cd svnwc; svn info .) > expected.info-dot &&
- (cd gitwc; git svn info .) > actual.info-dot &&
+ (cd svnwc && svn info .) > expected.info-dot &&
+ (cd gitwc && git svn info .) > actual.info-dot &&
test_cmp_info expected.info-dot actual.info-dot
"
test_expect_success 'info $(pwd)' '
- (cd svnwc; svn info "$(pwd)") >expected.info-pwd &&
- (cd gitwc; git svn info "$(pwd)") >actual.info-pwd &&
+ (cd svnwc && svn info "$(pwd)") >expected.info-pwd &&
+ (cd gitwc && git svn info "$(pwd)") >actual.info-pwd &&
grep -v ^Path: <expected.info-pwd >expected.info-np &&
grep -v ^Path: <actual.info-pwd >actual.info-np &&
test_cmp_info expected.info-np actual.info-np &&
@@ -85,8 +85,8 @@ test_expect_success 'info $(pwd)' '
'
test_expect_success 'info $(pwd)/../___wc' '
- (cd svnwc; svn info "$(pwd)/../svnwc") >expected.info-pwd &&
- (cd gitwc; git svn info "$(pwd)/../gitwc") >actual.info-pwd &&
+ (cd svnwc && svn info "$(pwd)/../svnwc") >expected.info-pwd &&
+ (cd gitwc && git svn info "$(pwd)/../gitwc") >actual.info-pwd &&
grep -v ^Path: <expected.info-pwd >expected.info-np &&
grep -v ^Path: <actual.info-pwd >actual.info-np &&
test_cmp_info expected.info-np actual.info-np &&
@@ -95,8 +95,8 @@ test_expect_success 'info $(pwd)/../___wc' '
'
test_expect_success 'info $(pwd)/../___wc//file' '
- (cd svnwc; svn info "$(pwd)/../svnwc//file") >expected.info-pwd &&
- (cd gitwc; git svn info "$(pwd)/../gitwc//file") >actual.info-pwd &&
+ (cd svnwc && svn info "$(pwd)/../svnwc//file") >expected.info-pwd &&
+ (cd gitwc && git svn info "$(pwd)/../gitwc//file") >actual.info-pwd &&
grep -v ^Path: <expected.info-pwd >expected.info-np &&
grep -v ^Path: <actual.info-pwd >actual.info-np &&
test_cmp_info expected.info-np actual.info-np &&
@@ -105,56 +105,56 @@ test_expect_success 'info $(pwd)/../___wc//file' '
'
test_expect_success 'info --url .' '
- test "$(cd gitwc; git svn info --url .)" = "$quoted_svnrepo"
+ test "$(cd gitwc && git svn info --url .)" = "$quoted_svnrepo"
'
test_expect_success 'info file' "
- (cd svnwc; svn info file) > expected.info-file &&
- (cd gitwc; git svn info file) > actual.info-file &&
+ (cd svnwc && svn info file) > expected.info-file &&
+ (cd gitwc && git svn info file) > actual.info-file &&
test_cmp_info expected.info-file actual.info-file
"
test_expect_success 'info --url file' '
- test "$(cd gitwc; git svn info --url file)" = "$quoted_svnrepo/file"
+ test "$(cd gitwc && git svn info --url file)" = "$quoted_svnrepo/file"
'
test_expect_success 'info directory' "
- (cd svnwc; svn info directory) > expected.info-directory &&
- (cd gitwc; git svn info directory) > actual.info-directory &&
+ (cd svnwc && svn info directory) > expected.info-directory &&
+ (cd gitwc && git svn info directory) > actual.info-directory &&
test_cmp_info expected.info-directory actual.info-directory
"
test_expect_success 'info inside directory' "
- (cd svnwc/directory; svn info) > expected.info-inside-directory &&
- (cd gitwc/directory; git svn info) > actual.info-inside-directory &&
+ (cd svnwc/directory && svn info) > expected.info-inside-directory &&
+ (cd gitwc/directory && git svn info) > actual.info-inside-directory &&
test_cmp_info expected.info-inside-directory actual.info-inside-directory
"
test_expect_success 'info --url directory' '
- test "$(cd gitwc; git svn info --url directory)" = "$quoted_svnrepo/directory"
+ test "$(cd gitwc && git svn info --url directory)" = "$quoted_svnrepo/directory"
'
test_expect_success 'info symlink-file' "
- (cd svnwc; svn info symlink-file) > expected.info-symlink-file &&
- (cd gitwc; git svn info symlink-file) > actual.info-symlink-file &&
+ (cd svnwc && svn info symlink-file) > expected.info-symlink-file &&
+ (cd gitwc && git svn info symlink-file) > actual.info-symlink-file &&
test_cmp_info expected.info-symlink-file actual.info-symlink-file
"
test_expect_success 'info --url symlink-file' '
- test "$(cd gitwc; git svn info --url symlink-file)" \
+ test "$(cd gitwc && git svn info --url symlink-file)" \
= "$quoted_svnrepo/symlink-file"
'
test_expect_success 'info symlink-directory' "
- (cd svnwc; svn info symlink-directory) \
+ (cd svnwc && svn info symlink-directory) \
> expected.info-symlink-directory &&
- (cd gitwc; git svn info symlink-directory) \
+ (cd gitwc && git svn info symlink-directory) \
> actual.info-symlink-directory &&
test_cmp_info expected.info-symlink-directory actual.info-symlink-directory
"
test_expect_success 'info --url symlink-directory' '
- test "$(cd gitwc; git svn info --url symlink-directory)" \
+ test "$(cd gitwc && git svn info --url symlink-directory)" \
= "$quoted_svnrepo/symlink-directory"
'
@@ -169,13 +169,13 @@ test_expect_success 'info added-file' "
cd svnwc &&
svn_cmd add added-file > /dev/null
) &&
- (cd svnwc; svn info added-file) > expected.info-added-file &&
- (cd gitwc; git svn info added-file) > actual.info-added-file &&
+ (cd svnwc && svn info added-file) > expected.info-added-file &&
+ (cd gitwc && git svn info added-file) > actual.info-added-file &&
test_cmp_info expected.info-added-file actual.info-added-file
"
test_expect_success 'info --url added-file' '
- test "$(cd gitwc; git svn info --url added-file)" \
+ test "$(cd gitwc && git svn info --url added-file)" \
= "$quoted_svnrepo/added-file"
'
@@ -190,15 +190,15 @@ test_expect_success 'info added-directory' "
cd gitwc &&
git add added-directory
) &&
- (cd svnwc; svn info added-directory) \
+ (cd svnwc && svn info added-directory) \
> expected.info-added-directory &&
- (cd gitwc; git svn info added-directory) \
+ (cd gitwc && git svn info added-directory) \
> actual.info-added-directory &&
test_cmp_info expected.info-added-directory actual.info-added-directory
"
test_expect_success 'info --url added-directory' '
- test "$(cd gitwc; git svn info --url added-directory)" \
+ test "$(cd gitwc && git svn info --url added-directory)" \
= "$quoted_svnrepo/added-directory"
'
@@ -213,16 +213,16 @@ test_expect_success 'info added-symlink-file' "
ln -s added-file added-symlink-file &&
svn_cmd add added-symlink-file > /dev/null
) &&
- (cd svnwc; svn info added-symlink-file) \
+ (cd svnwc && svn info added-symlink-file) \
> expected.info-added-symlink-file &&
- (cd gitwc; git svn info added-symlink-file) \
+ (cd gitwc && git svn info added-symlink-file) \
> actual.info-added-symlink-file &&
test_cmp_info expected.info-added-symlink-file \
actual.info-added-symlink-file
"
test_expect_success 'info --url added-symlink-file' '
- test "$(cd gitwc; git svn info --url added-symlink-file)" \
+ test "$(cd gitwc && git svn info --url added-symlink-file)" \
= "$quoted_svnrepo/added-symlink-file"
'
@@ -237,16 +237,16 @@ test_expect_success 'info added-symlink-directory' "
ln -s added-directory added-symlink-directory &&
svn_cmd add added-symlink-directory > /dev/null
) &&
- (cd svnwc; svn info added-symlink-directory) \
+ (cd svnwc && svn info added-symlink-directory) \
> expected.info-added-symlink-directory &&
- (cd gitwc; git svn info added-symlink-directory) \
+ (cd gitwc && git svn info added-symlink-directory) \
> actual.info-added-symlink-directory &&
test_cmp_info expected.info-added-symlink-directory \
actual.info-added-symlink-directory
"
test_expect_success 'info --url added-symlink-directory' '
- test "$(cd gitwc; git svn info --url added-symlink-directory)" \
+ test "$(cd gitwc && git svn info --url added-symlink-directory)" \
= "$quoted_svnrepo/added-symlink-directory"
'
@@ -259,13 +259,13 @@ test_expect_success 'info deleted-file' "
cd svnwc &&
svn_cmd rm --force file > /dev/null
) &&
- (cd svnwc; svn info file) >expected.info-deleted-file &&
- (cd gitwc; git svn info file) >actual.info-deleted-file &&
+ (cd svnwc && svn info file) >expected.info-deleted-file &&
+ (cd gitwc && git svn info file) >actual.info-deleted-file &&
test_cmp_info expected.info-deleted-file actual.info-deleted-file
"
test_expect_success 'info --url file (deleted)' '
- test "$(cd gitwc; git svn info --url file)" \
+ test "$(cd gitwc && git svn info --url file)" \
= "$quoted_svnrepo/file"
'
@@ -278,13 +278,13 @@ test_expect_success 'info deleted-directory' "
cd svnwc &&
svn_cmd rm --force directory > /dev/null
) &&
- (cd svnwc; svn info directory) >expected.info-deleted-directory &&
- (cd gitwc; git svn info directory) >actual.info-deleted-directory &&
+ (cd svnwc && svn info directory) >expected.info-deleted-directory &&
+ (cd gitwc && git svn info directory) >actual.info-deleted-directory &&
test_cmp_info expected.info-deleted-directory actual.info-deleted-directory
"
test_expect_success 'info --url directory (deleted)' '
- test "$(cd gitwc; git svn info --url directory)" \
+ test "$(cd gitwc && git svn info --url directory)" \
= "$quoted_svnrepo/directory"
'
@@ -297,13 +297,13 @@ test_expect_success 'info deleted-symlink-file' "
cd svnwc &&
svn_cmd rm --force symlink-file > /dev/null
) &&
- (cd svnwc; svn info symlink-file) >expected.info-deleted-symlink-file &&
- (cd gitwc; git svn info symlink-file) >actual.info-deleted-symlink-file &&
+ (cd svnwc && svn info symlink-file) >expected.info-deleted-symlink-file &&
+ (cd gitwc && git svn info symlink-file) >actual.info-deleted-symlink-file &&
test_cmp_info expected.info-deleted-symlink-file actual.info-deleted-symlink-file
"
test_expect_success 'info --url symlink-file (deleted)' '
- test "$(cd gitwc; git svn info --url symlink-file)" \
+ test "$(cd gitwc && git svn info --url symlink-file)" \
= "$quoted_svnrepo/symlink-file"
'
@@ -316,13 +316,13 @@ test_expect_success 'info deleted-symlink-directory' "
cd svnwc &&
svn_cmd rm --force symlink-directory > /dev/null
) &&
- (cd svnwc; svn info symlink-directory) >expected.info-deleted-symlink-directory &&
- (cd gitwc; git svn info symlink-directory) >actual.info-deleted-symlink-directory &&
+ (cd svnwc && svn info symlink-directory) >expected.info-deleted-symlink-directory &&
+ (cd gitwc && git svn info symlink-directory) >actual.info-deleted-symlink-directory &&
test_cmp_info expected.info-deleted-symlink-directory actual.info-deleted-symlink-directory
"
test_expect_success 'info --url symlink-directory (deleted)' '
- test "$(cd gitwc; git svn info --url symlink-directory)" \
+ test "$(cd gitwc && git svn info --url symlink-directory)" \
= "$quoted_svnrepo/symlink-directory"
'
@@ -331,27 +331,27 @@ test_expect_success 'info --url symlink-directory (deleted)' '
test_expect_success 'info unknown-file' "
echo two > gitwc/unknown-file &&
- (cd gitwc; test_must_fail git svn info unknown-file) \
+ (cd gitwc && test_must_fail git svn info unknown-file) \
2> actual.info-unknown-file &&
grep unknown-file actual.info-unknown-file
"
test_expect_success 'info --url unknown-file' '
echo two > gitwc/unknown-file &&
- (cd gitwc; test_must_fail git svn info --url unknown-file) \
+ (cd gitwc && test_must_fail git svn info --url unknown-file) \
2> actual.info-url-unknown-file &&
grep unknown-file actual.info-url-unknown-file
'
test_expect_success 'info unknown-directory' "
mkdir gitwc/unknown-directory svnwc/unknown-directory &&
- (cd gitwc; test_must_fail git svn info unknown-directory) \
+ (cd gitwc && test_must_fail git svn info unknown-directory) \
2> actual.info-unknown-directory &&
grep unknown-directory actual.info-unknown-directory
"
test_expect_success 'info --url unknown-directory' '
- (cd gitwc; test_must_fail git svn info --url unknown-directory) \
+ (cd gitwc && test_must_fail git svn info --url unknown-directory) \
2> actual.info-url-unknown-directory &&
grep unknown-directory actual.info-url-unknown-directory
'
@@ -361,13 +361,13 @@ test_expect_success 'info unknown-symlink-file' "
cd gitwc &&
ln -s unknown-file unknown-symlink-file
) &&
- (cd gitwc; test_must_fail git svn info unknown-symlink-file) \
+ (cd gitwc && test_must_fail git svn info unknown-symlink-file) \
2> actual.info-unknown-symlink-file &&
grep unknown-symlink-file actual.info-unknown-symlink-file
"
test_expect_success 'info --url unknown-symlink-file' '
- (cd gitwc; test_must_fail git svn info --url unknown-symlink-file) \
+ (cd gitwc && test_must_fail git svn info --url unknown-symlink-file) \
2> actual.info-url-unknown-symlink-file &&
grep unknown-symlink-file actual.info-url-unknown-symlink-file
'
@@ -377,13 +377,13 @@ test_expect_success 'info unknown-symlink-directory' "
cd gitwc &&
ln -s unknown-directory unknown-symlink-directory
) &&
- (cd gitwc; test_must_fail git svn info unknown-symlink-directory) \
+ (cd gitwc && test_must_fail git svn info unknown-symlink-directory) \
2> actual.info-unknown-symlink-directory &&
grep unknown-symlink-directory actual.info-unknown-symlink-directory
"
test_expect_success 'info --url unknown-symlink-directory' '
- (cd gitwc; test_must_fail git svn info --url unknown-symlink-directory) \
+ (cd gitwc && test_must_fail git svn info --url unknown-symlink-directory) \
2> actual.info-url-unknown-symlink-directory &&
grep unknown-symlink-directory actual.info-url-unknown-symlink-directory
'
diff --git a/t/t9122-git-svn-author.sh b/t/t9122-git-svn-author.sh
index 30013b7..9e8fe38 100755
--- a/t/t9122-git-svn-author.sh
+++ b/t/t9122-git-svn-author.sh
@@ -7,8 +7,8 @@ test_expect_success 'setup svn repository' '
svn_cmd checkout "$svnrepo" work.svn &&
(
cd work.svn &&
- echo >file
- svn_cmd add file
+ echo >file &&
+ svn_cmd add file &&
svn_cmd commit -m "first commit" file
)
'
@@ -17,7 +17,7 @@ test_expect_success 'interact with it via git svn' '
mkdir work.git &&
(
cd work.git &&
- git svn init "$svnrepo"
+ git svn init "$svnrepo" &&
git svn fetch &&
echo modification >file &&
diff --git a/t/t9129-git-svn-i18n-commitencoding.sh b/t/t9129-git-svn-i18n-commitencoding.sh
index 8dbd647..2c213ae 100755
--- a/t/t9129-git-svn-i18n-commitencoding.sh
+++ b/t/t9129-git-svn-i18n-commitencoding.sh
@@ -51,7 +51,7 @@ do
git add F &&
git commit -a -F "$TEST_DIRECTORY"/t3900/$H.txt &&
E=$(git cat-file commit HEAD | sed -ne "s/^encoding //p") &&
- test "z$E" = "z$H"
+ test "z$E" = "z$H" &&
compare_git_head_with "$TEST_DIRECTORY"/t3900/$H.txt
)
'
diff --git a/t/t9130-git-svn-authors-file.sh b/t/t9130-git-svn-authors-file.sh
index d826285..cb764bc 100755
--- a/t/t9130-git-svn-authors-file.sh
+++ b/t/t9130-git-svn-authors-file.sh
@@ -25,7 +25,7 @@ test_expect_success 'start import with incomplete authors file' '
test_expect_success 'imported 2 revisions successfully' '
(
- cd x
+ cd x &&
git rev-list refs/remotes/git-svn >actual &&
test_line_count = 2 actual &&
git rev-list -1 --pretty=raw refs/remotes/git-svn >actual &&
@@ -42,7 +42,7 @@ EOF
test_expect_success 'continues to import once authors have been added' '
(
- cd x
+ cd x &&
git svn fetch --authors-file=../svn-authors &&
git rev-list refs/remotes/git-svn >actual &&
test_line_count = 4 actual &&
diff --git a/t/t9134-git-svn-ignore-paths.sh b/t/t9134-git-svn-ignore-paths.sh
index 09ff10c..fff49c4 100755
--- a/t/t9134-git-svn-ignore-paths.sh
+++ b/t/t9134-git-svn-ignore-paths.sh
@@ -82,7 +82,7 @@ test_expect_success 'update git svn-cloned repo (option ignore)' '
test_expect_success 'SVN-side change inside of ignored www' '
(
cd s &&
- echo zaq >> www/test_www.txt
+ echo zaq >> www/test_www.txt &&
svn_cmd commit -m "SVN-side change inside of www/test_www.txt" &&
svn_cmd up &&
svn_cmd log -v | fgrep "SVN-side change inside of www/test_www.txt"
@@ -114,8 +114,8 @@ test_expect_success 'update git svn-cloned repo (option ignore)' '
test_expect_success 'SVN-side change in and out of ignored www' '
(
cd s &&
- echo cvf >> www/test_www.txt
- echo ygg >> qqq/test_qqq.txt
+ echo cvf >> www/test_www.txt &&
+ echo ygg >> qqq/test_qqq.txt &&
svn_cmd commit -m "SVN-side change in and out of ignored www" &&
svn_cmd up &&
svn_cmd log -v | fgrep "SVN-side change in and out of ignored www"
diff --git a/t/t9137-git-svn-dcommit-clobber-series.sh b/t/t9137-git-svn-dcommit-clobber-series.sh
index 5fa07a3..067b15b 100755
--- a/t/t9137-git-svn-dcommit-clobber-series.sh
+++ b/t/t9137-git-svn-dcommit-clobber-series.sh
@@ -7,7 +7,7 @@ test_description='git svn dcommit clobber series'
test_expect_success 'initialize repo' '
mkdir import &&
(cd import &&
- awk "BEGIN { for (i = 1; i < 64; i++) { print i } }" > file
+ awk "BEGIN { for (i = 1; i < 64; i++) { print i } }" > file &&
svn_cmd import -m "initial" . "$svnrepo"
) &&
git svn init "$svnrepo" &&
diff --git a/t/t9138-git-svn-authors-prog.sh b/t/t9138-git-svn-authors-prog.sh
index 93ef44f..027b416 100755
--- a/t/t9138-git-svn-authors-prog.sh
+++ b/t/t9138-git-svn-authors-prog.sh
@@ -38,7 +38,7 @@ test_expect_success 'import authors with prog and file' '
test_expect_success 'imported 6 revisions successfully' '
(
- cd x
+ cd x &&
git rev-list refs/remotes/git-svn >actual &&
test_line_count = 6 actual
)
@@ -46,7 +46,7 @@ test_expect_success 'imported 6 revisions successfully' '
test_expect_success 'authors-prog ran correctly' '
(
- cd x
+ cd x &&
git rev-list -1 --pretty=raw refs/remotes/git-svn~1 >actual &&
grep "^author ee-foo <ee-foo@example\.com> " actual &&
git rev-list -1 --pretty=raw refs/remotes/git-svn~2 >actual &&
@@ -62,7 +62,7 @@ test_expect_success 'authors-prog ran correctly' '
test_expect_success 'authors-file overrode authors-prog' '
(
- cd x
+ cd x &&
git rev-list -1 --pretty=raw refs/remotes/git-svn >actual &&
grep "^author FFFFFFF FFFFFFF <fFf@other\.example\.com> " actual
)
diff --git a/t/t9146-git-svn-empty-dirs.sh b/t/t9146-git-svn-empty-dirs.sh
index 6d3130e..5f91c0d 100755
--- a/t/t9146-git-svn-empty-dirs.sh
+++ b/t/t9146-git-svn-empty-dirs.sh
@@ -21,7 +21,7 @@ test_expect_success 'empty directories exist' '
do
if ! test -d "$i"
then
- echo >&2 "$i does not exist"
+ echo >&2 "$i does not exist" &&
exit 1
fi
done
@@ -38,7 +38,7 @@ test_expect_success 'option automkdirs set to false' '
do
if test -d "$i"
then
- echo >&2 "$i exists"
+ echo >&2 "$i exists" &&
exit 1
fi
done
@@ -63,7 +63,7 @@ test_expect_success 'git svn mkdirs recreates empty directories' '
do
if ! test -d "$i"
then
- echo >&2 "$i does not exist"
+ echo >&2 "$i does not exist" &&
exit 1
fi
done
@@ -79,21 +79,21 @@ test_expect_success 'git svn mkdirs -r works' '
do
if ! test -d "$i"
then
- echo >&2 "$i does not exist"
+ echo >&2 "$i does not exist" &&
exit 1
fi
- done
+ done &&
if test -d "! !"
then
- echo >&2 "$i should not exist"
+ echo >&2 "$i should not exist" &&
exit 1
- fi
+ fi &&
git svn mkdirs -r8 &&
if ! test -d "! !"
then
- echo >&2 "$i not exist"
+ echo >&2 "$i not exist" &&
exit 1
fi
)
@@ -115,7 +115,7 @@ test_expect_success 'empty directories in trunk exist' '
do
if ! test -d "$i"
then
- echo >&2 "$i does not exist"
+ echo >&2 "$i does not exist" &&
exit 1
fi
done
@@ -148,7 +148,7 @@ test_expect_success 'git svn gc-ed files work' '
do
if ! test -d "$i"
then
- echo >&2 "$i does not exist"
+ echo >&2 "$i does not exist" &&
exit 1
fi
done
diff --git a/t/t9147-git-svn-include-paths.sh b/t/t9147-git-svn-include-paths.sh
index a90ff58..d292bf9 100755
--- a/t/t9147-git-svn-include-paths.sh
+++ b/t/t9147-git-svn-include-paths.sh
@@ -84,7 +84,7 @@ test_expect_success 'update git svn-cloned repo (option include)' '
test_expect_success 'SVN-side change inside of ignored www' '
(
cd s &&
- echo zaq >> www/test_www.txt
+ echo zaq >> www/test_www.txt &&
svn_cmd commit -m "SVN-side change inside of www/test_www.txt" &&
svn_cmd up &&
svn_cmd log -v | fgrep "SVN-side change inside of www/test_www.txt"
@@ -116,8 +116,8 @@ test_expect_success 'update git svn-cloned repo (option include)' '
test_expect_success 'SVN-side change in and out of included qqq' '
(
cd s &&
- echo cvf >> www/test_www.txt
- echo ygg >> qqq/test_qqq.txt
+ echo cvf >> www/test_www.txt &&
+ echo ygg >> qqq/test_qqq.txt &&
svn_cmd commit -m "SVN-side change in and out of ignored www" &&
svn_cmd up &&
svn_cmd log -v | fgrep "SVN-side change in and out of ignored www"
diff --git a/t/t9152-svn-empty-dirs-after-gc.sh b/t/t9152-svn-empty-dirs-after-gc.sh
index 301e779..89f285d 100755
--- a/t/t9152-svn-empty-dirs-after-gc.sh
+++ b/t/t9152-svn-empty-dirs-after-gc.sh
@@ -30,7 +30,7 @@ test_expect_success 'git svn mkdirs recreates empty directories after git svn gc
do
if ! test -d "$i"
then
- echo >&2 "$i does not exist"
+ echo >&2 "$i does not exist" &&
exit 1
fi
done
diff --git a/t/t9164-git-svn-dcommit-concurrent.sh b/t/t9164-git-svn-dcommit-concurrent.sh
index d8464d4..90346ff 100755
--- a/t/t9164-git-svn-dcommit-concurrent.sh
+++ b/t/t9164-git-svn-dcommit-concurrent.sh
@@ -12,7 +12,7 @@ test_expect_success 'setup svn repository' '
svn_cmd checkout "$svnrepo" work.svn &&
(
cd work.svn &&
- echo >file && echo > auto_updated_file
+ echo >file && echo > auto_updated_file &&
svn_cmd add file auto_updated_file &&
svn_cmd commit -m "initial commit"
) &&
diff --git a/t/t9165-git-svn-fetch-merge-branch-of-branch.sh b/t/t9165-git-svn-fetch-merge-branch-of-branch.sh
index fa3ef3b..a4813c2 100755
--- a/t/t9165-git-svn-fetch-merge-branch-of-branch.sh
+++ b/t/t9165-git-svn-fetch-merge-branch-of-branch.sh
@@ -39,7 +39,7 @@ test_expect_success 'initialize source svn repo' '
svn_cmd commit -m trunk &&
svn_cmd switch "$svnrepo"/branches/branch2 &&
svn_cmd merge "$svnrepo"/trunk &&
- svn_cmd commit -m "merge trunk"
+ svn_cmd commit -m "merge trunk" &&
svn_cmd switch "$svnrepo"/trunk &&
svn_cmd merge --reintegrate "$svnrepo"/branches/branch2 &&
svn_cmd commit -m "merge branch2"
diff --git a/t/t9200-git-cvsexportcommit.sh b/t/t9200-git-cvsexportcommit.sh
index 1319415..cd61288 100755
--- a/t/t9200-git-cvsexportcommit.sh
+++ b/t/t9200-git-cvsexportcommit.sh
@@ -187,7 +187,7 @@ test_expect_success \
git commit -a -m "Update with spaces" &&
id=$(git rev-list --max-count=1 HEAD) &&
(cd "$CVSWORK" &&
- git cvsexportcommit -c $id
+ git cvsexportcommit -c $id &&
check_entries "G g" "with spaces.png/1.2/-kb|with spaces.txt/1.2/"
)'
@@ -245,7 +245,7 @@ test_expect_success FILEMODE \
git add G/off &&
git commit -a -m "Execute test" &&
(cd "$CVSWORK" &&
- git cvsexportcommit -c HEAD
+ git cvsexportcommit -c HEAD &&
test -x G/on &&
! test -x G/off
)'
@@ -303,7 +303,7 @@ test_expect_success 're-commit a removed filename which remains in CVS attic' '
git add attic_gremlin &&
git commit -m "Added attic_gremlin" &&
git cvsexportcommit -w "$CVSWORK" -c HEAD &&
- (cd "$CVSWORK"; cvs -Q update -d) &&
+ (cd "$CVSWORK" && cvs -Q update -d) &&
test -f "$CVSWORK/attic_gremlin"
'
diff --git a/t/t9300-fast-import.sh b/t/t9300-fast-import.sh
index f8f869e..40fe7e4 100755
--- a/t/t9300-fast-import.sh
+++ b/t/t9300-fast-import.sh
@@ -3144,7 +3144,10 @@ background_import_then_checkpoint () {
echo $! >V.pid
# We don't mind if fast-import has already died by the time the test
# ends.
- test_when_finished "exec 8>&-; exec 9>&-; kill $(cat V.pid) || true"
+ test_when_finished "
+ exec 8>&-; exec 9>&-;
+ kill $(cat V.pid) && wait $(cat V.pid)
+ true"
# Start in the background to ensure we adhere strictly to (blocking)
# pipes writing sequence. We want to assume that the write below could
diff --git a/t/t9302-fast-import-unpack-limit.sh b/t/t9302-fast-import-unpack-limit.sh
index a04de14..bb1c39c 100755
--- a/t/t9302-fast-import-unpack-limit.sh
+++ b/t/t9302-fast-import-unpack-limit.sh
@@ -80,7 +80,7 @@ test_expect_success 'lookups after checkpoint works' '
do
if test $n -gt 30
then
- echo >&2 "checkpoint did not update branch"
+ echo >&2 "checkpoint did not update branch" &&
exit 1
else
n=$(($n + 1))
diff --git a/t/t9400-git-cvsserver-server.sh b/t/t9400-git-cvsserver-server.sh
index 0674274..a5e5dca 100755
--- a/t/t9400-git-cvsserver-server.sh
+++ b/t/t9400-git-cvsserver-server.sh
@@ -328,7 +328,7 @@ test_expect_success 'cvs update (subdirectories)' \
'(for dir in A A/B A/B/C A/D E; do
mkdir $dir &&
echo "test file in $dir" >"$dir/file_in_$(echo $dir|sed -e "s#/# #g")" &&
- git add $dir;
+ git add $dir
done) &&
git commit -q -m "deep sub directory structure" &&
git push gitcvs.git >/dev/null &&
@@ -371,7 +371,7 @@ test_expect_success 'cvs update (merge)' \
'echo Line 0 >expected &&
for i in 1 2 3 4 5 6 7
do
- echo Line $i >>merge
+ echo Line $i >>merge &&
echo Line $i >>expected
done &&
echo Line 8 >>expected &&
@@ -382,7 +382,7 @@ test_expect_success 'cvs update (merge)' \
GIT_CONFIG="$git_config" cvs -Q update &&
test "$(echo $(grep merge CVS/Entries|cut -d/ -f2,3,5))" = "merge/1.1/" &&
test_cmp merge ../merge &&
- ( echo Line 0; cat merge ) >merge.tmp &&
+ ( echo Line 0 && cat merge ) >merge.tmp &&
mv merge.tmp merge &&
cd "$WORKDIR" &&
echo Line 8 >>merge &&
@@ -410,7 +410,7 @@ do
done
test_expect_success 'cvs update (conflict merge)' \
- '( echo LINE 0; cat merge ) >merge.tmp &&
+ '( echo LINE 0 && cat merge ) >merge.tmp &&
mv merge.tmp merge &&
git add merge &&
git commit -q -m "Merge test (conflict)" &&
diff --git a/t/t9600-cvsimport.sh b/t/t9600-cvsimport.sh
index 804ce38..5dfee07 100755
--- a/t/t9600-cvsimport.sh
+++ b/t/t9600-cvsimport.sh
@@ -135,7 +135,7 @@ test_expect_success PERL 'second update has correct .git/cvs-revisions' '
(cd module-git &&
git log --format="o_fortuna 1.1 %H" -1 HEAD^^ &&
- git log --format="o_fortuna 1.2 %H" -1 HEAD^
+ git log --format="o_fortuna 1.2 %H" -1 HEAD^ &&
git log --format="tick 1.1 %H" -1 HEAD) > expected &&
test_cmp expected module-git/.git/cvs-revisions
'
diff --git a/t/t9800-git-p4-basic.sh b/t/t9800-git-p4-basic.sh
index 4849edc..729cd25 100755
--- a/t/t9800-git-p4-basic.sh
+++ b/t/t9800-git-p4-basic.sh
@@ -261,6 +261,35 @@ test_expect_success 'unresolvable host in P4PORT should display error' '
)
'
+# Test the following scenarios:
+# - Without ".git/hooks/p4-pre-submit", submit should continue
+# - With the hook returning 0, submit should continue
+# - With the hook returning 1, submit should abort
+test_expect_success 'run hook p4-pre-submit before submit' '
+ test_when_finished cleanup_git &&
+ git p4 clone --dest="$git" //depot &&
+ (
+ cd "$git" &&
+ echo "hello world" >hello.txt &&
+ git add hello.txt &&
+ git commit -m "add hello.txt" &&
+ git config git-p4.skipSubmitEdit true &&
+ git p4 submit --dry-run >out &&
+ grep "Would apply" out &&
+ mkdir -p .git/hooks &&
+ write_script .git/hooks/p4-pre-submit <<-\EOF &&
+ exit 0
+ EOF
+ git p4 submit --dry-run >out &&
+ grep "Would apply" out &&
+ write_script .git/hooks/p4-pre-submit <<-\EOF &&
+ exit 1
+ EOF
+ test_must_fail git p4 submit --dry-run >errs 2>&1 &&
+ ! grep "Would apply" errs
+ )
+'
+
test_expect_success 'submit from detached head' '
test_when_finished cleanup_git &&
git p4 clone --dest="$git" //depot &&
diff --git a/t/t9806-git-p4-options.sh b/t/t9806-git-p4-options.sh
index 1ab76c4..3f5291b 100755
--- a/t/t9806-git-p4-options.sh
+++ b/t/t9806-git-p4-options.sh
@@ -134,7 +134,7 @@ test_expect_success 'clone --changesfile' '
(
cd "$git" &&
git log --oneline p4/master >lines &&
- test_line_count = 2 lines
+ test_line_count = 2 lines &&
test_path_is_file file1 &&
test_path_is_missing file2 &&
test_path_is_file file3
diff --git a/t/t9810-git-p4-rcs.sh b/t/t9810-git-p4-rcs.sh
index 8134ab4..cc53deb 100755
--- a/t/t9810-git-p4-rcs.sh
+++ b/t/t9810-git-p4-rcs.sh
@@ -161,7 +161,7 @@ test_expect_success 'cleanup after failure' '
test_expect_success 'ktext expansion should not expand multi-line $File::' '
(
cd "$cli" &&
- cat >lv.pm <<-\EOF
+ cat >lv.pm <<-\EOF &&
my $wanted = sub { my $f = $File::Find::name;
if ( -f && $f =~ /foo/ ) {
EOF
diff --git a/t/t9811-git-p4-label-import.sh b/t/t9811-git-p4-label-import.sh
index decb66b..602b0a5 100755
--- a/t/t9811-git-p4-label-import.sh
+++ b/t/t9811-git-p4-label-import.sh
@@ -133,7 +133,7 @@ test_expect_success 'export git tags to p4' '
p4 labels ... | grep LIGHTWEIGHT_TAG &&
p4 label -o GIT_TAG_1 | grep "tag created in git:xyzzy" &&
p4 sync ...@GIT_TAG_1 &&
- ! test -f main/f10
+ ! test -f main/f10 &&
p4 sync ...@GIT_TAG_2 &&
test -f main/f10
)
diff --git a/t/t9814-git-p4-rename.sh b/t/t9814-git-p4-rename.sh
index e7e0268..60baa06 100755
--- a/t/t9814-git-p4-rename.sh
+++ b/t/t9814-git-p4-rename.sh
@@ -9,23 +9,11 @@ test_expect_success 'start p4d' '
'
# We rely on this behavior to detect for p4 move availability.
-test_expect_success 'p4 help unknown returns 1' '
+test_expect_success '"p4 help unknown" errors out' '
(
cd "$cli" &&
- (
- p4 help client >errs 2>&1
- echo $? >retval
- )
- echo 0 >expected &&
- test_cmp expected retval &&
- rm retval &&
- (
- p4 help nosuchcommand >errs 2>&1
- echo $? >retval
- )
- echo 1 >expected &&
- test_cmp expected retval &&
- rm retval
+ p4 help client &&
+ ! p4 help nosuchcommand
)
'
diff --git a/t/t9815-git-p4-submit-fail.sh b/t/t9815-git-p4-submit-fail.sh
index 37b42d0..eaf03a6 100755
--- a/t/t9815-git-p4-submit-fail.sh
+++ b/t/t9815-git-p4-submit-fail.sh
@@ -394,7 +394,7 @@ test_expect_success 'cleanup rename after submit cancel' '
(
cd "$cli" &&
test_path_is_missing text2 &&
- p4 fstat -T action text2 2>&1 | grep "no such file"
+ p4 fstat -T action text2 2>&1 | grep "no such file" &&
test_path_is_file text &&
! p4 fstat -T action text
)
diff --git a/t/t9830-git-p4-symlink-dir.sh b/t/t9830-git-p4-symlink-dir.sh
index 3dc528b..2ad1b08 100755
--- a/t/t9830-git-p4-symlink-dir.sh
+++ b/t/t9830-git-p4-symlink-dir.sh
@@ -30,7 +30,7 @@ test_expect_success 'symlinked directory' '
(
cd "$cli" &&
p4 sync &&
- test -L some/sub/directory/subdir2
+ test -L some/sub/directory/subdir2 &&
test_path_is_file some/sub/directory/subdir2/file.t
)
diff --git a/t/t9831-git-p4-triggers.sh b/t/t9831-git-p4-triggers.sh
index bbcf14c..be44c97 100755
--- a/t/t9831-git-p4-triggers.sh
+++ b/t/t9831-git-p4-triggers.sh
@@ -13,7 +13,7 @@ test_expect_success 'init depot' '
cd "$cli" &&
echo file1 >file1 &&
p4 add file1 &&
- p4 submit -d "change 1"
+ p4 submit -d "change 1" &&
echo file2 >file2 &&
p4 add file2 &&
p4 submit -d "change 2"
diff --git a/t/t9902-completion.sh b/t/t9902-completion.sh
index 3b3a7b6..5ff43b9 100755
--- a/t/t9902-completion.sh
+++ b/t/t9902-completion.sh
@@ -1103,7 +1103,7 @@ test_expect_success '__git_complete_refs - remote' '
master-in-other Z
EOF
(
- cur=
+ cur= &&
__git_complete_refs --remote=other &&
print_comp
) &&
@@ -1122,7 +1122,7 @@ test_expect_success '__git_complete_refs - track' '
master-in-other Z
EOF
(
- cur=
+ cur= &&
__git_complete_refs --track &&
print_comp
) &&
diff --git a/t/t9903-bash-prompt.sh b/t/t9903-bash-prompt.sh
index c3b89ae..0444068 100755
--- a/t/t9903-bash-prompt.sh
+++ b/t/t9903-bash-prompt.sh
@@ -529,7 +529,7 @@ test_expect_success 'prompt - bash color pc mode - branch name' '
printf "BEFORE: (${c_green}\${__git_ps1_branch_name}${c_clear}):AFTER\\nmaster" >expected &&
(
GIT_PS1_SHOWCOLORHINTS=y &&
- __git_ps1 "BEFORE:" ":AFTER" >"$actual"
+ __git_ps1 "BEFORE:" ":AFTER" >"$actual" &&
printf "%s\\n%s" "$PS1" "${__git_ps1_branch_name}" >"$actual"
) &&
test_cmp expected "$actual"
diff --git a/t/test-lib.sh b/t/test-lib.sh
index 2831570..78f7097 100644
--- a/t/test-lib.sh
+++ b/t/test-lib.sh
@@ -675,7 +675,8 @@ test_run_ () {
trace=
# 117 is magic because it is unlikely to match the exit
# code of other programs
- if test "OK-117" != "$(test_eval_ "(exit 117) && $1${LF}${LF}echo OK-\$?" 3>&1)"
+ if $(printf '%s\n' "$1" | sed -f "$GIT_BUILD_DIR/t/chainlint.sed" | grep -q '?![A-Z][A-Z]*?!') ||
+ test "OK-117" != "$(test_eval_ "(exit 117) && $1${LF}${LF}echo OK-\$?" 3>&1)"
then
error "bug in the test script: broken &&-chain or run-away HERE-DOC: $1"
fi
diff --git a/tag.c b/tag.c
index 3be7206..1db663d 100644
--- a/tag.c
+++ b/tag.c
@@ -6,6 +6,7 @@
#include "blob.h"
#include "alloc.h"
#include "gpg-interface.h"
+#include "packfile.h"
const char *tag_type = "tag";
@@ -64,14 +65,20 @@ int gpg_verify_tag(const struct object_id *oid, const char *name_to_report,
return ret;
}
-struct object *deref_tag(struct object *o, const char *warn, int warnlen)
+struct object *deref_tag(struct repository *r, struct object *o, const char *warn, int warnlen)
{
+ struct object_id *last_oid = NULL;
while (o && o->type == OBJ_TAG)
- if (((struct tag *)o)->tagged)
- o = parse_object(&((struct tag *)o)->tagged->oid);
- else
+ if (((struct tag *)o)->tagged) {
+ last_oid = &((struct tag *)o)->tagged->oid;
+ o = parse_object(r, last_oid);
+ } else {
+ last_oid = NULL;
o = NULL;
+ }
if (!o && warn) {
+ if (last_oid && is_promisor_object(last_oid))
+ return NULL;
if (!warnlen)
warnlen = strlen(warn);
error("missing object referenced by '%.*s'", warnlen, warn);
@@ -82,7 +89,7 @@ struct object *deref_tag(struct object *o, const char *warn, int warnlen)
struct object *deref_tag_noverify(struct object *o)
{
while (o && o->type == OBJ_TAG) {
- o = parse_object(&o->oid);
+ o = parse_object(the_repository, &o->oid);
if (o && o->type == OBJ_TAG && ((struct tag *)o)->tagged)
o = ((struct tag *)o)->tagged;
else
@@ -91,13 +98,13 @@ struct object *deref_tag_noverify(struct object *o)
return o;
}
-struct tag *lookup_tag(const struct object_id *oid)
+struct tag *lookup_tag(struct repository *r, const struct object_id *oid)
{
- struct object *obj = lookup_object(oid->hash);
+ struct object *obj = lookup_object(r, oid->hash);
if (!obj)
- return create_object(the_repository, oid->hash,
- alloc_tag_node(the_repository));
- return object_as_type(obj, OBJ_TAG, 0);
+ return create_object(r, oid->hash,
+ alloc_tag_node(r));
+ return object_as_type(r, obj, OBJ_TAG, 0);
}
static timestamp_t parse_tag_date(const char *buf, const char *tail)
@@ -125,7 +132,7 @@ void release_tag_memory(struct tag *t)
t->date = 0;
}
-int parse_tag_buffer(struct tag *item, const void *data, unsigned long size)
+int parse_tag_buffer(struct repository *r, struct tag *item, const void *data, unsigned long size)
{
struct object_id oid;
char type[20];
@@ -153,13 +160,13 @@ int parse_tag_buffer(struct tag *item, const void *data, unsigned long size)
bufptr = nl + 1;
if (!strcmp(type, blob_type)) {
- item->tagged = (struct object *)lookup_blob(&oid);
+ item->tagged = (struct object *)lookup_blob(r, &oid);
} else if (!strcmp(type, tree_type)) {
- item->tagged = (struct object *)lookup_tree(&oid);
+ item->tagged = (struct object *)lookup_tree(r, &oid);
} else if (!strcmp(type, commit_type)) {
- item->tagged = (struct object *)lookup_commit(&oid);
+ item->tagged = (struct object *)lookup_commit(r, &oid);
} else if (!strcmp(type, tag_type)) {
- item->tagged = (struct object *)lookup_tag(&oid);
+ item->tagged = (struct object *)lookup_tag(r, &oid);
} else {
error("Unknown type %s", type);
item->tagged = NULL;
@@ -202,7 +209,7 @@ int parse_tag(struct tag *item)
return error("Object %s not a tag",
oid_to_hex(&item->object.oid));
}
- ret = parse_tag_buffer(item, data, size);
+ ret = parse_tag_buffer(the_repository, item, data, size);
free(data);
return ret;
}
diff --git a/tag.h b/tag.h
index 9057d76..e669c3e 100644
--- a/tag.h
+++ b/tag.h
@@ -11,12 +11,11 @@ struct tag {
char *tag;
timestamp_t date;
};
-
-extern struct tag *lookup_tag(const struct object_id *oid);
-extern int parse_tag_buffer(struct tag *item, const void *data, unsigned long size);
+extern struct tag *lookup_tag(struct repository *r, const struct object_id *oid);
+extern int parse_tag_buffer(struct repository *r, struct tag *item, const void *data, unsigned long size);
extern int parse_tag(struct tag *item);
extern void release_tag_memory(struct tag *t);
-extern struct object *deref_tag(struct object *, const char *, int);
+extern struct object *deref_tag(struct repository *r, struct object *, const char *, int);
extern struct object *deref_tag_noverify(struct object *);
extern int gpg_verify_tag(const struct object_id *oid,
const char *name_to_report, unsigned flags);
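
The tag.h hunk above completes the repository-object conversion for tags: lookup_tag(), parse_tag_buffer() and deref_tag() now take an explicit struct repository, and every caller touched by this series simply passes the_repository. A minimal caller-side sketch under those signatures; the helper name show_tag_target() is hypothetical and only the prototypes shown in this hunk are assumed:

#include "cache.h"
#include "repository.h"
#include "tag.h"

/* Hypothetical helper, for illustration only; not part of this patch. */
static void show_tag_target(const struct object_id *oid)
{
	struct tag *tag;
	struct object *target;

	/* old: lookup_tag(oid) -- the repository is now an explicit argument */
	tag = lookup_tag(the_repository, oid);
	if (!tag || parse_tag(tag))
		return;

	/* deref_tag() gained the same struct repository parameter */
	target = deref_tag(the_repository, &tag->object, oid_to_hex(oid), 0);
	if (target)
		printf("%s -> %s\n", oid_to_hex(oid), oid_to_hex(&target->oid));
}

Passing the_repository keeps behaviour unchanged; the wider signatures exist so that submodule or other secondary repositories can be passed later without another round of API churn.
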
diff --git a/transport-helper.c b/transport-helper.c
index 8b5abca..143ca00 100644
--- a/transport-helper.c
+++ b/transport-helper.c
@@ -48,7 +48,7 @@ static void sendline(struct helper_data *helper, struct strbuf *buffer)
if (debug)
fprintf(stderr, "Debug: Remote helper: -> %s", buffer->buf);
if (write_in_full(helper->helper->in, buffer->buf, buffer->len) < 0)
- die_errno("Full write to remote helper failed");
+ die_errno(_("full write to remote helper failed"));
}
static int recvline_fh(FILE *helper, struct strbuf *buffer)
@@ -77,7 +77,7 @@ static void write_constant(int fd, const char *str)
if (debug)
fprintf(stderr, "Debug: Remote helper: -> %s", str);
if (write_in_full(fd, str, strlen(str)) < 0)
- die_errno("Full write to remote helper failed");
+ die_errno(_("full write to remote helper failed"));
}
static const char *remove_ext_force(const char *url)
@@ -129,7 +129,7 @@ static struct child_process *get_helper(struct transport *transport)
code = start_command(helper);
if (code < 0 && errno == ENOENT)
- die("Unable to find remote helper for '%s'", data->name);
+ die(_("unable to find remote helper for '%s'"), data->name);
else if (code != 0)
exit(code);
@@ -145,7 +145,7 @@ static struct child_process *get_helper(struct transport *transport)
*/
duped = dup(helper->out);
if (duped < 0)
- die_errno("Can't dup helper output fd");
+ die_errno(_("can't dup helper output fd"));
data->out = xfdopen(duped, "r");
write_constant(helper->in, "capabilities\n");
@@ -196,13 +196,13 @@ static struct child_process *get_helper(struct transport *transport)
} else if (starts_with(capname, "no-private-update")) {
data->no_private_update = 1;
} else if (mandatory) {
- die("Unknown mandatory capability %s. This remote "
- "helper probably needs newer version of Git.",
+ die(_("unknown mandatory capability %s; this remote "
+ "helper probably needs newer version of Git"),
capname);
}
}
if (!data->rs.nr && (data->import || data->bidi_import || data->export)) {
- warning("This remote helper should implement refspec capability.");
+ warning(_("this remote helper should implement refspec capability"));
}
strbuf_release(&buf);
if (debug)
@@ -269,7 +269,7 @@ static int strbuf_set_helper_option(struct helper_data *data,
else if (!strcmp(buf->buf, "unsupported"))
ret = 1;
else {
- warning("%s unexpectedly said: '%s'", data->name, buf->buf);
+ warning(_("%s unexpectedly said: '%s'"), data->name, buf->buf);
ret = 1;
}
return ret;
@@ -398,7 +398,7 @@ static int fetch_with_fetch(struct transport *transport,
if (starts_with(buf.buf, "lock ")) {
const char *name = buf.buf + 5;
if (transport->pack_lockfile)
- warning("%s also locked %s", data->name, name);
+ warning(_("%s also locked %s"), data->name, name);
else
transport->pack_lockfile = xstrdup(name);
}
@@ -409,7 +409,7 @@ static int fetch_with_fetch(struct transport *transport,
else if (!buf.len)
break;
else
- warning("%s unexpectedly said: '%s'", data->name, buf.buf);
+ warning(_("%s unexpectedly said: '%s'"), data->name, buf.buf);
}
strbuf_release(&buf);
return 0;
@@ -476,7 +476,7 @@ static int fetch_with_import(struct transport *transport,
get_helper(transport);
if (get_importer(transport, &fastimport))
- die("Couldn't run fast-import");
+ die(_("couldn't run fast-import"));
for (i = 0; i < nr_heads; i++) {
posn = to_fetch[i];
@@ -499,7 +499,7 @@ static int fetch_with_import(struct transport *transport,
*/
if (finish_command(&fastimport))
- die("Error while running fast-import");
+ die(_("error while running fast-import"));
/*
* The fast-import stream of a remote helper that advertises
@@ -528,7 +528,7 @@ static int fetch_with_import(struct transport *transport,
private = xstrdup(name);
if (private) {
if (read_ref(private, &posn->old_oid) < 0)
- die("Could not read ref %s", private);
+ die(_("could not read ref %s"), private);
free(private);
}
}
@@ -554,7 +554,7 @@ static int run_connect(struct transport *transport, struct strbuf *cmdbuf)
*/
duped = dup(helper->out);
if (duped < 0)
- die_errno("Can't dup helper output fd");
+ die_errno(_("can't dup helper output fd"));
input = xfdopen(duped, "r");
setvbuf(input, NULL, _IONBF, 0);
@@ -573,8 +573,8 @@ static int run_connect(struct transport *transport, struct strbuf *cmdbuf)
fprintf(stderr, "Debug: Falling back to dumb "
"transport.\n");
} else {
- die("Unknown response to connect: %s",
- cmdbuf->buf);
+ die(_(_("unknown response to connect: %s")),
+ cmdbuf->buf);
}
fclose(input);
@@ -595,9 +595,9 @@ static int process_connect_service(struct transport *transport,
if (strcmp(name, exec)) {
int r = set_helper_option(transport, "servpath", exec);
if (r > 0)
- warning("Setting remote service path not supported by protocol.");
+ warning(_("setting remote service path not supported by protocol"));
else if (r < 0)
- warning("Invalid remote service path.");
+ warning(_("invalid remote service path"));
}
if (data->connect) {
@@ -640,10 +640,10 @@ static int connect_helper(struct transport *transport, const char *name,
/* Get_helper so connect is inited. */
get_helper(transport);
if (!data->connect)
- die("Operation not supported by protocol.");
+ die(_("operation not supported by protocol"));
if (!process_connect_service(transport, name, exec))
- die("Can't connect to subservice %s.", name);
+ die(_("can't connect to subservice %s"), name);
fd[0] = data->helper->out;
fd[1] = data->helper->in;
@@ -651,16 +651,14 @@ static int connect_helper(struct transport *transport, const char *name,
}
static int fetch(struct transport *transport,
- int nr_heads, struct ref **to_fetch,
- struct ref **fetched_refs)
+ int nr_heads, struct ref **to_fetch)
{
struct helper_data *data = transport->data;
int i, count;
if (process_connect(transport, 0)) {
do_take_over(transport);
- return transport->vtable->fetch(transport, nr_heads, to_fetch,
- fetched_refs);
+ return transport->vtable->fetch(transport, nr_heads, to_fetch);
}
count = 0;
@@ -686,6 +684,9 @@ static int fetch(struct transport *transport,
transport, "filter",
data->transport_options.filter_options.filter_spec);
+ if (data->transport_options.negotiation_tips)
+ warning("Ignoring --negotiation-tip because the protocol does not support it.");
+
if (data->fetch)
return fetch_with_fetch(transport, nr_heads, to_fetch);
@@ -709,7 +710,7 @@ static int push_update_ref_status(struct strbuf *buf,
status = REF_STATUS_REMOTE_REJECT;
refname = buf->buf + 6;
} else
- die("expected ok/error, helper said '%s'", buf->buf);
+ die(_("expected ok/error, helper said '%s'"), buf->buf);
msg = strchr(refname, ' ');
if (msg) {
@@ -762,7 +763,7 @@ static int push_update_ref_status(struct strbuf *buf,
if (!*ref)
*ref = find_ref_by_name(remote_refs, refname);
if (!*ref) {
- warning("helper reported unexpected status of %s", refname);
+ warning(_("helper reported unexpected status of %s"), refname);
return 1;
}
@@ -823,20 +824,20 @@ static void set_common_push_options(struct transport *transport,
{
if (flags & TRANSPORT_PUSH_DRY_RUN) {
if (set_helper_option(transport, "dry-run", "true") != 0)
- die("helper %s does not support dry-run", name);
+ die(_("helper %s does not support dry-run"), name);
} else if (flags & TRANSPORT_PUSH_CERT_ALWAYS) {
if (set_helper_option(transport, TRANS_OPT_PUSH_CERT, "true") != 0)
- die("helper %s does not support --signed", name);
+ die(_("helper %s does not support --signed"), name);
} else if (flags & TRANSPORT_PUSH_CERT_IF_ASKED) {
if (set_helper_option(transport, TRANS_OPT_PUSH_CERT, "if-asked") != 0)
- die("helper %s does not support --signed=if-asked", name);
+ die(_("helper %s does not support --signed=if-asked"), name);
}
if (flags & TRANSPORT_PUSH_OPTIONS) {
struct string_list_item *item;
for_each_string_list_item(item, transport->push_options)
if (set_helper_option(transport, "push-option", item->string) != 0)
- die("helper %s does not support 'push-option'", name);
+ die(_("helper %s does not support 'push-option'"), name);
}
}
@@ -928,12 +929,12 @@ static int push_refs_with_export(struct transport *transport,
struct strbuf buf = STRBUF_INIT;
if (!data->rs.nr)
- die("remote-helper doesn't support push; refspec needed");
+ die(_("remote-helper doesn't support push; refspec needed"));
set_common_push_options(transport, data->name, flags);
if (flags & TRANSPORT_PUSH_FORCE) {
if (set_helper_option(transport, "force", "true") != 0)
- warning("helper %s does not support 'force'", data->name);
+ warning(_("helper %s does not support 'force'"), data->name);
}
helper = get_helper(transport);
@@ -980,12 +981,12 @@ static int push_refs_with_export(struct transport *transport,
}
if (get_exporter(transport, &exporter, &revlist_args))
- die("Couldn't run fast-export");
+ die(_("couldn't run fast-export"));
string_list_clear(&revlist_args, 1);
if (finish_command(&exporter))
- die("Error while running fast-export");
+ die(_("error while running fast-export"));
if (push_update_refs_status(data, remote_refs, flags))
return 1;
@@ -1009,8 +1010,9 @@ static int push_refs(struct transport *transport,
}
if (!remote_refs) {
- fprintf(stderr, "No refs in common and none specified; doing nothing.\n"
- "Perhaps you should specify a branch such as 'master'.\n");
+ fprintf(stderr,
+ _("No refs in common and none specified; doing nothing.\n"
+ "Perhaps you should specify a branch such as 'master'.\n"));
return 0;
}
@@ -1072,7 +1074,7 @@ static struct ref *get_refs_list(struct transport *transport, int for_push,
eov = strchr(buf.buf, ' ');
if (!eov)
- die("Malformed response in ref list: %s", buf.buf);
+ die(_("malformed response in ref list: %s"), buf.buf);
eon = strchr(eov + 1, ' ');
*eov = '\0';
if (eon)
@@ -1086,7 +1088,7 @@ static struct ref *get_refs_list(struct transport *transport, int for_push,
if (has_attribute(eon + 1, "unchanged")) {
(*tail)->status |= REF_STATUS_UPTODATE;
if (read_ref((*tail)->name, &(*tail)->old_oid) < 0)
- die(_("Could not read ref %s"),
+ die(_("could not read ref %s"),
(*tail)->name);
}
}
@@ -1225,7 +1227,7 @@ static int udt_do_read(struct unidirectional_transfer *t)
bytes = read(t->src, t->buf + t->bufuse, BUFFERSIZE - t->bufuse);
if (bytes < 0 && errno != EWOULDBLOCK && errno != EAGAIN &&
errno != EINTR) {
- error_errno("read(%s) failed", t->src_name);
+ error_errno(_("read(%s) failed"), t->src_name);
return -1;
} else if (bytes == 0) {
transfer_debug("%s EOF (with %i bytes in buffer)",
@@ -1252,7 +1254,7 @@ static int udt_do_write(struct unidirectional_transfer *t)
transfer_debug("%s is writable", t->dest_name);
bytes = xwrite(t->dest, t->buf, t->bufuse);
if (bytes < 0 && errno != EWOULDBLOCK) {
- error_errno("write(%s) failed", t->dest_name);
+ error_errno(_("write(%s) failed"), t->dest_name);
return -1;
} else if (bytes > 0) {
t->bufuse -= bytes;
@@ -1301,11 +1303,11 @@ static int tloop_join(pthread_t thread, const char *name)
void *tret;
err = pthread_join(thread, &tret);
if (!tret) {
- error("%s thread failed", name);
+ error(_("%s thread failed"), name);
return 1;
}
if (err) {
- error("%s thread failed to join: %s", name, strerror(err));
+ error(_("%s thread failed to join: %s"), name, strerror(err));
return 1;
}
return 0;
@@ -1324,11 +1326,11 @@ static int tloop_spawnwait_tasks(struct bidirectional_transfer_state *s)
err = pthread_create(&gtp_thread, NULL, udt_copy_task_routine,
&s->gtp);
if (err)
- die("Can't start thread for copying data: %s", strerror(err));
+ die(_("can't start thread for copying data: %s"), strerror(err));
err = pthread_create(&ptg_thread, NULL, udt_copy_task_routine,
&s->ptg);
if (err)
- die("Can't start thread for copying data: %s", strerror(err));
+ die(_("can't start thread for copying data: %s"), strerror(err));
ret |= tloop_join(gtp_thread, "Git to program copy");
ret |= tloop_join(ptg_thread, "Program to git copy");
@@ -1365,11 +1367,11 @@ static int tloop_join(pid_t pid, const char *name)
{
int tret;
if (waitpid(pid, &tret, 0) < 0) {
- error_errno("%s process failed to wait", name);
+ error_errno(_("%s process failed to wait"), name);
return 1;
}
if (!WIFEXITED(tret) || WEXITSTATUS(tret)) {
- error("%s process failed", name);
+ error(_("%s process failed"), name);
return 1;
}
return 0;
@@ -1387,7 +1389,7 @@ static int tloop_spawnwait_tasks(struct bidirectional_transfer_state *s)
/* Fork thread #1: git to program. */
pid1 = fork();
if (pid1 < 0)
- die_errno("Can't start thread for copying data");
+ die_errno(_("can't start thread for copying data"));
else if (pid1 == 0) {
udt_kill_transfer(&s->ptg);
exit(udt_copy_task_routine(&s->gtp) ? 0 : 1);
@@ -1396,7 +1398,7 @@ static int tloop_spawnwait_tasks(struct bidirectional_transfer_state *s)
/* Fork thread #2: program to git. */
pid2 = fork();
if (pid2 < 0)
- die_errno("Can't start thread for copying data");
+ die_errno(_("can't start thread for copying data"));
else if (pid2 == 0) {
udt_kill_transfer(&s->gtp);
exit(udt_copy_task_routine(&s->ptg) ? 0 : 1);
diff --git a/transport-internal.h b/transport-internal.h
index eeb6c34..1cde625 100644
--- a/transport-internal.h
+++ b/transport-internal.h
@@ -36,18 +36,11 @@ struct transport_vtable {
* Fetch the objects for the given refs. Note that this gets
* an array, and should ignore the list structure.
*
- * The transport *may* provide, in fetched_refs, the list of refs that
- * it fetched. If the transport knows anything about the fetched refs
- * that the caller does not know (for example, shallow status), it
- * should provide that list of refs and include that information in the
- * list.
- *
* If the transport did not get hashes for refs in
* get_refs_list(), it should set the old_sha1 fields in the
* provided refs now.
**/
- int (*fetch)(struct transport *transport, int refs_nr, struct ref **refs,
- struct ref **fetched_refs);
+ int (*fetch)(struct transport *transport, int refs_nr, struct ref **refs);
/**
* Push the objects and refs. Send the necessary objects, and
diff --git a/transport.c b/transport.c
index fdd813f..06ffea2 100644
--- a/transport.c
+++ b/transport.c
@@ -139,7 +139,7 @@ static struct ref *get_refs_from_bundle(struct transport *transport,
close(data->fd);
data->fd = read_bundle_header(transport->url, &data->header);
if (data->fd < 0)
- die ("Could not read bundle '%s'.", transport->url);
+ die(_("could not read bundle '%s'"), transport->url);
for (i = 0; i < data->header.references.nr; i++) {
struct ref_list_entry *e = data->header.references.list + i;
struct ref *ref = alloc_ref(e->name);
@@ -151,8 +151,7 @@ static struct ref *get_refs_from_bundle(struct transport *transport,
}
static int fetch_refs_from_bundle(struct transport *transport,
- int nr_heads, struct ref **to_fetch,
- struct ref **fetched_refs)
+ int nr_heads, struct ref **to_fetch)
{
struct bundle_transport_data *data = transport->data;
return unbundle(&data->header, data->fd,
@@ -288,8 +287,7 @@ static struct ref *get_refs_via_connect(struct transport *transport, int for_pus
}
static int fetch_refs_via_pack(struct transport *transport,
- int nr_heads, struct ref **to_fetch,
- struct ref **fetched_refs)
+ int nr_heads, struct ref **to_fetch)
{
int ret = 0;
struct git_transport_data *data = transport->data;
@@ -320,6 +318,7 @@ static int fetch_refs_via_pack(struct transport *transport,
args.filter_options = data->options.filter_options;
args.stateless_rpc = transport->stateless_rpc;
args.server_options = transport->server_options;
+ args.negotiation_tips = data->options.negotiation_tips;
if (!data->got_remote_heads)
refs_tmp = get_refs_via_connect(transport, 0, NULL);
@@ -357,12 +356,8 @@ static int fetch_refs_via_pack(struct transport *transport,
if (report_unmatched_refs(to_fetch, nr_heads))
ret = -1;
- if (fetched_refs)
- *fetched_refs = refs;
- else
- free_refs(refs);
-
free_refs(refs_tmp);
+ free_refs(refs);
free(dest);
return ret;
}
@@ -661,7 +656,7 @@ static int git_transport_push(struct transport *transport, struct ref *remote_re
switch (data->version) {
case protocol_v2:
- die("support for protocol v2 not implemented yet");
+ die(_("support for protocol v2 not implemented yet"));
break;
case protocol_v1:
case protocol_v0:
@@ -787,7 +782,7 @@ static enum protocol_allow_config parse_protocol_config(const char *key,
else if (!strcasecmp(value, "user"))
return PROTOCOL_ALLOW_USER_ONLY;
- die("unknown value for config '%s': %s", key, value);
+ die(_("unknown value for config '%s': %s"), key, value);
}
static enum protocol_allow_config get_protocol_config(const char *type)
@@ -853,7 +848,7 @@ int is_transport_allowed(const char *type, int from_user)
void transport_check_allowed(const char *type)
{
if (!is_transport_allowed(type, -1))
- die("transport '%s' not allowed", type);
+ die(_("transport '%s' not allowed"), type);
}
static struct transport_vtable bundle_vtable = {
@@ -882,7 +877,7 @@ struct transport *transport_get(struct remote *remote, const char *url)
ret->progress = isatty(2);
if (!remote)
- die("No remote provided to transport_get()");
+ BUG("No remote provided to transport_get()");
ret->got_remote_refs = 0;
ret->remote = remote;
@@ -905,7 +900,7 @@ struct transport *transport_get(struct remote *remote, const char *url)
if (helper) {
transport_helper_init(ret, helper);
} else if (starts_with(url, "rsync:")) {
- die("git-over-rsync is no longer supported");
+ die(_("git-over-rsync is no longer supported"));
} else if (url_is_local_not_ssh(url) && is_file(url) && is_bundle(url, 1)) {
struct bundle_transport_data *data = xcalloc(1, sizeof(*data));
transport_check_allowed("file");
@@ -1150,7 +1145,7 @@ int transport_push(struct transport *transport,
transport->push_options,
pretend)) {
oid_array_clear(&commits);
- die("Failed to push all needed submodules!");
+ die(_("failed to push all needed submodules"));
}
oid_array_clear(&commits);
}
@@ -1222,31 +1217,19 @@ const struct ref *transport_get_remote_refs(struct transport *transport,
return transport->remote_refs;
}
-int transport_fetch_refs(struct transport *transport, struct ref *refs,
- struct ref **fetched_refs)
+int transport_fetch_refs(struct transport *transport, struct ref *refs)
{
int rc;
int nr_heads = 0, nr_alloc = 0, nr_refs = 0;
struct ref **heads = NULL;
- struct ref *nop_head = NULL, **nop_tail = &nop_head;
struct ref *rm;
for (rm = refs; rm; rm = rm->next) {
nr_refs++;
if (rm->peer_ref &&
!is_null_oid(&rm->old_oid) &&
- !oidcmp(&rm->peer_ref->old_oid, &rm->old_oid)) {
- /*
- * These need to be reported as fetched, but we don't
- * actually need to fetch them.
- */
- if (fetched_refs) {
- struct ref *nop_ref = copy_ref(rm);
- *nop_tail = nop_ref;
- nop_tail = &nop_ref->next;
- }
+ !oidcmp(&rm->peer_ref->old_oid, &rm->old_oid))
continue;
- }
ALLOC_GROW(heads, nr_heads + 1, nr_alloc);
heads[nr_heads++] = rm;
}
@@ -1264,11 +1247,7 @@ int transport_fetch_refs(struct transport *transport, struct ref *refs,
heads[nr_heads++] = rm;
}
- rc = transport->vtable->fetch(transport, nr_heads, heads, fetched_refs);
- if (fetched_refs && nop_head) {
- *nop_tail = *fetched_refs;
- *fetched_refs = nop_head;
- }
+ rc = transport->vtable->fetch(transport, nr_heads, heads);
free(heads);
return rc;
@@ -1288,7 +1267,7 @@ int transport_connect(struct transport *transport, const char *name,
if (transport->vtable->connect)
return transport->vtable->connect(transport, name, exec, fd);
else
- die("Operation not supported by protocol");
+ die(_("operation not supported by protocol"));
}
int transport_disconnect(struct transport *transport)
@@ -1370,7 +1349,7 @@ static void read_alternate_refs(const char *path,
if (get_oid_hex(line.buf, &oid) ||
line.buf[GIT_SHA1_HEXSZ] != ' ') {
- warning("invalid line while parsing alternate refs: %s",
+ warning(_("invalid line while parsing alternate refs: %s"),
line.buf);
break;
}
diff --git a/transport.h b/transport.h
index 7a9a7fc..01e717c 100644
--- a/transport.h
+++ b/transport.h
@@ -36,6 +36,16 @@ struct git_transport_options {
const char *receivepack;
struct push_cas_option *cas;
struct list_objects_filter_options filter_options;
+
+ /*
+ * This is only used during fetch. See the documentation of
+ * negotiation_tips in struct fetch_pack_args.
+ *
+ * This field is only supported by transports that support connect or
+ * stateless_connect. Set this field directly instead of using
+ * transport_set_option().
+ */
+ struct oid_array *negotiation_tips;
};
enum transport_family {
@@ -229,8 +239,7 @@ int transport_push(struct transport *connection,
const struct ref *transport_get_remote_refs(struct transport *transport,
const struct argv_array *ref_prefixes);
-int transport_fetch_refs(struct transport *transport, struct ref *refs,
- struct ref **fetched_refs);
+int transport_fetch_refs(struct transport *transport, struct ref *refs);
void transport_unlock_pack(struct transport *transport);
int transport_disconnect(struct transport *transport);
char *transport_anonymize_url(const char *url);
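
Two caller-visible changes meet in this header: transport_fetch_refs() loses its fetched_refs out-parameter, and --negotiation-tip support arrives as the new negotiation_tips field, which the comment says to assign directly rather than via transport_set_option(). A hedged sketch of a call site under those rules; the helper name and the use of transport->smart_options to reach git_transport_options are illustrative assumptions, not something this diff shows:

#include "cache.h"
#include "transport.h"

/* Hypothetical call-site shape, not part of this patch. */
static int fetch_with_tips(struct transport *transport, struct ref *refs,
			   struct oid_array *tips)
{
	/*
	 * negotiation_tips is assigned directly on git_transport_options;
	 * it only takes effect on connect/stateless_connect transports,
	 * as the new comment explains.
	 */
	if (transport->smart_options)
		transport->smart_options->negotiation_tips = tips;

	/* the fetched_refs out-parameter is gone from the signature */
	return transport_fetch_refs(transport, refs);
}
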
diff --git a/tree-walk.c b/tree-walk.c
index ecd6e53..77b37f3 100644
--- a/tree-walk.c
+++ b/tree-walk.c
@@ -27,8 +27,9 @@ static int decode_tree_entry(struct tree_desc *desc, const char *buf, unsigned l
{
const char *path;
unsigned int mode, len;
+ const unsigned hashsz = the_hash_algo->rawsz;
- if (size < 23 || buf[size - 21]) {
+ if (size < hashsz + 3 || buf[size - (hashsz + 1)]) {
strbuf_addstr(err, _("too-short tree object"));
return -1;
}
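
The rewritten bound in decode_tree_entry() is the old hard-coded 23 expressed in terms of the active hash: a raw tree entry is "<mode> <path>\0<raw hash>", so the smallest parsable entry is one mode character, the separating space, the NUL that buf[size - (hashsz + 1)] probes, and hashsz raw hash bytes. Worked out, purely as a cross-check:

/* 1 (mode) + 1 (SP) + 1 (NUL) + hashsz  ==  hashsz + 3              */
/*   SHA-1 (rawsz 20):   20 + 3 = 23, the literal the old check used */
/*   a 32-byte hash:     32 + 3 = 35                                 */
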
diff --git a/tree.c b/tree.c
index bc7e990..215d3fd 100644
--- a/tree.c
+++ b/tree.c
@@ -8,6 +8,7 @@
#include "tag.h"
#include "alloc.h"
#include "tree-walk.h"
+#include "repository.h"
const char *tree_type = "tree";
@@ -18,15 +19,13 @@ static int read_one_entry_opt(struct index_state *istate,
unsigned mode, int stage, int opt)
{
int len;
- unsigned int size;
struct cache_entry *ce;
if (S_ISDIR(mode))
return READ_TREE_RECURSIVE;
len = strlen(pathname);
- size = cache_entry_size(baselen + len);
- ce = xcalloc(1, size);
+ ce = make_empty_cache_entry(istate, baselen + len);
ce->ce_mode = create_ce_mode(mode);
ce->ce_flags = create_ce_flags(stage);
@@ -100,7 +99,7 @@ static int read_tree_1(struct tree *tree, struct strbuf *base,
else if (S_ISGITLINK(entry.mode)) {
struct commit *commit;
- commit = lookup_commit(entry.oid);
+ commit = lookup_commit(the_repository, entry.oid);
if (!commit)
die("Commit %s in submodule path %s%s not found",
oid_to_hex(entry.oid),
@@ -119,7 +118,7 @@ static int read_tree_1(struct tree *tree, struct strbuf *base,
len = tree_entry_len(&entry);
strbuf_add(base, entry.path, len);
strbuf_addch(base, '/');
- retval = read_tree_1(lookup_tree(&oid),
+ retval = read_tree_1(lookup_tree(the_repository, &oid),
base, stage, pathspec,
fn, context);
strbuf_setlen(base, oldlen);
@@ -194,13 +193,13 @@ int read_tree(struct tree *tree, int stage, struct pathspec *match,
return 0;
}
-struct tree *lookup_tree(const struct object_id *oid)
+struct tree *lookup_tree(struct repository *r, const struct object_id *oid)
{
- struct object *obj = lookup_object(oid->hash);
+ struct object *obj = lookup_object(r, oid->hash);
if (!obj)
- return create_object(the_repository, oid->hash,
- alloc_tree_node(the_repository));
- return object_as_type(obj, OBJ_TREE, 0);
+ return create_object(r, oid->hash,
+ alloc_tree_node(r));
+ return object_as_type(r, obj, OBJ_TREE, 0);
}
int parse_tree_buffer(struct tree *item, void *buffer, unsigned long size)
@@ -244,7 +243,7 @@ void free_tree_buffer(struct tree *tree)
struct tree *parse_tree_indirect(const struct object_id *oid)
{
- struct object *obj = parse_object(oid);
+ struct object *obj = parse_object(the_repository, oid);
do {
if (!obj)
return NULL;
@@ -257,6 +256,6 @@ struct tree *parse_tree_indirect(const struct object_id *oid)
else
return NULL;
if (!obj->parsed)
- parse_object(&obj->oid);
+ parse_object(the_repository, &obj->oid);
} while (1);
}
diff --git a/tree.h b/tree.h
index e2a80be..d4807dc 100644
--- a/tree.h
+++ b/tree.h
@@ -12,7 +12,7 @@ struct tree {
unsigned long size;
};
-struct tree *lookup_tree(const struct object_id *oid);
+struct tree *lookup_tree(struct repository *r, const struct object_id *oid);
int parse_tree_buffer(struct tree *item, void *buffer, unsigned long size);
diff --git a/unpack-trees.c b/unpack-trees.c
index cd0680f..f9efee08 100644
--- a/unpack-trees.c
+++ b/unpack-trees.c
@@ -204,20 +204,11 @@ static int do_add_entry(struct unpack_trees_options *o, struct cache_entry *ce,
ADD_CACHE_OK_TO_ADD | ADD_CACHE_OK_TO_REPLACE);
}
-static struct cache_entry *dup_entry(const struct cache_entry *ce)
-{
- unsigned int size = ce_size(ce);
- struct cache_entry *new_entry = xmalloc(size);
-
- memcpy(new_entry, ce, size);
- return new_entry;
-}
-
static void add_entry(struct unpack_trees_options *o,
const struct cache_entry *ce,
unsigned int set, unsigned int clear)
{
- do_add_entry(o, dup_entry(ce), set, clear);
+ do_add_entry(o, dup_cache_entry(ce, &o->result), set, clear);
}
/*
@@ -798,10 +789,17 @@ static int ce_in_traverse_path(const struct cache_entry *ce,
return (info->pathlen < ce_namelen(ce));
}
-static struct cache_entry *create_ce_entry(const struct traverse_info *info, const struct name_entry *n, int stage)
+static struct cache_entry *create_ce_entry(const struct traverse_info *info,
+ const struct name_entry *n,
+ int stage,
+ struct index_state *istate,
+ int is_transient)
{
int len = traverse_path_len(info, n);
- struct cache_entry *ce = xcalloc(1, cache_entry_size(len));
+ struct cache_entry *ce =
+ is_transient ?
+ make_empty_transient_cache_entry(len) :
+ make_empty_cache_entry(istate, len);
ce->ce_mode = create_ce_mode(n->mode);
ce->ce_flags = create_ce_flags(stage);
@@ -847,7 +845,15 @@ static int unpack_nondirectories(int n, unsigned long mask,
stage = 3;
else
stage = 2;
- src[i + o->merge] = create_ce_entry(info, names + i, stage);
+
+ /*
+ * If the merge bit is set, then the cache entries are
+ * discarded in the following block. In this case,
+ * construct "transient" cache_entries, as they are
+ * not stored in the index. otherwise construct the
+ * cache entry from the index aware logic.
+ */
+ src[i + o->merge] = create_ce_entry(info, names + i, stage, &o->result, o->merge);
}
if (o->merge) {
@@ -856,7 +862,7 @@ static int unpack_nondirectories(int n, unsigned long mask,
for (i = 0; i < n; i++) {
struct cache_entry *ce = src[i + o->merge];
if (ce != o->df_conflict_entry)
- free(ce);
+ discard_cache_entry(ce);
}
return rc;
}
@@ -1788,7 +1794,7 @@ static int merged_entry(const struct cache_entry *ce,
struct unpack_trees_options *o)
{
int update = CE_UPDATE;
- struct cache_entry *merge = dup_entry(ce);
+ struct cache_entry *merge = dup_cache_entry(ce, &o->result);
if (!old) {
/*
@@ -1808,7 +1814,7 @@ static int merged_entry(const struct cache_entry *ce,
if (verify_absent(merge,
ERROR_WOULD_LOSE_UNTRACKED_OVERWRITTEN, o)) {
- free(merge);
+ discard_cache_entry(merge);
return -1;
}
invalidate_ce_path(merge, o);
@@ -1834,7 +1840,7 @@ static int merged_entry(const struct cache_entry *ce,
update = 0;
} else {
if (verify_uptodate(old, o)) {
- free(merge);
+ discard_cache_entry(merge);
return -1;
}
/* Migrate old flags over */
diff --git a/upload-pack.c b/upload-pack.c
index de6106a..82b393e 100644
--- a/upload-pack.c
+++ b/upload-pack.c
@@ -3,6 +3,7 @@
#include "refs.h"
#include "pkt-line.h"
#include "sideband.h"
+#include "repository.h"
#include "object-store.h"
#include "tag.h"
#include "object.h"
@@ -312,7 +313,7 @@ static int got_oid(const char *hex, struct object_id *oid)
if (!has_object_file(oid))
return -1;
- o = parse_object(oid);
+ o = parse_object(the_repository, oid);
if (!o)
die("oops (%s)", oid_to_hex(oid));
if (o->type == OBJ_COMMIT) {
@@ -350,7 +351,7 @@ static int reachable(struct commit *want)
break;
}
if (!commit->object.parsed)
- parse_object(&commit->object.oid);
+ parse_object(the_repository, &commit->object.oid);
if (commit->object.flags & REACHABLE)
continue;
commit->object.flags |= REACHABLE;
@@ -380,7 +381,7 @@ static int ok_to_give_up(void)
if (want->flags & COMMON_KNOWN)
continue;
- want = deref_tag(want, "a want line", 0);
+ want = deref_tag(the_repository, want, "a want line", 0);
if (!want || want->type != OBJ_COMMIT) {
/* no way to tell if this is reachable by
* looking at the ancestry chain alone, so
@@ -570,7 +571,7 @@ static int get_reachable_list(struct object_array *src,
if (parse_oid_hex(namebuf, &sha1, &p) || *p != '\n')
break;
- o = lookup_object(sha1.hash);
+ o = lookup_object(the_repository, sha1.hash);
if (o && o->type == OBJ_COMMIT) {
o->flags &= ~TMP_MARK;
}
@@ -801,7 +802,7 @@ static int process_shallow(const char *line, struct object_array *shallows)
struct object *object;
if (get_oid_hex(arg, &oid))
die("invalid shallow line: %s", line);
- object = parse_object(&oid);
+ object = parse_object(the_repository, &oid);
if (!object)
return 1;
if (object->type != OBJ_COMMIT)
@@ -927,7 +928,7 @@ static void receive_needs(void)
if (allow_filter && parse_feature_request(features, "filter"))
filter_capability_requested = 1;
- o = parse_object(&oid_buf);
+ o = parse_object(the_repository, &oid_buf);
if (!o) {
packet_write_fmt(1,
"ERR upload-pack: not our ref %s",
@@ -1174,7 +1175,7 @@ static int parse_want(const char *line)
die("git upload-pack: protocol error, "
"expected to get oid, not '%s'", line);
- o = parse_object(&oid);
+ o = parse_object(the_repository, &oid);
if (!o) {
packet_write_fmt(1,
"ERR upload-pack: not our ref %s",
@@ -1316,7 +1317,7 @@ static int process_haves(struct oid_array *haves, struct oid_array *common)
oid_array_append(common, oid);
- o = parse_object(oid);
+ o = parse_object(the_repository, oid);
if (!o)
die("oops (%s)", oid_to_hex(oid));
if (o->type == OBJ_COMMIT) {
diff --git a/utf8.c b/utf8.c
index 982217e..eb78587 100644
--- a/utf8.c
+++ b/utf8.c
@@ -470,14 +470,14 @@ int utf8_fprintf(FILE *stream, const char *format, ...)
#else
typedef char * iconv_ibp;
#endif
-char *reencode_string_iconv(const char *in, size_t insz, iconv_t conv, int *outsz_p)
+char *reencode_string_iconv(const char *in, size_t insz, iconv_t conv, size_t *outsz_p)
{
size_t outsz, outalloc;
char *out, *outpos;
iconv_ibp cp;
outsz = insz;
- outalloc = outsz + 1; /* for terminating NUL */
+ outalloc = st_add(outsz, 1); /* for terminating NUL */
out = xmalloc(outalloc);
outpos = out;
cp = (iconv_ibp)in;
@@ -497,7 +497,7 @@ char *reencode_string_iconv(const char *in, size_t insz, iconv_t conv, int *outs
* converting the rest.
*/
sofar = outpos - out;
- outalloc = sofar + insz * 2 + 32;
+ outalloc = st_add3(sofar, st_mult(insz, 2), 32);
out = xrealloc(out, outalloc);
outpos = out + sofar;
outsz = outalloc - sofar - 1;
@@ -534,9 +534,9 @@ static const char *fallback_encoding(const char *name)
return name;
}
-char *reencode_string_len(const char *in, int insz,
+char *reencode_string_len(const char *in, size_t insz,
const char *out_encoding, const char *in_encoding,
- int *outsz)
+ size_t *outsz)
{
iconv_t conv;
char *out;
diff --git a/utf8.h b/utf8.h
index db73a2d..ce1c269 100644
--- a/utf8.h
+++ b/utf8.h
@@ -25,14 +25,14 @@ void strbuf_utf8_replace(struct strbuf *sb, int pos, int width,
#ifndef NO_ICONV
char *reencode_string_iconv(const char *in, size_t insz,
- iconv_t conv, int *outsz);
-char *reencode_string_len(const char *in, int insz,
+ iconv_t conv, size_t *outsz);
+char *reencode_string_len(const char *in, size_t insz,
const char *out_encoding,
const char *in_encoding,
- int *outsz);
+ size_t *outsz);
#else
-static inline char *reencode_string_len(const char *a, int b,
- const char *c, const char *d, int *e)
+static inline char *reencode_string_len(const char *a, size_t b,
+ const char *c, const char *d, size_t *e)
{ if (e) *e = 0; return NULL; }
#endif
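
The reencode_string_* prototypes above move from int to size_t, and the matching utf8.c hunk swaps bare arithmetic for st_add()/st_add3()/st_mult() so that an oversized input cannot wrap the allocation size. A rough sketch of the property those helpers provide; this is the shape of the check, not git's actual macro implementation:

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

/* Die instead of silently wrapping around -- the guarantee st_add() gives. */
static size_t checked_add(size_t a, size_t b)
{
	if (SIZE_MAX - a < b) {
		fprintf(stderr, "size_t overflow: %zu + %zu\n", a, b);
		exit(1);
	}
	return a + b;
}

/* e.g. the hunk's  outalloc = st_add(outsz, 1)  corresponds to  */
/*      outalloc = checked_add(outsz, 1)  in this sketch.        */
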
diff --git a/walker.c b/walker.c
index 86359ab..96990d8 100644
--- a/walker.c
+++ b/walker.c
@@ -1,5 +1,6 @@
#include "cache.h"
#include "walker.h"
+#include "repository.h"
#include "object-store.h"
#include "commit.h"
#include "tree.h"
@@ -48,12 +49,14 @@ static int process_tree(struct walker *walker, struct tree *tree)
if (S_ISGITLINK(entry.mode))
continue;
if (S_ISDIR(entry.mode)) {
- struct tree *tree = lookup_tree(entry.oid);
+ struct tree *tree = lookup_tree(the_repository,
+ entry.oid);
if (tree)
obj = &tree->object;
}
else {
- struct blob *blob = lookup_blob(entry.oid);
+ struct blob *blob = lookup_blob(the_repository,
+ entry.oid);
if (blob)
obj = &blob->object;
}
@@ -178,7 +181,7 @@ static int loop(struct walker *walker)
}
}
if (!obj->type)
- parse_object(&obj->oid);
+ parse_object(the_repository, &obj->oid);
if (process_object(walker, obj))
return -1;
}
@@ -204,7 +207,8 @@ static int interpret_target(struct walker *walker, char *target, struct object_i
static int mark_complete(const char *path, const struct object_id *oid,
int flag, void *cb_data)
{
- struct commit *commit = lookup_commit_reference_gently(oid, 1);
+ struct commit *commit = lookup_commit_reference_gently(the_repository,
+ oid, 1);
if (commit) {
commit->object.flags |= COMPLETE;
diff --git a/wt-status.c b/wt-status.c
index 8827a25..6bf2fdb 100644
--- a/wt-status.c
+++ b/wt-status.c
@@ -1489,7 +1489,7 @@ static void wt_status_get_detached_from(struct wt_status_state *state)
/* sha1 is a commit? match without further lookup */
(!oidcmp(&cb.noid, &oid) ||
/* perhaps sha1 is a tag, try to dereference to a commit */
- ((commit = lookup_commit_reference_gently(&oid, 1)) != NULL &&
+ ((commit = lookup_commit_reference_gently(the_repository, &oid, 1)) != NULL &&
!oidcmp(&cb.noid, &commit->object.oid)))) {
const char *from = ref;
if (!skip_prefix(from, "refs/tags/", &from))
@@ -2340,7 +2340,17 @@ int has_uncommitted_changes(int ignore_submodules)
if (ignore_submodules)
rev_info.diffopt.flags.ignore_submodules = 1;
rev_info.diffopt.flags.quick = 1;
+
add_head_to_pending(&rev_info);
+ if (!rev_info.pending.nr) {
+ /*
+ * We have no head (or it's corrupt); use the empty tree,
+ * which will complain if the index is non-empty.
+ */
+ struct tree *tree = lookup_tree(the_repository, the_hash_algo->empty_tree);
+ add_pending_object(&rev_info, &tree->object, "");
+ }
+
diff_setup_done(&rev_info.diffopt);
result = run_diff_index(&rev_info, 1);
return diff_result_code(&rev_info.diffopt, result);
diff --git a/xdiff/xdiff.h b/xdiff/xdiff.h
index c1937a2..2356da5 100644
--- a/xdiff/xdiff.h
+++ b/xdiff/xdiff.h
@@ -52,14 +52,6 @@ extern "C" {
#define XDL_EMIT_FUNCNAMES (1 << 0)
#define XDL_EMIT_FUNCCONTEXT (1 << 2)
-#define XDL_MMB_READONLY (1 << 0)
-
-#define XDL_MMF_ATOMIC (1 << 0)
-
-#define XDL_BDOP_INS 1
-#define XDL_BDOP_CPY 2
-#define XDL_BDOP_INSB 3
-
/* merge simplification levels */
#define XDL_MERGE_MINIMAL 0
#define XDL_MERGE_EAGER 1
diff --git a/xdiff/xdiffi.c b/xdiff/xdiffi.c
index 0de1ef4..1f1f4a3 100644
--- a/xdiff/xdiffi.c
+++ b/xdiff/xdiffi.c
@@ -22,34 +22,17 @@
#include "xinclude.h"
-
-
#define XDL_MAX_COST_MIN 256
#define XDL_HEUR_MIN_COST 256
#define XDL_LINE_MAX (long)((1UL << (CHAR_BIT * sizeof(long) - 1)) - 1)
#define XDL_SNAKE_CNT 20
#define XDL_K_HEUR 4
-
-
typedef struct s_xdpsplit {
long i1, i2;
int min_lo, min_hi;
} xdpsplit_t;
-
-
-
-static long xdl_split(unsigned long const *ha1, long off1, long lim1,
- unsigned long const *ha2, long off2, long lim2,
- long *kvdf, long *kvdb, int need_min, xdpsplit_t *spl,
- xdalgoenv_t *xenv);
-static xdchange_t *xdl_add_change(xdchange_t *xscr, long i1, long i2, long chg1, long chg2);
-
-
-
-
-
/*
* See "An O(ND) Difference Algorithm and its Variations", by Eugene Myers.
* Basically considers a "box" (off1, off2, lim1, lim2) and scan from both
@@ -592,6 +575,11 @@ static void measure_split(const xdfile_t *xdf, long split,
#define INDENT_WEIGHT 60
/*
+ * How far do we slide a hunk at most?
+ */
+#define INDENT_HEURISTIC_MAX_SLIDING 100
+
+/*
* Compute a badness score for the hypothetical split whose measurements are
* stored in m. The weight factors were determined empirically using the tools and
* corpus described in
@@ -903,7 +891,12 @@ int xdl_change_compact(xdfile_t *xdf, xdfile_t *xdfo, long flags) {
long shift, best_shift = -1;
struct split_score best_score;
- for (shift = earliest_end; shift <= g.end; shift++) {
+ shift = earliest_end;
+ if (g.end - groupsize - 1 > shift)
+ shift = g.end - groupsize - 1;
+ if (g.end - INDENT_HEURISTIC_MAX_SLIDING > shift)
+ shift = g.end - INDENT_HEURISTIC_MAX_SLIDING;
+ for (; shift <= g.end; shift++) {
struct split_measurement m;
struct split_score score = {0, 0};
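
The three assignments that now precede the loop start the slide at max(earliest_end, g.end - groupsize - 1, g.end - INDENT_HEURISTIC_MAX_SLIDING), so the indent heuristic scores at most on the order of a hundred candidate positions per group rather than an unbounded range. The same clamp written out as a helper, only to make the max() explicit:

#define INDENT_HEURISTIC_MAX_SLIDING 100	/* mirrors the new macro above */

static long first_candidate_shift(long earliest_end, long group_end,
				  long groupsize)
{
	long shift = earliest_end;

	if (group_end - groupsize - 1 > shift)
		shift = group_end - groupsize - 1;
	if (group_end - INDENT_HEURISTIC_MAX_SLIDING > shift)
		shift = group_end - INDENT_HEURISTIC_MAX_SLIDING;
	return shift;	/* the maximum of the three candidates */
}
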
diff --git a/xdiff/xhistogram.c b/xdiff/xhistogram.c
index 73210cb..ec85f59 100644
--- a/xdiff/xhistogram.c
+++ b/xdiff/xhistogram.c
@@ -233,54 +233,31 @@ static int try_lcs(struct histindex *index, struct region *lcs, int b_ptr,
return b_next;
}
-static int find_lcs(struct histindex *index, struct region *lcs,
- int line1, int count1, int line2, int count2) {
- int b_ptr;
-
- if (scanA(index, line1, count1))
- return -1;
-
- index->cnt = index->max_chain_length + 1;
-
- for (b_ptr = line2; b_ptr <= LINE_END(2); )
- b_ptr = try_lcs(index, lcs, b_ptr, line1, count1, line2, count2);
-
- return index->has_common && index->max_chain_length < index->cnt;
-}
-
-static int fall_back_to_classic_diff(struct histindex *index,
+static int fall_back_to_classic_diff(xpparam_t const *xpp, xdfenv_t *env,
int line1, int count1, int line2, int count2)
{
- xpparam_t xpp;
- xpp.flags = index->xpp->flags & ~XDF_DIFF_ALGORITHM_MASK;
+ xpparam_t xpparam;
+ xpparam.flags = xpp->flags & ~XDF_DIFF_ALGORITHM_MASK;
- return xdl_fall_back_diff(index->env, &xpp,
+ return xdl_fall_back_diff(env, &xpparam,
line1, count1, line2, count2);
}
-static int histogram_diff(xpparam_t const *xpp, xdfenv_t *env,
- int line1, int count1, int line2, int count2)
+static inline void free_index(struct histindex *index)
{
- struct histindex index;
- struct region lcs;
- int sz;
- int result = -1;
-
- if (count1 <= 0 && count2 <= 0)
- return 0;
-
- if (LINE_END(1) >= MAX_PTR)
- return -1;
+ xdl_free(index->records);
+ xdl_free(index->line_map);
+ xdl_free(index->next_ptrs);
+ xdl_cha_free(&index->rcha);
+}
- if (!count1) {
- while(count2--)
- env->xdf2.rchg[line2++ - 1] = 1;
- return 0;
- } else if (!count2) {
- while(count1--)
- env->xdf1.rchg[line1++ - 1] = 1;
- return 0;
- }
+static int find_lcs(xpparam_t const *xpp, xdfenv_t *env,
+ struct region *lcs,
+ int line1, int count1, int line2, int count2)
+{
+ int b_ptr;
+ int sz, ret = -1;
+ struct histindex index;
memset(&index, 0, sizeof(index));
@@ -318,9 +295,55 @@ static int histogram_diff(xpparam_t const *xpp, xdfenv_t *env,
index.ptr_shift = line1;
index.max_chain_length = 64;
+ if (scanA(&index, line1, count1))
+ goto cleanup;
+
+ index.cnt = index.max_chain_length + 1;
+
+ for (b_ptr = line2; b_ptr <= LINE_END(2); )
+ b_ptr = try_lcs(&index, lcs, b_ptr, line1, count1, line2, count2);
+
+ if (index.has_common && index.max_chain_length < index.cnt)
+ ret = 1;
+ else
+ ret = 0;
+
+cleanup:
+ free_index(&index);
+ return ret;
+}
+
+static int histogram_diff(xpparam_t const *xpp, xdfenv_t *env,
+ int line1, int count1, int line2, int count2)
+{
+ struct region lcs;
+ int lcs_found;
+ int result;
+redo:
+ result = -1;
+
+ if (count1 <= 0 && count2 <= 0)
+ return 0;
+
+ if (LINE_END(1) >= MAX_PTR)
+ return -1;
+
+ if (!count1) {
+ while(count2--)
+ env->xdf2.rchg[line2++ - 1] = 1;
+ return 0;
+ } else if (!count2) {
+ while(count1--)
+ env->xdf1.rchg[line1++ - 1] = 1;
+ return 0;
+ }
+
memset(&lcs, 0, sizeof(lcs));
- if (find_lcs(&index, &lcs, line1, count1, line2, count2))
- result = fall_back_to_classic_diff(&index, line1, count1, line2, count2);
+ lcs_found = find_lcs(xpp, env, &lcs, line1, count1, line2, count2);
+ if (lcs_found < 0)
+ goto out;
+ else if (lcs_found)
+ result = fall_back_to_classic_diff(xpp, env, line1, count1, line2, count2);
else {
if (lcs.begin1 == 0 && lcs.begin2 == 0) {
while (count1--)
@@ -333,21 +356,21 @@ static int histogram_diff(xpparam_t const *xpp, xdfenv_t *env,
line1, lcs.begin1 - line1,
line2, lcs.begin2 - line2);
if (result)
- goto cleanup;
- result = histogram_diff(xpp, env,
- lcs.end1 + 1, LINE_END(1) - lcs.end1,
- lcs.end2 + 1, LINE_END(2) - lcs.end2);
- if (result)
- goto cleanup;
+ goto out;
+ /*
+ * result = histogram_diff(xpp, env,
+ * lcs.end1 + 1, LINE_END(1) - lcs.end1,
+ * lcs.end2 + 1, LINE_END(2) - lcs.end2);
+ * but let's optimize tail recursion ourself:
+ */
+ count1 = LINE_END(1) - lcs.end1;
+ line1 = lcs.end1 + 1;
+ count2 = LINE_END(2) - lcs.end2;
+ line2 = lcs.end2 + 1;
+ goto redo;
}
}
-
-cleanup:
- xdl_free(index.records);
- xdl_free(index.line_map);
- xdl_free(index.next_ptrs);
- xdl_cha_free(&index.rcha);
-
+out:
return result;
}
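
The histogram_diff() rewrite above keeps the first, non-tail recursion on the lines before the LCS but turns the second, tail-position call into "update the arguments and goto redo", so repeatedly splitting on trailing hunks no longer grows the C stack. The same transformation applied to an unrelated, purely illustrative function:

/*
 * Hypothetical example, not xdiff code: the tail call
 *     return gcd(b, a % b);
 * becomes argument updates plus a jump back to the top.
 */
static long gcd(long a, long b)
{
	long r;
redo:
	if (!b)
		return a;
	r = a % b;	/* was: return gcd(b, a % b); */
	a = b;
	b = r;
	goto redo;
}
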