-rw-r--r--  .gitignore | 1
-rw-r--r--  .mailmap | 1
-rw-r--r--  Documentation/Makefile | 7
-rw-r--r--  Documentation/MyFirstContribution.txt | 10
-rw-r--r--  Documentation/RelNotes/2.24.0.txt | 127
-rw-r--r--  Documentation/SubmittingPatches | 4
-rw-r--r--  Documentation/asciidoc.conf | 6
-rw-r--r--  Documentation/asciidoctor-extensions.rb | 24
-rw-r--r--  Documentation/config.txt | 84
-rw-r--r--  Documentation/config/fetch.txt | 10
-rw-r--r--  Documentation/config/trace2.txt | 6
-rw-r--r--  Documentation/diff-generate-patch.txt | 32
-rwxr-xr-x  Documentation/doc-diff | 17
-rw-r--r--  Documentation/fetch-options.txt | 13
-rw-r--r--  Documentation/git-clean.txt | 16
-rw-r--r--  Documentation/git-commit-graph.txt | 7
-rw-r--r--  Documentation/git-commit.txt | 8
-rw-r--r--  Documentation/git-config.txt | 56
-rw-r--r--  Documentation/git-fast-export.txt | 17
-rw-r--r--  Documentation/git-fast-import.txt | 23
-rw-r--r--  Documentation/git-grep.txt | 17
-rw-r--r--  Documentation/git-gui.txt | 10
-rw-r--r--  Documentation/git-ls-remote.txt | 32
-rw-r--r--  Documentation/git-merge-base.txt | 98
-rw-r--r--  Documentation/git-merge-index.txt | 26
-rw-r--r--  Documentation/git-receive-pack.txt | 52
-rw-r--r--  Documentation/git-rev-list.txt | 54
-rw-r--r--  Documentation/git-send-email.txt | 12
-rw-r--r--  Documentation/git-status.txt | 18
-rw-r--r--  Documentation/git-submodule.txt | 3
-rw-r--r--  Documentation/gitmodules.txt | 17
-rw-r--r--  Documentation/gitweb.conf.txt | 6
-rw-r--r--  Documentation/manpage.xsl | 3
-rw-r--r--  Documentation/pretty-formats.txt | 2
-rw-r--r--  Documentation/technical/api-trace2.txt | 31
-rw-r--r--  Documentation/trace2-target-values.txt | 4
-rw-r--r--  Documentation/user-manual.txt | 377
-rw-r--r--  Makefile | 58
-rw-r--r--  apply.c | 43
-rw-r--r--  apply.h | 1
-rw-r--r--  attr.c | 24
-rw-r--r--  azure-pipelines.yml | 168
-rw-r--r--  bisect.c | 2
-rw-r--r--  blame.c | 27
-rw-r--r--  builtin/am.c | 19
-rw-r--r--  builtin/blame.c | 8
-rw-r--r--  builtin/checkout.c | 14
-rw-r--r--  builtin/clean.c | 15
-rw-r--r--  builtin/clone.c | 3
-rw-r--r--  builtin/commit-graph.c | 27
-rw-r--r--  builtin/commit.c | 4
-rw-r--r--  builtin/describe.c | 22
-rw-r--r--  builtin/difftool.c | 56
-rw-r--r--  builtin/fast-export.c | 82
-rw-r--r--  builtin/fetch.c | 174
-rw-r--r--  builtin/index-pack.c | 4
-rw-r--r--  builtin/merge-recursive.c | 4
-rw-r--r--  builtin/merge.c | 13
-rw-r--r--  builtin/name-rev.c | 15
-rw-r--r--  builtin/patch-id.c | 11
-rw-r--r--  builtin/push.c | 4
-rw-r--r--  builtin/receive-pack.c | 50
-rw-r--r--  builtin/repack.c | 2
-rw-r--r--  builtin/replace.c | 7
-rw-r--r--  builtin/rev-list.c | 7
-rw-r--r--  builtin/rev-parse.c | 5
-rw-r--r--  builtin/show-index.c | 13
-rw-r--r--  builtin/stash.c | 17
-rw-r--r--  builtin/submodule--helper.c | 4
-rw-r--r--  builtin/worktree.c | 3
-rw-r--r--  bundle.c | 4
-rw-r--r--  cache-tree.c | 87
-rw-r--r--  cache-tree.h | 3
-rw-r--r--  cache.h | 44
-rwxr-xr-x  ci/install-dependencies.sh | 5
-rwxr-xr-x  ci/lib.sh | 5
-rwxr-xr-x  ci/run-static-analysis.sh | 3
-rwxr-xr-x  ci/test-documentation.sh | 2
-rw-r--r--  combine-diff.c | 2
-rw-r--r--  commit-graph.c | 59
-rw-r--r--  commit-graph.h | 6
-rw-r--r--  commit.c | 3
-rw-r--r--  compat/mingw.c | 13
-rwxr-xr-x  compat/vcbuild/scripts/clink.pl | 48
-rw-r--r--  compat/win32/path-utils.h | 5
-rw-r--r--  compat/winansi.c | 2
-rw-r--r--  config.c | 26
-rw-r--r--  config.mak.uname | 19
-rw-r--r--  connected.c | 7
-rw-r--r--  contrib/buildsystems/Generators/Vcxproj.pm | 18
-rw-r--r--  contrib/coccinelle/hashmap.cocci | 16
-rw-r--r--  contrib/completion/git-completion.bash | 45
-rwxr-xr-x  contrib/hg-to-git/hg-to-git.py | 50
-rwxr-xr-x  contrib/svn-fe/svnrdump_sim.py | 2
-rw-r--r--  convert.c | 8
-rw-r--r--  date.c | 27
-rw-r--r--  diff.c | 77
-rw-r--r--  diff.h | 2
-rw-r--r--  diffcore-break.c | 12
-rw-r--r--  diffcore-rename.c | 17
-rw-r--r--  dir.c | 65
-rw-r--r--  dir.h | 8
-rw-r--r--  fast-import.c | 92
-rw-r--r--  fetch-pack.c | 12
-rwxr-xr-x  git-add--interactive.perl | 2
-rw-r--r--  git-compat-util.h | 48
-rwxr-xr-x  git-p4.py | 13
-rw-r--r--  git.c | 5
-rw-r--r--  grep.c | 196
-rw-r--r--  grep.h | 25
-rw-r--r--  hashmap.c | 58
-rw-r--r--  hashmap.h | 176
-rw-r--r--  list-objects-filter-options.c | 14
-rw-r--r--  list-objects-filter-options.h | 2
-rw-r--r--  list-objects-filter.c | 14
-rw-r--r--  list-objects.c | 4
-rw-r--r--  log-tree.c | 1
-rw-r--r--  merge-recursive.c | 656
-rw-r--r--  merge-recursive.h | 164
-rw-r--r--  midx.c | 11
-rw-r--r--  name-hash.c | 57
-rw-r--r--  object.c | 1
-rw-r--r--  oidmap.c | 20
-rw-r--r--  oidmap.h | 6
-rw-r--r--  pack-bitmap.h | 6
-rw-r--r--  pack-write.c | 8
-rw-r--r--  packfile.c | 27
-rw-r--r--  patch-ids.c | 18
-rw-r--r--  perl/Git/SVN.pm | 4
-rw-r--r--  pretty.c | 2
-rw-r--r--  progress.c | 61
-rw-r--r--  promisor-remote.c | 3
-rw-r--r--  promisor-remote.h | 16
-rw-r--r--  quote.c | 6
-rw-r--r--  range-diff.c | 13
-rw-r--r--  read-cache.c | 25
-rw-r--r--  ref-filter.c | 33
-rw-r--r--  refs.c | 25
-rw-r--r--  remote.c | 21
-rw-r--r--  remote.h | 2
-rw-r--r--  repository.h | 3
-rw-r--r--  rerere.c | 8
-rw-r--r--  revision.c | 32
-rw-r--r--  send-pack.c | 3
-rw-r--r--  sequencer.c | 185
-rw-r--r--  sha1-file.c | 1
-rw-r--r--  sha1-lookup.c | 12
-rw-r--r--  sha1-name.c | 26
-rw-r--r--  shallow.c | 5
-rw-r--r--  stable-qsort.c (renamed from compat/qsort.c) | 6
-rw-r--r--  sub-process.c | 20
-rw-r--r--  sub-process.h | 6
-rw-r--r--  submodule-config.c | 52
-rw-r--r--  t/helper/.gitignore | 9
-rw-r--r--  t/helper/test-date.c | 27
-rw-r--r--  t/helper/test-hashmap.c | 50
-rw-r--r--  t/helper/test-lazy-init-name-hash.c | 12
-rw-r--r--  t/helper/test-progress.c | 81
-rw-r--r--  t/helper/test-run-command.c | 153
-rw-r--r--  t/helper/test-tool.c | 1
-rw-r--r--  t/helper/test-tool.h | 1
-rwxr-xr-x  t/t0000-basic.sh | 38
-rwxr-xr-x  t/t0014-alias.sh | 7
-rwxr-xr-x  t/t0028-working-tree-encoding.sh | 41
-rwxr-xr-x  t/t0050-filesystem.sh | 20
-rwxr-xr-x  t/t0061-run-command.sh | 21
-rwxr-xr-x  t/t0212-trace2-event.sh | 19
-rwxr-xr-x  t/t0410-partial-clone.sh | 27
-rwxr-xr-x  t/t0500-progress-display.sh | 286
-rwxr-xr-x  t/t1450-fsck.sh | 16
-rwxr-xr-x  t/t1506-rev-parse-diagnosis.sh | 8
-rwxr-xr-x  t/t3030-merge-recursive.sh | 37
-rwxr-xr-x  t/t3206-range-diff.sh | 44
-rw-r--r--  t/t3206/history.export | 31
-rwxr-xr-x  t/t3404-rebase-interactive.sh | 16
-rwxr-xr-x  t/t3429-rebase-edit-todo.sh | 21
-rwxr-xr-x  t/t3701-add-interactive.sh | 2
-rwxr-xr-x  t/t3903-stash.sh | 16
-rwxr-xr-x  t/t3908-stash-in-worktree.sh | 27
-rwxr-xr-x  t/t4014-format-patch.sh | 5
-rwxr-xr-x  t/t4038-diff-combined.sh | 2
-rwxr-xr-x  t/t4202-log.sh | 15
-rwxr-xr-x  t/t4210-log-i18n.sh | 41
-rwxr-xr-x  t/t4214-log-graph-octopus.sh | 329
-rwxr-xr-x  t/t5318-commit-graph.sh | 79
-rwxr-xr-x  t/t5324-split-commit-graph.sh | 2
-rwxr-xr-x  t/t5500-fetch-pack.sh | 38
-rwxr-xr-x  t/t5514-fetch-multiple.sh | 11
-rwxr-xr-x  t/t5541-http-push-smart.sh | 6
-rwxr-xr-x  t/t5616-partial-clone.sh | 36
-rwxr-xr-x  t/t5801-remote-helpers.sh | 1
-rwxr-xr-x  t/t6036-recursive-corner-cases.sh | 8
-rwxr-xr-x  t/t6047-diff3-conflict-markers.sh | 202
-rwxr-xr-x  t/t6120-describe.sh | 15
-rwxr-xr-x  t/t7008-filter-branch-null-sha1.sh (renamed from t/t7009-filter-branch-null-sha1.sh) | 0
-rwxr-xr-x  t/t7300-clean.sh | 44
-rwxr-xr-x  t/t7400-submodule-basic.sh | 2
-rwxr-xr-x  t/t7505-prepare-commit-msg-hook.sh | 8
-rw-r--r--  t/t7505/expected-rebase-i | 3
-rwxr-xr-x  t/t7812-grep-icase-non-ascii.sh | 28
-rwxr-xr-x  t/t7815-grep-binary.sh (renamed from t/t7008-grep-binary.sh) | 101
-rwxr-xr-x  t/t7816-grep-binary-pattern.sh | 127
-rwxr-xr-x  t/t9300-fast-import.sh | 50
-rwxr-xr-x  t/t9350-fast-export.sh | 68
-rw-r--r--  t/test-lib-functions.sh | 6
-rw-r--r--  t/test-lib.sh | 38
-rw-r--r--  trace2/tr2_dst.c | 111
-rw-r--r--  trace2/tr2_dst.h | 1
-rw-r--r--  trace2/tr2_sysenv.c | 3
-rw-r--r--  trace2/tr2_sysenv.h | 2
-rw-r--r--  trace2/tr2_tgt_event.c | 31
-rw-r--r--  trace2/tr2_tgt_normal.c | 2
-rw-r--r--  trace2/tr2_tgt_perf.c | 2
-rw-r--r--  unpack-trees.c | 1
-rw-r--r--  upload-pack.c | 2
-rw-r--r--  wrapper.c | 21
-rw-r--r--  wt-status.c | 2
-rw-r--r--  wt-status.h | 2
-rw-r--r--  xdiff/xdiffi.c | 99
219 files changed, 5278 insertions, 2310 deletions
diff --git a/.gitignore b/.gitignore
index fc445ed..89b3b79 100644
--- a/.gitignore
+++ b/.gitignore
@@ -216,6 +216,7 @@
/tags
/TAGS
/cscope*
+*.hcc
*.obj
*.lib
*.res
diff --git a/.mailmap b/.mailmap
index 9a5ff04..14fa041 100644
--- a/.mailmap
+++ b/.mailmap
@@ -18,6 +18,7 @@ Alexey Shumkin <alex.crezoff@gmail.com> <zapped@mail.ru>
Alexey Shumkin <alex.crezoff@gmail.com> <Alex.Crezoff@gmail.com>
Anders Kaseorg <andersk@MIT.EDU> <andersk@ksplice.com>
Anders Kaseorg <andersk@MIT.EDU> <andersk@mit.edu>
+Andrey Mazo <ahippo@yandex.com> Mazo, Andrey <amazo@checkvideo.com>
Aneesh Kumar K.V <aneesh.kumar@gmail.com>
Amos Waterland <apw@debian.org> <apw@rossby.metr.ou.edu>
Amos Waterland <apw@debian.org> <apw@us.ibm.com>
diff --git a/Documentation/Makefile b/Documentation/Makefile
index 76f2ecf..06d85ad 100644
--- a/Documentation/Makefile
+++ b/Documentation/Makefile
@@ -123,7 +123,8 @@ ASCIIDOC_HTML = xhtml11
ASCIIDOC_DOCBOOK = docbook
ASCIIDOC_CONF = -f asciidoc.conf
ASCIIDOC_COMMON = $(ASCIIDOC) $(ASCIIDOC_EXTRA) $(ASCIIDOC_CONF) \
- -agit_version=$(GIT_VERSION)
+ -amanversion=$(GIT_VERSION) \
+ -amanmanual='Git Manual' -amansource='Git'
TXT_TO_HTML = $(ASCIIDOC_COMMON) -b $(ASCIIDOC_HTML)
TXT_TO_XML = $(ASCIIDOC_COMMON) -b $(ASCIIDOC_DOCBOOK)
MANPAGE_XSL = manpage-normal.xsl
@@ -197,11 +198,13 @@ ifdef USE_ASCIIDOCTOR
ASCIIDOC = asciidoctor
ASCIIDOC_CONF =
ASCIIDOC_HTML = xhtml5
-ASCIIDOC_DOCBOOK = docbook45
+ASCIIDOC_DOCBOOK = docbook5
ASCIIDOC_EXTRA += -acompat-mode -atabsize=8
ASCIIDOC_EXTRA += -I. -rasciidoctor-extensions
ASCIIDOC_EXTRA += -alitdd='&\#x2d;&\#x2d;'
DBLATEX_COMMON =
+XMLTO_EXTRA += --skip-validation
+XMLTO_EXTRA += -x manpage.xsl
endif
SHELL_PATH ?= $(SHELL)
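These knobs take effect when the manual pages are built with the Asciidoctor toolchain; a typical invocation looks something like this (a sketch, using the Documentation Makefile's existing targets):
----
# render the manual pages with Asciidoctor instead of asciidoc
make -C Documentation USE_ASCIIDOCTOR=1 man
----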
diff --git a/Documentation/MyFirstContribution.txt b/Documentation/MyFirstContribution.txt
index f867037..5e9b808 100644
--- a/Documentation/MyFirstContribution.txt
+++ b/Documentation/MyFirstContribution.txt
@@ -97,8 +97,8 @@ int cmd_psuh(int argc, const char **argv, const char *prefix)
----
We'll also need to add the declaration of psuh; open up `builtin.h`, find the
-declaration for `cmd_push`, and add a new line for `psuh` immediately before it,
-in order to keep the declarations sorted:
+declaration for `cmd_pull`, and add a new line for `psuh` immediately before it,
+in order to keep the declarations alphabetically sorted:
----
int cmd_psuh(int argc, const char **argv, const char *prefix);
@@ -123,7 +123,7 @@ int cmd_psuh(int argc, const char **argv, const char *prefix)
}
----
-Let's try to build it. Open `Makefile`, find where `builtin/push.o` is added
+Let's try to build it. Open `Makefile`, find where `builtin/pull.o` is added
to `BUILTIN_OBJS`, and add `builtin/psuh.o` in the same way next to it in
alphabetical order. Once you've done so, move to the top-level directory and
build simply with `make`. Also add the `DEVELOPER=1` variable to turn on
@@ -149,7 +149,7 @@ a `cmd_struct` to the `commands[]` array. `struct cmd_struct` takes a string
with the command name, a function pointer to the command implementation, and a
setup option flag. For now, let's keep mimicking `push`. Find the line where
`cmd_push` is registered, copy it, and modify it for `cmd_psuh`, placing the new
-line in alphabetical order.
+line in alphabetical order (immediately before `cmd_pull`).
The options are documented in `builtin.h` under "Adding a new built-in." Since
we hope to print some data about the user's current workspace context later,
@@ -167,7 +167,7 @@ Check it out! You've got a command! Nice work! Let's commit this.
`git status` reveals modified `Makefile`, `builtin.h`, and `git.c` as well as
untracked `builtin/psuh.c` and `git-psuh`. First, let's take care of the binary,
-which should be ignored. Open `.gitignore` in your editor, find `/git-push`, and
+which should be ignored. Open `.gitignore` in your editor, find `/git-pull`, and
add an entry for your new command in alphabetical order:
----
diff --git a/Documentation/RelNotes/2.24.0.txt b/Documentation/RelNotes/2.24.0.txt
index 7f44e7a..bda29d2 100644
--- a/Documentation/RelNotes/2.24.0.txt
+++ b/Documentation/RelNotes/2.24.0.txt
@@ -68,6 +68,9 @@ UI, Workflows & Features
* A configuration variable tells "git fetch" to write the commit
graph after finishing.
+ * "git add -i" has been taught to show the total number of hunks and
+ the hunks that has been processed so far when showing prompts.
+
Performance, Internal Implementation, Development Support etc.
@@ -107,6 +110,23 @@ Performance, Internal Implementation, Development Support etc.
things like sparse checkout specification that want to check if a
path is "included".
+ * "git stash" learned to write refreshed index back to disk.
+
+ * Coccinelle checks are done on more source files than before now.
+
+ * The cache-tree code has been taught to be less aggressive in
+ attempting to see if a tree object it computed already exists in
+ the repository.
+
+ * The code to parse and use the commit-graph file has been made more
+ robust against corrupted input.
+
+ * The hg-to-git script (in contrib/) has been updated to work with
+ Python 3.
+
+ * Update the way build artifacts in t/helper/ directory are ignored.
+
+ * Preparation for SHA-256 upgrade continues.
Fixes since v2.23
@@ -192,6 +212,91 @@ Fixes since v2.23
current branch, which has been corrected.
(merge bf1e28e0ad bw/rebase-autostash-keep-current-branch later to maint).
+ * Update support for Asciidoctor documentation toolchain.
+ (merge 83b0b8953e ma/asciidoctor-refmiscinfo later to maint).
+
+ * Start using DocBook 5 (instead of DocBook 4.5) as Asciidoctor 2.0
+ no longer works with the older one.
+ (merge f6461b82b9 bc/doc-use-docbook-5 later to maint).
+
+ * The markup used in user-manual has been updated to work better with
+ asciidoctor.
+ (merge c4d2f6143a ma/user-manual-markup-update later to maint).
+
+ * Make sure the grep machinery does not abort when seeing a payload
+ that is not UTF-8 even when JIT is not in use with PCRE1.
+ (merge ad7c543e3b cb/skip-utf8-check-with-pcre1 later to maint).
+
+ * The name of the blob object that stores the filter specification
+ for sparse cloning/fetching was interpreted in a wrong place in the
+ code, causing Git to abort.
+
+ * "git log --decorate-refs-exclude=<pattern>" was incorrectly
+ overruled when the "--simplify-by-decoration" option is used, which
+ has been corrected.
+ (merge 0cc7380d88 rs/simplify-by-deco-with-deco-refs-exclude later to maint).
+
+ * The "upload-pack" (the counterpart of "git fetch") needs to disable
+ commit-graph when responding to a shallow clone/fetch request, but
+ the way this was done made Git panic, which has been corrected.
+
+ * The object traversal machinery has been optimized not to load tree
+ objects when we are only interested in commit history.
+ (merge 72ed80c784 jk/list-objects-optim-wo-trees later to maint).
+
+ * The object name parser for "Nth parent" syntax has been made more
+ robust against integer overflows.
+ (merge 59fa5f5a25 rs/nth-parent-parse later to maint).
+
+ * The code used in following tags in "git fetch" has been optimized.
+ (merge b7e2d8bca5 ms/fetch-follow-tag-optim later to maint).
+
+ * Regression fix for progress output.
+ (merge 2bb74b53a4 sg/progress-fix later to maint).
+
+ * A bug in merge-recursive code that triggers when a branch with a
+ symbolic link is merged with a branch that replaces it with a
+ directory has been fixed.
+ (merge 83e3ad3b12 jt/merge-recursive-symlink-is-not-a-dir-in-way later to maint).
+
+ * The rename detection logic sorts a list of rename source candidates
+ by similarity to pick the best candidate, which means that a tie
+ between sources with the same similarity is broken by the original
+ location in the original candidate list (which is sorted by path).
+ Force the sorting by similarity done with a stable sort, which is
+ not promised by system supplied qsort(3), to ensure consistent
+ results across platforms.
+ (merge 2049b8dc65 js/diff-rename-force-stable-sort later to maint).
+
+ * The code to skip "UTF" and "UTF-" prefix, when computing an advice
+ message, did not work correctly when the prefix was "UTF", which
+ has been fixed.
+ (merge b181676ce9 rs/convert-fix-utf-without-dash later to maint).
+
+ * The author names taken from SVN repositories may have extra leading
+ or trailing whitespaces, which are now munged away.
+ (merge 4ddd4bddb1 tk/git-svn-trim-author-name later to maint).
+
+ * "git rebase -i" showed a wrong HEAD while "reword" open the editor.
+ (merge b0a3186140 pw/rebase-i-show-HEAD-to-reword later to maint).
+
+ * A few simplification and bugfixes to PCRE interface.
+ (merge c581e4a749 ab/pcre-jit-fixes later to maint).
+
+ * PCRE fixes.
+ (merge ff61681b46 cb/pcre1-cleanup later to maint).
+
+ * "git range-diff" segfaulted when diff.noprefix configuration was
+ used, as it blindly expected the patch it internally generates to
+ have the standard a/ and b/ prefixes. The command now forces the
+ internal patch to be built without any prefix, not to be affected
+ by any end-user configuration.
+ (merge 937b76ed49 js/range-diff-noprefix later to maint).
+
+ * "git stash apply" in a subdirectory of a secondary worktree failed
+ to access the worktree correctly, which has been corrected.
+ (merge dfd557c978 js/stash-apply-in-secondary-worktree later to maint).
+
* Other code cleanup, docfix, build fix, etc.
(merge d1387d3895 en/fast-import-merge-doc later to maint).
(merge 1c24a54ea4 bm/repository-layout-typofix later to maint).
@@ -208,3 +313,25 @@ Fixes since v2.23
(merge 0a8bc7068f dt/remote-helper-doc-re-lock-option later to maint).
(merge 27fd1e4ea7 en/merge-options-ff-and-friends later to maint).
(merge 502c386ff9 sg/clean-nested-repo-with-ignored later to maint).
+ (merge 26e3d1cbea am/mailmap-andrey-mazo later to maint).
+ (merge 47b27c96fa ss/get-time-cleanup later to maint).
+ (merge dd2e50a84e jk/commit-graph-cleanup later to maint).
+ (merge 4fd39c76e6 cs/pretty-formats-doc-typofix later to maint).
+ (merge 40e747e89d dl/submodule-set-branch later to maint).
+ (merge 689a146c91 rs/commit-graph-use-list-count later to maint).
+ (merge 0eb7c37a8a js/doc-patch-text later to maint).
+ (merge 4b3aa170d1 rs/nth-switch-code-simplification later to maint).
+ (merge 0d4304c124 ah/doc-submodule-ignore-submodules later to maint).
+ (merge af78249463 cc/svn-fe-py-shebang later to maint).
+ (merge 7bd97d6dff rs/alias-use-copy-array later to maint).
+ (merge c46ebc2496 sg/travis-help-debug later to maint).
+ (merge 24c681794f ps/my-first-contribution-alphasort later to maint).
+ (merge 75b2c15435 cb/do-not-use-test-cmp-with-a later to maint).
+ (merge cda0d497e3 bw/submodule-helper-usage-fix later to maint).
+ (merge fe0ed5d5e9 am/visual-studio-config-fix later to maint).
+ (merge 2e09c01232 sg/name-rev-cutoff-underflow-fix later to maint).
+ (merge ddb3c856f3 as/shallow-slab-use-fix later to maint).
+ (merge 71f4960b91 js/mingw-spawn-with-spaces-in-path later to maint).
+ (merge 53d687bf5f ah/cleanups later to maint).
+ (merge f537485fa5 rs/test-remove-useless-debugging-cat later to maint).
+ (merge 11a3d3aadd dl/rev-list-doc-cleanup later to maint).
diff --git a/Documentation/SubmittingPatches b/Documentation/SubmittingPatches
index 6d589e1..1a60cc1 100644
--- a/Documentation/SubmittingPatches
+++ b/Documentation/SubmittingPatches
@@ -372,9 +372,9 @@ such as "Thanks-to:", "Based-on-patch-by:", or "Mentored-by:".
Some parts of the system have dedicated maintainers with their own
repositories.
-- `git-gui/` comes from git-gui project, maintained by Pat Thoyts:
+- `git-gui/` comes from git-gui project, maintained by Pratyush Yadav:
- git://repo.or.cz/git-gui.git
+ https://github.com/prati0100/git-gui.git
- `gitk-git/` comes from Paul Mackerras's gitk project:
diff --git a/Documentation/asciidoc.conf b/Documentation/asciidoc.conf
index 2c16c53..8fc4b67 100644
--- a/Documentation/asciidoc.conf
+++ b/Documentation/asciidoc.conf
@@ -78,9 +78,9 @@ template::[header-declarations]
<refmeta>
<refentrytitle>{mantitle}</refentrytitle>
<manvolnum>{manvolnum}</manvolnum>
-<refmiscinfo class="source">Git</refmiscinfo>
-<refmiscinfo class="version">{git_version}</refmiscinfo>
-<refmiscinfo class="manual">Git Manual</refmiscinfo>
+<refmiscinfo class="source">{mansource}</refmiscinfo>
+<refmiscinfo class="version">{manversion}</refmiscinfo>
+<refmiscinfo class="manual">{manmanual}</refmiscinfo>
</refmeta>
<refnamediv>
<refname>{manname}</refname>
diff --git a/Documentation/asciidoctor-extensions.rb b/Documentation/asciidoctor-extensions.rb
index 0089e0c..d906a00 100644
--- a/Documentation/asciidoctor-extensions.rb
+++ b/Documentation/asciidoctor-extensions.rb
@@ -9,8 +9,11 @@ module Git
named :chrome
def process(parent, target, attrs)
- if parent.document.basebackend? 'html'
- prefix = parent.document.attr('git-relative-html-prefix')
+ prefix = parent.document.attr('git-relative-html-prefix')
+ if parent.document.doctype == 'book'
+ "<ulink url=\"#{prefix}#{target}.html\">" \
+ "#{target}(#{attrs[1]})</ulink>"
+ elsif parent.document.basebackend? 'html'
%(<a href="#{prefix}#{target}.html">#{target}(#{attrs[1]})</a>)
elsif parent.document.basebackend? 'docbook'
"<citerefentry>\n" \
@@ -20,9 +23,26 @@ module Git
end
end
end
+
+ class DocumentPostProcessor < Asciidoctor::Extensions::Postprocessor
+ def process document, output
+ if document.basebackend? 'docbook'
+ mansource = document.attributes['mansource']
+ manversion = document.attributes['manversion']
+ manmanual = document.attributes['manmanual']
+ new_tags = "" \
+ "<refmiscinfo class=\"source\">#{mansource}</refmiscinfo>\n" \
+ "<refmiscinfo class=\"version\">#{manversion}</refmiscinfo>\n" \
+ "<refmiscinfo class=\"manual\">#{manmanual}</refmiscinfo>\n"
+ output = output.sub(/<\/refmeta>/, new_tags + "</refmeta>")
+ end
+ output
+ end
+ end
end
end
Asciidoctor::Extensions.register do
inline_macro Git::Documentation::LinkGitProcessor, :linkgit
+ postprocessor Git::Documentation::DocumentPostProcessor
end
diff --git a/Documentation/config.txt b/Documentation/config.txt
index 77f3b14..f50f1b4 100644
--- a/Documentation/config.txt
+++ b/Documentation/config.txt
@@ -178,47 +178,49 @@ to either specify only the realpath version, or both versions.
Example
~~~~~~~
- # Core variables
- [core]
- ; Don't trust file modes
- filemode = false
-
- # Our diff algorithm
- [diff]
- external = /usr/local/bin/diff-wrapper
- renames = true
-
- [branch "devel"]
- remote = origin
- merge = refs/heads/devel
-
- # Proxy settings
- [core]
- gitProxy="ssh" for "kernel.org"
- gitProxy=default-proxy ; for the rest
-
- [include]
- path = /path/to/foo.inc ; include by absolute path
- path = foo.inc ; find "foo.inc" relative to the current file
- path = ~/foo.inc ; find "foo.inc" in your `$HOME` directory
-
- ; include if $GIT_DIR is /path/to/foo/.git
- [includeIf "gitdir:/path/to/foo/.git"]
- path = /path/to/foo.inc
-
- ; include for all repositories inside /path/to/group
- [includeIf "gitdir:/path/to/group/"]
- path = /path/to/foo.inc
-
- ; include for all repositories inside $HOME/to/group
- [includeIf "gitdir:~/to/group/"]
- path = /path/to/foo.inc
-
- ; relative paths are always relative to the including
- ; file (if the condition is true); their location is not
- ; affected by the condition
- [includeIf "gitdir:/path/to/group/"]
- path = foo.inc
+----
+# Core variables
+[core]
+ ; Don't trust file modes
+ filemode = false
+
+# Our diff algorithm
+[diff]
+ external = /usr/local/bin/diff-wrapper
+ renames = true
+
+[branch "devel"]
+ remote = origin
+ merge = refs/heads/devel
+
+# Proxy settings
+[core]
+ gitProxy="ssh" for "kernel.org"
+ gitProxy=default-proxy ; for the rest
+
+[include]
+ path = /path/to/foo.inc ; include by absolute path
+ path = foo.inc ; find "foo.inc" relative to the current file
+ path = ~/foo.inc ; find "foo.inc" in your `$HOME` directory
+
+; include if $GIT_DIR is /path/to/foo/.git
+[includeIf "gitdir:/path/to/foo/.git"]
+ path = /path/to/foo.inc
+
+; include for all repositories inside /path/to/group
+[includeIf "gitdir:/path/to/group/"]
+ path = /path/to/foo.inc
+
+; include for all repositories inside $HOME/to/group
+[includeIf "gitdir:~/to/group/"]
+ path = /path/to/foo.inc
+
+; relative paths are always relative to the including
+; file (if the condition is true); their location is not
+; affected by the condition
+[includeIf "gitdir:/path/to/group/"]
+ path = foo.inc
+----
; include only if we are in a worktree where foo-branch is
; currently checked out
diff --git a/Documentation/config/fetch.txt b/Documentation/config/fetch.txt
index e8cb205..f119402 100644
--- a/Documentation/config/fetch.txt
+++ b/Documentation/config/fetch.txt
@@ -70,6 +70,16 @@ fetch.showForcedUpdates::
linkgit:git-fetch[1] and linkgit:git-pull[1] commands.
Defaults to true.
+fetch.parallel::
+ Specifies the maximal number of fetch operations to be run in parallel
+ at a time (submodules, or remotes when the `--multiple` option of
+ linkgit:git-fetch[1] is in effect).
++
+A value of 0 will give some reasonable default. If unset, it defaults to 1.
++
+For submodules, this setting can be overridden using the `submodule.fetchJobs`
+config setting.
+
fetch.writeCommitGraph::
Set to true to write a commit-graph after every `git fetch` command
that downloads a pack-file from a remote. Using the `--split` option,
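A minimal illustration of the new `fetch.parallel` setting described above; the remote names and job counts are arbitrary placeholders:
----
# allow up to 8 remotes to be fetched at a time when --multiple is used
git config fetch.parallel 8
git fetch --multiple origin mirror backup

# submodule fetch parallelism can still be tuned on its own
git config submodule.fetchJobs 4
----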
diff --git a/Documentation/config/trace2.txt b/Documentation/config/trace2.txt
index 2edbfb0..4ce0b9a 100644
--- a/Documentation/config/trace2.txt
+++ b/Documentation/config/trace2.txt
@@ -54,3 +54,9 @@ trace2.destinationDebug::
By default, these errors are suppressed and tracing is
silently disabled. May be overridden by the
`GIT_TRACE2_DST_DEBUG` environment variable.
+
+trace2.maxFiles::
+ Integer. When writing trace files to a target directory, do not
+ write additional traces if we would exceed this many files. Instead,
+ write a sentinel file that will block further tracing to this
+ directory. Defaults to 0, which disables this check.
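A sketch of how `trace2.maxFiles` pairs with a directory target (the path and the limit are arbitrary):
----
# cap the trace directory at 100 files; once the limit is hit, Git writes
# the git-trace2-discard sentinel there instead of new trace files
git config --global trace2.maxFiles 100
mkdir -p /tmp/git-traces
GIT_TRACE2_EVENT=/tmp/git-traces git status
----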
diff --git a/Documentation/diff-generate-patch.txt b/Documentation/diff-generate-patch.txt
index f10ca41..e8ed647 100644
--- a/Documentation/diff-generate-patch.txt
+++ b/Documentation/diff-generate-patch.txt
@@ -1,11 +1,15 @@
-Generating patches with -p
---------------------------
-
-When "git-diff-index", "git-diff-tree", or "git-diff-files" are run
-with a `-p` option, "git diff" without the `--raw` option, or
-"git log" with the "-p" option, they
-do not produce the output described above; instead they produce a
-patch file. You can customize the creation of such patches via the
+Generating patch text with -p
+-----------------------------
+
+Running
+linkgit:git-diff[1],
+linkgit:git-log[1],
+linkgit:git-show[1],
+linkgit:git-diff-index[1],
+linkgit:git-diff-tree[1], or
+linkgit:git-diff-files[1]
+with the `-p` option produces patch text.
+You can customize the creation of patch text via the
`GIT_EXTERNAL_DIFF` and the `GIT_DIFF_OPTS` environment variables.
What the -p option produces is slightly different from the traditional
@@ -49,7 +53,7 @@ similarity index value of 100% is thus reserved for two equal
files, while 100% dissimilarity means that no line from the old
file made it into the new one.
+
-The index line includes the SHA-1 checksum before and after the change.
+The index line includes the blob object names before and after the change.
The <mode> is included if the file mode does not change; otherwise,
separate lines indicate the old and the new mode.
@@ -70,7 +74,7 @@ separate lines indicate the old and the new mode.
rename to a
-combined diff format
+Combined diff format
--------------------
Any diff-generating command can take the `-c` or `--cc` option to
@@ -80,7 +84,7 @@ linkgit:git-show[1]. Note also that you can give the `-m` option to any
of these commands to force generation of diffs with individual parents
of a merge.
-A 'combined diff' format looks like this:
+A "combined diff" format looks like this:
------------
diff --combined describe.c
@@ -113,11 +117,11 @@ index fabadb8,cc95eb0..4866510
------------
1. It is preceded with a "git diff" header, that looks like
- this (when `-c` option is used):
+ this (when the `-c` option is used):
diff --combined file
+
-or like this (when `--cc` option is used):
+or like this (when the `--cc` option is used):
diff --cc file
@@ -160,7 +164,7 @@ parents.
4. Chunk header format is modified to prevent people from
accidentally feeding it to `patch -p1`. Combined diff format
was created for review of merge commit changes, and was not
- meant for apply. The change is similar to the change in the
+ meant to be applied. The change is similar to the change in the
extended 'index' header:
@@@ <from-file-range> <from-file-range> <to-file-range> @@@
diff --git a/Documentation/doc-diff b/Documentation/doc-diff
index 3355be4..88a9b20 100755
--- a/Documentation/doc-diff
+++ b/Documentation/doc-diff
@@ -21,7 +21,7 @@ asciidoc use asciidoc with both commits
to-asciidoc use asciidoc with the 'to'-commit
to-asciidoctor use asciidoctor with the 'to'-commit
asciidoctor use asciidoctor with both commits
-cut-header-footer cut away header and footer
+cut-footer cut away footer
"
SUBDIRECTORY_OK=1
. "$(git --exec-path)/git-sh-setup"
@@ -31,7 +31,7 @@ force=
clean=
from_program=
to_program=
-cut_header_footer=
+cut_footer=
while test $# -gt 0
do
case "$1" in
@@ -55,8 +55,8 @@ do
--asciidoc)
from_program=-asciidoc
to_program=-asciidoc ;;
- --cut-header-footer)
- cut_header_footer=-cut-header-footer ;;
+ --cut-footer)
+ cut_footer=-cut-footer ;;
--)
shift; break ;;
*)
@@ -118,8 +118,8 @@ construct_makemanflags () {
from_makemanflags=$(construct_makemanflags "$from_program") &&
to_makemanflags=$(construct_makemanflags "$to_program") &&
-from_dir=$from_oid$from_program$cut_header_footer &&
-to_dir=$to_oid$to_program$cut_header_footer &&
+from_dir=$from_oid$from_program$cut_footer &&
+to_dir=$to_oid$to_program$cut_footer &&
# generate_render_makefile <srcdir> <dstdir>
generate_render_makefile () {
@@ -169,12 +169,11 @@ render_tree () {
make -j$parallel -f - &&
mv "$tmp/rendered/$dname+" "$tmp/rendered/$dname"
- if test "$cut_header_footer" = "-cut-header-footer"
+ if test "$cut_footer" = "-cut-footer"
then
for f in $(find "$tmp/rendered/$dname" -type f)
do
- tail -n +3 "$f" | head -n -2 |
- sed -e '1{/^$/d}' -e '${/^$/d}' >"$f+" &&
+ head -n -2 "$f" | sed -e '${/^$/d}' >"$f+" &&
mv "$f+" "$f" ||
return 1
done
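With the option renamed above, a comparison run might look like this (the revisions are placeholders):
----
# compare rendered manpages between two commits, trimming only the footer
cd Documentation
./doc-diff --cut-footer v2.23.0 HEAD
----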
diff --git a/Documentation/fetch-options.txt b/Documentation/fetch-options.txt
index 99df1f3..43b9ff3 100644
--- a/Documentation/fetch-options.txt
+++ b/Documentation/fetch-options.txt
@@ -160,10 +160,15 @@ ifndef::git-pull[]
-j::
--jobs=<n>::
- Number of parallel children to be used for fetching submodules.
- Each will fetch from different submodules, such that fetching many
- submodules will be faster. By default submodules will be fetched
- one at a time.
+ Number of parallel children to be used for all forms of fetching.
++
+If the `--multiple` option was specified, the different remotes will be fetched
+in parallel. If multiple submodules are fetched, they will be fetched in
+parallel. To control them independently, use the config settings
+`fetch.parallel` and `submodule.fetchJobs` (see linkgit:git-config[1]).
++
+Typically, parallel recursive and multi-remote fetches will be faster. By
+default fetches are performed sequentially, not in parallel.
--no-recurse-submodules::
Disable recursive fetching of submodules (this has the same effect as
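In other words, a single `--jobs` value now covers both multi-remote and submodule fetching; for example (remote names and the job count are illustrative):
----
git fetch --multiple --jobs=4 origin mirror
git fetch --recurse-submodules --jobs=4 origin
----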
diff --git a/Documentation/git-clean.txt b/Documentation/git-clean.txt
index 0028ff1..a7f309d 100644
--- a/Documentation/git-clean.txt
+++ b/Documentation/git-clean.txt
@@ -26,18 +26,20 @@ are affected.
OPTIONS
-------
-d::
- Remove untracked directories in addition to untracked files.
- If an untracked directory is managed by a different Git
- repository, it is not removed by default. Use -f option twice
- if you really want to remove such a directory.
+ Normally, when no <path> is specified, git clean will not
+ recurse into untracked directories to avoid removing too much.
+ Specify -d to have it recurse into such directories as well.
+ If any paths are specified, -d is irrelevant; all untracked
+ files matching the specified paths (with exceptions for nested
+ git directories mentioned under `--force`) will be removed.
-f::
--force::
If the Git configuration variable clean.requireForce is not set
to false, 'git clean' will refuse to delete files or directories
- unless given -f, -n or -i. Git will refuse to delete directories
- with .git sub directory or file unless a second -f
- is given.
+ unless given -f or -i. Git will refuse to modify untracked
+ nested git repositories (directories with a .git subdirectory)
+ unless a second -f is given.
-i::
--interactive::
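The reworded `-d`/`-f` behaviour is easiest to see with a dry run first:
----
# preview which untracked files and directories would be removed
git clean -nd
# remove them; nested git repositories need a second -f
git clean -fd
git clean -ffd
----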
diff --git a/Documentation/git-commit-graph.txt b/Documentation/git-commit-graph.txt
index eb5e786..8c708a7 100644
--- a/Documentation/git-commit-graph.txt
+++ b/Documentation/git-commit-graph.txt
@@ -10,8 +10,8 @@ SYNOPSIS
--------
[verse]
'git commit-graph read' [--object-dir <dir>]
-'git commit-graph verify' [--object-dir <dir>] [--shallow]
-'git commit-graph write' <options> [--object-dir <dir>]
+'git commit-graph verify' [--object-dir <dir>] [--shallow] [--[no-]progress]
+'git commit-graph write' <options> [--object-dir <dir>] [--[no-]progress]
DESCRIPTION
@@ -29,6 +29,9 @@ OPTIONS
commit-graph file is expected to be in the `<dir>/info` directory and
the packfiles are expected to be in `<dir>/pack`.
+--[no-]progress::
+ Turn progress on/off explicitly. If neither is specified, progress is
+ shown if standard error is connected to a terminal.
COMMANDS
--------
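A short usage sketch for the new `--[no-]progress` toggle (`--reachable` is just one common way to drive `write`):
----
# suppress progress output, e.g. in a scheduled maintenance job
git commit-graph write --reachable --no-progress

# force progress reporting even when stderr is not a terminal
git commit-graph verify --progress 2>progress.log
----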
diff --git a/Documentation/git-commit.txt b/Documentation/git-commit.txt
index 7628193..afa7b75 100644
--- a/Documentation/git-commit.txt
+++ b/Documentation/git-commit.txt
@@ -282,18 +282,20 @@ FROM UPSTREAM REBASE" section in linkgit:git-rebase[1].)
--untracked-files[=<mode>]::
Show untracked files.
+
+--
The mode parameter is optional (defaults to 'all'), and is used to
specify the handling of untracked files; when -u is not used, the
default is 'normal', i.e. show untracked files and directories.
-+
+
The possible options are:
-+
+
- 'no' - Show no untracked files
- 'normal' - Shows untracked files and directories
- 'all' - Also shows individual files in untracked directories.
-+
+
The default can be changed using the status.showUntrackedFiles
configuration variable documented in linkgit:git-config[1].
+--
-v::
--verbose::
diff --git a/Documentation/git-config.txt b/Documentation/git-config.txt
index ff9310f..899e92a 100644
--- a/Documentation/git-config.txt
+++ b/Documentation/git-config.txt
@@ -339,33 +339,35 @@ EXAMPLES
Given a .git/config like this:
- #
- # This is the config file, and
- # a '#' or ';' character indicates
- # a comment
- #
-
- ; core variables
- [core]
- ; Don't trust file modes
- filemode = false
-
- ; Our diff algorithm
- [diff]
- external = /usr/local/bin/diff-wrapper
- renames = true
-
- ; Proxy settings
- [core]
- gitproxy=proxy-command for kernel.org
- gitproxy=default-proxy ; for all the rest
-
- ; HTTP
- [http]
- sslVerify
- [http "https://weak.example.com"]
- sslVerify = false
- cookieFile = /tmp/cookie.txt
+------------
+#
+# This is the config file, and
+# a '#' or ';' character indicates
+# a comment
+#
+
+; core variables
+[core]
+ ; Don't trust file modes
+ filemode = false
+
+; Our diff algorithm
+[diff]
+ external = /usr/local/bin/diff-wrapper
+ renames = true
+
+; Proxy settings
+[core]
+ gitproxy=proxy-command for kernel.org
+ gitproxy=default-proxy ; for all the rest
+
+; HTTP
+[http]
+ sslVerify
+[http "https://weak.example.com"]
+ sslVerify = false
+ cookieFile = /tmp/cookie.txt
+------------
you can set the filemode to true with
diff --git a/Documentation/git-fast-export.txt b/Documentation/git-fast-export.txt
index 784e934..37634bf 100644
--- a/Documentation/git-fast-export.txt
+++ b/Documentation/git-fast-export.txt
@@ -75,11 +75,20 @@ produced incorrect results if you gave these options.
Before processing any input, load the marks specified in
<file>. The input file must exist, must be readable, and
must use the same format as produced by --export-marks.
+
+--mark-tags::
+ In addition to labelling blobs and commits with mark ids, also
+ label tags. This is useful in conjunction with
+ `--export-marks` and `--import-marks`, and is also useful (and
+ necessary) for exporting of nested tags. It does not hurt
+ other cases and would be the default, but many fast-import
+ frontends are not prepared to accept tags with mark
+ identifiers.
+
-Any commits that have already been marked will not be exported again.
-If the backend uses a similar --import-marks file, this allows for
-incremental bidirectional exporting of the repository by keeping the
-marks the same across runs.
+Any commits (or tags) that have already been marked will not be
+exported again. If the backend uses a similar --import-marks file,
+this allows for incremental bidirectional exporting of the repository
+by keeping the marks the same across runs.
--fake-missing-tagger::
Some old repositories have tags without a tagger. The
diff --git a/Documentation/git-fast-import.txt b/Documentation/git-fast-import.txt
index 0bb2762..a3f1e0c 100644
--- a/Documentation/git-fast-import.txt
+++ b/Documentation/git-fast-import.txt
@@ -337,6 +337,13 @@ and control the current import process. More detailed discussion
`commit` command. This command is optional and is not
needed to perform an import.
+`alias`::
+ Record that a mark refers to a given object without first
+ creating any new object. Using --import-marks and referring
+ to missing marks will cause fast-import to fail, so aliases
+ can provide a way to set otherwise pruned commits to a valid
+ value (e.g. the nearest non-pruned ancestor).
+
`checkpoint`::
Forces fast-import to close the current packfile, generate its
unique SHA-1 checksum and index, and start a new packfile.
@@ -774,6 +781,7 @@ lightweight (non-annotated) tags see the `reset` command below.
....
'tag' SP <name> LF
+ mark?
'from' SP <commit-ish> LF
original-oid?
'tagger' (SP <name>)? SP LT <email> GT SP <when> LF
@@ -913,6 +921,21 @@ a data chunk which does not have an LF as its last byte.
+
The `LF` after `<delim> LF` is optional (it used to be required).
+`alias`
+~~~~~~~
+Record that a mark refers to a given object without first creating any
+new object.
+
+....
+ 'alias' LF
+ mark
+ 'to' SP <commit-ish> LF
+ LF?
+....
+
+For a detailed description of `<commit-ish>` see above under `from`.
+
+
`checkpoint`
~~~~~~~~~~~~
Forces fast-import to close the current packfile, start a new one, and to
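A hedged sketch of an `alias` entry in an actual stream, following the grammar shown above and assuming a repository in which `refs/heads/master` exists; the mark number is a placeholder:
----
# bind mark :1000 to an existing commit without creating a new object
git fast-import --export-marks=marks.txt <<\EOF
alias
mark :1000
to refs/heads/master
EOF
----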
diff --git a/Documentation/git-grep.txt b/Documentation/git-grep.txt
index 2d27969..c89fb56 100644
--- a/Documentation/git-grep.txt
+++ b/Documentation/git-grep.txt
@@ -271,6 +271,23 @@ providing this option will cause it to die.
-f <file>::
Read patterns from <file>, one per line.
++
+Passing the pattern via <file> allows for providing a search pattern
+containing a \0.
++
+Not all pattern types support patterns containing \0. Git will error
+out if a given pattern type can't support such a pattern. The
+`--perl-regexp` pattern type when compiled against the PCRE v2 backend
+has the widest support for these types of patterns.
++
+In versions of Git before 2.23.0 patterns containing \0 would be
+silently considered fixed. This was never documented, there were also
+odd and undocumented interactions between e.g. non-ASCII patterns
+containing \0 and `--ignore-case`.
++
+In future versions we may learn to support patterns containing \0 for
+more search backends, until then we'll die when the pattern type in
+question doesn't support them.
-e::
The next parameter is the pattern. This option has to be
diff --git a/Documentation/git-gui.txt b/Documentation/git-gui.txt
index 5f93f80..c9d7e96 100644
--- a/Documentation/git-gui.txt
+++ b/Documentation/git-gui.txt
@@ -112,15 +112,9 @@ Other
versions are distributed as part of the Git suite for the convenience
of end users.
-A 'git gui' development repository can be obtained from:
+The official repository of the 'git gui' project can be found at:
- git clone git://repo.or.cz/git-gui.git
-
-or
-
- git clone http://repo.or.cz/r/git-gui.git
-
-or browsed online at http://repo.or.cz/w/git-gui.git/[].
+ https://github.com/prati0100/git-gui.git/
GIT
---
diff --git a/Documentation/git-ls-remote.txt b/Documentation/git-ls-remote.txt
index 0b057cb..a2ea1fd 100644
--- a/Documentation/git-ls-remote.txt
+++ b/Documentation/git-ls-remote.txt
@@ -92,21 +92,23 @@ OPTIONS
EXAMPLES
--------
- $ git ls-remote --tags ./.
- d6602ec5194c87b0fc87103ca4d67251c76f233a refs/tags/v0.99
- f25a265a342aed6041ab0cc484224d9ca54b6f41 refs/tags/v0.99.1
- 7ceca275d047c90c0c7d5afb13ab97efdf51bd6e refs/tags/v0.99.3
- c5db5456ae3b0873fc659c19fafdde22313cc441 refs/tags/v0.99.2
- 0918385dbd9656cab0d1d81ba7453d49bbc16250 refs/tags/junio-gpg-pub
- $ git ls-remote http://www.kernel.org/pub/scm/git/git.git master pu rc
- 5fe978a5381f1fbad26a80e682ddd2a401966740 refs/heads/master
- c781a84b5204fb294c9ccc79f8b3baceeb32c061 refs/heads/pu
- $ git remote add korg http://www.kernel.org/pub/scm/git/git.git
- $ git ls-remote --tags korg v\*
- d6602ec5194c87b0fc87103ca4d67251c76f233a refs/tags/v0.99
- f25a265a342aed6041ab0cc484224d9ca54b6f41 refs/tags/v0.99.1
- c5db5456ae3b0873fc659c19fafdde22313cc441 refs/tags/v0.99.2
- 7ceca275d047c90c0c7d5afb13ab97efdf51bd6e refs/tags/v0.99.3
+----
+$ git ls-remote --tags ./.
+d6602ec5194c87b0fc87103ca4d67251c76f233a refs/tags/v0.99
+f25a265a342aed6041ab0cc484224d9ca54b6f41 refs/tags/v0.99.1
+7ceca275d047c90c0c7d5afb13ab97efdf51bd6e refs/tags/v0.99.3
+c5db5456ae3b0873fc659c19fafdde22313cc441 refs/tags/v0.99.2
+0918385dbd9656cab0d1d81ba7453d49bbc16250 refs/tags/junio-gpg-pub
+$ git ls-remote http://www.kernel.org/pub/scm/git/git.git master pu rc
+5fe978a5381f1fbad26a80e682ddd2a401966740 refs/heads/master
+c781a84b5204fb294c9ccc79f8b3baceeb32c061 refs/heads/pu
+$ git remote add korg http://www.kernel.org/pub/scm/git/git.git
+$ git ls-remote --tags korg v\*
+d6602ec5194c87b0fc87103ca4d67251c76f233a refs/tags/v0.99
+f25a265a342aed6041ab0cc484224d9ca54b6f41 refs/tags/v0.99.1
+c5db5456ae3b0873fc659c19fafdde22313cc441 refs/tags/v0.99.2
+7ceca275d047c90c0c7d5afb13ab97efdf51bd6e refs/tags/v0.99.3
+----
SEE ALSO
--------
diff --git a/Documentation/git-merge-base.txt b/Documentation/git-merge-base.txt
index 261d5c1..2d944e0 100644
--- a/Documentation/git-merge-base.txt
+++ b/Documentation/git-merge-base.txt
@@ -80,9 +80,11 @@ which is reachable from both 'A' and 'B' through the parent relationship.
For example, with this topology:
- o---o---o---B
- /
- ---o---1---o---o---o---A
+....
+ o---o---o---B
+ /
+---o---1---o---o---o---A
+....
the merge base between 'A' and 'B' is '1'.
@@ -90,21 +92,25 @@ Given three commits 'A', 'B' and 'C', `git merge-base A B C` will compute the
merge base between 'A' and a hypothetical commit 'M', which is a merge
between 'B' and 'C'. For example, with this topology:
- o---o---o---o---C
- /
- / o---o---o---B
- / /
- ---2---1---o---o---o---A
+....
+ o---o---o---o---C
+ /
+ / o---o---o---B
+ / /
+---2---1---o---o---o---A
+....
the result of `git merge-base A B C` is '1'. This is because the
equivalent topology with a merge commit 'M' between 'B' and 'C' is:
- o---o---o---o---o
- / \
- / o---o---o---o---M
- / /
- ---2---1---o---o---o---A
+....
+ o---o---o---o---o
+ / \
+ / o---o---o---o---M
+ / /
+---2---1---o---o---o---A
+....
and the result of `git merge-base A M` is '1'. Commit '2' is also a
common ancestor between 'A' and 'M', but '1' is a better common ancestor,
@@ -116,11 +122,13 @@ the best common ancestor of all commits.
When the history involves criss-cross merges, there can be more than one
'best' common ancestor for two commits. For example, with this topology:
- ---1---o---A
- \ /
- X
- / \
- ---2---o---o---B
+....
+---1---o---A
+ \ /
+ X
+ / \
+---2---o---o---B
+....
both '1' and '2' are merge-bases of A and B. Neither one is better than
the other (both are 'best' merge bases). When the `--all` option is not given,
@@ -131,18 +139,22 @@ and B is (or at least used to be) to compute the merge base between
A and B, and check if it is the same as A, in which case, A is an
ancestor of B. You will see this idiom used often in older scripts.
- A=$(git rev-parse --verify A)
- if test "$A" = "$(git merge-base A B)"
- then
- ... A is an ancestor of B ...
- fi
+....
+A=$(git rev-parse --verify A)
+if test "$A" = "$(git merge-base A B)"
+then
+ ... A is an ancestor of B ...
+fi
+....
In modern git, you can say this in a more direct way:
- if git merge-base --is-ancestor A B
- then
- ... A is an ancestor of B ...
- fi
+....
+if git merge-base --is-ancestor A B
+then
+ ... A is an ancestor of B ...
+fi
+....
instead.
@@ -154,13 +166,15 @@ topic origin/master`, the history of remote-tracking branch
`origin/master` may have been rewound and rebuilt, leading to a
history of this shape:
- o---B2
- /
- ---o---o---B1--o---o---o---B (origin/master)
- \
- B0
- \
- D0---D1---D (topic)
+....
+ o---B2
+ /
+---o---o---B1--o---o---o---B (origin/master)
+ \
+ B0
+ \
+ D0---D1---D (topic)
+....
where `origin/master` used to point at commits B0, B1, B2 and now it
points at B, and your `topic` branch was started on top of it back
@@ -193,13 +207,15 @@ will find B0, and
will replay D0, D1 and D on top of B to create a new history of this
shape:
- o---B2
- /
- ---o---o---B1--o---o---o---B (origin/master)
- \ \
- B0 D0'--D1'--D' (topic - updated)
- \
- D0---D1---D (topic - old)
+....
+ o---B2
+ /
+---o---o---B1--o---o---o---B (origin/master)
+ \ \
+ B0 D0'--D1'--D' (topic - updated)
+ \
+ D0---D1---D (topic - old)
+....
A caveat is that older reflog entries in your repository may be
expired by `git gc`. If B0 no longer appears in the reflog of the
diff --git a/Documentation/git-merge-index.txt b/Documentation/git-merge-index.txt
index 02676fb..2ab84a9 100644
--- a/Documentation/git-merge-index.txt
+++ b/Documentation/git-merge-index.txt
@@ -54,20 +54,24 @@ original is first. But the argument order to the 3-way merge program
Examples:
- torvalds@ppc970:~/merge-test> git merge-index cat MM
- This is MM from the original tree. # original
- This is modified MM in the branch A. # merge1
- This is modified MM in the branch B. # merge2
- This is modified MM in the branch B. # current contents
+----
+torvalds@ppc970:~/merge-test> git merge-index cat MM
+This is MM from the original tree. # original
+This is modified MM in the branch A. # merge1
+This is modified MM in the branch B. # merge2
+This is modified MM in the branch B. # current contents
+----
or
- torvalds@ppc970:~/merge-test> git merge-index cat AA MM
- cat: : No such file or directory
- This is added AA in the branch A.
- This is added AA in the branch B.
- This is added AA in the branch B.
- fatal: merge program failed
+----
+torvalds@ppc970:~/merge-test> git merge-index cat AA MM
+cat: : No such file or directory
+This is added AA in the branch A.
+This is added AA in the branch B.
+This is added AA in the branch B.
+fatal: merge program failed
+----
where the latter example shows how 'git merge-index' will stop trying to
merge once anything has returned an error (i.e., `cat` returned an error
diff --git a/Documentation/git-receive-pack.txt b/Documentation/git-receive-pack.txt
index dedf97e..25702ed 100644
--- a/Documentation/git-receive-pack.txt
+++ b/Documentation/git-receive-pack.txt
@@ -165,29 +165,31 @@ ref listing the commits pushed to the repository, and logs the push
certificates of signed pushes with good signatures to a logger
service:
- #!/bin/sh
- # mail out commit update information.
- while read oval nval ref
- do
- if expr "$oval" : '0*$' >/dev/null
- then
- echo "Created a new ref, with the following commits:"
- git rev-list --pretty "$nval"
- else
- echo "New commits:"
- git rev-list --pretty "$nval" "^$oval"
- fi |
- mail -s "Changes to ref $ref" commit-list@mydomain
- done
- # log signed push certificate, if any
- if test -n "${GIT_PUSH_CERT-}" && test ${GIT_PUSH_CERT_STATUS} = G
+----
+#!/bin/sh
+# mail out commit update information.
+while read oval nval ref
+do
+ if expr "$oval" : '0*$' >/dev/null
then
- (
- echo expected nonce is ${GIT_PUSH_NONCE}
- git cat-file blob ${GIT_PUSH_CERT}
- ) | mail -s "push certificate from $GIT_PUSH_CERT_SIGNER" push-log@mydomain
- fi
- exit 0
+ echo "Created a new ref, with the following commits:"
+ git rev-list --pretty "$nval"
+ else
+ echo "New commits:"
+ git rev-list --pretty "$nval" "^$oval"
+ fi |
+ mail -s "Changes to ref $ref" commit-list@mydomain
+done
+# log signed push certificate, if any
+if test -n "${GIT_PUSH_CERT-}" && test ${GIT_PUSH_CERT_STATUS} = G
+then
+ (
+ echo expected nonce is ${GIT_PUSH_NONCE}
+ git cat-file blob ${GIT_PUSH_CERT}
+ ) | mail -s "push certificate from $GIT_PUSH_CERT_SIGNER" push-log@mydomain
+fi
+exit 0
+----
The exit code from this hook invocation is ignored, however a
non-zero exit code will generate an error message.
@@ -212,8 +214,10 @@ anyway.
This hook can be used, for example, to run `git update-server-info`
if the repository is packed and is served via a dumb transport.
- #!/bin/sh
- exec git update-server-info
+----
+#!/bin/sh
+exec git update-server-info
+----
QUARANTINE ENVIRONMENT
diff --git a/Documentation/git-rev-list.txt b/Documentation/git-rev-list.txt
index 9392760..025c911 100644
--- a/Documentation/git-rev-list.txt
+++ b/Documentation/git-rev-list.txt
@@ -9,59 +9,7 @@ git-rev-list - Lists commit objects in reverse chronological order
SYNOPSIS
--------
[verse]
-'git rev-list' [ --max-count=<number> ]
- [ --skip=<number> ]
- [ --max-age=<timestamp> ]
- [ --min-age=<timestamp> ]
- [ --sparse ]
- [ --merges ]
- [ --no-merges ]
- [ --min-parents=<number> ]
- [ --no-min-parents ]
- [ --max-parents=<number> ]
- [ --no-max-parents ]
- [ --first-parent ]
- [ --remove-empty ]
- [ --full-history ]
- [ --not ]
- [ --all ]
- [ --branches[=<pattern>] ]
- [ --tags[=<pattern>] ]
- [ --remotes[=<pattern>] ]
- [ --glob=<glob-pattern> ]
- [ --ignore-missing ]
- [ --stdin ]
- [ --quiet ]
- [ --topo-order ]
- [ --parents ]
- [ --timestamp ]
- [ --left-right ]
- [ --left-only ]
- [ --right-only ]
- [ --cherry-mark ]
- [ --cherry-pick ]
- [ --encoding=<encoding> ]
- [ --(author|committer|grep)=<pattern> ]
- [ --regexp-ignore-case | -i ]
- [ --extended-regexp | -E ]
- [ --fixed-strings | -F ]
- [ --date=<format>]
- [ [ --objects | --objects-edge | --objects-edge-aggressive ]
- [ --unpacked ]
- [ --object-names | --no-object-names ]
- [ --filter=<filter-spec> [ --filter-print-omitted ] ] ]
- [ --missing=<missing-action> ]
- [ --pretty | --header ]
- [ --bisect ]
- [ --bisect-vars ]
- [ --bisect-all ]
- [ --merge ]
- [ --reverse ]
- [ --walk-reflogs ]
- [ --no-walk ] [ --do-walk ]
- [ --count ]
- [ --use-bitmap-index ]
- <commit>... [ \-- <paths>... ]
+'git rev-list' [<options>] <commit>... [[--] <path>...]
DESCRIPTION
-----------
diff --git a/Documentation/git-send-email.txt b/Documentation/git-send-email.txt
index d93e5d0..0a69810 100644
--- a/Documentation/git-send-email.txt
+++ b/Documentation/git-send-email.txt
@@ -486,11 +486,13 @@ Use gmail as the smtp server
To use 'git send-email' to send your patches through the GMail SMTP server,
edit ~/.gitconfig to specify your account settings:
- [sendemail]
- smtpEncryption = tls
- smtpServer = smtp.gmail.com
- smtpUser = yourname@gmail.com
- smtpServerPort = 587
+----
+[sendemail]
+ smtpEncryption = tls
+ smtpServer = smtp.gmail.com
+ smtpUser = yourname@gmail.com
+ smtpServerPort = 587
+----
If you have multifactor authentication setup on your gmail account, you will
need to generate an app-specific password for use with 'git send-email'. Visit
diff --git a/Documentation/git-status.txt b/Documentation/git-status.txt
index d4e8f24..7731b45 100644
--- a/Documentation/git-status.txt
+++ b/Documentation/git-status.txt
@@ -59,16 +59,17 @@ This is optional and defaults to the original version 'v1' format.
--untracked-files[=<mode>]::
Show untracked files.
+
+--
The mode parameter is used to specify the handling of untracked files.
It is optional: it defaults to 'all', and if specified, it must be
stuck to the option (e.g. `-uno`, but not `-u no`).
-+
+
The possible options are:
-+
+
- 'no' - Show no untracked files.
- 'normal' - Shows untracked files and directories.
- 'all' - Also shows individual files in untracked directories.
-+
+
When `-u` option is not used, untracked files and directories are
shown (i.e. the same as specifying `normal`), to help you avoid
forgetting to add newly created files. Because it takes extra work
@@ -78,9 +79,10 @@ Consider enabling untracked cache and split index if supported (see
`git update-index --untracked-cache` and `git update-index
--split-index`), Otherwise you can use `no` to have `git status`
return more quickly without showing untracked files.
-+
+
The default can be changed using the status.showUntrackedFiles
configuration variable documented in linkgit:git-config[1].
+--
--ignore-submodules[=<when>]::
Ignore changes to submodules when looking for changes. <when> can be
@@ -100,11 +102,12 @@ configuration variable documented in linkgit:git-config[1].
--ignored[=<mode>]::
Show ignored files as well.
+
+--
The mode parameter is used to specify the handling of ignored files.
It is optional: it defaults to 'traditional'.
-+
+
The possible options are:
-+
+
- 'traditional' - Shows ignored files and directories, unless
--untracked-files=all is specified, in which case
individual files in ignored directories are
@@ -112,12 +115,13 @@ The possible options are:
- 'no' - Show no ignored files.
- 'matching' - Shows ignored files and directories matching an
ignore pattern.
-+
+
When 'matching' mode is specified, paths that explicitly match an
ignored pattern are shown. If a directory matches an ignore pattern,
then it is shown, but not paths contained in the ignored directory. If
a directory does not match an ignore pattern, but all contents are
ignored, then the directory is not shown, but all contents are shown.
+--
-z::
Terminate entries with NUL, instead of LF. This implies
diff --git a/Documentation/git-submodule.txt b/Documentation/git-submodule.txt
index 0ed5c24..1f46380 100644
--- a/Documentation/git-submodule.txt
+++ b/Documentation/git-submodule.txt
@@ -173,7 +173,8 @@ submodule with the `--init` option.
If `--recursive` is specified, this command will recurse into the
registered submodules, and update any nested submodules within.
--
-set-branch ((-d|--default)|(-b|--branch <branch>)) [--] <path>::
+set-branch (-b|--branch) <branch> [--] <path>::
+set-branch (-d|--default) [--] <path>::
Sets the default remote tracking branch for the submodule. The
`--branch` option allows the remote branch to be specified. The
`--default` option removes the submodule.<name>.branch configuration
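The split synopsis corresponds to two distinct invocations, roughly (the submodule path and branch name are placeholders):
----
# track a specific remote branch for the submodule at lib/
git submodule set-branch --branch devel -- lib
# go back to the remote's default branch
git submodule set-branch --default -- lib
----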
diff --git a/Documentation/gitmodules.txt b/Documentation/gitmodules.txt
index a66e95b..f2a65ba 100644
--- a/Documentation/gitmodules.txt
+++ b/Documentation/gitmodules.txt
@@ -90,7 +90,7 @@ of the superproject, the setting there will override the one found in
.gitmodules.
Both settings can be overridden on the command line by using the
-"--ignore-submodule" option. The 'git submodule' commands are not
+"--ignore-submodules" option. The 'git submodule' commands are not
affected by this setting.
--
@@ -105,14 +105,15 @@ EXAMPLES
Consider the following .gitmodules file:
- [submodule "libfoo"]
- path = include/foo
- url = git://foo.com/git/lib.git
-
- [submodule "libbar"]
- path = include/bar
- url = git://bar.com/git/lib.git
+----
+[submodule "libfoo"]
+ path = include/foo
+ url = git://foo.com/git/lib.git
+[submodule "libbar"]
+ path = include/bar
+ url = git://bar.com/git/lib.git
+----
This defines two submodules, `libfoo` and `libbar`. These are expected to
be checked out in the paths `include/foo` and `include/bar`, and for both
diff --git a/Documentation/gitweb.conf.txt b/Documentation/gitweb.conf.txt
index 35317e7..7963a79 100644
--- a/Documentation/gitweb.conf.txt
+++ b/Documentation/gitweb.conf.txt
@@ -786,9 +786,9 @@ forks::
subdirectories of project root (basename) to be forks of existing
projects. For each project +$projname.git+, projects in the
+$projname/+ directory and its subdirectories will not be
- shown in the main projects list. Instead, a \'\+' mark is shown
- next to +$projname+, which links to a "forks" view that lists all
- the forks (all projects in +$projname/+ subdirectory). Additionally
+ shown in the main projects list. Instead, a \'+' mark is shown
+ next to `$projname`, which links to a "forks" view that lists all
+ the forks (all projects in `$projname/` subdirectory). Additionally
a "forks" view for a project is linked from project summary page.
+
If the project list is taken from a file (+$projects_list+ points to a
diff --git a/Documentation/manpage.xsl b/Documentation/manpage.xsl
new file mode 100644
index 0000000..ef64bab
--- /dev/null
+++ b/Documentation/manpage.xsl
@@ -0,0 +1,3 @@
+<xsl:stylesheet xmlns:xsl="http://www.w3.org/1999/XSL/Transform" version="1.0">
+ <xsl:import href="http://docbook.sourceforge.net/release/xsl-ns/current/manpages/docbook.xsl" />
+</xsl:stylesheet>
diff --git a/Documentation/pretty-formats.txt b/Documentation/pretty-formats.txt
index 0795983..b87e2e8 100644
--- a/Documentation/pretty-formats.txt
+++ b/Documentation/pretty-formats.txt
@@ -208,7 +208,7 @@ endif::git-rev-list[]
'%GP':: show the fingerprint of the primary key whose subkey was used
to sign a signed commit
'%gD':: reflog selector, e.g., `refs/stash@{1}` or `refs/stash@{2
- minutes ago`}; the format follows the rules described for the
+ minutes ago}`; the format follows the rules described for the
`-g` option. The portion before the `@` is the refname as
given on the command line (so `git log -g refs/heads/master`
would yield `refs/heads/master@{0}`).
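The corrected backtick placement matters because the closing brace is part of the selector itself. Following the rule quoted above, a quick check (assuming the ref has at least one reflog entry):
------------
$ git log -g -1 --format='%gD' refs/heads/master
refs/heads/master@{0}
------------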
diff --git a/Documentation/technical/api-trace2.txt b/Documentation/technical/api-trace2.txt
index 71eb081..a045dbe 100644
--- a/Documentation/technical/api-trace2.txt
+++ b/Documentation/technical/api-trace2.txt
@@ -128,7 +128,7 @@ yields
------------
$ cat ~/log.event
-{"event":"version","sid":"sid":"20190408T191610.507018Z-H9b68c35f-P000059a8","thread":"main","time":"2019-01-16T17:28:42.620713Z","file":"common-main.c","line":38,"evt":"1","exe":"2.20.1.155.g426c96fcdb"}
+{"event":"version","sid":"sid":"20190408T191610.507018Z-H9b68c35f-P000059a8","thread":"main","time":"2019-01-16T17:28:42.620713Z","file":"common-main.c","line":38,"evt":"2","exe":"2.20.1.155.g426c96fcdb"}
{"event":"start","sid":"20190408T191610.507018Z-H9b68c35f-P000059a8","thread":"main","time":"2019-01-16T17:28:42.621027Z","file":"common-main.c","line":39,"t_abs":0.001173,"argv":["git","version"]}
{"event":"cmd_name","sid":"20190408T191610.507018Z-H9b68c35f-P000059a8","thread":"main","time":"2019-01-16T17:28:42.621122Z","file":"git.c","line":432,"name":"version","hierarchy":"version"}
{"event":"exit","sid":"20190408T191610.507018Z-H9b68c35f-P000059a8","thread":"main","time":"2019-01-16T17:28:42.621236Z","file":"git.c","line":662,"t_abs":0.001227,"code":0}
@@ -142,10 +142,9 @@ system or global config value to one of the following:
include::../trace2-target-values.txt[]
-If the target already exists and is a directory, the traces will be
-written to files (one per process) underneath the given directory. They
-will be named according to the last component of the SID (optionally
-followed by a counter to avoid filename collisions).
+When trace files are written to a target directory, they will be named according
+to the last component of the SID (optionally followed by a counter to avoid
+filename collisions).
== Trace2 API
@@ -605,17 +604,35 @@ only present on the "start" and "atexit" events.
==== Event-Specific Key/Value Pairs
`"version"`::
- This event gives the version of the executable and the EVENT format.
+ This event gives the version of the executable and the EVENT format. It
+ should always be the first event in a trace session. The EVENT format
+ version will be incremented if new event types are added, if existing
+ fields are removed, or if there are significant changes in
+ interpretation of existing events or fields. Smaller changes, such as
+ adding a new field to an existing event, will not require an increment
+ to the EVENT format version.
+
------------
{
"event":"version",
...
- "evt":"1", # EVENT format version
+ "evt":"2", # EVENT format version
"exe":"2.20.1.155.g426c96fcdb" # git version
}
------------
+`"discard"`::
+ This event is written to the git-trace2-discard sentinel file if there
+ are too many files in the target trace directory (see the
+ trace2.maxFiles config option).
++
+------------
+{
+ "event":"discard",
+ ...
+}
+------------
+
`"start"`::
This event contains the complete argv received by main().
+
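To see the `"version"` and `"discard"` events described above in practice, the event target can point at a directory; a hedged sketch using the config keys this patch documents (the directory path is arbitrary and must already exist):
------------
$ mkdir -p /tmp/git-traces
$ git config --global trace2.eventTarget /tmp/git-traces/
$ git config --global trace2.maxFiles 100   # beyond this, the git-trace2-discard sentinel is written
$ git version                               # writes one event file named after the SID
------------
Each trace file begins with a `"version"` event carrying `"evt":"2"` under the format documented here.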
diff --git a/Documentation/trace2-target-values.txt b/Documentation/trace2-target-values.txt
index 27d3c64..3985b6d 100644
--- a/Documentation/trace2-target-values.txt
+++ b/Documentation/trace2-target-values.txt
@@ -2,7 +2,9 @@
* `0` or `false` - Disables the target.
* `1` or `true` - Writes to `STDERR`.
* `[2-9]` - Writes to the already opened file descriptor.
-* `<absolute-pathname>` - Writes to the file in append mode.
+* `<absolute-pathname>` - Writes to the file in append mode. If the target
+already exists and is a directory, the traces will be written to files (one
+per process) underneath the given directory.
* `af_unix:[<socket_type>:]<absolute-pathname>` - Write to a
Unix Domain Socket (on platforms that support them). Socket
type can be either `stream` or `dgram`; if omitted Git will
diff --git a/Documentation/user-manual.txt b/Documentation/user-manual.txt
index 8bce75b..06bd899 100644
--- a/Documentation/user-manual.txt
+++ b/Documentation/user-manual.txt
@@ -1,5 +1,4 @@
-Git User Manual
-===============
+= Git User Manual
Git is a fast distributed revision control system.
@@ -41,12 +40,10 @@ complete.
[[repositories-and-branches]]
-Repositories and Branches
-=========================
+== Repositories and Branches
[[how-to-get-a-git-repository]]
-How to get a Git repository
----------------------------
+=== How to get a Git repository
It will be useful to have a Git repository to experiment with as you
read this manual.
@@ -73,8 +70,7 @@ top-level directory named `.git`, which contains all the information
about the history of the project.
[[how-to-check-out]]
-How to check out a different version of a project
--------------------------------------------------
+=== How to check out a different version of a project
Git is best thought of as a tool for storing the history of a collection
of files. It stores the history as a compressed collection of
@@ -151,8 +147,7 @@ with no way to find the history it used to point to; so use this command
carefully.
[[understanding-commits]]
-Understanding History: Commits
-------------------------------
+=== Understanding History: Commits
Every change in the history of a project is represented by a commit.
The linkgit:git-show[1] command shows the most recent commit on the
@@ -202,8 +197,7 @@ history, including file data and directory contents, is stored in an object
with a name that is a hash of its contents.
[[understanding-reachability]]
-Understanding history: commits, parents, and reachability
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+==== Understanding history: commits, parents, and reachability
Every commit (except the very first commit in a project) also has a
parent commit which shows what happened before this commit.
@@ -227,8 +221,7 @@ that Y is a descendant of X, or that there is a chain of parents
leading from commit Y to commit X.
[[history-diagrams]]
-Understanding history: History diagrams
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+==== Understanding history: History diagrams
We will sometimes represent Git history using diagrams like the one
below. Commits are shown as "o", and the links between them with
@@ -247,8 +240,7 @@ If we need to talk about a particular commit, the character "o" may
be replaced with another letter or number.
[[what-is-a-branch]]
-Understanding history: What is a branch?
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+==== Understanding history: What is a branch?
When we need to be precise, we will use the word "branch" to mean a line
of development, and "branch head" (or just "head") to mean a reference
@@ -261,8 +253,7 @@ However, when no confusion will result, we often just use the term
"branch" both for branches and for branch heads.
[[manipulating-branches]]
-Manipulating branches
----------------------
+=== Manipulating branches
Creating, deleting, and modifying branches is quick and easy; here's
a summary of the commands:
@@ -299,8 +290,7 @@ ref: refs/heads/master
------------------------------------------------
[[detached-head]]
-Examining an old version without creating a new branch
-------------------------------------------------------
+=== Examining an old version without creating a new branch
The `git switch` command normally expects a branch head, but will also
accept an arbitrary commit when invoked with --detach; for example,
@@ -340,8 +330,7 @@ make up a name for the new branch. You can still create a new branch
(or tag) for this version later if you decide to.
[[examining-remote-branches]]
-Examining branches from a remote repository
--------------------------------------------
+=== Examining branches from a remote repository
The "master" branch that was created at the time you cloned is a copy
of the HEAD in the repository that you cloned from. That repository
@@ -383,8 +372,7 @@ Note that the name "origin" is just the name that Git uses by default
to refer to the repository that you cloned from.
[[how-git-stores-references]]
-Naming branches, tags, and other references
--------------------------------------------
+=== Naming branches, tags, and other references
Branches, remote-tracking branches, and tags are all references to
commits. All references are named with a slash-separated path name
@@ -413,8 +401,7 @@ references with the same shorthand name, see the "SPECIFYING
REVISIONS" section of linkgit:gitrevisions[7].
[[Updating-a-repository-With-git-fetch]]
-Updating a repository with git fetch
-------------------------------------
+=== Updating a repository with git fetch
After you clone a repository and commit a few changes of your own, you
may wish to check the original repository for updates.
@@ -425,8 +412,7 @@ repository. It will not touch any of your own branches--not even the
"master" branch that was created for you on clone.
[[fetching-branches]]
-Fetching branches from other repositories
------------------------------------------
+=== Fetching branches from other repositories
You can also track branches from repositories other than the one you
cloned from, using linkgit:git-remote[1]:
@@ -474,8 +460,7 @@ text editor. (See the "CONFIGURATION FILE" section of
linkgit:git-config[1] for details.)
[[exploring-git-history]]
-Exploring Git history
-=====================
+== Exploring Git history
Git is best thought of as a tool for storing the history of a
collection of files. It does this by storing compressed snapshots of
@@ -489,8 +474,7 @@ We start with one specialized tool that is useful for finding the
commit that introduced a bug into a project.
[[using-bisect]]
-How to use bisect to find a regression
---------------------------------------
+=== How to use bisect to find a regression
Suppose version 2.6.18 of your project worked, but the version at
"master" crashes. Sometimes the best way to find the cause of such a
@@ -572,8 +556,7 @@ linkgit:git-bisect[1] for more information about this and other `git
bisect` features.
[[naming-commits]]
-Naming commits
---------------
+=== Naming commits
We have seen several ways of naming commits already:
@@ -637,8 +620,7 @@ e05db0fd4f31dde7005f075a84f96b360d05984b
-------------------------------------------------
[[creating-tags]]
-Creating tags
--------------
+=== Creating tags
We can also create a tag to refer to a particular commit; after
running
@@ -655,8 +637,7 @@ should create a tag object instead; see the linkgit:git-tag[1] man page
for details.
[[browsing-revisions]]
-Browsing revisions
-------------------
+=== Browsing revisions
The linkgit:git-log[1] command can show lists of commits. On its
own, it shows all commits reachable from the parent commit; but you
@@ -697,8 +678,7 @@ multiple independent lines of development, the particular order that
commits are listed in may be somewhat arbitrary.
[[generating-diffs]]
-Generating diffs
-----------------
+=== Generating diffs
You can generate diffs between any two versions using
linkgit:git-diff[1]:
@@ -726,8 +706,7 @@ will generate a file with a patch for each commit reachable from test
but not from master.
[[viewing-old-file-versions]]
-Viewing old file versions
--------------------------
+=== Viewing old file versions
You can always view an old version of a file by just checking out the
correct revision first. But sometimes it is more convenient to be
@@ -742,12 +721,10 @@ Before the colon may be anything that names a commit, and after it
may be any path to a file tracked by Git.
[[history-examples]]
-Examples
---------
+=== Examples
[[counting-commits-on-a-branch]]
-Counting the number of commits on a branch
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+==== Counting the number of commits on a branch
Suppose you want to know how many commits you've made on `mybranch`
since it diverged from `origin`:
@@ -765,8 +742,7 @@ $ git rev-list origin..mybranch | wc -l
-------------------------------------------------
[[checking-for-equal-branches]]
-Check whether two branches point at the same history
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+==== Check whether two branches point at the same history
Suppose you want to check whether two branches point at the same point
in history.
@@ -798,8 +774,7 @@ $ git log origin...master
will return no commits when the two branches are equal.
[[finding-tagged-descendants]]
-Find first tagged version including a given fix
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+==== Find first tagged version including a given fix
Suppose you know that the commit e05db0fd fixed a certain problem.
You'd like to find the earliest tagged release that contains that
@@ -883,8 +858,7 @@ shows that e05db0fd is reachable from itself, from v1.5.0-rc1,
and from v1.5.0-rc2, and not from v1.5.0-rc0.
[[showing-commits-unique-to-a-branch]]
-Showing commits unique to a given branch
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+==== Showing commits unique to a given branch
Suppose you would like to see all the commits reachable from the branch
head named `master` but not from any other head in your repository.
@@ -931,8 +905,7 @@ $ gitk $( git show-ref --heads ) --not $( git show-ref --tags )
syntax such as `--not`.)
[[making-a-release]]
-Creating a changelog and tarball for a software release
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+==== Creating a changelog and tarball for a software release
The linkgit:git-archive[1] command can create a tar or zip archive from
any version of a project; for example:
@@ -983,8 +956,7 @@ and then he just cut-and-pastes the output commands after verifying that
they look OK.
[[Finding-commits-With-given-Content]]
-Finding commits referencing a file with given content
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+==== Finding commits referencing a file with given content
Somebody hands you a copy of a file, and asks which commits modified a
file such that it contained the given content either before or after the
@@ -1000,12 +972,10 @@ student. The linkgit:git-log[1], linkgit:git-diff-tree[1], and
linkgit:git-hash-object[1] man pages may prove helpful.
[[Developing-With-git]]
-Developing with Git
-===================
+== Developing with Git
[[telling-git-your-name]]
-Telling Git your name
----------------------
+=== Telling Git your name
Before creating any commits, you should introduce yourself to Git.
The easiest way to do so is to use linkgit:git-config[1]:
@@ -1030,8 +1000,7 @@ also edit it with your favorite editor.
[[creating-a-new-repository]]
-Creating a new repository
--------------------------
+=== Creating a new repository
Creating a new repository from scratch is very easy:
@@ -1052,8 +1021,7 @@ $ git commit
-------------------------------------------------
[[how-to-make-a-commit]]
-How to make a commit
---------------------
+=== How to make a commit
Creating a new commit takes three steps:
@@ -1148,8 +1116,7 @@ for inclusion in the index (by right-clicking on the diff hunk and
choosing "Stage Hunk For Commit").
[[creating-good-commit-messages]]
-Creating good commit messages
------------------------------
+=== Creating good commit messages
Though not required, it's a good idea to begin the commit message
with a single short (less than 50 character) line summarizing the
@@ -1162,8 +1129,7 @@ rest of the commit in the body.
[[ignoring-files]]
-Ignoring files
---------------
+=== Ignoring files
A project will often generate files that you do 'not' want to track with Git.
This typically includes files generated by a build process or temporary
@@ -1205,8 +1171,7 @@ Some Git commands can also take exclude patterns directly on the
command line. See linkgit:gitignore[5] for the details.
[[how-to-merge]]
-How to merge
-------------
+=== How to merge
You can rejoin two diverging branches of development using
linkgit:git-merge[1]:
@@ -1254,8 +1219,7 @@ has two parents, one pointing to the top of the current branch, and
one to the top of the other branch.
[[resolving-a-merge]]
-Resolving a merge
------------------
+=== Resolving a merge
When a merge isn't resolved automatically, Git leaves the index and
the working tree in a special state that gives you all the
@@ -1297,8 +1261,7 @@ The above is all you need to know to resolve a simple merge. But Git
also provides more information to help resolve conflicts:
[[conflict-resolution]]
-Getting conflict-resolution help during a merge
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+==== Getting conflict-resolution help during a merge
All of the changes that Git was able to merge automatically are
already added to the index file, so linkgit:git-diff[1] shows only
@@ -1401,8 +1364,7 @@ the different stages of that file will be "collapsed", after which
`git diff` will (by default) no longer show diffs for that file.
[[undoing-a-merge]]
-Undoing a merge
----------------
+=== Undoing a merge
If you get stuck and decide to just give up and throw the whole mess
away, you can always return to the pre-merge state with
@@ -1423,8 +1385,7 @@ itself have been merged into another branch, as doing so may confuse
further merges.
[[fast-forwards]]
-Fast-forward merges
--------------------
+=== Fast-forward merges
There is one special case not mentioned above, which is treated
differently. Normally, a merge results in a merge commit, with two
@@ -1438,8 +1399,7 @@ to point at the head of the merged-in branch, without any new commits being
created.
[[fixing-mistakes]]
-Fixing mistakes
----------------
+=== Fixing mistakes
If you've messed up the working tree, but haven't yet committed your
mistake, you can return the entire working tree to the last committed
@@ -1463,8 +1423,7 @@ fundamentally different ways to fix the problem:
a branch that has had its history changed.
[[reverting-a-commit]]
-Fixing a mistake with a new commit
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+==== Fixing a mistake with a new commit
Creating a new commit that reverts an earlier change is very easy;
just pass the linkgit:git-revert[1] command a reference to the bad
@@ -1490,8 +1449,7 @@ conflicts manually, just as in the case of <<resolving-a-merge,
resolving a merge>>.
[[fixing-a-mistake-by-rewriting-history]]
-Fixing a mistake by rewriting history
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+==== Fixing a mistake by rewriting history
If the problematic commit is the most recent commit, and you have not
yet made that commit public, then you may just
@@ -1518,8 +1476,7 @@ this is an advanced topic to be left for
<<cleaning-up-history,another chapter>>.
[[checkout-of-path]]
-Checking out an old version of a file
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+==== Checking out an old version of a file
In the process of undoing a previous bad change, you may find it
useful to check out an older version of a particular file using
@@ -1543,8 +1500,7 @@ $ git show HEAD^:path/to/file
which will display the given version of the file.
[[interrupted-work]]
-Temporarily setting aside work in progress
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+==== Temporarily setting aside work in progress
While you are in the middle of working on something complicated, you
find an unrelated but obvious and trivial bug. You would like to fix it
@@ -1575,8 +1531,7 @@ $ git stash pop
[[ensuring-good-performance]]
-Ensuring good performance
--------------------------
+=== Ensuring good performance
On large repositories, Git depends on compression to keep the history
information from taking up too much space on disk or in memory. Some
@@ -1587,12 +1542,10 @@ to avoid automatic compression kicking in when it is not convenient.
[[ensuring-reliability]]
-Ensuring reliability
---------------------
+=== Ensuring reliability
[[checking-for-corruption]]
-Checking the repository for corruption
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+==== Checking the repository for corruption
The linkgit:git-fsck[1] command runs a number of self-consistency checks
on the repository, and reports on any problems. This may take some
@@ -1618,12 +1571,10 @@ You can run `git fsck --no-dangling` to suppress these messages, and still
view real errors.
[[recovering-lost-changes]]
-Recovering lost changes
-~~~~~~~~~~~~~~~~~~~~~~~
+==== Recovering lost changes
[[reflogs]]
-Reflogs
-^^^^^^^
+===== Reflogs
Say you modify a branch with <<fixing-mistakes,`git reset --hard`>>,
and then realize that the branch was the only reference you had to
@@ -1670,8 +1621,7 @@ same project, the reflog history is not shared: it tells you only about
how the branches in your local repository have changed over time.
[[dangling-object-recovery]]
-Examining dangling objects
-^^^^^^^^^^^^^^^^^^^^^^^^^^
+===== Examining dangling objects
In some situations the reflog may not be able to save you. For example,
suppose you delete a branch, then realize you need the history it
@@ -1715,12 +1665,10 @@ dangling objects can arise in other situations.
[[sharing-development]]
-Sharing development with others
-===============================
+== Sharing development with others
[[getting-updates-With-git-pull]]
-Getting updates with git pull
------------------------------
+=== Getting updates with git pull
After you clone a repository and commit a few changes of your own, you
may wish to check the original repository for updates and merge them
@@ -1783,8 +1731,7 @@ $ git merge branch
are roughly equivalent.
[[submitting-patches]]
-Submitting patches to a project
--------------------------------
+=== Submitting patches to a project
If you just have a few changes, the simplest way to submit them may
just be to send them as patches in email:
@@ -1812,8 +1759,7 @@ Consult the mailing list for your project first to determine
their requirements for submitting patches.
[[importing-patches]]
-Importing patches to a project
-------------------------------
+=== Importing patches to a project
Git also provides a tool called linkgit:git-am[1] (am stands for
"apply mailbox"), for importing such an emailed series of patches.
@@ -1845,8 +1791,7 @@ the original mailbox, with authorship and commit log message each
taken from the message containing each patch.
[[public-repositories]]
-Public Git repositories
------------------------
+=== Public Git repositories
Another way to submit changes to a project is to tell the maintainer
of that project to pull the changes from your repository using
@@ -1886,21 +1831,22 @@ pull from that repository. So the flow of changes, in a situation
where there is one other developer with a public repository, looks
like this:
- you push
- your personal repo ------------------> your public repo
- ^ |
- | |
- | you pull | they pull
- | |
- | |
- | they push V
- their public repo <------------------- their repo
+....
+ you push
+your personal repo ------------------> your public repo
+ ^ |
+ | |
+ | you pull | they pull
+ | |
+ | |
+ | they push V
+their public repo <------------------- their repo
+....
We explain how to do this in the following sections.
[[setting-up-a-public-repository]]
-Setting up a public repository
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+==== Setting up a public repository
Assume your personal repository is in the directory `~/proj`. We
first create a new clone of the repository and tell `git daemon` that it
@@ -1920,8 +1866,7 @@ public repository. You can use scp, rsync, or whatever is most
convenient.
[[exporting-via-git]]
-Exporting a Git repository via the Git protocol
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+==== Exporting a Git repository via the Git protocol
This is the preferred method.
@@ -1942,8 +1887,7 @@ linkgit:git-daemon[1] man page for details. (See especially the
examples section.)
[[exporting-via-http]]
-Exporting a git repository via HTTP
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+==== Exporting a git repository via HTTP
The Git protocol gives better performance and reliability, but on a
host with a web server set up, HTTP exports may be simpler to set up.
@@ -1975,8 +1919,7 @@ for a slightly more sophisticated setup using WebDAV which also
allows pushing over HTTP.)
[[pushing-changes-to-a-public-repository]]
-Pushing changes to a public repository
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+==== Pushing changes to a public repository
Note that the two techniques outlined above (exporting via
<<exporting-via-http,http>> or <<exporting-via-git,git>>) allow other
@@ -2035,8 +1978,7 @@ See the explanations of the `remote.<name>.url`,
linkgit:git-config[1] for details.
[[forcing-push]]
-What to do when a push fails
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+==== What to do when a push fails
If a push would not result in a <<fast-forwards,fast-forward>> of the
remote branch, then it will fail with an error like:
@@ -2090,8 +2032,7 @@ pull, or by a fetch followed by a rebase; see the
linkgit:gitcvs-migration[7] for more.
[[setting-up-a-shared-repository]]
-Setting up a shared repository
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+==== Setting up a shared repository
Another way to collaborate is by using a model similar to that
commonly used in CVS, where several developers with special rights
@@ -2121,8 +2062,7 @@ advantages over the central shared repository:
"out".
[[setting-up-gitweb]]
-Allowing web browsing of a repository
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+==== Allowing web browsing of a repository
The gitweb cgi script provides users an easy way to browse your
project's revisions, file contents and logs without having to install
@@ -2138,8 +2078,7 @@ linkgit:gitweb[1] for instructions on details setting up a permanent
installation with a CGI or Perl capable server.
[[how-to-get-a-git-repository-with-minimal-history]]
-How to get a Git repository with minimal history
-------------------------------------------------
+=== How to get a Git repository with minimal history
A <<def_shallow_clone,shallow clone>>, with its truncated
history, is useful when one is interested only in recent history
@@ -2158,12 +2097,10 @@ have to result in huge conflicts. This limitation may make such
a repository unsuitable to be used in merge based workflows.
[[sharing-development-examples]]
-Examples
---------
+=== Examples
[[maintaining-topic-branches]]
-Maintaining topic branches for a Linux subsystem maintainer
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+==== Maintaining topic branches for a Linux subsystem maintainer
This describes how Tony Luck uses Git in his role as maintainer of the
IA64 architecture for the Linux kernel.
@@ -2459,8 +2396,7 @@ done
[[cleaning-up-history]]
-Rewriting history and maintaining patch series
-==============================================
+== Rewriting history and maintaining patch series
Normally commits are only added to a project, never taken away or
replaced. Git is designed with this assumption, and violating it will
@@ -2470,8 +2406,7 @@ However, there is a situation in which it can be useful to violate this
assumption.
[[patch-series]]
-Creating the perfect patch series
----------------------------------
+=== Creating the perfect patch series
Suppose you are a contributor to a large project, and you want to add a
complicated feature, and to present it to the other developers in a way
@@ -2503,8 +2438,7 @@ use them, and then explain some of the problems that can arise because
you are rewriting history.
[[using-git-rebase]]
-Keeping a patch series up to date using git rebase
---------------------------------------------------
+=== Keeping a patch series up to date using git rebase
Suppose that you create a branch `mywork` on a remote-tracking branch
`origin`, and create some commits on top of it:
@@ -2591,8 +2525,7 @@ the rebase. See <<interactive-rebase>> for details, and
<<reordering-patch-series>> for alternatives.
[[rewriting-one-commit]]
-Rewriting a single commit
--------------------------
+=== Rewriting a single commit
We saw in <<fixing-a-mistake-by-rewriting-history>> that you can replace the
most recent commit using
@@ -2610,8 +2543,7 @@ If you need to amend commits from deeper in your history, you can
use <<interactive-rebase,interactive rebase's `edit` instruction>>.
[[reordering-patch-series]]
-Reordering or selecting from a patch series
--------------------------------------------
+=== Reordering or selecting from a patch series
Sometimes you want to edit a commit deeper in your history. One
approach is to use `git format-patch` to create a series of patches
@@ -2630,8 +2562,7 @@ $ git am *.patch
-------------------------------------------------
[[interactive-rebase]]
-Using interactive rebases
--------------------------
+=== Using interactive rebases
You can also edit a patch series with an interactive rebase. This is
the same as <<reordering-patch-series,reordering a patch series using
@@ -2688,16 +2619,14 @@ For a more detailed discussion of the procedure and additional tips,
see the "INTERACTIVE MODE" section of linkgit:git-rebase[1].
[[patch-series-tools]]
-Other tools
------------
+=== Other tools
There are numerous other tools, such as StGit, which exist for the
purpose of maintaining a patch series. These are outside of the scope of
this manual.
[[problems-With-rewriting-history]]
-Problems with rewriting history
--------------------------------
+=== Problems with rewriting history
The primary problem with rewriting the history of a branch has to do
with merging. Suppose somebody fetches your branch and merges it into
@@ -2745,8 +2674,7 @@ For true distributed development that supports proper merging,
published branches should never be rewritten.
[[bisect-merges]]
-Why bisecting merge commits can be harder than bisecting linear history
------------------------------------------------------------------------
+=== Why bisecting merge commits can be harder than bisecting linear history
The linkgit:git-bisect[1] command correctly handles history that
includes merge commits. However, when the commit that it finds is a
@@ -2811,12 +2739,10 @@ linear by rebasing against the latest upstream version before
publishing.
[[advanced-branch-management]]
-Advanced branch management
-==========================
+== Advanced branch management
[[fetching-individual-branches]]
-Fetching individual branches
-----------------------------
+=== Fetching individual branches
Instead of using linkgit:git-remote[1], you can also choose just
to update one branch at a time, and to store it locally under an
@@ -2844,8 +2770,7 @@ already have a branch named example-master, it will attempt to
master branch. In more detail:
[[fetch-fast-forwards]]
-git fetch and fast-forwards
----------------------------
+=== git fetch and fast-forwards
In the previous example, when updating an existing branch, `git fetch`
checks to make sure that the most recent commit on the remote
@@ -2882,8 +2807,7 @@ unless you've already created a reference of your own pointing to
them.
[[forcing-fetch]]
-Forcing git fetch to do non-fast-forward updates
-------------------------------------------------
+=== Forcing git fetch to do non-fast-forward updates
If git fetch fails because the new head of a branch is not a
descendant of the old head, you may force the update with:
@@ -2903,8 +2827,7 @@ Be aware that commits that the old version of example/master pointed at
may be lost, as we saw in the previous section.
[[remote-branch-configuration]]
-Configuring remote-tracking branches
-------------------------------------
+=== Configuring remote-tracking branches
We saw above that `origin` is just a shortcut to refer to the
repository that you originally cloned from. This information is
@@ -2955,8 +2878,7 @@ the refspec syntax.
[[git-concepts]]
-Git concepts
-============
+== Git concepts
Git is built on a small number of simple but powerful ideas. While it
is possible to get things done without understanding them, you will find
@@ -2966,8 +2888,7 @@ We start with the most important, the <<def_object_database,object
database>> and the <<def_index,index>>.
[[the-object-database]]
-The Object Database
--------------------
+=== The Object Database
We already saw in <<understanding-commits>> that all commits are stored
@@ -3011,8 +2932,7 @@ There are four different types of objects: "blob", "tree", "commit", and
The object types in some more detail:
[[commit-object]]
-Commit Object
-~~~~~~~~~~~~~
+==== Commit Object
The "commit" object links a physical state of a tree with a description
of how we got there and why. Use the `--pretty=raw` option to
@@ -3064,8 +2984,7 @@ commit whose parent is normally the current HEAD, and whose tree is
taken from the content currently stored in the index.
[[tree-object]]
-Tree Object
-~~~~~~~~~~~
+==== Tree Object
The ever-versatile linkgit:git-show[1] command can also be used to
examine tree objects, but linkgit:git-ls-tree[1] will give you more
@@ -3104,8 +3023,7 @@ Note that the files all have mode 644 or 755: Git actually only pays
attention to the executable bit.
[[blob-object]]
-Blob Object
-~~~~~~~~~~~
+==== Blob Object
You can use linkgit:git-show[1] to examine the contents of a blob; take,
for example, the blob in the entry for `COPYING` from the tree above:
@@ -3134,8 +3052,7 @@ sometimes be useful for browsing the contents of a tree that is not
currently checked out.
[[trust]]
-Trust
-~~~~~
+==== Trust
If you receive the SHA-1 name of a blob from one source, and its contents
from another (possibly untrusted) source, you can still trust that those
@@ -3164,8 +3081,7 @@ like GPG/PGP.
To assist in this, Git also provides the tag object...
[[tag-object]]
-Tag Object
-~~~~~~~~~~
+==== Tag Object
A tag object contains an object, object type, tag name, the name of the
person ("tagger") who created the tag, and a message, which may contain
@@ -3194,8 +3110,7 @@ objects. (Note that linkgit:git-tag[1] can also be used to create
references whose names begin with `refs/tags/`).
[[pack-files]]
-How Git stores objects efficiently: pack files
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+==== How Git stores objects efficiently: pack files
Newly created objects are initially created in a file named after the
object's SHA-1 hash (stored in `.git/objects`).
@@ -3253,8 +3168,7 @@ The linkgit:git-gc[1] command performs packing, pruning, and more for
you, so is normally the only high-level command you need.
[[dangling-objects]]
-Dangling objects
-~~~~~~~~~~~~~~~~
+==== Dangling objects
The linkgit:git-fsck[1] command will sometimes complain about dangling
objects. They are not a problem.
@@ -3334,8 +3248,7 @@ don't want to do that while the filesystem is mounted.
accesses to a repository but you might receive confusing or scary messages.)
[[recovering-from-repository-corruption]]
-Recovering from repository corruption
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+==== Recovering from repository corruption
By design, Git treats data trusted to it with caution. However, even in
the absence of bugs in Git itself, it is still possible that hardware or
@@ -3452,8 +3365,7 @@ whole thing. It's up to you--Git does *have* a lot of information, it is
just missing one particular blob version.
[[the-index]]
-The index
----------
+=== The index
The index is a binary file (generally kept in `.git/index`) containing a
sorted list of path names, each with permissions and the SHA-1 of a blob
@@ -3511,8 +3423,7 @@ If you blow the index away entirely, you generally haven't lost any
information as long as you have the name of the tree that it described.
[[submodules]]
-Submodules
-==========
+== Submodules
Large projects are often composed of smaller, self-contained modules. For
example, an embedded Linux distribution's source tree would include every
@@ -3698,8 +3609,8 @@ $ git push
You have to run `git submodule update` after `git pull` if you want to update
submodules, too.
-Pitfalls with submodules
-------------------------
+[[pitfalls-with-submodules]]
+=== Pitfalls with submodules
Always publish the submodule change before publishing the change to the
superproject that references it. If you forget to publish the submodule change,
@@ -3768,8 +3679,7 @@ submodule update` will not overwrite them. Instead, you get the usual
warning about not being able to switch from a dirty branch.
[[low-level-operations]]
-Low-level Git operations
-========================
+== Low-level Git operations
Many of the higher-level commands were originally implemented as shell
scripts using a smaller core of low-level Git commands. These can still
@@ -3777,8 +3687,7 @@ be useful when doing unusual things with Git, or just as a way to
understand its inner workings.
[[object-manipulation]]
-Object access and manipulation
-------------------------------
+=== Object access and manipulation
The linkgit:git-cat-file[1] command can show the contents of any object,
though the higher-level linkgit:git-show[1] is usually more useful.
@@ -3795,8 +3704,7 @@ verified by linkgit:git-verify-tag[1], though it is normally simpler to
use linkgit:git-tag[1] for both.
[[the-workflow]]
-The Workflow
-------------
+=== The Workflow
High-level operations such as linkgit:git-commit[1] and
linkgit:git-restore[1] work by moving data
@@ -3811,8 +3719,7 @@ the database or the working directory. Thus there are four main
combinations:
[[working-directory-to-index]]
-working directory -> index
-~~~~~~~~~~~~~~~~~~~~~~~~~~
+==== working directory -> index
The linkgit:git-update-index[1] command updates the index with
information from the working directory. You generally update the
@@ -3848,8 +3755,7 @@ The previously introduced linkgit:git-add[1] is just a wrapper for
linkgit:git-update-index[1].
[[index-to-object-database]]
-index -> object database
-~~~~~~~~~~~~~~~~~~~~~~~~
+==== index -> object database
You write your current index file to a "tree" object with the program
@@ -3864,8 +3770,7 @@ use that tree to re-generate the index at any time by going in the
other direction:
[[object-database-to-index]]
-object database -> index
-~~~~~~~~~~~~~~~~~~~~~~~~
+==== object database -> index
You read a "tree" file from the object database, and use that to
populate (and overwrite--don't do this if your index contains any
@@ -3881,8 +3786,7 @@ earlier. However, that is only your 'index' file: your working
directory contents have not been modified.
[[index-to-working-directory]]
-index -> working directory
-~~~~~~~~~~~~~~~~~~~~~~~~~~
+==== index -> working directory
You update your working directory from the index by "checking out"
files. This is not a very common operation, since normally you'd just
@@ -3911,8 +3815,7 @@ Finally, there are a few odds and ends which are not purely moving
from one representation to the other:
[[tying-it-all-together]]
-Tying it all together
-~~~~~~~~~~~~~~~~~~~~~
+==== Tying it all together
To commit a tree you have instantiated with `git write-tree`, you'd
create a "commit" object that refers to that tree and the history
@@ -3986,8 +3889,7 @@ Here is a picture that illustrates how various pieces fit together:
[[examining-the-data]]
-Examining the data
-------------------
+=== Examining the data
You can examine the data represented in the object database and the
index with various helper tools. For every object, you can use
@@ -4022,8 +3924,7 @@ $ git cat-file commit HEAD
to see what the top commit was.
[[merging-multiple-trees]]
-Merging multiple trees
-----------------------
+=== Merging multiple trees
Git can help you perform a three-way merge, which can in turn be
used for a many-way merge by repeating the merge procedure several
@@ -4073,8 +3974,7 @@ index file, and you can just write the result out with
[[merging-multiple-trees-2]]
-Merging multiple trees, continued
----------------------------------
+=== Merging multiple trees, continued
Sadly, many merges aren't trivial. If there are files that have
been added, moved or removed, or if both branches have modified the
@@ -4144,15 +4044,13 @@ $ git merge-index git-merge-one-file hello.c
and that is what higher level `git merge -s resolve` is implemented with.
[[hacking-git]]
-Hacking Git
-===========
+== Hacking Git
This chapter covers internal details of the Git implementation which
probably only Git developers need to understand.
[[object-details]]
-Object storage format
----------------------
+=== Object storage format
All objects have a statically determined "type" which identifies the
format of the object (i.e. how it is used, and how it can refer to other
@@ -4182,8 +4080,7 @@ of all objects, and verifies their internal consistency (in addition
to just verifying their superficial consistency through the hash).
[[birdview-on-the-source-code]]
-A birds-eye view of Git's source code
--------------------------------------
+=== A birds-eye view of Git's source code
It is not always easy for new developers to find their way through Git's
source code. This section gives you a little guidance to show where to
@@ -4392,25 +4289,22 @@ You see, Git is actually the best tool to find out about the source of Git
itself!
[[glossary]]
-Git Glossary
-============
+== Git Glossary
[[git-explained]]
-Git explained
--------------
+=== Git explained
include::glossary-content.txt[]
[[git-quick-start]]
-Appendix A: Git Quick Reference
-===============================
+[appendix]
+== Git Quick Reference
This is a quick summary of the major commands; the previous chapters
explain how these work in more detail.
[[quick-creating-a-new-repository]]
-Creating a new repository
--------------------------
+=== Creating a new repository
From a tarball:
@@ -4431,8 +4325,7 @@ $ cd project
-----------------------------------------------
[[managing-branches]]
-Managing branches
------------------
+=== Managing branches
-----------------------------------------------
$ git branch # list all local branches in this repo
@@ -4496,8 +4389,7 @@ $ git branch -r # list all remote branches
[[exploring-history]]
-Exploring history
------------------
+=== Exploring history
-----------------------------------------------
$ gitk # visualize and browse history
@@ -4532,8 +4424,7 @@ $ git bisect bad # if this revision is bad.
-----------------------------------------------
[[making-changes]]
-Making changes
---------------
+=== Making changes
Make sure Git knows who to blame:
@@ -4563,8 +4454,7 @@ $ git commit -a # use latest content of all tracked files
-----------------------------------------------
[[merging]]
-Merging
--------
+=== Merging
-----------------------------------------------
$ git merge test # merge branch "test" into the current branch
@@ -4574,8 +4464,7 @@ $ git pull . test # equivalent to git merge test
-----------------------------------------------
[[sharing-your-changes]]
-Sharing your changes
---------------------
+=== Sharing your changes
Importing or exporting patches:
@@ -4620,8 +4509,7 @@ $ git push example test
-----------------------------------------------
[[repository-maintenance]]
-Repository maintenance
-----------------------
+=== Repository maintenance
Check for corruption:
@@ -4637,12 +4525,11 @@ $ git gc
[[todo]]
-Appendix B: Notes and todo list for this manual
-===============================================
+[appendix]
+== Notes and todo list for this manual
[[todo-list]]
-Todo list
----------
+=== Todo list
This is a work in progress.
diff --git a/Makefile b/Makefile
index f879697..de60c8e 100644
--- a/Makefile
+++ b/Makefile
@@ -34,13 +34,8 @@ all::
# library. Support for version 1 will likely be removed in some future
# release of Git, as upstream has all but abandoned it.
#
-# When using USE_LIBPCRE1, define NO_LIBPCRE1_JIT if the PCRE v1
-# library is compiled without --enable-jit. We will auto-detect
-# whether the version of the PCRE v1 library in use has JIT support at
-# all, but we unfortunately can't auto-detect whether JIT support
-# hasn't been compiled in in an otherwise JIT-supporting version. If
-# you have link-time errors about a missing `pcre_jit_exec` define
-# this, or recompile PCRE v1 with --enable-jit.
+# When using USE_LIBPCRE1, define NO_LIBPCRE1_JIT if you want to
+# disable JIT even if supported by your library.
#
# Define LIBPCREDIR=/foo/bar if your PCRE header and library files are
# in /foo/bar/include and /foo/bar/lib directories. Which version of
@@ -598,6 +593,7 @@ SCRIPT_SH =
SCRIPT_LIB =
TEST_BUILTINS_OBJS =
TEST_PROGRAMS_NEED_X =
+THIRD_PARTY_SOURCES =
# Having this variable in your environment would break pipelines because
# you cause "cd" to echo its destination to stdout. It can also take
@@ -728,6 +724,7 @@ TEST_BUILTINS_OBJS += test-parse-options.o
TEST_BUILTINS_OBJS += test-path-utils.o
TEST_BUILTINS_OBJS += test-pkt-line.o
TEST_BUILTINS_OBJS += test-prio-queue.o
+TEST_BUILTINS_OBJS += test-progress.o
TEST_BUILTINS_OBJS += test-reach.o
TEST_BUILTINS_OBJS += test-read-cache.o
TEST_BUILTINS_OBJS += test-read-midx.o
@@ -818,12 +815,12 @@ VCSSVN_LIB = vcs-svn/lib.a
GENERATED_H += command-list.h
-LIB_H := $(sort $(shell git ls-files '*.h' ':!t/' ':!Documentation/' 2>/dev/null || \
+LIB_H := $(sort $(patsubst ./%,%,$(shell git ls-files '*.h' ':!t/' ':!Documentation/' 2>/dev/null || \
$(FIND) . \
-name .git -prune -o \
-name t -prune -o \
-name Documentation -prune -o \
- -name '*.h' -print))
+ -name '*.h' -print)))
LIB_OBJS += abspath.o
LIB_OBJS += advice.o
@@ -984,6 +981,7 @@ LIB_OBJS += shallow.o
LIB_OBJS += sideband.o
LIB_OBJS += sigchain.o
LIB_OBJS += split-index.o
+LIB_OBJS += stable-qsort.o
LIB_OBJS += strbuf.o
LIB_OBJS += streaming.o
LIB_OBJS += string-list.o
@@ -1146,6 +1144,20 @@ BUILTIN_OBJS += builtin/verify-tag.o
BUILTIN_OBJS += builtin/worktree.o
BUILTIN_OBJS += builtin/write-tree.o
+# THIRD_PARTY_SOURCES is a list of patterns compatible with the
+# $(filter) and $(filter-out) family of functions. They specify source
+# files which are taken from some third-party source where we want to be
+# less strict about issues such as coding style so we don't diverge from
+# upstream unnecessarily (making merging in future changes easier).
+THIRD_PARTY_SOURCES += compat/inet_ntop.c
+THIRD_PARTY_SOURCES += compat/inet_pton.c
+THIRD_PARTY_SOURCES += compat/nedmalloc/%
+THIRD_PARTY_SOURCES += compat/obstack.%
+THIRD_PARTY_SOURCES += compat/poll/%
+THIRD_PARTY_SOURCES += compat/regex/%
+THIRD_PARTY_SOURCES += sha1collisiondetection/%
+THIRD_PARTY_SOURCES += sha1dc/%
+
GITLIBS = common-main.o $(LIB_FILE) $(XDIFF_LIB)
EXTLIBS =
@@ -1715,7 +1727,6 @@ ifdef NO_GETPAGESIZE
endif
ifdef INTERNAL_QSORT
COMPAT_CFLAGS += -DINTERNAL_QSORT
- COMPAT_OBJS += compat/qsort.o
endif
ifdef HAVE_ISO_QSORT_S
COMPAT_CFLAGS += -DHAVE_ISO_QSORT_S
@@ -1872,7 +1883,7 @@ ifndef V
QUIET_MSGFMT = @echo ' ' MSGFMT $@;
QUIET_GCOV = @echo ' ' GCOV $@;
QUIET_SP = @echo ' ' SP $<;
- QUIET_HDR = @echo ' ' HDR $<;
+ QUIET_HDR = @echo ' ' HDR $(<:hcc=h);
QUIET_RC = @echo ' ' RC $@;
QUIET_SUBDIR0 = +@subdir=
QUIET_SUBDIR1 = ;$(NO_SUBDIR) echo ' ' SUBDIR $$subdir; \
@@ -2600,6 +2611,7 @@ FIND_SOURCE_FILES = ( \
-o \( -name 'trash*' -type d -prune \) \
-o \( -name '*.[hcS]' -type f -print \) \
-o \( -name '*.sh' -type f -print \) \
+ | sed -e 's|^\./||' \
)
$(ETAGS_TARGET): FORCE
@@ -2769,11 +2781,16 @@ EXCEPT_HDRS := $(GEN_HDRS) compat/% xdiff/%
ifndef GCRYPT_SHA256
EXCEPT_HDRS += sha256/gcrypt.h
endif
-CHK_HDRS = $(filter-out $(EXCEPT_HDRS),$(patsubst ./%,%,$(LIB_H)))
+CHK_HDRS = $(filter-out $(EXCEPT_HDRS),$(LIB_H))
HCO = $(patsubst %.h,%.hco,$(CHK_HDRS))
+HCC = $(HCO:hco=hcc)
+
+%.hcc: %.h
+ @echo '#include "git-compat-util.h"' >$@
+ @echo '#include "$<"' >>$@
-$(HCO): %.hco: %.h FORCE
- $(QUIET_HDR)$(CC) -include git-compat-util.h -I. -o /dev/null -c -xc $<
+$(HCO): %.hco: %.hcc FORCE
+ $(QUIET_HDR)$(CC) $(ALL_CFLAGS) -o /dev/null -c -xc $<
.PHONY: hdr-check $(HCO)
hdr-check: $(HCO)
@@ -2792,12 +2809,8 @@ check: command-list.h
exit 1; \
fi
-C_SOURCES = $(patsubst %.o,%.c,$(C_OBJ))
-ifdef DC_SHA1_SUBMODULE
-COCCI_SOURCES = $(filter-out sha1collisiondetection/%,$(C_SOURCES))
-else
-COCCI_SOURCES = $(filter-out sha1dc/%,$(C_SOURCES))
-endif
+FOUND_C_SOURCES = $(filter %.c,$(shell $(FIND_SOURCE_FILES)))
+COCCI_SOURCES = $(filter-out $(THIRD_PARTY_SOURCES),$(FOUND_C_SOURCES))
%.cocci.patch: %.cocci $(COCCI_SOURCES)
@echo ' ' SPATCH $<; \
@@ -3029,6 +3042,10 @@ rpm::
@false
.PHONY: rpm
+ifneq ($(INCLUDE_DLLS_IN_ARTIFACTS),)
+OTHER_PROGRAMS += $(shell echo *.dll t/helper/*.dll)
+endif
+
artifacts-tar:: $(ALL_PROGRAMS) $(SCRIPT_LIB) $(BUILT_INS) $(OTHER_PROGRAMS) \
GIT-BUILD-OPTIONS $(TEST_PROGRAMS) $(test_bindir_programs) \
$(MOFILES)
@@ -3082,6 +3099,7 @@ clean: profile-clean coverage-clean cocciclean
$(RM) $(ALL_PROGRAMS) $(SCRIPT_LIB) $(BUILT_INS) git$X
$(RM) $(TEST_PROGRAMS)
$(RM) $(FUZZ_PROGRAMS)
+ $(RM) $(HCC)
$(RM) -r bin-wrappers $(dep_dirs)
$(RM) -r po/build/
$(RM) *.pyc *.pyo */*.pyc */*.pyo command-list.h $(ETAGS_TARGET) tags cscope*
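The Makefile hunks above mostly feed three developer-facing targets; a short sketch of how they are typically driven (the `artifacts-tar` arguments mirror the Azure Pipelines hunk later in this patch):
------------
$ make hdr-check      # compiles a generated *.hcc wrapper per header in CHK_HDRS
$ make coccicheck     # runs spatch over COCCI_SOURCES, now minus THIRD_PARTY_SOURCES
$ make artifacts-tar INCLUDE_DLLS_IN_ARTIFACTS=YesPlease ARTIFACTS_DIRECTORY=artifacts
------------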
diff --git a/apply.c b/apply.c
index 57a61f2..f8a046a 100644
--- a/apply.c
+++ b/apply.c
@@ -1361,11 +1361,32 @@ int parse_git_diff_header(struct strbuf *root,
if (check_header_line(*linenr, patch))
return -1;
if (res > 0)
- return offset;
+ goto done;
break;
}
}
+done:
+ if (!patch->old_name && !patch->new_name) {
+ if (!patch->def_name) {
+ error(Q_("git diff header lacks filename information when removing "
+ "%d leading pathname component (line %d)",
+ "git diff header lacks filename information when removing "
+ "%d leading pathname components (line %d)",
+ parse_hdr_state.p_value),
+ parse_hdr_state.p_value, *linenr);
+ return -128;
+ }
+ patch->old_name = xstrdup(patch->def_name);
+ patch->new_name = xstrdup(patch->def_name);
+ }
+ if ((!patch->new_name && !patch->is_delete) ||
+ (!patch->old_name && !patch->is_new)) {
+ error(_("git diff header lacks filename information "
+ "(line %d)"), *linenr);
+ return -128;
+ }
+ patch->is_toplevel_relative = 1;
return offset;
}
@@ -1546,26 +1567,6 @@ static int find_header(struct apply_state *state,
return -128;
if (git_hdr_len <= len)
continue;
- if (!patch->old_name && !patch->new_name) {
- if (!patch->def_name) {
- error(Q_("git diff header lacks filename information when removing "
- "%d leading pathname component (line %d)",
- "git diff header lacks filename information when removing "
- "%d leading pathname components (line %d)",
- state->p_value),
- state->p_value, state->linenr);
- return -128;
- }
- patch->old_name = xstrdup(patch->def_name);
- patch->new_name = xstrdup(patch->def_name);
- }
- if ((!patch->new_name && !patch->is_delete) ||
- (!patch->old_name && !patch->is_new)) {
- error(_("git diff header lacks filename information "
- "(line %d)"), state->linenr);
- return -128;
- }
- patch->is_toplevel_relative = 1;
*hdrsize = git_hdr_len;
return offset;
}
diff --git a/apply.h b/apply.h
index a795193..da3d95f 100644
--- a/apply.h
+++ b/apply.h
@@ -1,6 +1,7 @@
#ifndef APPLY_H
#define APPLY_H
+#include "hash.h"
#include "lockfile.h"
#include "string-list.h"
diff --git a/attr.c b/attr.c
index d02d081..11f19b5 100644
--- a/attr.c
+++ b/attr.c
@@ -62,7 +62,7 @@ static struct attr_hashmap g_attr_hashmap;
/* The container for objects stored in "struct attr_hashmap" */
struct attr_hash_entry {
- struct hashmap_entry ent; /* must be the first member! */
+ struct hashmap_entry ent;
const char *key; /* the key; memory should be owned by value */
size_t keylen; /* length of the key */
void *value; /* the stored value */
@@ -70,12 +70,14 @@ struct attr_hash_entry {
/* attr_hashmap comparison function */
static int attr_hash_entry_cmp(const void *unused_cmp_data,
- const void *entry,
- const void *entry_or_key,
+ const struct hashmap_entry *eptr,
+ const struct hashmap_entry *entry_or_key,
const void *unused_keydata)
{
- const struct attr_hash_entry *a = entry;
- const struct attr_hash_entry *b = entry_or_key;
+ const struct attr_hash_entry *a, *b;
+
+ a = container_of(eptr, const struct attr_hash_entry, ent);
+ b = container_of(entry_or_key, const struct attr_hash_entry, ent);
return (a->keylen != b->keylen) || strncmp(a->key, b->key, a->keylen);
}
@@ -98,10 +100,10 @@ static void *attr_hashmap_get(struct attr_hashmap *map,
if (!map->map.tablesize)
attr_hashmap_init(map);
- hashmap_entry_init(&k, memhash(key, keylen));
+ hashmap_entry_init(&k.ent, memhash(key, keylen));
k.key = key;
k.keylen = keylen;
- e = hashmap_get(&map->map, &k, NULL);
+ e = hashmap_get_entry(&map->map, &k, ent, NULL);
return e ? e->value : NULL;
}
@@ -117,12 +119,12 @@ static void attr_hashmap_add(struct attr_hashmap *map,
attr_hashmap_init(map);
e = xmalloc(sizeof(struct attr_hash_entry));
- hashmap_entry_init(e, memhash(key, keylen));
+ hashmap_entry_init(&e->ent, memhash(key, keylen));
e->key = key;
e->keylen = keylen;
e->value = value;
- hashmap_add(&map->map, e);
+ hashmap_add(&map->map, &e->ent);
}
struct all_attrs_item {
@@ -161,12 +163,12 @@ static void all_attrs_init(struct attr_hashmap *map, struct attr_check *check)
if (size != check->all_attrs_nr) {
struct attr_hash_entry *e;
struct hashmap_iter iter;
- hashmap_iter_init(&map->map, &iter);
REALLOC_ARRAY(check->all_attrs, size);
check->all_attrs_nr = size;
- while ((e = hashmap_iter_next(&iter))) {
+ hashmap_for_each_entry(&map->map, &iter, e,
+ ent /* member name */) {
const struct git_attr *a = e->value;
check->all_attrs[a->attr_nr].attr = a;
}
diff --git a/azure-pipelines.yml b/azure-pipelines.yml
index c329b72..9f099b9 100644
--- a/azure-pipelines.yml
+++ b/azure-pipelines.yml
@@ -1,6 +1,5 @@
-resources:
-- repo: self
- fetchDepth: 1
+variables:
+ Agent.Source.Git.ShallowFetchDepth: 1
jobs:
- job: windows_build
@@ -131,6 +130,165 @@ jobs:
PathtoPublish: t/failed-test-artifacts
ArtifactName: failed-test-artifacts
+- job: vs_build
+ displayName: Visual Studio Build
+ condition: succeeded()
+ pool: Hosted VS2017
+ timeoutInMinutes: 240
+ steps:
+ - powershell: |
+ if ("$GITFILESHAREPWD" -ne "" -and "$GITFILESHAREPWD" -ne "`$`(gitfileshare.pwd)") {
+ net use s: \\gitfileshare.file.core.windows.net\test-cache "$GITFILESHAREPWD" /user:AZURE\gitfileshare /persistent:no
+ cmd /c mklink /d "$(Build.SourcesDirectory)\test-cache" S:\
+ }
+ displayName: 'Mount test-cache'
+ env:
+ GITFILESHAREPWD: $(gitfileshare.pwd)
+ - powershell: |
+ $urlbase = "https://dev.azure.com/git-for-windows/git/_apis/build/builds"
+ $id = ((Invoke-WebRequest -UseBasicParsing "${urlbase}?definitions=22&statusFilter=completed&resultFilter=succeeded&`$top=1").content | ConvertFrom-JSON).value[0].id
+ $downloadUrl = ((Invoke-WebRequest -UseBasicParsing "${urlbase}/$id/artifacts").content | ConvertFrom-JSON).value[1].resource.downloadUrl
+ (New-Object Net.WebClient).DownloadFile($downloadUrl,"git-sdk-64-minimal.zip")
+ Expand-Archive git-sdk-64-minimal.zip -DestinationPath . -Force
+ Remove-Item git-sdk-64-minimal.zip
+
+ # Let Git ignore the SDK and the test-cache
+ "/git-sdk-64-minimal/`n/test-cache/`n" | Out-File -NoNewLine -Encoding ascii -Append "$(Build.SourcesDirectory)\.git\info\exclude"
+ displayName: 'Download git-sdk-64-minimal'
+ - powershell: |
+ & git-sdk-64-minimal\usr\bin\bash.exe -lc @"
+ make vcxproj
+ "@
+ if (!$?) { exit(1) }
+ displayName: Generate Visual Studio Solution
+ env:
+ HOME: $(Build.SourcesDirectory)
+ MSYSTEM: MINGW64
+ DEVELOPER: 1
+ NO_PERL: 1
+ GIT_CONFIG_PARAMETERS: "'user.name=CI' 'user.email=ci@git'"
+ - powershell: |
+ $urlbase = "https://dev.azure.com/git/git/_apis/build/builds"
+ $id = ((Invoke-WebRequest -UseBasicParsing "${urlbase}?definitions=9&statusFilter=completed&resultFilter=succeeded&`$top=1").content | ConvertFrom-JSON).value[0].id
+ $downloadUrl = ((Invoke-WebRequest -UseBasicParsing "${urlbase}/$id/artifacts").content | ConvertFrom-JSON).value[0].resource.downloadUrl
+ (New-Object Net.WebClient).DownloadFile($downloadUrl, "compat.zip")
+ Expand-Archive compat.zip -DestinationPath . -Force
+ Remove-Item compat.zip
+ displayName: 'Download vcpkg artifacts'
+ - task: MSBuild@1
+ inputs:
+ solution: git.sln
+ platform: x64
+ configuration: Release
+ maximumCpuCount: 4
+ - powershell: |
+ & compat\vcbuild\vcpkg_copy_dlls.bat release
+ if (!$?) { exit(1) }
+ & git-sdk-64-minimal\usr\bin\bash.exe -lc @"
+ mkdir -p artifacts &&
+ eval \"`$(make -n artifacts-tar INCLUDE_DLLS_IN_ARTIFACTS=YesPlease ARTIFACTS_DIRECTORY=artifacts | grep ^tar)\"
+ "@
+ if (!$?) { exit(1) }
+ displayName: Bundle artifact tar
+ env:
+ HOME: $(Build.SourcesDirectory)
+ MSYSTEM: MINGW64
+ DEVELOPER: 1
+ NO_PERL: 1
+ MSVC: 1
+ VCPKG_ROOT: $(Build.SourcesDirectory)\compat\vcbuild\vcpkg
+ - powershell: |
+ $tag = (Invoke-WebRequest -UseBasicParsing "https://gitforwindows.org/latest-tag.txt").content
+ $version = (Invoke-WebRequest -UseBasicParsing "https://gitforwindows.org/latest-version.txt").content
+ $url = "https://github.com/git-for-windows/git/releases/download/${tag}/PortableGit-${version}-64-bit.7z.exe"
+ (New-Object Net.WebClient).DownloadFile($url,"PortableGit.exe")
+ & .\PortableGit.exe -y -oartifacts\PortableGit
+ # Wait until it is unpacked
+ while (-not @(Remove-Item -ErrorAction SilentlyContinue PortableGit.exe; $?)) { sleep 1 }
+ displayName: Download & extract portable Git
+ - task: PublishPipelineArtifact@0
+ displayName: 'Publish Pipeline Artifact: MSVC test artifacts'
+ inputs:
+ artifactName: 'vs-artifacts'
+ targetPath: '$(Build.SourcesDirectory)\artifacts'
+ - powershell: |
+ if ("$GITFILESHAREPWD" -ne "" -and "$GITFILESHAREPWD" -ne "`$`(gitfileshare.pwd)") {
+ cmd /c rmdir "$(Build.SourcesDirectory)\test-cache"
+ }
+ displayName: 'Unmount test-cache'
+ condition: true
+ env:
+ GITFILESHAREPWD: $(gitfileshare.pwd)
+
+- job: vs_test
+ displayName: Visual Studio Test
+ dependsOn: vs_build
+ condition: succeeded()
+ pool: Hosted
+ timeoutInMinutes: 240
+ strategy:
+ parallel: 10
+ steps:
+ - powershell: |
+ if ("$GITFILESHAREPWD" -ne "" -and "$GITFILESHAREPWD" -ne "`$`(gitfileshare.pwd)") {
+ net use s: \\gitfileshare.file.core.windows.net\test-cache "$GITFILESHAREPWD" /user:AZURE\gitfileshare /persistent:no
+ cmd /c mklink /d "$(Build.SourcesDirectory)\test-cache" S:\
+ }
+ displayName: 'Mount test-cache'
+ env:
+ GITFILESHAREPWD: $(gitfileshare.pwd)
+ - task: DownloadPipelineArtifact@0
+ displayName: 'Download Pipeline Artifact: VS test artifacts'
+ inputs:
+ artifactName: 'vs-artifacts'
+ targetPath: '$(Build.SourcesDirectory)'
+ - powershell: |
+ & PortableGit\git-cmd.exe --command=usr\bin\bash.exe -lc @"
+ test -f artifacts.tar.gz || {
+ echo No test artifacts found\; skipping >&2
+ exit 0
+ }
+ tar xf artifacts.tar.gz || exit 1
+
+ # Let Git ignore the SDK and the test-cache
+ printf '%s\n' /PortableGit/ /test-cache/ >>.git/info/exclude
+
+ cd t &&
+ PATH=\"`$PWD/helper:`$PATH\" &&
+ test-tool.exe run-command testsuite -V -x --write-junit-xml \
+ `$(test-tool.exe path-utils slice-tests \
+ `$SYSTEM_JOBPOSITIONINPHASE `$SYSTEM_TOTALJOBSINPHASE t[0-9]*.sh)
+ "@
+ if (!$?) { exit(1) }
+ displayName: 'Test (parallel)'
+ env:
+ HOME: $(Build.SourcesDirectory)
+ MSYSTEM: MINGW64
+ NO_SVN_TESTS: 1
+ GIT_TEST_SKIP_REBASE_P: 1
+ - powershell: |
+ if ("$GITFILESHAREPWD" -ne "" -and "$GITFILESHAREPWD" -ne "`$`(gitfileshare.pwd)") {
+ cmd /c rmdir "$(Build.SourcesDirectory)\test-cache"
+ }
+ displayName: 'Unmount test-cache'
+ condition: true
+ env:
+ GITFILESHAREPWD: $(gitfileshare.pwd)
+ - task: PublishTestResults@2
+ displayName: 'Publish Test Results **/TEST-*.xml'
+ inputs:
+ mergeTestResults: true
+ testRunTitle: 'vs'
+ platform: Windows
+ publishRunAttachments: false
+ condition: succeededOrFailed()
+ - task: PublishBuildArtifacts@1
+ displayName: 'Publish trash directories of failed tests'
+ condition: failed()
+ inputs:
+ PathtoPublish: t/failed-test-artifacts
+ ArtifactName: failed-vs-test-artifacts
+
- job: linux_clang
displayName: linux-clang
condition: succeeded()
@@ -354,7 +512,7 @@ jobs:
test "$GITFILESHAREPWD" = '$(gitfileshare.pwd)' || ci/mount-fileshare.sh //gitfileshare.file.core.windows.net/test-cache gitfileshare "$GITFILESHAREPWD" "$HOME/test-cache" || exit 1
sudo apt-get update &&
- sudo apt-get install -y coccinelle &&
+ sudo apt-get install -y coccinelle libcurl4-openssl-dev libssl-dev libexpat-dev gettext &&
export jobname=StaticAnalysis &&
@@ -374,7 +532,7 @@ jobs:
test "$GITFILESHAREPWD" = '$(gitfileshare.pwd)' || ci/mount-fileshare.sh //gitfileshare.file.core.windows.net/test-cache gitfileshare "$GITFILESHAREPWD" "$HOME/test-cache" || exit 1
sudo apt-get update &&
- sudo apt-get install -y asciidoc xmlto asciidoctor &&
+ sudo apt-get install -y asciidoc xmlto asciidoctor docbook-xsl-ns &&
export ALREADY_HAVE_ASCIIDOCTOR=yes. &&
export jobname=Documentation &&
diff --git a/bisect.c b/bisect.c
index e87ac29..e81c91d 100644
--- a/bisect.c
+++ b/bisect.c
@@ -707,7 +707,7 @@ static int bisect_checkout(const struct object_id *bisect_rev, int no_checkout)
{
char bisect_rev_hex[GIT_MAX_HEXSZ + 1];
- memcpy(bisect_rev_hex, oid_to_hex(bisect_rev), GIT_SHA1_HEXSZ + 1);
+ memcpy(bisect_rev_hex, oid_to_hex(bisect_rev), the_hash_algo->hexsz + 1);
update_ref(NULL, "BISECT_EXPECTED_REV", bisect_rev, NULL, 0, UPDATE_REFS_DIE_ON_ERR);
argv_checkout[2] = bisect_rev_hex;
diff --git a/blame.c b/blame.c
index 36a2e7e..29770e5 100644
--- a/blame.c
+++ b/blame.c
@@ -144,7 +144,7 @@ static void append_merge_parents(struct repository *r,
while (!strbuf_getwholeline_fd(&line, merge_head, '\n')) {
struct object_id oid;
- if (line.len < GIT_SHA1_HEXSZ || get_oid_hex(line.buf, &oid))
+ if (get_oid_hex(line.buf, &oid))
die("unknown line in '%s': %s",
git_path_merge_head(r), line.buf);
tail = append_parent(r, tail, &oid);
@@ -417,14 +417,15 @@ static void get_fingerprint(struct fingerprint *result,
/* Ignore whitespace pairs */
if (hash == 0)
continue;
- hashmap_entry_init(entry, hash);
+ hashmap_entry_init(&entry->entry, hash);
- found_entry = hashmap_get(&result->map, entry, NULL);
+ found_entry = hashmap_get_entry(&result->map, entry,
+ /* member name */ entry, NULL);
if (found_entry) {
found_entry->count += 1;
} else {
entry->count = 1;
- hashmap_add(&result->map, entry);
+ hashmap_add(&result->map, &entry->entry);
++entry;
}
}
@@ -432,7 +433,7 @@ static void get_fingerprint(struct fingerprint *result,
static void free_fingerprint(struct fingerprint *f)
{
- hashmap_free(&f->map, 0);
+ hashmap_free(&f->map);
free(f->entries);
}
@@ -449,10 +450,10 @@ static int fingerprint_similarity(struct fingerprint *a, struct fingerprint *b)
struct hashmap_iter iter;
const struct fingerprint_entry *entry_a, *entry_b;
- hashmap_iter_init(&b->map, &iter);
-
- while ((entry_b = hashmap_iter_next(&iter))) {
- if ((entry_a = hashmap_get(&a->map, entry_b, NULL))) {
+ hashmap_for_each_entry(&b->map, &iter, entry_b,
+ entry /* member name */) {
+ entry_a = hashmap_get_entry(&a->map, entry_b, entry, NULL);
+ if (entry_a) {
intersection += entry_a->count < entry_b->count ?
entry_a->count : entry_b->count;
}
@@ -470,10 +471,12 @@ static void fingerprint_subtract(struct fingerprint *a, struct fingerprint *b)
hashmap_iter_init(&b->map, &iter);
- while ((entry_b = hashmap_iter_next(&iter))) {
- if ((entry_a = hashmap_get(&a->map, entry_b, NULL))) {
+ hashmap_for_each_entry(&b->map, &iter, entry_b,
+ entry /* member name */) {
+ entry_a = hashmap_get_entry(&a->map, entry_b, entry, NULL);
+ if (entry_a) {
if (entry_a->count <= entry_b->count)
- hashmap_remove(&a->map, entry_b, NULL);
+ hashmap_remove(&a->map, &entry_b->entry, NULL);
else
entry_a->count -= entry_b->count;
}
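
The blame.c hunks above, together with the describe.c, difftool.c, fast-export.c and fetch.c hunks further down, all perform the same mechanical conversion to the reworked hashmap API: the embedded "struct hashmap_entry" is now named explicitly rather than assumed to be the struct's first member. A minimal sketch of the resulting calling convention, using a hypothetical "struct example" that is not part of this patch:

/* Sketch only: "struct example", "ent" and "key" are illustrative. */
struct example {
	struct hashmap_entry ent;	/* embedded entry, no longer required to be first */
	const char *key;
};

static int example_cmp(const void *cmp_data,
		       const struct hashmap_entry *eptr,
		       const struct hashmap_entry *entry_or_key,
		       const void *keydata)
{
	const struct example *a = container_of(eptr, const struct example, ent);
	const struct example *b = container_of(entry_or_key, const struct example, ent);

	return strcmp(a->key, keydata ? keydata : b->key);
}

static void example_usage(struct hashmap *map, struct example *e)
{
	struct hashmap_iter iter;
	struct example *found;

	hashmap_init(map, example_cmp, NULL, 0);
	hashmap_entry_init(&e->ent, strhash(e->key));	/* pass the member ... */
	hashmap_add(map, &e->ent);			/* ... not the container */

	found = hashmap_get_entry(map, e, ent, NULL);	/* typed lookup */
	hashmap_for_each_entry(map, &iter, found, ent)	/* typed iteration */
		;	/* found->key is usable here */
	hashmap_free_entries(map, struct example, ent);	/* frees the containers too */
}
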
diff --git a/builtin/am.c b/builtin/am.c
index ee7305e..8181c2a 100644
--- a/builtin/am.c
+++ b/builtin/am.c
@@ -24,7 +24,6 @@
#include "sequencer.h"
#include "revision.h"
#include "merge-recursive.h"
-#include "revision.h"
#include "log-tree.h"
#include "notes-utils.h"
#include "rerere.h"
@@ -1072,19 +1071,6 @@ static const char *msgnum(const struct am_state *state)
}
/**
- * Refresh and write index.
- */
-static void refresh_and_write_cache(void)
-{
- struct lock_file lock_file = LOCK_INIT;
-
- hold_locked_index(&lock_file, LOCK_DIE_ON_ERROR);
- refresh_cache(REFRESH_QUIET);
- if (write_locked_index(&the_index, &lock_file, COMMIT_LOCK))
- die(_("unable to write index file"));
-}
-
-/**
* Dies with a user-friendly message on how to proceed after resolving the
* problem. This message can be overridden with state->resolvemsg.
*/
@@ -1540,7 +1526,7 @@ static int fall_back_threeway(const struct am_state *state, const char *index_pa
o.branch1 = "HEAD";
their_tree_name = xstrfmt("%.*s", linelen(state->msg), state->msg);
o.branch2 = their_tree_name;
- o.detect_directory_renames = 0;
+ o.detect_directory_renames = MERGE_DIRECTORY_RENAMES_NONE;
if (state->quiet)
o.verbosity = 0;
@@ -1705,7 +1691,8 @@ static void am_run(struct am_state *state, int resume)
unlink(am_path(state, "dirtyindex"));
- refresh_and_write_cache();
+ if (refresh_and_write_cache(REFRESH_QUIET, 0, 0) < 0)
+ die(_("unable to write index file"));
if (repo_index_has_changes(the_repository, NULL, &sb)) {
write_state_bool(state, "dirtyindex", 1);
diff --git a/builtin/blame.c b/builtin/blame.c
index b6534d4..e946ba6 100644
--- a/builtin/blame.c
+++ b/builtin/blame.c
@@ -26,7 +26,6 @@
#include "progress.h"
#include "object-store.h"
#include "blame.h"
-#include "string-list.h"
#include "refs.h"
static char blame_usage[] = N_("git blame [<options>] [<rev-opts>] [<rev>] [--] <file>");
@@ -460,7 +459,7 @@ static void emit_other(struct blame_scoreboard *sb, struct blame_entry *ent, int
for (cnt = 0; cnt < ent->num_lines; cnt++) {
char ch;
- int length = (opt & OUTPUT_LONG_OBJECT_NAME) ? GIT_SHA1_HEXSZ : abbrev;
+ int length = (opt & OUTPUT_LONG_OBJECT_NAME) ? the_hash_algo->hexsz : abbrev;
if (opt & OUTPUT_COLOR_LINE) {
if (cnt > 0) {
@@ -885,6 +884,7 @@ int cmd_blame(int argc, const char **argv, const char *prefix)
struct range_set ranges;
unsigned int range_i;
long anchor;
+ const int hexsz = the_hash_algo->hexsz;
setup_default_color_by_age();
git_config(git_blame_config, &output_option);
@@ -931,11 +931,11 @@ parse_done:
} else if (show_progress < 0)
show_progress = isatty(2);
- if (0 < abbrev && abbrev < GIT_SHA1_HEXSZ)
+ if (0 < abbrev && abbrev < hexsz)
/* one more abbrev length is needed for the boundary commit */
abbrev++;
else if (!abbrev)
- abbrev = GIT_SHA1_HEXSZ;
+ abbrev = hexsz;
if (revs_file && read_ancestry(revs_file))
die_errno("reading graft file '%s' failed", revs_file);
diff --git a/builtin/checkout.c b/builtin/checkout.c
index 1283727..3634a3d 100644
--- a/builtin/checkout.c
+++ b/builtin/checkout.c
@@ -709,11 +709,11 @@ static int merge_working_tree(const struct checkout_opts *opts,
* give up or do a real merge, depending on
* whether the merge flag was used.
*/
- struct tree *result;
struct tree *work;
struct tree *old_tree;
struct merge_options o;
struct strbuf sb = STRBUF_INIT;
+ struct strbuf old_commit_shortname = STRBUF_INIT;
if (!opts->merge)
return 1;
@@ -754,7 +754,7 @@ static int merge_working_tree(const struct checkout_opts *opts,
*/
init_merge_options(&o, the_repository);
o.verbosity = 0;
- work = write_tree_from_memory(&o);
+ work = write_in_core_index_as_tree(the_repository);
ret = reset_tree(new_tree,
opts, 1,
@@ -762,19 +762,25 @@ static int merge_working_tree(const struct checkout_opts *opts,
if (ret)
return ret;
o.ancestor = old_branch_info->name;
+ if (old_branch_info->name == NULL) {
+ strbuf_add_unique_abbrev(&old_commit_shortname,
+ &old_branch_info->commit->object.oid,
+ DEFAULT_ABBREV);
+ o.ancestor = old_commit_shortname.buf;
+ }
o.branch1 = new_branch_info->name;
o.branch2 = "local";
ret = merge_trees(&o,
new_tree,
work,
- old_tree,
- &result);
+ old_tree);
if (ret < 0)
exit(128);
ret = reset_tree(new_tree,
opts, 0,
writeout_error);
strbuf_release(&o.obuf);
+ strbuf_release(&old_commit_shortname);
if (ret)
return ret;
}
diff --git a/builtin/clean.c b/builtin/clean.c
index 851beb7..5abf087 100644
--- a/builtin/clean.c
+++ b/builtin/clean.c
@@ -158,7 +158,8 @@ static int remove_dirs(struct strbuf *path, const char *prefix, int force_flag,
*dir_gone = 1;
- if ((force_flag & REMOVE_DIR_KEEP_NESTED_GIT) && is_nonbare_repository_dir(path)) {
+ if ((force_flag & REMOVE_DIR_KEEP_NESTED_GIT) &&
+ is_nonbare_repository_dir(path)) {
if (!quiet) {
quote_path_relative(path->buf, prefix, &quoted);
printf(dry_run ? _(msg_would_skip_git_dir) : _(msg_skip_git_dir),
@@ -946,9 +947,19 @@ int cmd_clean(int argc, const char **argv, const char *prefix)
if (force > 1)
rm_flags = 0;
+ else
+ dir.flags |= DIR_SKIP_NESTED_GIT;
dir.flags |= DIR_SHOW_OTHER_DIRECTORIES;
+ if (argc) {
+ /*
+ * Remaining args imply pathspecs were specified, and we should
+ * recurse within those.
+ */
+ remove_directories = 1;
+ }
+
if (remove_directories)
dir.flags |= DIR_SHOW_IGNORED_TOO | DIR_KEEP_UNTRACKED_CONTENTS;
@@ -1007,6 +1018,7 @@ int cmd_clean(int argc, const char **argv, const char *prefix)
for_each_string_list_item(item, &del_list) {
struct stat st;
+ strbuf_reset(&abs_path);
if (prefix)
strbuf_addstr(&abs_path, prefix);
@@ -1040,7 +1052,6 @@ int cmd_clean(int argc, const char **argv, const char *prefix)
printf(dry_run ? _(msg_would_remove) : _(msg_remove), qname);
}
}
- strbuf_reset(&abs_path);
}
strbuf_release(&abs_path);
diff --git a/builtin/clone.c b/builtin/clone.c
index 2048b67..c46ee29 100644
--- a/builtin/clone.c
+++ b/builtin/clone.c
@@ -32,7 +32,6 @@
#include "connected.h"
#include "packfile.h"
#include "list-objects-filter-options.h"
-#include "object-store.h"
/*
* Overall FIXMEs:
@@ -785,7 +784,7 @@ static int checkout(int submodule_progress)
if (write_locked_index(&the_index, &lock_file, COMMIT_LOCK))
die(_("unable to write new index file"));
- err |= run_hook_le(NULL, "post-checkout", sha1_to_hex(null_sha1),
+ err |= run_hook_le(NULL, "post-checkout", oid_to_hex(&null_oid),
oid_to_hex(&oid), "1", NULL);
if (!err && (option_recurse_submodules.nr > 0)) {
diff --git a/builtin/commit-graph.c b/builtin/commit-graph.c
index 5786361..addc8d4 100644
--- a/builtin/commit-graph.c
+++ b/builtin/commit-graph.c
@@ -10,13 +10,13 @@
static char const * const builtin_commit_graph_usage[] = {
N_("git commit-graph [--object-dir <objdir>]"),
N_("git commit-graph read [--object-dir <objdir>]"),
- N_("git commit-graph verify [--object-dir <objdir>] [--shallow]"),
- N_("git commit-graph write [--object-dir <objdir>] [--append|--split] [--reachable|--stdin-packs|--stdin-commits] <split options>"),
+ N_("git commit-graph verify [--object-dir <objdir>] [--shallow] [--[no-]progress]"),
+ N_("git commit-graph write [--object-dir <objdir>] [--append|--split] [--reachable|--stdin-packs|--stdin-commits] [--[no-]progress] <split options>"),
NULL
};
static const char * const builtin_commit_graph_verify_usage[] = {
- N_("git commit-graph verify [--object-dir <objdir>] [--shallow]"),
+ N_("git commit-graph verify [--object-dir <objdir>] [--shallow] [--[no-]progress]"),
NULL
};
@@ -26,7 +26,7 @@ static const char * const builtin_commit_graph_read_usage[] = {
};
static const char * const builtin_commit_graph_write_usage[] = {
- N_("git commit-graph write [--object-dir <objdir>] [--append|--split] [--reachable|--stdin-packs|--stdin-commits] <split options>"),
+ N_("git commit-graph write [--object-dir <objdir>] [--append|--split] [--reachable|--stdin-packs|--stdin-commits] [--[no-]progress] <split options>"),
NULL
};
@@ -38,6 +38,7 @@ static struct opts_commit_graph {
int append;
int split;
int shallow;
+ int progress;
} opts;
static int graph_verify(int argc, const char **argv)
@@ -55,9 +56,13 @@ static int graph_verify(int argc, const char **argv)
N_("The object directory to store the graph")),
OPT_BOOL(0, "shallow", &opts.shallow,
N_("if the commit-graph is split, only verify the tip file")),
+ OPT_BOOL(0, "progress", &opts.progress, N_("force progress reporting")),
OPT_END(),
};
+ trace2_cmd_mode("verify");
+
+ opts.progress = isatty(2);
argc = parse_options(argc, argv, NULL,
builtin_commit_graph_verify_options,
builtin_commit_graph_verify_usage, 0);
@@ -66,6 +71,8 @@ static int graph_verify(int argc, const char **argv)
opts.obj_dir = get_object_directory();
if (opts.shallow)
flags |= COMMIT_GRAPH_VERIFY_SHALLOW;
+ if (opts.progress)
+ flags |= COMMIT_GRAPH_WRITE_PROGRESS;
graph_name = get_commit_graph_filename(opts.obj_dir);
open_ok = open_commit_graph(graph_name, &fd, &st);
@@ -102,6 +109,8 @@ static int graph_read(int argc, const char **argv)
OPT_END(),
};
+ trace2_cmd_mode("read");
+
argc = parse_options(argc, argv, NULL,
builtin_commit_graph_read_options,
builtin_commit_graph_read_usage, 0);
@@ -154,7 +163,7 @@ static int graph_write(int argc, const char **argv)
struct string_list *commit_hex = NULL;
struct string_list lines;
int result = 0;
- enum commit_graph_write_flags flags = COMMIT_GRAPH_WRITE_PROGRESS;
+ enum commit_graph_write_flags flags = 0;
static struct option builtin_commit_graph_write_options[] = {
OPT_STRING(0, "object-dir", &opts.obj_dir,
@@ -168,6 +177,7 @@ static int graph_write(int argc, const char **argv)
N_("start walk at commits listed by stdin")),
OPT_BOOL(0, "append", &opts.append,
N_("include all commits already in the commit-graph file")),
+ OPT_BOOL(0, "progress", &opts.progress, N_("force progress reporting")),
OPT_BOOL(0, "split", &opts.split,
N_("allow writing an incremental commit-graph file")),
OPT_INTEGER(0, "max-commits", &split_opts.max_commits,
@@ -179,10 +189,13 @@ static int graph_write(int argc, const char **argv)
OPT_END(),
};
+ opts.progress = isatty(2);
split_opts.size_multiple = 2;
split_opts.max_commits = 0;
split_opts.expire_time = 0;
+ trace2_cmd_mode("write");
+
argc = parse_options(argc, argv, NULL,
builtin_commit_graph_write_options,
builtin_commit_graph_write_usage, 0);
@@ -195,6 +208,8 @@ static int graph_write(int argc, const char **argv)
flags |= COMMIT_GRAPH_WRITE_APPEND;
if (opts.split)
flags |= COMMIT_GRAPH_WRITE_SPLIT;
+ if (opts.progress)
+ flags |= COMMIT_GRAPH_WRITE_PROGRESS;
read_replace_refs = 0;
@@ -251,6 +266,8 @@ int cmd_commit_graph(int argc, const char **argv, const char *prefix)
builtin_commit_graph_usage,
PARSE_OPT_STOP_AT_NON_OPTION);
+ save_commit_buffer = 0;
+
if (argc > 0) {
if (!strcmp(argv[0], "read"))
return graph_read(argc, argv);
diff --git a/builtin/commit.c b/builtin/commit.c
index ae7aaf6..e588bc6 100644
--- a/builtin/commit.c
+++ b/builtin/commit.c
@@ -510,7 +510,7 @@ static int run_status(FILE *fp, const char *index_file, const char *prefix, int
s->nowarn = nowarn;
s->is_initial = get_oid(s->reference, &oid) ? 1 : 0;
if (!s->is_initial)
- hashcpy(s->sha1_commit, oid.hash);
+ oidcpy(&s->oid_commit, &oid);
s->status_format = status_format;
s->ignore_submodule_arg = ignore_submodule_arg;
@@ -1406,7 +1406,7 @@ int cmd_status(int argc, const char **argv, const char *prefix)
s.is_initial = get_oid(s.reference, &oid) ? 1 : 0;
if (!s.is_initial)
- hashcpy(s.sha1_commit, oid.hash);
+ oidcpy(&s.oid_commit, &oid);
s.ignore_submodule_arg = ignore_submodule_arg;
s.status_format = status_format;
diff --git a/builtin/describe.c b/builtin/describe.c
index e048f85..b6df81d 100644
--- a/builtin/describe.c
+++ b/builtin/describe.c
@@ -15,7 +15,6 @@
#include "argv-array.h"
#include "run-command.h"
#include "object-store.h"
-#include "revision.h"
#include "list-objects.h"
#include "commit-slab.h"
@@ -64,19 +63,22 @@ static const char *prio_names[] = {
};
static int commit_name_neq(const void *unused_cmp_data,
- const void *entry,
- const void *entry_or_key,
+ const struct hashmap_entry *eptr,
+ const struct hashmap_entry *entry_or_key,
const void *peeled)
{
- const struct commit_name *cn1 = entry;
- const struct commit_name *cn2 = entry_or_key;
+ const struct commit_name *cn1, *cn2;
+
+ cn1 = container_of(eptr, const struct commit_name, entry);
+ cn2 = container_of(entry_or_key, const struct commit_name, entry);
return !oideq(&cn1->peeled, peeled ? peeled : &cn2->peeled);
}
static inline struct commit_name *find_commit_name(const struct object_id *peeled)
{
- return hashmap_get_from_hash(&names, oidhash(peeled), peeled);
+ return hashmap_get_entry_from_hash(&names, oidhash(peeled), peeled,
+ struct commit_name, entry);
}
static int replace_name(struct commit_name *e,
@@ -123,8 +125,8 @@ static void add_to_known_names(const char *path,
if (!e) {
e = xmalloc(sizeof(struct commit_name));
oidcpy(&e->peeled, peeled);
- hashmap_entry_init(e, oidhash(peeled));
- hashmap_add(&names, e);
+ hashmap_entry_init(&e->entry, oidhash(peeled));
+ hashmap_add(&names, &e->entry);
e->path = NULL;
}
e->tag = tag;
@@ -330,8 +332,8 @@ static void describe_commit(struct object_id *oid, struct strbuf *dst)
struct commit_name *n;
init_commit_names(&commit_names);
- n = hashmap_iter_first(&names, &iter);
- for (; n; n = hashmap_iter_next(&iter)) {
+ hashmap_for_each_entry(&names, &iter, n,
+ entry /* member name */) {
c = lookup_commit_reference_gently(the_repository,
&n->peeled, 1);
if (c)
diff --git a/builtin/difftool.c b/builtin/difftool.c
index 16eb8b7..c280e68 100644
--- a/builtin/difftool.c
+++ b/builtin/difftool.c
@@ -125,12 +125,15 @@ struct working_tree_entry {
};
static int working_tree_entry_cmp(const void *unused_cmp_data,
- const void *entry,
- const void *entry_or_key,
+ const struct hashmap_entry *eptr,
+ const struct hashmap_entry *entry_or_key,
const void *unused_keydata)
{
- const struct working_tree_entry *a = entry;
- const struct working_tree_entry *b = entry_or_key;
+ const struct working_tree_entry *a, *b;
+
+ a = container_of(eptr, const struct working_tree_entry, entry);
+ b = container_of(entry_or_key, const struct working_tree_entry, entry);
+
return strcmp(a->path, b->path);
}
@@ -145,12 +148,14 @@ struct pair_entry {
};
static int pair_cmp(const void *unused_cmp_data,
- const void *entry,
- const void *entry_or_key,
+ const struct hashmap_entry *eptr,
+ const struct hashmap_entry *entry_or_key,
const void *unused_keydata)
{
- const struct pair_entry *a = entry;
- const struct pair_entry *b = entry_or_key;
+ const struct pair_entry *a, *b;
+
+ a = container_of(eptr, const struct pair_entry, entry);
+ b = container_of(entry_or_key, const struct pair_entry, entry);
return strcmp(a->path, b->path);
}
@@ -161,14 +166,14 @@ static void add_left_or_right(struct hashmap *map, const char *path,
struct pair_entry *e, *existing;
FLEX_ALLOC_STR(e, path, path);
- hashmap_entry_init(e, strhash(path));
- existing = hashmap_get(map, e, NULL);
+ hashmap_entry_init(&e->entry, strhash(path));
+ existing = hashmap_get_entry(map, e, entry, NULL);
if (existing) {
free(e);
e = existing;
} else {
e->left[0] = e->right[0] = '\0';
- hashmap_add(map, e);
+ hashmap_add(map, &e->entry);
}
strlcpy(is_right ? e->right : e->left, content, PATH_MAX);
}
@@ -179,12 +184,14 @@ struct path_entry {
};
static int path_entry_cmp(const void *unused_cmp_data,
- const void *entry,
- const void *entry_or_key,
+ const struct hashmap_entry *eptr,
+ const struct hashmap_entry *entry_or_key,
const void *key)
{
- const struct path_entry *a = entry;
- const struct path_entry *b = entry_or_key;
+ const struct path_entry *a, *b;
+
+ a = container_of(eptr, const struct path_entry, entry);
+ b = container_of(entry_or_key, const struct path_entry, entry);
return strcmp(a->path, key ? key : b->path);
}
@@ -234,8 +241,8 @@ static void changed_files(struct hashmap *result, const char *index_path,
while (!strbuf_getline_nul(&buf, fp)) {
struct path_entry *entry;
FLEX_ALLOC_STR(entry, path, buf.buf);
- hashmap_entry_init(entry, strhash(buf.buf));
- hashmap_add(result, entry);
+ hashmap_entry_init(&entry->entry, strhash(buf.buf));
+ hashmap_add(result, &entry->entry);
}
fclose(fp);
if (finish_command(&diff_files))
@@ -461,12 +468,13 @@ static int run_dir_diff(const char *extcmd, int symlinks, const char *prefix,
/* Avoid duplicate working_tree entries */
FLEX_ALLOC_STR(entry, path, dst_path);
- hashmap_entry_init(entry, strhash(dst_path));
- if (hashmap_get(&working_tree_dups, entry, NULL)) {
+ hashmap_entry_init(&entry->entry, strhash(dst_path));
+ if (hashmap_get(&working_tree_dups, &entry->entry,
+ NULL)) {
free(entry);
continue;
}
- hashmap_add(&working_tree_dups, entry);
+ hashmap_add(&working_tree_dups, &entry->entry);
if (!use_wt_file(workdir, dst_path, &roid)) {
if (checkout_path(rmode, &roid, dst_path,
@@ -530,8 +538,8 @@ static int run_dir_diff(const char *extcmd, int symlinks, const char *prefix,
* temporary file to both the left and right directories to show the
* change in the recorded SHA1 for the submodule.
*/
- hashmap_iter_init(&submodules, &iter);
- while ((entry = hashmap_iter_next(&iter))) {
+ hashmap_for_each_entry(&submodules, &iter, entry,
+ entry /* member name */) {
if (*entry->left) {
add_path(&ldir, ldir_len, entry->path);
ensure_leading_directories(ldir.buf);
@@ -549,8 +557,8 @@ static int run_dir_diff(const char *extcmd, int symlinks, const char *prefix,
* shows only the link itself, not the contents of the link target.
* This loop replicates that behavior.
*/
- hashmap_iter_init(&symlinks2, &iter);
- while ((entry = hashmap_iter_next(&iter))) {
+ hashmap_for_each_entry(&symlinks2, &iter, entry,
+ entry /* member name */) {
if (*entry->left) {
add_path(&ldir, ldir_len, entry->path);
ensure_leading_directories(ldir.buf);
diff --git a/builtin/fast-export.c b/builtin/fast-export.c
index f541f55..dbec4df 100644
--- a/builtin/fast-export.c
+++ b/builtin/fast-export.c
@@ -40,6 +40,7 @@ static int no_data;
static int full_tree;
static int reference_excluded_commits;
static int show_original_ids;
+static int mark_tags;
static struct string_list extra_refs = STRING_LIST_INIT_NODUP;
static struct string_list tag_refs = STRING_LIST_INIT_NODUP;
static struct refspec refspecs = REFSPEC_INIT_FETCH;
@@ -126,10 +127,15 @@ struct anonymized_entry {
};
static int anonymized_entry_cmp(const void *unused_cmp_data,
- const void *va, const void *vb,
+ const struct hashmap_entry *eptr,
+ const struct hashmap_entry *entry_or_key,
const void *unused_keydata)
{
- const struct anonymized_entry *a = va, *b = vb;
+ const struct anonymized_entry *a, *b;
+
+ a = container_of(eptr, const struct anonymized_entry, hash);
+ b = container_of(entry_or_key, const struct anonymized_entry, hash);
+
return a->orig_len != b->orig_len ||
memcmp(a->orig, b->orig, a->orig_len);
}
@@ -148,10 +154,10 @@ static const void *anonymize_mem(struct hashmap *map,
if (!map->cmpfn)
hashmap_init(map, anonymized_entry_cmp, NULL, 0);
- hashmap_entry_init(&key, memhash(orig, *len));
+ hashmap_entry_init(&key.hash, memhash(orig, *len));
key.orig = orig;
key.orig_len = *len;
- ret = hashmap_get(map, &key, NULL);
+ ret = hashmap_get_entry(map, &key, hash, NULL);
if (!ret) {
ret = xmalloc(sizeof(*ret));
@@ -160,7 +166,7 @@ static const void *anonymize_mem(struct hashmap *map,
ret->orig_len = *len;
ret->anon = generate(orig, len);
ret->anon_len = *len;
- hashmap_put(map, ret);
+ hashmap_put(map, &ret->hash);
}
*len = ret->anon_len;
@@ -842,25 +848,40 @@ static void handle_tag(const char *name, struct tag *tag)
free(buf);
return;
case REWRITE:
- if (tagged->type != OBJ_COMMIT) {
- die("tag %s tags unexported %s!",
- oid_to_hex(&tag->object.oid),
- type_name(tagged->type));
- }
- p = rewrite_commit((struct commit *)tagged);
- if (!p) {
- printf("reset %s\nfrom %s\n\n",
- name, oid_to_hex(&null_oid));
- free(buf);
- return;
+ if (tagged->type == OBJ_TAG && !mark_tags) {
+ die(_("Error: Cannot export nested tags unless --mark-tags is specified."));
+ } else if (tagged->type == OBJ_COMMIT) {
+ p = rewrite_commit((struct commit *)tagged);
+ if (!p) {
+ printf("reset %s\nfrom %s\n\n",
+ name, oid_to_hex(&null_oid));
+ free(buf);
+ return;
+ }
+ tagged_mark = get_object_mark(&p->object);
+ } else {
+ /* tagged->type is either OBJ_BLOB or OBJ_TAG */
+ tagged_mark = get_object_mark(tagged);
}
- tagged_mark = get_object_mark(&p->object);
}
}
+ if (tagged->type == OBJ_TAG) {
+ printf("reset %s\nfrom %s\n\n",
+ name, oid_to_hex(&null_oid));
+ }
if (starts_with(name, "refs/tags/"))
name += 10;
- printf("tag %s\nfrom :%d\n", name, tagged_mark);
+ printf("tag %s\n", name);
+ if (mark_tags) {
+ mark_next_object(&tag->object);
+ printf("mark :%"PRIu32"\n", last_idnum);
+ }
+ if (tagged_mark)
+ printf("from :%d\n", tagged_mark);
+ else
+ printf("from %s\n", oid_to_hex(&tagged->oid));
+
if (show_original_ids)
printf("original-oid %s\n", oid_to_hex(&tag->object.oid));
printf("%.*s%sdata %d\n%.*s\n",
@@ -1047,11 +1068,16 @@ static void export_marks(char *file)
error("Unable to write marks file %s.", file);
}
-static void import_marks(char *input_file)
+static void import_marks(char *input_file, int check_exists)
{
char line[512];
- FILE *f = xfopen(input_file, "r");
+ FILE *f;
+ struct stat sb;
+
+ if (check_exists && stat(input_file, &sb))
+ return;
+ f = xfopen(input_file, "r");
while (fgets(line, sizeof(line), f)) {
uint32_t mark;
char *line_end, *mark_end;
@@ -1115,7 +1141,9 @@ int cmd_fast_export(int argc, const char **argv, const char *prefix)
struct rev_info revs;
struct object_array commits = OBJECT_ARRAY_INIT;
struct commit *commit;
- char *export_filename = NULL, *import_filename = NULL;
+ char *export_filename = NULL,
+ *import_filename = NULL,
+ *import_filename_if_exists = NULL;
uint32_t lastimportid;
struct string_list refspecs_list = STRING_LIST_INIT_NODUP;
struct string_list paths_of_changed_objects = STRING_LIST_INIT_DUP;
@@ -1135,6 +1163,10 @@ int cmd_fast_export(int argc, const char **argv, const char *prefix)
N_("Dump marks to this file")),
OPT_STRING(0, "import-marks", &import_filename, N_("file"),
N_("Import marks from this file")),
+ OPT_STRING(0, "import-marks-if-exists",
+ &import_filename_if_exists,
+ N_("file"),
+ N_("Import marks from this file if it exists")),
OPT_BOOL(0, "fake-missing-tagger", &fake_missing_tagger,
N_("Fake a tagger when tags lack one")),
OPT_BOOL(0, "full-tree", &full_tree,
@@ -1149,6 +1181,8 @@ int cmd_fast_export(int argc, const char **argv, const char *prefix)
&reference_excluded_commits, N_("Reference parents which are not in fast-export stream by object id")),
OPT_BOOL(0, "show-original-ids", &show_original_ids,
N_("Show original object ids of blobs/commits")),
+ OPT_BOOL(0, "mark-tags", &mark_tags,
+ N_("Label tags with mark ids")),
OPT_END()
};
@@ -1182,8 +1216,12 @@ int cmd_fast_export(int argc, const char **argv, const char *prefix)
if (use_done_feature)
printf("feature done\n");
+ if (import_filename && import_filename_if_exists)
+ die(_("Cannot pass both --import-marks and --import-marks-if-exists"));
if (import_filename)
- import_marks(import_filename);
+ import_marks(import_filename, 0);
+ else if (import_filename_if_exists)
+ import_marks(import_filename_if_exists, 1);
lastimportid = last_idnum;
if (import_filename && revs.prune_data.nr)
diff --git a/builtin/fetch.c b/builtin/fetch.c
index ee3dc08..0c345b5 100644
--- a/builtin/fetch.c
+++ b/builtin/fetch.c
@@ -7,6 +7,7 @@
#include "refs.h"
#include "refspec.h"
#include "object-store.h"
+#include "oidset.h"
#include "commit.h"
#include "builtin.h"
#include "string-list.h"
@@ -58,7 +59,8 @@ static int verbosity, deepen_relative, set_upstream;
static int progress = -1;
static int enable_auto_gc = 1;
static int tags = TAGS_DEFAULT, unshallow, update_shallow, deepen;
-static int max_children = 1;
+static int max_jobs = -1, submodule_fetch_jobs_config = -1;
+static int fetch_parallel_config = 1;
static enum transport_family family;
static const char *depth;
static const char *deepen_since;
@@ -100,13 +102,20 @@ static int git_fetch_config(const char *k, const char *v, void *cb)
}
if (!strcmp(k, "submodule.fetchjobs")) {
- max_children = parse_submodule_fetchjobs(k, v);
+ submodule_fetch_jobs_config = parse_submodule_fetchjobs(k, v);
return 0;
} else if (!strcmp(k, "fetch.recursesubmodules")) {
recurse_submodules = parse_fetch_recurse_submodules_arg(k, v);
return 0;
}
+ if (!strcmp(k, "fetch.parallel")) {
+ fetch_parallel_config = git_config_int(k, v);
+ if (fetch_parallel_config < 0)
+ die(_("fetch.parallel cannot be negative"));
+ return 0;
+ }
+
return git_default_config(k, v, cb);
}
@@ -140,7 +149,7 @@ static struct option builtin_fetch_options[] = {
N_("fetch all tags and associated objects"), TAGS_SET),
OPT_SET_INT('n', NULL, &tags,
N_("do not fetch all tags (--no-tags)"), TAGS_UNSET),
- OPT_INTEGER('j', "jobs", &max_children,
+ OPT_INTEGER('j', "jobs", &max_jobs,
N_("number of submodules fetched in parallel")),
OPT_BOOL('p', "prune", &prune,
N_("prune remote-tracking branches no longer on remote")),
@@ -245,32 +254,31 @@ static void add_merge_config(struct ref **head,
}
}
-static int will_fetch(struct ref **head, const unsigned char *sha1)
+static void create_fetch_oidset(struct ref **head, struct oidset *out)
{
struct ref *rm = *head;
while (rm) {
- if (hasheq(rm->old_oid.hash, sha1))
- return 1;
+ oidset_insert(out, &rm->old_oid);
rm = rm->next;
}
- return 0;
}
struct refname_hash_entry {
- struct hashmap_entry ent; /* must be the first member */
+ struct hashmap_entry ent;
struct object_id oid;
int ignore;
char refname[FLEX_ARRAY];
};
static int refname_hash_entry_cmp(const void *hashmap_cmp_fn_data,
- const void *e1_,
- const void *e2_,
+ const struct hashmap_entry *eptr,
+ const struct hashmap_entry *entry_or_key,
const void *keydata)
{
- const struct refname_hash_entry *e1 = e1_;
- const struct refname_hash_entry *e2 = e2_;
+ const struct refname_hash_entry *e1, *e2;
+ e1 = container_of(eptr, const struct refname_hash_entry, ent);
+ e2 = container_of(entry_or_key, const struct refname_hash_entry, ent);
return strcmp(e1->refname, keydata ? keydata : e2->refname);
}
@@ -282,9 +290,9 @@ static struct refname_hash_entry *refname_hash_add(struct hashmap *map,
size_t len = strlen(refname);
FLEX_ALLOC_MEM(ent, refname, refname, len);
- hashmap_entry_init(ent, strhash(refname));
+ hashmap_entry_init(&ent->ent, strhash(refname));
oidcpy(&ent->oid, oid);
- hashmap_add(map, ent);
+ hashmap_add(map, &ent->ent);
return ent;
}
@@ -319,6 +327,7 @@ static void find_non_local_tags(const struct ref *refs,
{
struct hashmap existing_refs;
struct hashmap remote_refs;
+ struct oidset fetch_oids = OIDSET_INIT;
struct string_list remote_refs_list = STRING_LIST_INIT_NODUP;
struct string_list_item *remote_ref_item;
const struct ref *ref;
@@ -326,6 +335,7 @@ static void find_non_local_tags(const struct ref *refs,
refname_hash_init(&existing_refs);
refname_hash_init(&remote_refs);
+ create_fetch_oidset(head, &fetch_oids);
for_each_ref(add_one_refname, &existing_refs);
for (ref = refs; ref; ref = ref->next) {
@@ -342,9 +352,9 @@ static void find_non_local_tags(const struct ref *refs,
if (item &&
!has_object_file_with_flags(&ref->old_oid,
OBJECT_INFO_QUICK) &&
- !will_fetch(head, ref->old_oid.hash) &&
+ !oidset_contains(&fetch_oids, &ref->old_oid) &&
!has_object_file_with_flags(&item->oid, OBJECT_INFO_QUICK) &&
- !will_fetch(head, item->oid.hash))
+ !oidset_contains(&fetch_oids, &item->oid))
clear_item(item);
item = NULL;
continue;
@@ -358,7 +368,7 @@ static void find_non_local_tags(const struct ref *refs,
*/
if (item &&
!has_object_file_with_flags(&item->oid, OBJECT_INFO_QUICK) &&
- !will_fetch(head, item->oid.hash))
+ !oidset_contains(&fetch_oids, &item->oid))
clear_item(item);
item = NULL;
@@ -371,7 +381,7 @@ static void find_non_local_tags(const struct ref *refs,
item = refname_hash_add(&remote_refs, ref->name, &ref->old_oid);
string_list_insert(&remote_refs_list, ref->name);
}
- hashmap_free(&existing_refs, 1);
+ hashmap_free_entries(&existing_refs, struct refname_hash_entry, ent);
/*
* We may have a final lightweight tag that needs to be
@@ -379,7 +389,7 @@ static void find_non_local_tags(const struct ref *refs,
*/
if (item &&
!has_object_file_with_flags(&item->oid, OBJECT_INFO_QUICK) &&
- !will_fetch(head, item->oid.hash))
+ !oidset_contains(&fetch_oids, &item->oid))
clear_item(item);
/*
@@ -389,8 +399,10 @@ static void find_non_local_tags(const struct ref *refs,
for_each_string_list_item(remote_ref_item, &remote_refs_list) {
const char *refname = remote_ref_item->string;
struct ref *rm;
+ unsigned int hash = strhash(refname);
- item = hashmap_get_from_hash(&remote_refs, strhash(refname), refname);
+ item = hashmap_get_entry_from_hash(&remote_refs, hash, refname,
+ struct refname_hash_entry, ent);
if (!item)
BUG("unseen remote ref?");
@@ -404,8 +416,9 @@ static void find_non_local_tags(const struct ref *refs,
**tail = rm;
*tail = &rm->next;
}
- hashmap_free(&remote_refs, 1);
+ hashmap_free_entries(&remote_refs, struct refname_hash_entry, ent);
string_list_clear(&remote_refs_list, 0);
+ oidset_clear(&fetch_oids);
}
static struct ref *get_ref_map(struct remote *remote,
@@ -522,17 +535,18 @@ static struct ref *get_ref_map(struct remote *remote,
if (rm->peer_ref) {
const char *refname = rm->peer_ref->name;
struct refname_hash_entry *peer_item;
+ unsigned int hash = strhash(refname);
- peer_item = hashmap_get_from_hash(&existing_refs,
- strhash(refname),
- refname);
+ peer_item = hashmap_get_entry_from_hash(&existing_refs,
+ hash, refname,
+ struct refname_hash_entry, ent);
if (peer_item) {
struct object_id *old_oid = &peer_item->oid;
oidcpy(&rm->peer_ref->old_oid, old_oid);
}
}
}
- hashmap_free(&existing_refs, 1);
+ hashmap_free_entries(&existing_refs, struct refname_hash_entry, ent);
return ref_map;
}
@@ -1519,7 +1533,62 @@ static void add_options_to_argv(struct argv_array *argv)
}
-static int fetch_multiple(struct string_list *list)
+/* Fetch multiple remotes in parallel */
+
+struct parallel_fetch_state {
+ const char **argv;
+ struct string_list *remotes;
+ int next, result;
+};
+
+static int fetch_next_remote(struct child_process *cp, struct strbuf *out,
+ void *cb, void **task_cb)
+{
+ struct parallel_fetch_state *state = cb;
+ char *remote;
+
+ if (state->next < 0 || state->next >= state->remotes->nr)
+ return 0;
+
+ remote = state->remotes->items[state->next++].string;
+ *task_cb = remote;
+
+ argv_array_pushv(&cp->args, state->argv);
+ argv_array_push(&cp->args, remote);
+ cp->git_cmd = 1;
+
+ if (verbosity >= 0)
+ printf(_("Fetching %s\n"), remote);
+
+ return 1;
+}
+
+static int fetch_failed_to_start(struct strbuf *out, void *cb, void *task_cb)
+{
+ struct parallel_fetch_state *state = cb;
+ const char *remote = task_cb;
+
+ state->result = error(_("Could not fetch %s"), remote);
+
+ return 0;
+}
+
+static int fetch_finished(int result, struct strbuf *out,
+ void *cb, void *task_cb)
+{
+ struct parallel_fetch_state *state = cb;
+ const char *remote = task_cb;
+
+ if (result) {
+ strbuf_addf(out, _("could not fetch '%s' (exit code: %d)\n"),
+ remote, result);
+ state->result = -1;
+ }
+
+ return 0;
+}
+
+static int fetch_multiple(struct string_list *list, int max_children)
{
int i, result = 0;
struct argv_array argv = ARGV_ARRAY_INIT;
@@ -1533,20 +1602,34 @@ static int fetch_multiple(struct string_list *list)
argv_array_pushl(&argv, "fetch", "--append", "--no-auto-gc", NULL);
add_options_to_argv(&argv);
- for (i = 0; i < list->nr; i++) {
- const char *name = list->items[i].string;
- argv_array_push(&argv, name);
- if (verbosity >= 0)
- printf(_("Fetching %s\n"), name);
- if (run_command_v_opt(argv.argv, RUN_GIT_CMD)) {
- error(_("Could not fetch %s"), name);
- result = 1;
+ if (max_children != 1 && list->nr != 1) {
+ struct parallel_fetch_state state = { argv.argv, list, 0, 0 };
+
+ argv_array_push(&argv, "--end-of-options");
+ result = run_processes_parallel_tr2(max_children,
+ &fetch_next_remote,
+ &fetch_failed_to_start,
+ &fetch_finished,
+ &state,
+ "fetch", "parallel/fetch");
+
+ if (!result)
+ result = state.result;
+ } else
+ for (i = 0; i < list->nr; i++) {
+ const char *name = list->items[i].string;
+ argv_array_push(&argv, name);
+ if (verbosity >= 0)
+ printf(_("Fetching %s\n"), name);
+ if (run_command_v_opt(argv.argv, RUN_GIT_CMD)) {
+ error(_("Could not fetch %s"), name);
+ result = 1;
+ }
+ argv_array_pop(&argv);
}
- argv_array_pop(&argv);
- }
argv_array_clear(&argv);
- return result;
+ return !!result;
}
/*
@@ -1679,7 +1762,8 @@ int cmd_fetch(int argc, const char **argv, const char *prefix)
for (i = 1; i < argc; i++)
strbuf_addf(&default_rla, " %s", argv[i]);
- fetch_config_from_gitmodules(&max_children, &recurse_submodules);
+ fetch_config_from_gitmodules(&submodule_fetch_jobs_config,
+ &recurse_submodules);
git_config(git_fetch_config, NULL);
argc = parse_options(argc, argv, prefix,
@@ -1745,15 +1829,27 @@ int cmd_fetch(int argc, const char **argv, const char *prefix)
fetch_one_setup_partial(remote);
result = fetch_one(remote, argc, argv, prune_tags_ok);
} else {
+ int max_children = max_jobs;
+
if (filter_options.choice)
die(_("--filter can only be used with the remote "
"configured in extensions.partialclone"));
+
+ if (max_children < 0)
+ max_children = fetch_parallel_config;
+
/* TODO should this also die if we have a previous partial-clone? */
- result = fetch_multiple(&list);
+ result = fetch_multiple(&list, max_children);
}
if (!result && (recurse_submodules != RECURSE_SUBMODULES_OFF)) {
struct argv_array options = ARGV_ARRAY_INIT;
+ int max_children = max_jobs;
+
+ if (max_children < 0)
+ max_children = submodule_fetch_jobs_config;
+ if (max_children < 0)
+ max_children = fetch_parallel_config;
add_options_to_argv(&options);
result = fetch_populated_submodules(the_repository,
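
Taken together, the cmd_fetch() changes give the parallelism knobs a simple precedence: an explicit -j/--jobs wins; otherwise submodule.fetchJobs applies to the submodule walk only; fetch.parallel (which defaults to 1 and must not be negative) covers the remaining cases. Restated as a self-contained helper, where the function itself is illustrative and the patch open-codes the logic:

/* Sketch only; mirrors the precedence applied in the hunks above. */
static int effective_jobs(int max_jobs, int submodule_jobs, int fetch_parallel)
{
	int n = max_jobs;		/* -j / --jobs, if given */

	if (n < 0)
		n = submodule_jobs;	/* submodule.fetchJobs (submodule walk only) */
	if (n < 0)
		n = fetch_parallel;	/* fetch.parallel, defaults to 1 */
	return n;
}
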
diff --git a/builtin/index-pack.c b/builtin/index-pack.c
index a23454d..60a5591 100644
--- a/builtin/index-pack.c
+++ b/builtin/index-pack.c
@@ -1490,11 +1490,11 @@ static void final(const char *final_pack_name, const char *curr_pack_name,
}
if (!from_stdin) {
- printf("%s\n", sha1_to_hex(hash));
+ printf("%s\n", hash_to_hex(hash));
} else {
struct strbuf buf = STRBUF_INIT;
- strbuf_addf(&buf, "%s\t%s\n", report, sha1_to_hex(hash));
+ strbuf_addf(&buf, "%s\t%s\n", report, hash_to_hex(hash));
write_or_die(1, buf.buf, buf.len);
strbuf_release(&buf);
diff --git a/builtin/merge-recursive.c b/builtin/merge-recursive.c
index 5b910e3..a4bfd8f 100644
--- a/builtin/merge-recursive.c
+++ b/builtin/merge-recursive.c
@@ -1,3 +1,4 @@
+#include "cache.h"
#include "builtin.h"
#include "commit.h"
#include "tag.h"
@@ -63,6 +64,9 @@ int cmd_merge_recursive(int argc, const char **argv, const char *prefix)
if (argc - i != 3) /* "--" "<head>" "<remote>" */
die(_("not handling anything other than two heads merge."));
+ if (repo_read_index_unmerged(the_repository))
+ die_resolve_conflict("merge");
+
o.branch1 = argv[++i];
o.branch2 = argv[++i];
diff --git a/builtin/merge.c b/builtin/merge.c
index c9746e3..062e911 100644
--- a/builtin/merge.c
+++ b/builtin/merge.c
@@ -688,16 +688,13 @@ static int try_merge_strategy(const char *strategy, struct commit_list *common,
struct commit_list *remoteheads,
struct commit *head)
{
- struct lock_file lock = LOCK_INIT;
const char *head_arg = "HEAD";
- hold_locked_index(&lock, LOCK_DIE_ON_ERROR);
- refresh_cache(REFRESH_QUIET);
- if (write_locked_index(&the_index, &lock,
- COMMIT_LOCK | SKIP_IF_UNCHANGED))
+ if (refresh_and_write_cache(REFRESH_QUIET, SKIP_IF_UNCHANGED, 0) < 0)
return error(_("Unable to write index."));
if (!strcmp(strategy, "recursive") || !strcmp(strategy, "subtree")) {
+ struct lock_file lock = LOCK_INIT;
int clean, x;
struct commit *result;
struct commit_list *reversed = NULL;
@@ -872,12 +869,8 @@ static int merge_trivial(struct commit *head, struct commit_list *remoteheads)
{
struct object_id result_tree, result_commit;
struct commit_list *parents, **pptr = &parents;
- struct lock_file lock = LOCK_INIT;
- hold_locked_index(&lock, LOCK_DIE_ON_ERROR);
- refresh_cache(REFRESH_QUIET);
- if (write_locked_index(&the_index, &lock,
- COMMIT_LOCK | SKIP_IF_UNCHANGED))
+ if (refresh_and_write_cache(REFRESH_QUIET, SKIP_IF_UNCHANGED, 0) < 0)
return error(_("Unable to write index."));
write_tree_trivial(&result_tree);
diff --git a/builtin/name-rev.c b/builtin/name-rev.c
index c785fe1..b0f0776 100644
--- a/builtin/name-rev.c
+++ b/builtin/name-rev.c
@@ -9,7 +9,11 @@
#include "sha1-lookup.h"
#include "commit-slab.h"
-#define CUTOFF_DATE_SLOP 86400 /* one day */
+/*
+ * One day. See the 'name a rev shortly after epoch' test in t6120 when
+ * changing this value
+ */
+#define CUTOFF_DATE_SLOP 86400
typedef struct rev_name {
const char *tip_name;
@@ -481,8 +485,13 @@ int cmd_name_rev(int argc, const char **argv, const char *prefix)
add_object_array(object, *argv, &revs);
}
- if (cutoff)
- cutoff = cutoff - CUTOFF_DATE_SLOP;
+ if (cutoff) {
+ /* check for underflow */
+ if (cutoff > TIME_MIN + CUTOFF_DATE_SLOP)
+ cutoff = cutoff - CUTOFF_DATE_SLOP;
+ else
+ cutoff = TIME_MIN;
+ }
for_each_ref(name_ref, &data);
if (transform_stdin) {
diff --git a/builtin/patch-id.c b/builtin/patch-id.c
index bd28b80..3059e52 100644
--- a/builtin/patch-id.c
+++ b/builtin/patch-id.c
@@ -1,15 +1,16 @@
+#include "cache.h"
#include "builtin.h"
#include "config.h"
#include "diff.h"
static void flush_current_id(int patchlen, struct object_id *id, struct object_id *result)
{
- char name[50];
+ char name[GIT_MAX_HEXSZ + 1];
if (!patchlen)
return;
- memcpy(name, oid_to_hex(id), GIT_SHA1_HEXSZ + 1);
+ memcpy(name, oid_to_hex(id), the_hash_algo->hexsz + 1);
printf("%s %s\n", oid_to_hex(result), name);
}
@@ -60,9 +61,9 @@ static int get_one_patchid(struct object_id *next_oid, struct object_id *result,
{
int patchlen = 0, found_next = 0;
int before = -1, after = -1;
- git_SHA_CTX ctx;
+ git_hash_ctx ctx;
- git_SHA1_Init(&ctx);
+ the_hash_algo->init_fn(&ctx);
oidclr(result);
while (strbuf_getwholeline(line_buf, stdin, '\n') != EOF) {
@@ -122,7 +123,7 @@ static int get_one_patchid(struct object_id *next_oid, struct object_id *result,
/* Compute the sha without whitespace */
len = remove_space(line);
patchlen += len;
- git_SHA1_Update(&ctx, line, len);
+ the_hash_algo->update_fn(&ctx, line, len);
}
if (!found_next)
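
The same hash-agnostic pattern recurs in the bisect.c, blame.c, patch-id.c, receive-pack.c and bundle.c hunks: buffers are sized with the GIT_MAX_* constants so they can hold any supported hash, while lengths and primitives are taken from the_hash_algo at run time. As a self-contained illustration, not code added by this patch:

/* Sketch: hash a buffer and print its hex id without assuming SHA-1. */
static void hash_and_print(const char *buf, size_t len)
{
	git_hash_ctx ctx;
	unsigned char hash[GIT_MAX_RAWSZ];	/* big enough for any supported algorithm */
	char hex[GIT_MAX_HEXSZ + 1];

	the_hash_algo->init_fn(&ctx);
	the_hash_algo->update_fn(&ctx, buf, len);
	the_hash_algo->final_fn(hash, &ctx);

	memcpy(hex, hash_to_hex(hash), the_hash_algo->hexsz + 1);
	printf("%s\n", hex);
}
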
diff --git a/builtin/push.c b/builtin/push.c
index cc1292a..843f5b2 100644
--- a/builtin/push.c
+++ b/builtin/push.c
@@ -143,8 +143,8 @@ static int push_url_of_remote(struct remote *remote, const char ***url_p)
return remote->url_nr;
}
-static NORETURN int die_push_simple(struct branch *branch,
- struct remote *remote)
+static NORETURN void die_push_simple(struct branch *branch,
+ struct remote *remote)
{
/*
* There's no point in using shorten_unambiguous_ref here,
diff --git a/builtin/receive-pack.c b/builtin/receive-pack.c
index dcf3855..411e0b4 100644
--- a/builtin/receive-pack.c
+++ b/builtin/receive-pack.c
@@ -417,24 +417,22 @@ static int copy_to_sideband(int in, int out, void *arg)
return 0;
}
-#define HMAC_BLOCK_SIZE 64
-
-static void hmac_sha1(unsigned char *out,
+static void hmac(unsigned char *out,
const char *key_in, size_t key_len,
const char *text, size_t text_len)
{
- unsigned char key[HMAC_BLOCK_SIZE];
- unsigned char k_ipad[HMAC_BLOCK_SIZE];
- unsigned char k_opad[HMAC_BLOCK_SIZE];
+ unsigned char key[GIT_MAX_BLKSZ];
+ unsigned char k_ipad[GIT_MAX_BLKSZ];
+ unsigned char k_opad[GIT_MAX_BLKSZ];
int i;
- git_SHA_CTX ctx;
+ git_hash_ctx ctx;
/* RFC 2104 2. (1) */
- memset(key, '\0', HMAC_BLOCK_SIZE);
- if (HMAC_BLOCK_SIZE < key_len) {
- git_SHA1_Init(&ctx);
- git_SHA1_Update(&ctx, key_in, key_len);
- git_SHA1_Final(key, &ctx);
+ memset(key, '\0', GIT_MAX_BLKSZ);
+ if (the_hash_algo->blksz < key_len) {
+ the_hash_algo->init_fn(&ctx);
+ the_hash_algo->update_fn(&ctx, key_in, key_len);
+ the_hash_algo->final_fn(key, &ctx);
} else {
memcpy(key, key_in, key_len);
}
@@ -446,29 +444,29 @@ static void hmac_sha1(unsigned char *out,
}
/* RFC 2104 2. (3) & (4) */
- git_SHA1_Init(&ctx);
- git_SHA1_Update(&ctx, k_ipad, sizeof(k_ipad));
- git_SHA1_Update(&ctx, text, text_len);
- git_SHA1_Final(out, &ctx);
+ the_hash_algo->init_fn(&ctx);
+ the_hash_algo->update_fn(&ctx, k_ipad, sizeof(k_ipad));
+ the_hash_algo->update_fn(&ctx, text, text_len);
+ the_hash_algo->final_fn(out, &ctx);
/* RFC 2104 2. (6) & (7) */
- git_SHA1_Init(&ctx);
- git_SHA1_Update(&ctx, k_opad, sizeof(k_opad));
- git_SHA1_Update(&ctx, out, GIT_SHA1_RAWSZ);
- git_SHA1_Final(out, &ctx);
+ the_hash_algo->init_fn(&ctx);
+ the_hash_algo->update_fn(&ctx, k_opad, sizeof(k_opad));
+ the_hash_algo->update_fn(&ctx, out, the_hash_algo->rawsz);
+ the_hash_algo->final_fn(out, &ctx);
}
static char *prepare_push_cert_nonce(const char *path, timestamp_t stamp)
{
struct strbuf buf = STRBUF_INIT;
- unsigned char sha1[GIT_SHA1_RAWSZ];
+ unsigned char hash[GIT_MAX_RAWSZ];
strbuf_addf(&buf, "%s:%"PRItime, path, stamp);
- hmac_sha1(sha1, buf.buf, buf.len, cert_nonce_seed, strlen(cert_nonce_seed));
+ hmac(hash, buf.buf, buf.len, cert_nonce_seed, strlen(cert_nonce_seed));
strbuf_release(&buf);
/* RFC 2104 5. HMAC-SHA1-80 */
- strbuf_addf(&buf, "%"PRItime"-%.*s", stamp, GIT_SHA1_HEXSZ, sha1_to_hex(sha1));
+ strbuf_addf(&buf, "%"PRItime"-%.*s", stamp, (int)the_hash_algo->hexsz, hash_to_hex(hash));
return strbuf_detach(&buf, NULL);
}
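
For reference, the construction that the generalized hmac() above implements is the one from RFC 2104: HMAC(K, text) = H((K xor opad) || H((K xor ipad) || text)), with K first zero-padded (or hashed down, if longer than the block size) to the block size. The rewrite only swaps the hard-coded SHA-1 primitives and 64-byte block for the_hash_algo's functions and blksz; the numbered RFC steps referenced in the comments are unchanged.
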
@@ -970,7 +968,7 @@ static const char *push_to_deploy(unsigned char *sha1,
if (run_command(&child))
return "Working directory has staged changes";
- read_tree[3] = sha1_to_hex(sha1);
+ read_tree[3] = hash_to_hex(sha1);
child_process_init(&child);
child.argv = read_tree;
child.env = env->argv;
@@ -987,13 +985,13 @@ static const char *push_to_deploy(unsigned char *sha1,
static const char *push_to_checkout_hook = "push-to-checkout";
-static const char *push_to_checkout(unsigned char *sha1,
+static const char *push_to_checkout(unsigned char *hash,
struct argv_array *env,
const char *work_tree)
{
argv_array_pushf(env, "GIT_WORK_TREE=%s", absolute_path(work_tree));
if (run_hook_le(env->argv, push_to_checkout_hook,
- sha1_to_hex(sha1), NULL))
+ hash_to_hex(hash), NULL))
return "push-to-checkout hook declined";
else
return NULL;
diff --git a/builtin/repack.c b/builtin/repack.c
index 3b3dd14..094c2f8 100644
--- a/builtin/repack.c
+++ b/builtin/repack.c
@@ -191,7 +191,7 @@ static int write_oid(const struct object_id *oid, struct packed_git *pack,
die(_("could not start pack-objects to repack promisor objects"));
}
- xwrite(cmd->in, oid_to_hex(oid), GIT_SHA1_HEXSZ);
+ xwrite(cmd->in, oid_to_hex(oid), the_hash_algo->hexsz);
xwrite(cmd->in, "\n", 1);
return 0;
}
diff --git a/builtin/replace.c b/builtin/replace.c
index 2a4afb3..bd92dc6 100644
--- a/builtin/replace.c
+++ b/builtin/replace.c
@@ -272,7 +272,7 @@ static int import_object(struct object_id *oid, enum object_type type,
return error(_("unable to spawn mktree"));
}
- if (strbuf_read(&result, cmd.out, 41) < 0) {
+ if (strbuf_read(&result, cmd.out, the_hash_algo->hexsz + 1) < 0) {
error_errno(_("unable to read from mktree"));
close(fd);
close(cmd.out);
@@ -358,14 +358,15 @@ static int replace_parents(struct strbuf *buf, int argc, const char **argv)
struct strbuf new_parents = STRBUF_INIT;
const char *parent_start, *parent_end;
int i;
+ const unsigned hexsz = the_hash_algo->hexsz;
/* find existing parents */
parent_start = buf->buf;
- parent_start += GIT_SHA1_HEXSZ + 6; /* "tree " + "hex sha1" + "\n" */
+ parent_start += hexsz + 6; /* "tree " + "hex sha1" + "\n" */
parent_end = parent_start;
while (starts_with(parent_end, "parent "))
- parent_end += 48; /* "parent " + "hex sha1" + "\n" */
+ parent_end += hexsz + 8; /* "parent " + "hex sha1" + "\n" */
/* prepare new parents */
for (i = 0; i < argc; i++) {
diff --git a/builtin/rev-list.c b/builtin/rev-list.c
index b8dc2e1..e28d62e 100644
--- a/builtin/rev-list.c
+++ b/builtin/rev-list.c
@@ -18,7 +18,6 @@
#include "reflog-walk.h"
#include "oidset.h"
#include "packfile.h"
-#include "object-store.h"
static const char rev_list_usage[] =
"git rev-list [OPTION] <commit-id>... [ -- paths... ]\n"
@@ -471,12 +470,6 @@ int cmd_rev_list(int argc, const char **argv, const char *prefix)
parse_list_objects_filter(&filter_options, arg);
if (filter_options.choice && !revs.blob_objects)
die(_("object filtering requires --objects"));
- if (filter_options.choice == LOFC_SPARSE_OID &&
- !filter_options.sparse_oid_value)
- die(
- _("invalid sparse value '%s'"),
- list_objects_filter_spec(
- &filter_options));
continue;
}
if (!strcmp(arg, ("--no-" CL_ARG__FILTER))) {
diff --git a/builtin/rev-parse.c b/builtin/rev-parse.c
index f8bbe6d..308c67e 100644
--- a/builtin/rev-parse.c
+++ b/builtin/rev-parse.c
@@ -593,6 +593,7 @@ int cmd_rev_parse(int argc, const char **argv, const char *prefix)
const char *name = NULL;
struct object_context unused;
struct strbuf buf = STRBUF_INIT;
+ const int hexsz = the_hash_algo->hexsz;
if (argc > 1 && !strcmp("--parseopt", argv[1]))
return cmd_parseopt(argc - 1, argv + 1, prefix);
@@ -730,8 +731,8 @@ int cmd_rev_parse(int argc, const char **argv, const char *prefix)
abbrev = strtoul(arg, NULL, 10);
if (abbrev < MINIMUM_ABBREV)
abbrev = MINIMUM_ABBREV;
- else if (40 <= abbrev)
- abbrev = 40;
+ else if (hexsz <= abbrev)
+ abbrev = hexsz;
continue;
}
if (!strcmp(arg, "--sq")) {
diff --git a/builtin/show-index.c b/builtin/show-index.c
index a6e6788..0826f6a 100644
--- a/builtin/show-index.c
+++ b/builtin/show-index.c
@@ -11,6 +11,7 @@ int cmd_show_index(int argc, const char **argv, const char *prefix)
unsigned nr;
unsigned int version;
static unsigned int top_index[256];
+ const unsigned hashsz = the_hash_algo->rawsz;
if (argc != 1)
usage(show_index_usage);
@@ -36,23 +37,23 @@ int cmd_show_index(int argc, const char **argv, const char *prefix)
}
if (version == 1) {
for (i = 0; i < nr; i++) {
- unsigned int offset, entry[6];
+ unsigned int offset, entry[(GIT_MAX_RAWSZ + 4) / sizeof(unsigned int)];
- if (fread(entry, 4 + 20, 1, stdin) != 1)
+ if (fread(entry, 4 + hashsz, 1, stdin) != 1)
die("unable to read entry %u/%u", i, nr);
offset = ntohl(entry[0]);
- printf("%u %s\n", offset, sha1_to_hex((void *)(entry+1)));
+ printf("%u %s\n", offset, hash_to_hex((void *)(entry+1)));
}
} else {
unsigned off64_nr = 0;
struct {
- unsigned char sha1[20];
+ struct object_id oid;
uint32_t crc;
uint32_t off;
} *entries;
ALLOC_ARRAY(entries, nr);
for (i = 0; i < nr; i++)
- if (fread(entries[i].sha1, 20, 1, stdin) != 1)
+ if (fread(entries[i].oid.hash, hashsz, 1, stdin) != 1)
die("unable to read sha1 %u/%u", i, nr);
for (i = 0; i < nr; i++)
if (fread(&entries[i].crc, 4, 1, stdin) != 1)
@@ -77,7 +78,7 @@ int cmd_show_index(int argc, const char **argv, const char *prefix)
}
printf("%" PRIuMAX " %s (%08"PRIx32")\n",
(uintmax_t) offset,
- sha1_to_hex(entries[i].sha1),
+ oid_to_hex(&entries[i].oid),
ntohl(entries[i].crc));
}
free(entries);
diff --git a/builtin/stash.c b/builtin/stash.c
index b5a301f..bb4f6d8 100644
--- a/builtin/stash.c
+++ b/builtin/stash.c
@@ -396,7 +396,7 @@ static int do_apply_stash(const char *prefix, struct stash_info *info,
const struct object_id *bases[1];
read_cache_preload(NULL);
- if (refresh_cache(REFRESH_QUIET))
+ if (refresh_and_write_cache(REFRESH_QUIET, 0, 0))
return -1;
if (write_cache_as_tree(&c_tree, 0, NULL))
@@ -427,6 +427,8 @@ static int do_apply_stash(const char *prefix, struct stash_info *info,
return error(_("could not save index tree"));
reset_head();
+ discard_cache();
+ read_cache();
}
}
@@ -485,7 +487,7 @@ static int do_apply_stash(const char *prefix, struct stash_info *info,
}
if (quiet) {
- if (refresh_cache(REFRESH_QUIET))
+ if (refresh_and_write_cache(REFRESH_QUIET, 0, 0))
warning("could not refresh index");
} else {
struct child_process cp = CHILD_PROCESS_INIT;
@@ -497,6 +499,10 @@ static int do_apply_stash(const char *prefix, struct stash_info *info,
*/
cp.git_cmd = 1;
cp.dir = prefix;
+ argv_array_pushf(&cp.env_array, GIT_WORK_TREE_ENVIRONMENT"=%s",
+ absolute_path(get_git_work_tree()));
+ argv_array_pushf(&cp.env_array, GIT_DIR_ENVIRONMENT"=%s",
+ absolute_path(get_git_dir()));
argv_array_push(&cp.args, "status");
run_command(&cp);
}
@@ -1129,7 +1135,10 @@ static int do_create_stash(const struct pathspec *ps, struct strbuf *stash_msg_b
prepare_fallback_ident("git stash", "git@stash");
read_cache_preload(NULL);
- refresh_cache(REFRESH_QUIET);
+ if (refresh_and_write_cache(REFRESH_QUIET, 0, 0) < 0) {
+ ret = -1;
+ goto done;
+ }
if (get_oid("HEAD", &info->b_commit)) {
if (!quiet)
@@ -1290,7 +1299,7 @@ static int do_push_stash(const struct pathspec *ps, const char *stash_msg, int q
free(ps_matched);
}
- if (refresh_cache(REFRESH_QUIET)) {
+ if (refresh_and_write_cache(REFRESH_QUIET, 0, 0)) {
ret = -1;
goto done;
}
diff --git a/builtin/submodule--helper.c b/builtin/submodule--helper.c
index 909e77e..2c2395a 100644
--- a/builtin/submodule--helper.c
+++ b/builtin/submodule--helper.c
@@ -424,7 +424,7 @@ static int module_list(int argc, const char **argv, const char *prefix)
const struct cache_entry *ce = list.entries[i];
if (ce_stage(ce))
- printf("%06o %s U\t", ce->ce_mode, sha1_to_hex(null_sha1));
+ printf("%06o %s U\t", ce->ce_mode, oid_to_hex(&null_oid));
else
printf("%06o %s %d\t", ce->ce_mode,
oid_to_hex(&ce->oid), ce_stage(ce));
@@ -1874,7 +1874,7 @@ static int update_clone(int argc, const char **argv, const char *prefix)
};
const char *const git_submodule_helper_usage[] = {
- N_("git submodule--helper update_clone [--prefix=<path>] [<path>...]"),
+ N_("git submodule--helper update-clone [--prefix=<path>] [<path>...]"),
NULL
};
suc.prefix = prefix;
diff --git a/builtin/worktree.c b/builtin/worktree.c
index 7f094f8..4de44f5 100644
--- a/builtin/worktree.c
+++ b/builtin/worktree.c
@@ -10,7 +10,6 @@
#include "run-command.h"
#include "sigchain.h"
#include "submodule.h"
-#include "refs.h"
#include "utf8.h"
#include "worktree.h"
@@ -350,7 +349,7 @@ static int add_worktree(const char *path, const char *refname,
*/
strbuf_reset(&sb);
strbuf_addf(&sb, "%s/HEAD", sb_repo.buf);
- write_file(sb.buf, "%s", sha1_to_hex(null_sha1));
+ write_file(sb.buf, "%s", oid_to_hex(&null_oid));
strbuf_reset(&sb);
strbuf_addf(&sb, "%s/commondir", sb_repo.buf);
write_file(sb.buf, "../..");
diff --git a/bundle.c b/bundle.c
index b5d21cd..a85ed3f 100644
--- a/bundle.c
+++ b/bundle.c
@@ -282,7 +282,7 @@ static int write_pack_data(int bundle_fd, struct rev_info *revs)
struct object *object = revs->pending.objects[i].item;
if (object->flags & UNINTERESTING)
write_or_die(pack_objects.in, "^", 1);
- write_or_die(pack_objects.in, oid_to_hex(&object->oid), GIT_SHA1_HEXSZ);
+ write_or_die(pack_objects.in, oid_to_hex(&object->oid), the_hash_algo->hexsz);
write_or_die(pack_objects.in, "\n", 1);
}
close(pack_objects.in);
@@ -414,7 +414,7 @@ static int write_bundle_refs(int bundle_fd, struct rev_info *revs)
}
ref_count++;
- write_or_die(bundle_fd, oid_to_hex(&e->item->oid), 40);
+ write_or_die(bundle_fd, oid_to_hex(&e->item->oid), the_hash_algo->hexsz);
write_or_die(bundle_fd, " ", 1);
write_or_die(bundle_fd, display_ref, strlen(display_ref));
write_or_die(bundle_fd, "\n", 1);
diff --git a/cache-tree.c b/cache-tree.c
index 0e5724f..1bd1b23 100644
--- a/cache-tree.c
+++ b/cache-tree.c
@@ -408,7 +408,7 @@ static int update_one(struct cache_tree *it,
if (repair) {
struct object_id oid;
hash_object_file(buffer.buf, buffer.len, tree_type, &oid);
- if (has_object_file(&oid))
+ if (has_object_file_with_flags(&oid, OBJECT_INFO_SKIP_FETCH_OBJECT))
oidcpy(&it->oid, &oid);
else
to_invalidate = 1;
@@ -609,11 +609,66 @@ static struct cache_tree *cache_tree_find(struct cache_tree *it, const char *pat
return it;
}
+static int write_index_as_tree_internal(struct object_id *oid,
+ struct index_state *index_state,
+ int cache_tree_valid,
+ int flags,
+ const char *prefix)
+{
+ if (flags & WRITE_TREE_IGNORE_CACHE_TREE) {
+ cache_tree_free(&index_state->cache_tree);
+ cache_tree_valid = 0;
+ }
+
+ if (!index_state->cache_tree)
+ index_state->cache_tree = cache_tree();
+
+ if (!cache_tree_valid && cache_tree_update(index_state, flags) < 0)
+ return WRITE_TREE_UNMERGED_INDEX;
+
+ if (prefix) {
+ struct cache_tree *subtree;
+ subtree = cache_tree_find(index_state->cache_tree, prefix);
+ if (!subtree)
+ return WRITE_TREE_PREFIX_ERROR;
+ oidcpy(oid, &subtree->oid);
+ }
+ else
+ oidcpy(oid, &index_state->cache_tree->oid);
+
+ return 0;
+}
+
+struct tree* write_in_core_index_as_tree(struct repository *repo) {
+ struct object_id o;
+ int was_valid, ret;
+
+ struct index_state *index_state = repo->index;
+ was_valid = index_state->cache_tree &&
+ cache_tree_fully_valid(index_state->cache_tree);
+
+ ret = write_index_as_tree_internal(&o, index_state, was_valid, 0, NULL);
+ if (ret == WRITE_TREE_UNMERGED_INDEX) {
+ int i;
+ fprintf(stderr, "BUG: There are unmerged index entries:\n");
+ for (i = 0; i < index_state->cache_nr; i++) {
+ const struct cache_entry *ce = index_state->cache[i];
+ if (ce_stage(ce))
+ fprintf(stderr, "BUG: %d %.*s\n", ce_stage(ce),
+ (int)ce_namelen(ce), ce->name);
+ }
+ BUG("unmerged index entries when writing inmemory index");
+ }
+
+ return lookup_tree(repo, &index_state->cache_tree->oid);
+}
+
+
int write_index_as_tree(struct object_id *oid, struct index_state *index_state, const char *index_path, int flags, const char *prefix)
{
int entries, was_valid;
struct lock_file lock_file = LOCK_INIT;
- int ret = 0;
+ int ret;
hold_lock_file_for_update(&lock_file, index_path, LOCK_DIE_ON_ERROR);
@@ -622,18 +677,14 @@ int write_index_as_tree(struct object_id *oid, struct index_state *index_state,
ret = WRITE_TREE_UNREADABLE_INDEX;
goto out;
}
- if (flags & WRITE_TREE_IGNORE_CACHE_TREE)
- cache_tree_free(&index_state->cache_tree);
- if (!index_state->cache_tree)
- index_state->cache_tree = cache_tree();
+ was_valid = !(flags & WRITE_TREE_IGNORE_CACHE_TREE) &&
+ index_state->cache_tree &&
+ cache_tree_fully_valid(index_state->cache_tree);
- was_valid = cache_tree_fully_valid(index_state->cache_tree);
- if (!was_valid) {
- if (cache_tree_update(index_state, flags) < 0) {
- ret = WRITE_TREE_UNMERGED_INDEX;
- goto out;
- }
+ ret = write_index_as_tree_internal(oid, index_state, was_valid, flags,
+ prefix);
+ if (!ret && !was_valid) {
write_locked_index(index_state, &lock_file, COMMIT_LOCK);
/* Not being able to write is fine -- we are only interested
* in updating the cache-tree part, and if the next caller
@@ -643,18 +694,6 @@ int write_index_as_tree(struct object_id *oid, struct index_state *index_state,
*/
}
- if (prefix) {
- struct cache_tree *subtree;
- subtree = cache_tree_find(index_state->cache_tree, prefix);
- if (!subtree) {
- ret = WRITE_TREE_PREFIX_ERROR;
- goto out;
- }
- oidcpy(oid, &subtree->oid);
- }
- else
- oidcpy(oid, &index_state->cache_tree->oid);
-
out:
rollback_lock_file(&lock_file);
return ret;
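
A sketch, not from the patch and with an invented caller, of the new in-core entry point declared in cache-tree.h below; it writes the current in-core index as a tree without going through a temporary index file:

#include "cache.h"
#include "cache-tree.h"

static void example_show_index_tree(struct repository *r)
{
	/* Updates r->index->cache_tree as needed and returns the tree. */
	struct tree *tree = write_in_core_index_as_tree(r);

	printf("index tree: %s\n", oid_to_hex(&tree->object.oid));
}
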
diff --git a/cache-tree.h b/cache-tree.h
index 757bbc4..639bfa5 100644
--- a/cache-tree.h
+++ b/cache-tree.h
@@ -34,7 +34,7 @@ int cache_tree_fully_valid(struct cache_tree *);
int cache_tree_update(struct index_state *, int);
void cache_tree_verify(struct repository *, struct index_state *);
-/* bitmasks to write_cache_as_tree flags */
+/* bitmasks to write_index_as_tree flags */
#define WRITE_TREE_MISSING_OK 1
#define WRITE_TREE_IGNORE_CACHE_TREE 2
#define WRITE_TREE_DRY_RUN 4
@@ -46,6 +46,7 @@ void cache_tree_verify(struct repository *, struct index_state *);
#define WRITE_TREE_UNMERGED_INDEX (-2)
#define WRITE_TREE_PREFIX_ERROR (-3)
+struct tree* write_in_core_index_as_tree(struct repository *repo);
int write_index_as_tree(struct object_id *oid, struct index_state *index_state, const char *index_path, int flags, const char *prefix);
void prime_cache_tree(struct repository *, struct index_state *, struct tree *);
diff --git a/cache.h b/cache.h
index 5624e6c..04cabaa 100644
--- a/cache.h
+++ b/cache.h
@@ -414,6 +414,7 @@ extern struct index_state the_index;
#define add_file_to_cache(path, flags) add_file_to_index(&the_index, (path), (flags))
#define chmod_cache_entry(ce, flip) chmod_index_entry(&the_index, (ce), (flip))
#define refresh_cache(flags) refresh_index(&the_index, (flags), NULL, NULL, NULL)
+#define refresh_and_write_cache(refresh_flags, write_flags, gentle) repo_refresh_and_write_index(the_repository, (refresh_flags), (write_flags), (gentle), NULL, NULL, NULL)
#define ce_match_stat(ce, st, options) ie_match_stat(&the_index, (ce), (st), (options))
#define ce_modified(ce, st, options) ie_modified(&the_index, (ce), (st), (options))
#define cache_dir_exists(name, namelen) index_dir_exists(&the_index, (name), (namelen))
@@ -747,6 +748,19 @@ struct cache_entry *index_file_exists(struct index_state *istate, const char *na
*/
int index_name_pos(const struct index_state *, const char *name, int namelen);
+/*
+ * Some functions return the negative complement of an insert position when a
+ * precise match was not found but a position was found where the entry would
+ * need to be inserted. This helper protects that logic from any integer
+ * underflow.
+ */
+static inline int index_pos_to_insert_pos(uintmax_t pos)
+{
+ if (pos > INT_MAX)
+ die("overflow: -1 - %"PRIuMAX, pos);
+ return -1 - (int)pos;
+}
+
#define ADD_CACHE_OK_TO_ADD 1 /* Ok to add */
#define ADD_CACHE_OK_TO_REPLACE 2 /* Ok to replace file/directory */
#define ADD_CACHE_SKIP_DFCHECK 4 /* Ok to skip DF conflict checks */
@@ -834,6 +848,23 @@ void fill_stat_cache_info(struct index_state *istate, struct cache_entry *ce, st
#define REFRESH_IN_PORCELAIN 0x0020 /* user friendly output, not "needs update" */
#define REFRESH_PROGRESS 0x0040 /* show progress bar if stderr is tty */
int refresh_index(struct index_state *, unsigned int flags, const struct pathspec *pathspec, char *seen, const char *header_msg);
+/*
+ * Refresh the index and write it to disk.
+ *
+ * 'refresh_flags' is passed directly to 'refresh_index()', while
+ * 'COMMIT_LOCK | write_flags' is passed to 'write_locked_index()', so
+ * the lockfile is always either committed or rolled back.
+ *
+ * If 'gentle' is passed, errors locking the index are ignored.
+ *
+ * Return 1 if refreshing the index returns an error, -1 if writing
+ * the index to disk fails, 0 on success.
+ *
+ * Note that if refreshing the index returns an error, we still write
+ * out the index (unless locking fails).
+ */
+int repo_refresh_and_write_index(struct repository*, unsigned int refresh_flags, unsigned int write_flags, int gentle, const struct pathspec *, char *seen, const char *header_msg);
+
struct cache_entry *refresh_cache_entry(struct index_state *, struct cache_entry *, unsigned int);
void set_alternate_index_output(const char *);
@@ -1049,7 +1080,6 @@ const char *repo_find_unique_abbrev(struct repository *r, const struct object_id
int repo_find_unique_abbrev_r(struct repository *r, char *hex, const struct object_id *oid, int len);
#define find_unique_abbrev_r(hex, oid, len) repo_find_unique_abbrev_r(the_repository, hex, oid, len)
-extern const unsigned char null_sha1[GIT_MAX_RAWSZ];
extern const struct object_id null_oid;
static inline int hashcmp(const unsigned char *sha1, const unsigned char *sha2)
@@ -1084,14 +1114,9 @@ static inline int oideq(const struct object_id *oid1, const struct object_id *oi
return hasheq(oid1->hash, oid2->hash);
}
-static inline int is_null_sha1(const unsigned char *sha1)
-{
- return hasheq(sha1, null_sha1);
-}
-
static inline int is_null_oid(const struct object_id *oid)
{
- return hasheq(oid->hash, null_sha1);
+ return oideq(oid, &null_oid);
}
static inline void hashcpy(unsigned char *sha_dst, const unsigned char *sha_src)
@@ -1536,8 +1561,7 @@ struct date_mode {
struct date_mode *date_mode_from_type(enum date_mode_type type);
const char *show_date(timestamp_t time, int timezone, const struct date_mode *mode);
-void show_date_relative(timestamp_t time, const struct timeval *now,
- struct strbuf *timebuf);
+void show_date_relative(timestamp_t time, struct strbuf *timebuf);
void show_date_human(timestamp_t time, int tz, const struct timeval *now,
struct strbuf *timebuf);
int parse_date(const char *date, struct strbuf *out);
@@ -1546,7 +1570,7 @@ int parse_expiry_date(const char *date, timestamp_t *timestamp);
void datestamp(struct strbuf *out);
#define approxidate(s) approxidate_careful((s), NULL)
timestamp_t approxidate_careful(const char *, int *);
-timestamp_t approxidate_relative(const char *date, const struct timeval *now);
+timestamp_t approxidate_relative(const char *date);
void parse_date_format(const char *format, struct date_mode *mode);
int date_overflows(timestamp_t date);
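
The index_pos_to_insert_pos() helper added above encodes the long-standing index_name_pos() convention; a small sketch (function name invented) of what callers decode on the other side:

#include "cache.h"

static int example_find_or_insert_pos(struct index_state *istate,
				      const char *name)
{
	int pos = index_name_pos(istate, name, strlen(name));

	if (pos >= 0)
		return pos;	/* exact match found at 'pos' */
	/* not found: -1 - pos is where the entry would be inserted */
	return -1 - pos;
}
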
diff --git a/ci/install-dependencies.sh b/ci/install-dependencies.sh
index 8cc7250..85a9d6b 100755
--- a/ci/install-dependencies.sh
+++ b/ci/install-dependencies.sh
@@ -49,11 +49,12 @@ osx-clang|osx-gcc)
;;
StaticAnalysis)
sudo apt-get -q update
- sudo apt-get -q -y install coccinelle
+ sudo apt-get -q -y install coccinelle libcurl4-openssl-dev libssl-dev \
+ libexpat-dev gettext
;;
Documentation)
sudo apt-get -q update
- sudo apt-get -q -y install asciidoc xmlto
+ sudo apt-get -q -y install asciidoc xmlto docbook-xsl-ns
test -n "$ALREADY_HAVE_ASCIIDOCTOR" ||
gem install --version 1.5.8 asciidoctor
diff --git a/ci/lib.sh b/ci/lib.sh
index 29dc740..c8c2c38 100755
--- a/ci/lib.sh
+++ b/ci/lib.sh
@@ -34,6 +34,11 @@ save_good_tree () {
# successfully before (e.g. because the branch got rebased, changing only
# the commit messages).
skip_good_tree () {
+ if test "$TRAVIS_DEBUG_MODE" = true
+ then
+ return
+ fi
+
if ! good_tree_info="$(grep "^$(git rev-parse $CI_COMMIT^{tree}) " "$good_trees_file")"
then
# Haven't seen this tree yet, or no cached good trees file yet.
diff --git a/ci/run-static-analysis.sh b/ci/run-static-analysis.sh
index a19aa7e..65bcebd 100755
--- a/ci/run-static-analysis.sh
+++ b/ci/run-static-analysis.sh
@@ -26,4 +26,7 @@ then
exit 1
fi
+make hdr-check ||
+exit 1
+
save_good_tree
diff --git a/ci/test-documentation.sh b/ci/test-documentation.sh
index d490898..b3e76ef 100755
--- a/ci/test-documentation.sh
+++ b/ci/test-documentation.sh
@@ -8,6 +8,8 @@
filter_log () {
sed -e '/^GIT_VERSION = /d' \
-e '/^ \* new asciidoc flags$/d' \
+ -e '/stripped namespace before processing/d' \
+ -e '/Attributed.*IDs for element/d' \
"$1"
}
diff --git a/combine-diff.c b/combine-diff.c
index 3e49f3b..d5c4d83 100644
--- a/combine-diff.c
+++ b/combine-diff.c
@@ -930,7 +930,7 @@ static void show_combined_header(struct combine_diff_path *elem,
int show_file_header)
{
struct diff_options *opt = &rev->diffopt;
- int abbrev = opt->flags.full_index ? GIT_SHA1_HEXSZ : DEFAULT_ABBREV;
+ int abbrev = opt->flags.full_index ? the_hash_algo->hexsz : DEFAULT_ABBREV;
const char *a_prefix = opt->a_prefix ? opt->a_prefix : "a/";
const char *b_prefix = opt->b_prefix ? opt->b_prefix : "b/";
const char *c_meta = diff_get_color_opt(opt, DIFF_METAINFO);
diff --git a/commit-graph.c b/commit-graph.c
index 9b02d2c..fc4a43b 100644
--- a/commit-graph.c
+++ b/commit-graph.c
@@ -468,14 +468,21 @@ static int prepare_commit_graph(struct repository *r)
{
struct object_directory *odb;
- if (git_env_bool(GIT_TEST_COMMIT_GRAPH_DIE_ON_LOAD, 0))
- die("dying as requested by the '%s' variable on commit-graph load!",
- GIT_TEST_COMMIT_GRAPH_DIE_ON_LOAD);
+ /*
+ * This must come before the "already attempted?" check below, because
+ * we want to disable even an already-loaded graph file.
+ */
+ if (r->commit_graph_disabled)
+ return 0;
if (r->objects->commit_graph_attempted)
return !!r->objects->commit_graph;
r->objects->commit_graph_attempted = 1;
+ if (git_env_bool(GIT_TEST_COMMIT_GRAPH_DIE_ON_LOAD, 0))
+ die("dying as requested by the '%s' variable on commit-graph load!",
+ GIT_TEST_COMMIT_GRAPH_DIE_ON_LOAD);
+
prepare_repo_settings(r);
if (!git_env_bool(GIT_TEST_COMMIT_GRAPH, 0) &&
@@ -839,12 +846,19 @@ static void write_graph_chunk_data(struct hashfile *f, int hash_len,
while (list < last) {
struct commit_list *parent;
+ struct object_id *tree;
int edge_value;
uint32_t packedDate[2];
display_progress(ctx->progress, ++ctx->progress_cnt);
- parse_commit_no_graph(*list);
- hashwrite(f, get_commit_tree_oid(*list)->hash, hash_len);
+ if (parse_commit_no_graph(*list))
+ die(_("unable to parse commit %s"),
+ oid_to_hex(&(*list)->object.oid));
+ tree = get_commit_tree_oid(*list);
+ if (!tree)
+ die(_("unable to get tree for %s"),
+ oid_to_hex(&(*list)->object.oid));
+ hashwrite(f, tree->hash, hash_len);
parent = (*list)->parents;
@@ -1050,7 +1064,7 @@ static void close_reachable(struct write_commit_graph_context *ctx)
if (ctx->report_progress)
ctx->progress = start_delayed_progress(
_("Expanding reachable commits in commit graph"),
- ctx->oids.nr);
+ 0);
for (i = 0; i < ctx->oids.nr; i++) {
display_progress(ctx->progress, i + 1);
commit = lookup_commit(ctx->r, &ctx->oids.list[i]);
@@ -1279,7 +1293,6 @@ static uint32_t count_distinct_commits(struct write_commit_graph_context *ctx)
static void copy_oids_to_commits(struct write_commit_graph_context *ctx)
{
uint32_t i;
- struct commit_list *parent;
ctx->num_extra_edges = 0;
if (ctx->report_progress)
@@ -1287,7 +1300,8 @@ static void copy_oids_to_commits(struct write_commit_graph_context *ctx)
_("Finding extra edges in commit graph"),
ctx->oids.nr);
for (i = 0; i < ctx->oids.nr; i++) {
- int num_parents = 0;
+ unsigned int num_parents;
+
display_progress(ctx->progress, i + 1);
if (i > 0 && oideq(&ctx->oids.list[i - 1], &ctx->oids.list[i]))
continue;
@@ -1301,10 +1315,7 @@ static void copy_oids_to_commits(struct write_commit_graph_context *ctx)
parse_commit_no_graph(ctx->commits.list[ctx->commits.nr]);
- for (parent = ctx->commits.list[ctx->commits.nr]->parents;
- parent; parent = parent->next)
- num_parents++;
-
+ num_parents = commit_list_count(ctx->commits.list[ctx->commits.nr]->parents);
if (num_parents > 2)
ctx->num_extra_edges += num_parents - 1;
@@ -1522,8 +1533,8 @@ static int write_commit_graph_file(struct write_commit_graph_context *ctx)
static void split_graph_merge_strategy(struct write_commit_graph_context *ctx)
{
- struct commit_graph *g = ctx->r->objects->commit_graph;
- uint32_t num_commits = ctx->commits.nr;
+ struct commit_graph *g;
+ uint32_t num_commits;
uint32_t i;
int max_commits = 0;
@@ -1535,6 +1546,7 @@ static void split_graph_merge_strategy(struct write_commit_graph_context *ctx)
}
g = ctx->r->objects->commit_graph;
+ num_commits = ctx->commits.nr;
ctx->num_commit_graphs_after = ctx->num_commit_graphs_before + 1;
while (g && (g->num_commits <= size_mult * num_commits ||
@@ -1616,8 +1628,7 @@ static int commit_compare(const void *_a, const void *_b)
static void sort_and_scan_merged_commits(struct write_commit_graph_context *ctx)
{
- uint32_t i, num_parents;
- struct commit_list *parent;
+ uint32_t i;
if (ctx->report_progress)
ctx->progress = start_delayed_progress(
@@ -1635,10 +1646,9 @@ static void sort_and_scan_merged_commits(struct write_commit_graph_context *ctx)
die(_("unexpected duplicate commit id %s"),
oid_to_hex(&ctx->commits.list[i]->object.oid));
} else {
- num_parents = 0;
- for (parent = ctx->commits.list[i]->parents; parent; parent = parent->next)
- num_parents++;
+ unsigned int num_parents;
+ num_parents = commit_list_count(ctx->commits.list[i]->parents);
if (num_parents > 2)
ctx->num_extra_edges += num_parents - 1;
}
@@ -1992,8 +2002,10 @@ int verify_commit_graph(struct repository *r, struct commit_graph *g, int flags)
if (verify_commit_graph_error & ~VERIFY_COMMIT_GRAPH_ERROR_HASH)
return verify_commit_graph_error;
- progress = start_progress(_("Verifying commits in commit graph"),
- g->num_commits);
+ if (flags & COMMIT_GRAPH_WRITE_PROGRESS)
+ progress = start_progress(_("Verifying commits in commit graph"),
+ g->num_commits);
+
for (i = 0; i < g->num_commits; i++) {
struct commit *graph_commit, *odb_commit;
struct commit_list *graph_parents, *odb_parents;
@@ -2101,3 +2113,8 @@ void free_commit_graph(struct commit_graph *g)
free(g->filename);
free(g);
}
+
+void disable_commit_graph(struct repository *r)
+{
+ r->commit_graph_disabled = 1;
+}
diff --git a/commit-graph.h b/commit-graph.h
index 486e64e..7f5c933 100644
--- a/commit-graph.h
+++ b/commit-graph.h
@@ -107,4 +107,10 @@ int verify_commit_graph(struct repository *r, struct commit_graph *g, int flags)
void close_commit_graph(struct raw_object_store *);
void free_commit_graph(struct commit_graph *);
+/*
+ * Disable further use of the commit graph in this process when parsing a
+ * "struct commit".
+ */
+void disable_commit_graph(struct repository *r);
+
#endif
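
A sketch (caller name invented) of the new disable_commit_graph() hook declared above; a command that must not answer queries from the commit-graph file can switch it off for the whole process before parsing any commits:

#include "cache.h"
#include "commit-graph.h"

static void example_force_object_parsing(void)
{
	/* All later commit parsing falls back to the object database. */
	disable_commit_graph(the_repository);
}
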
diff --git a/commit.c b/commit.c
index 3fe5f8f..40890ae 100644
--- a/commit.c
+++ b/commit.c
@@ -358,7 +358,8 @@ struct tree *repo_get_commit_tree(struct repository *r,
struct object_id *get_commit_tree_oid(const struct commit *commit)
{
- return &get_commit_tree(commit)->object.oid;
+ struct tree *tree = get_commit_tree(commit);
+ return tree ? &tree->object.oid : NULL;
}
void release_commit_memory(struct parsed_object_pool *pool, struct commit *c)
diff --git a/compat/mingw.c b/compat/mingw.c
index 7a0d619..6b765d9 100644
--- a/compat/mingw.c
+++ b/compat/mingw.c
@@ -1236,11 +1236,6 @@ static int wenvcmp(const void *a, const void *b)
return _wcsnicmp(p, q, p_len);
}
-/* We need a stable sort to convert the environment between UTF-16 <-> UTF-8 */
-#ifndef INTERNAL_QSORT
-#include "qsort.c"
-#endif
-
/*
* Build an environment block combining the inherited environment
* merged with the given list of settings.
@@ -1279,8 +1274,8 @@ static wchar_t *make_environment_block(char **deltaenv)
/*
* If there is a deltaenv, let's accumulate all keys into `array`,
- * sort them using the stable git_qsort() and then copy, skipping
- * duplicate keys
+ * sort them using the stable git_stable_qsort() and then copy,
+ * skipping duplicate keys
*/
for (p = wenv; p && *p; ) {
ALLOC_GROW(array, nr + 1, alloc);
@@ -1303,7 +1298,7 @@ static wchar_t *make_environment_block(char **deltaenv)
p += wlen + 1;
}
- git_qsort(array, nr, sizeof(*array), wenvcmp);
+ git_stable_qsort(array, nr, sizeof(*array), wenvcmp);
ALLOC_ARRAY(result, size + delta_size);
for (p = result, i = 0; i < nr; i++) {
@@ -1670,6 +1665,8 @@ char *mingw_getenv(const char *name)
if (!w_key)
die("Out of memory, (tried to allocate %u wchar_t's)", len_key);
xutftowcs(w_key, name, len_key);
+ /* GetEnvironmentVariableW() only sets the last error upon failure */
+ SetLastError(ERROR_SUCCESS);
len_value = GetEnvironmentVariableW(w_key, w_value, ARRAY_SIZE(w_value));
if (!len_value && GetLastError() == ERROR_ENVVAR_NOT_FOUND) {
free(w_key);
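
The stable sort used above is the exported git_stable_qsort(); a sketch, with an invented comparator and caller, using the matching STABLE_QSORT() convenience macro that also appears in the diffcore-rename hunk further down:

#include "git-compat-util.h"

static int cmp_by_length(const void *a, const void *b)
{
	size_t la = strlen(*(const char *const *)a);
	size_t lb = strlen(*(const char *const *)b);

	return la < lb ? -1 : la > lb;
}

static void example_sort_names(const char **names, size_t nr)
{
	/* Names of equal length keep their original relative order. */
	STABLE_QSORT(names, nr, cmp_by_length);
}
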
diff --git a/compat/vcbuild/scripts/clink.pl b/compat/vcbuild/scripts/clink.pl
index c7b021b..ec95a3b 100755
--- a/compat/vcbuild/scripts/clink.pl
+++ b/compat/vcbuild/scripts/clink.pl
@@ -68,8 +68,54 @@ while (@ARGV) {
} elsif ("$arg" =~ /^-L/ && "$arg" ne "-LTCG") {
$arg =~ s/^-L/-LIBPATH:/;
push(@lflags, $arg);
- } elsif ("$arg" =~ /^-R/) {
+ } elsif ("$arg" =~ /^-[Rl]/) {
# eat
+ } elsif ("$arg" eq "-Werror") {
+ push(@cflags, "-WX");
+ } elsif ("$arg" eq "-Wall") {
+ # cl.exe understands -Wall, but it is really overzealous
+ push(@cflags, "-W4");
+ # disable the "signed/unsigned mismatch" warnings; our source code violates that
+ push(@cflags, "-wd4018");
+ push(@cflags, "-wd4245");
+ push(@cflags, "-wd4389");
+ # disable the "unreferenced formal parameter" warning; our source code violates that
+ push(@cflags, "-wd4100");
+ # disable the "conditional expression is constant" warning; our source code violates that
+ push(@cflags, "-wd4127");
+ # disable the "const object should be initialized" warning; these warnings affect only objects that are `static`
+ push(@cflags, "-wd4132");
+ # disable the "function/data pointer conversion in expression" warning; our source code violates that
+ push(@cflags, "-wd4152");
+ # disable the "non-constant aggregate initializer" warning; our source code violates that
+ push(@cflags, "-wd4204");
+ # disable the "cannot be initialized using address of automatic variable" warning; our source code violates that
+ push(@cflags, "-wd4221");
+ # disable the "possible loss of data" warnings; our source code violates that
+ push(@cflags, "-wd4244");
+ push(@cflags, "-wd4267");
+ # disable the "array is too small to include a terminating null character" warning; we ab-use strings to initialize OIDs
+ push(@cflags, "-wd4295");
+ # disable the "'<<': result of 32-bit shift implicitly converted to 64 bits" warning; our source code violates that
+ push(@cflags, "-wd4334");
+ # disable the "declaration hides previous local declaration" warning; our source code violates that
+ push(@cflags, "-wd4456");
+ # disable the "declaration hides function parameter" warning; our source code violates that
+ push(@cflags, "-wd4457");
+ # disable the "declaration hides global declaration" warning; our source code violates that
+ push(@cflags, "-wd4459");
+ # disable the "potentially uninitialized local variable '<name>' used" warning; our source code violates that
+ push(@cflags, "-wd4701");
+ # disable the "unreachable code" warning; our source code violates that
+ push(@cflags, "-wd4702");
+ # disable the "potentially uninitialized local pointer variable used" warning; our source code violates that
+ push(@cflags, "-wd4703");
+ # disable the "assignment within conditional expression" warning; our source code violates that
+ push(@cflags, "-wd4706");
+ # disable the "'inet_ntoa': Use inet_ntop() or InetNtop() instead" warning; our source code violates that
+ push(@cflags, "-wd4996");
+ } elsif ("$arg" =~ /^-W[a-z]/) {
+ # let's ignore those
} else {
push(@args, $arg);
}
diff --git a/compat/win32/path-utils.h b/compat/win32/path-utils.h
index 0f70d43..8ed062a 100644
--- a/compat/win32/path-utils.h
+++ b/compat/win32/path-utils.h
@@ -1,3 +1,6 @@
+#ifndef WIN32_PATH_UTILS_H
+#define WIN32_PATH_UTILS_H
+
#define has_dos_drive_prefix(path) \
(isalpha(*(path)) && (path)[1] == ':' ? 2 : 0)
int win32_skip_dos_drive_prefix(char **path);
@@ -18,3 +21,5 @@ static inline char *win32_find_last_dir_sep(const char *path)
#define find_last_dir_sep win32_find_last_dir_sep
int win32_offset_1st_component(const char *path);
#define offset_1st_component win32_offset_1st_component
+
+#endif
diff --git a/compat/winansi.c b/compat/winansi.c
index cacd82c..54fd701 100644
--- a/compat/winansi.c
+++ b/compat/winansi.c
@@ -546,7 +546,7 @@ static HANDLE swap_osfhnd(int fd, HANDLE new_handle)
typedef struct _OBJECT_NAME_INFORMATION
{
UNICODE_STRING Name;
- WCHAR NameBuffer[0];
+ WCHAR NameBuffer[FLEX_ARRAY];
} OBJECT_NAME_INFORMATION, *POBJECT_NAME_INFORMATION;
#define ObjectNameInformation 1
diff --git a/config.c b/config.c
index 743e457..e7052b3 100644
--- a/config.c
+++ b/config.c
@@ -1204,7 +1204,7 @@ static int git_default_core_config(const char *var, const char *value, void *cb)
default_abbrev = -1;
else {
int abbrev = git_config_int(var, value);
- if (abbrev < minimum_abbrev || abbrev > 40)
+ if (abbrev < minimum_abbrev || abbrev > the_hash_algo->hexsz)
return error(_("abbrev length out of range: %d"), abbrev);
default_abbrev = abbrev;
}
@@ -1856,9 +1856,9 @@ static struct config_set_element *configset_find_element(struct config_set *cs,
if (git_config_parse_key(key, &normalized_key, NULL))
return NULL;
- hashmap_entry_init(&k, strhash(normalized_key));
+ hashmap_entry_init(&k.ent, strhash(normalized_key));
k.key = normalized_key;
- found_entry = hashmap_get(&cs->config_hash, &k, NULL);
+ found_entry = hashmap_get_entry(&cs->config_hash, &k, ent, NULL);
free(normalized_key);
return found_entry;
}
@@ -1877,10 +1877,10 @@ static int configset_add_value(struct config_set *cs, const char *key, const cha
*/
if (!e) {
e = xmalloc(sizeof(*e));
- hashmap_entry_init(e, strhash(key));
+ hashmap_entry_init(&e->ent, strhash(key));
e->key = xstrdup(key);
string_list_init(&e->value_list, 1);
- hashmap_add(&cs->config_hash, e);
+ hashmap_add(&cs->config_hash, &e->ent);
}
si = string_list_append_nodup(&e->value_list, xstrdup_or_null(value));
@@ -1908,12 +1908,14 @@ static int configset_add_value(struct config_set *cs, const char *key, const cha
}
static int config_set_element_cmp(const void *unused_cmp_data,
- const void *entry,
- const void *entry_or_key,
+ const struct hashmap_entry *eptr,
+ const struct hashmap_entry *entry_or_key,
const void *unused_keydata)
{
- const struct config_set_element *e1 = entry;
- const struct config_set_element *e2 = entry_or_key;
+ const struct config_set_element *e1, *e2;
+
+ e1 = container_of(eptr, const struct config_set_element, ent);
+ e2 = container_of(entry_or_key, const struct config_set_element, ent);
return strcmp(e1->key, e2->key);
}
@@ -1934,12 +1936,12 @@ void git_configset_clear(struct config_set *cs)
if (!cs->hash_initialized)
return;
- hashmap_iter_init(&cs->config_hash, &iter);
- while ((entry = hashmap_iter_next(&iter))) {
+ hashmap_for_each_entry(&cs->config_hash, &iter, entry,
+ ent /* member name */) {
free(entry->key);
string_list_clear(&entry->value_list, 1);
}
- hashmap_free(&cs->config_hash, 1);
+ hashmap_free_entries(&cs->config_hash, struct config_set_element, ent);
cs->hash_initialized = 0;
free(cs->list.items);
cs->list.nr = 0;
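
The config.c changes above follow the reworked hashmap API, in which user structures embed a struct hashmap_entry and pass that member to the map, recovering the surrounding structure with container_of(). A self-contained sketch of the same pattern, with invented struct and function names:

#include "cache.h"
#include "hashmap.h"

struct word_entry {
	struct hashmap_entry ent;	/* must be embedded in the struct */
	char *word;
};

static int word_entry_cmp(const void *unused_cmp_data,
			  const struct hashmap_entry *eptr,
			  const struct hashmap_entry *entry_or_key,
			  const void *unused_keydata)
{
	const struct word_entry *a, *b;

	a = container_of(eptr, const struct word_entry, ent);
	b = container_of(entry_or_key, const struct word_entry, ent);
	return strcmp(a->word, b->word);
}

static void example_hashmap(void)
{
	struct hashmap map;
	struct hashmap_iter iter;
	struct word_entry *e, *found, key;

	hashmap_init(&map, word_entry_cmp, NULL, 0);

	e = xmalloc(sizeof(*e));
	e->word = xstrdup("hello");
	hashmap_entry_init(&e->ent, strhash(e->word));
	hashmap_add(&map, &e->ent);

	key.word = (char *)"hello";
	hashmap_entry_init(&key.ent, strhash(key.word));
	found = hashmap_get_entry(&map, &key, ent, NULL);
	if (found)
		printf("found %s\n", found->word);

	hashmap_for_each_entry(&map, &iter, e, ent /* member name */)
		free(e->word);
	hashmap_free_entries(&map, struct word_entry, ent);
}
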
diff --git a/config.mak.uname b/config.mak.uname
index db7f06b..cc8efd9 100644
--- a/config.mak.uname
+++ b/config.mak.uname
@@ -703,20 +703,24 @@ vcxproj:
perl contrib/buildsystems/generate -g Vcxproj
git add -f git.sln {*,*/lib,t/helper/*}/*.vcxproj
- # Generate the LinkOrCopyBuiltins.targets file
+ # Generate the LinkOrCopyBuiltins.targets and LinkOrCopyRemoteHttp.targets file
(echo '<Project xmlns="http://schemas.microsoft.com/developer/msbuild/2003">' && \
echo ' <Target Name="CopyBuiltins_AfterBuild" AfterTargets="AfterBuild">' && \
for name in $(BUILT_INS);\
do \
echo ' <Copy SourceFiles="$$(OutDir)\git.exe" DestinationFiles="$$(OutDir)\'"$$name"'" SkipUnchangedFiles="true" UseHardlinksIfPossible="true" />'; \
done && \
+ echo ' </Target>' && \
+ echo '</Project>') >git/LinkOrCopyBuiltins.targets
+ (echo '<Project xmlns="http://schemas.microsoft.com/developer/msbuild/2003">' && \
+ echo ' <Target Name="CopyBuiltins_AfterBuild" AfterTargets="AfterBuild">' && \
for name in $(REMOTE_CURL_ALIASES); \
do \
echo ' <Copy SourceFiles="$$(OutDir)\'"$(REMOTE_CURL_PRIMARY)"'" DestinationFiles="$$(OutDir)\'"$$name"'" SkipUnchangedFiles="true" UseHardlinksIfPossible="true" />'; \
done && \
echo ' </Target>' && \
- echo '</Project>') >git/LinkOrCopyBuiltins.targets
- git add -f git/LinkOrCopyBuiltins.targets
+ echo '</Project>') >git-remote-http/LinkOrCopyRemoteHttp.targets
+ git add -f git/LinkOrCopyBuiltins.targets git-remote-http/LinkOrCopyRemoteHttp.targets
# Add command-list.h
$(MAKE) MSVC=1 SKIP_VCPKG=1 prefix=/mingw64 command-list.h
@@ -724,11 +728,10 @@ vcxproj:
# Add scripts
rm -f perl/perl.mak
- $(MAKE) MSVC=1 SKIP_VCPKG=1 prefix=/mingw64 \
- $(SCRIPT_LIB) $(SCRIPT_SH_GEN) $(SCRIPT_PERL_GEN)
+ $(MAKE) MSVC=1 SKIP_VCPKG=1 prefix=/mingw64 $(SCRIPT_LIB) $(SCRIPTS)
# Strip out the sane tool path, needed only for building
sed -i '/^git_broken_path_fix ".*/d' git-sh-setup
- git add -f $(SCRIPT_LIB) $(SCRIPT_SH_GEN) $(SCRIPT_PERL_GEN)
+ git add -f $(SCRIPT_LIB) $(SCRIPTS)
# Add Perl module
$(MAKE) $(LIB_PERL_GEN)
@@ -758,6 +761,10 @@ vcxproj:
$(MAKE) -C templates
git add -f templates/boilerplates.made templates/blt/
+ # Add the translated messages
+ make MSVC=1 SKIP_VCPKG=1 prefix=/mingw64 $(MOFILES)
+ git add -f $(MOFILES)
+
# Add build options
$(MAKE) MSVC=1 SKIP_VCPKG=1 prefix=/mingw64 GIT-BUILD-OPTIONS
git add -f GIT-BUILD-OPTIONS
diff --git a/connected.c b/connected.c
index 971db00..36c4e5d 100644
--- a/connected.c
+++ b/connected.c
@@ -29,6 +29,7 @@ int check_connected(oid_iterate_fn fn, void *cb_data,
struct packed_git *new_pack = NULL;
struct transport *transport;
size_t base_len;
+ const unsigned hexsz = the_hash_algo->hexsz;
if (!opt)
opt = &defaults;
@@ -100,7 +101,7 @@ int check_connected(oid_iterate_fn fn, void *cb_data,
sigchain_push(SIGPIPE, SIG_IGN);
- commit[GIT_SHA1_HEXSZ] = '\n';
+ commit[hexsz] = '\n';
do {
/*
* If index-pack already checked that:
@@ -113,8 +114,8 @@ int check_connected(oid_iterate_fn fn, void *cb_data,
if (new_pack && find_pack_entry_one(oid.hash, new_pack))
continue;
- memcpy(commit, oid_to_hex(&oid), GIT_SHA1_HEXSZ);
- if (write_in_full(rev_list.in, commit, GIT_SHA1_HEXSZ + 1) < 0) {
+ memcpy(commit, oid_to_hex(&oid), hexsz);
+ if (write_in_full(rev_list.in, commit, hexsz + 1) < 0) {
if (errno != EPIPE && errno != EINVAL)
error_errno(_("failed write to rev-list"));
err = -1;
diff --git a/contrib/buildsystems/Generators/Vcxproj.pm b/contrib/buildsystems/Generators/Vcxproj.pm
index 576ccab..5c666f9 100644
--- a/contrib/buildsystems/Generators/Vcxproj.pm
+++ b/contrib/buildsystems/Generators/Vcxproj.pm
@@ -79,7 +79,8 @@ sub createProject {
if (!$static_library) {
$libs_release = join(";", sort(grep /^(?!libgit\.lib|xdiff\/lib\.lib|vcs-svn\/lib\.lib)/, @{$$build_structure{"$prefix${name}_LIBS"}}));
$libs_debug = $libs_release;
- $libs_debug =~ s/zlib\.lib/zlibd\.lib/;
+ $libs_debug =~ s/zlib\.lib/zlibd\.lib/g;
+ $libs_debug =~ s/libcurl\.lib/libcurl-d\.lib/g;
}
$defines =~ s/-D//g;
@@ -119,13 +120,13 @@ sub createProject {
<VCPKGArch Condition="'\$(Platform)'=='Win32'">x86-windows</VCPKGArch>
<VCPKGArch Condition="'\$(Platform)'!='Win32'">x64-windows</VCPKGArch>
<VCPKGArchDirectory>$cdup\\compat\\vcbuild\\vcpkg\\installed\\\$(VCPKGArch)</VCPKGArchDirectory>
- <VCPKGBinDirectory Condition="'\(Configuration)'=='Debug'">\$(VCPKGArchDirectory)\\debug\\bin</VCPKGBinDirectory>
- <VCPKGLibDirectory Condition="'\(Configuration)'=='Debug'">\$(VCPKGArchDirectory)\\debug\\lib</VCPKGLibDirectory>
- <VCPKGBinDirectory Condition="'\(Configuration)'!='Debug'">\$(VCPKGArchDirectory)\\bin</VCPKGBinDirectory>
- <VCPKGLibDirectory Condition="'\(Configuration)'!='Debug'">\$(VCPKGArchDirectory)\\lib</VCPKGLibDirectory>
+ <VCPKGBinDirectory Condition="'\$(Configuration)'=='Debug'">\$(VCPKGArchDirectory)\\debug\\bin</VCPKGBinDirectory>
+ <VCPKGLibDirectory Condition="'\$(Configuration)'=='Debug'">\$(VCPKGArchDirectory)\\debug\\lib</VCPKGLibDirectory>
+ <VCPKGBinDirectory Condition="'\$(Configuration)'!='Debug'">\$(VCPKGArchDirectory)\\bin</VCPKGBinDirectory>
+ <VCPKGLibDirectory Condition="'\$(Configuration)'!='Debug'">\$(VCPKGArchDirectory)\\lib</VCPKGLibDirectory>
<VCPKGIncludeDirectory>\$(VCPKGArchDirectory)\\include</VCPKGIncludeDirectory>
- <VCPKGLibs Condition="'\(Configuration)'=='Debug'">$libs_debug</VCPKGLibs>
- <VCPKGLibs Condition="'\(Configuration)'!='Debug'">$libs_release</VCPKGLibs>
+ <VCPKGLibs Condition="'\$(Configuration)'=='Debug'">$libs_debug</VCPKGLibs>
+ <VCPKGLibs Condition="'\$(Configuration)'!='Debug'">$libs_release</VCPKGLibs>
</PropertyGroup>
<Import Project="\$(VCTargetsPath)\\Microsoft.Cpp.Default.props" />
<PropertyGroup Condition="'\$(Configuration)'=='Debug'" Label="Configuration">
@@ -277,6 +278,9 @@ EOM
if ($target eq 'git') {
print F " <Import Project=\"LinkOrCopyBuiltins.targets\" />\n";
}
+ if ($target eq 'git-remote-http') {
+ print F " <Import Project=\"LinkOrCopyRemoteHttp.targets\" />\n";
+ }
print F << "EOM";
</Project>
EOM
diff --git a/contrib/coccinelle/hashmap.cocci b/contrib/coccinelle/hashmap.cocci
new file mode 100644
index 0000000..d69e120
--- /dev/null
+++ b/contrib/coccinelle/hashmap.cocci
@@ -0,0 +1,16 @@
+@ hashmap_entry_init_usage @
+expression E;
+struct hashmap_entry HME;
+@@
+- HME.hash = E;
++ hashmap_entry_init(&HME, E);
+
+@@
+identifier f !~ "^hashmap_entry_init$";
+expression E;
+struct hashmap_entry *HMEP;
+@@
+ f(...) {<...
+- HMEP->hash = E;
++ hashmap_entry_init(HMEP, E);
+ ...>}
diff --git a/contrib/completion/git-completion.bash b/contrib/completion/git-completion.bash
index 59cd3e8..00fbe6c 100644
--- a/contrib/completion/git-completion.bash
+++ b/contrib/completion/git-completion.bash
@@ -1250,10 +1250,7 @@ _git_archive ()
return
;;
--*)
- __gitcomp "
- --format= --list --verbose
- --prefix= --remote= --exec= --output
- "
+ __gitcomp_builtin archive "--format= --list --verbose --prefix= --worktree-attributes"
return
;;
esac
@@ -1489,6 +1486,8 @@ __git_diff_common_options="--stat --numstat --shortstat --summary
--dirstat-by-file= --cumulative
--diff-algorithm=
--submodule --submodule= --ignore-submodules
+ --indent-heuristic --no-indent-heuristic
+ --textconv --no-textconv
"
_git_diff ()
@@ -1797,6 +1796,10 @@ _git_log ()
__gitcomp "$__git_diff_submodule_formats" "" "${cur##--submodule=}"
return
;;
+ --no-walk=*)
+ __gitcomp "sorted unsorted" "" "${cur##--no-walk=}"
+ return
+ ;;
--*)
__gitcomp "
$__git_log_common_options
@@ -1804,16 +1807,19 @@ _git_log ()
$__git_log_gitk_options
--root --topo-order --date-order --reverse
--follow --full-diff
- --abbrev-commit --abbrev=
+ --abbrev-commit --no-abbrev-commit --abbrev=
--relative-date --date=
--pretty= --format= --oneline
--show-signature
--cherry-mark
--cherry-pick
--graph
- --decorate --decorate=
+ --decorate --decorate= --no-decorate
--walk-reflogs
+ --no-walk --no-walk= --do-walk
--parents --children
+ --expand-tabs --expand-tabs= --no-expand-tabs
+ --patch
$merge
$__git_diff_common_options
--pickaxe-all --pickaxe-regex
@@ -2017,15 +2023,18 @@ _git_range_diff ()
__git_complete_revlist
}
+__git_rebase_inprogress_options="--continue --skip --abort --quit --show-current-patch"
+__git_rebase_interactive_inprogress_options="$__git_rebase_inprogress_options --edit-todo"
+
_git_rebase ()
{
__git_find_repo_path
if [ -f "$__git_repo_path"/rebase-merge/interactive ]; then
- __gitcomp "--continue --skip --abort --quit --edit-todo --show-current-patch"
+ __gitcomp "$__git_rebase_interactive_inprogress_options"
return
elif [ -d "$__git_repo_path"/rebase-apply ] || \
[ -d "$__git_repo_path"/rebase-merge ]; then
- __gitcomp "--continue --skip --abort --quit --show-current-patch"
+ __gitcomp "$__git_rebase_inprogress_options"
return
fi
__git_complete_strategy && return
@@ -2035,19 +2044,8 @@ _git_rebase ()
return
;;
--*)
- __gitcomp "
- --onto --merge --strategy --interactive
- --rebase-merges --preserve-merges --stat --no-stat
- --committer-date-is-author-date --ignore-date
- --ignore-whitespace --whitespace=
- --autosquash --no-autosquash
- --fork-point --no-fork-point
- --autostash --no-autostash
- --verify --no-verify --keep-base
- --keep-empty --root --force-rebase --no-ff
- --rerere-autoupdate
- --exec
- "
+ __gitcomp_builtin rebase "" \
+ "$__git_rebase_interactive_inprogress_options"
return
esac
@@ -2694,8 +2692,9 @@ _git_show ()
return
;;
--*)
- __gitcomp "--pretty= --format= --abbrev-commit --oneline
- --show-signature
+ __gitcomp "--pretty= --format= --abbrev-commit --no-abbrev-commit
+ --oneline --show-signature --patch
+ --expand-tabs --expand-tabs= --no-expand-tabs
$__git_diff_common_options
"
return
diff --git a/contrib/hg-to-git/hg-to-git.py b/contrib/hg-to-git/hg-to-git.py
index de3f816..7eb1b24 100755
--- a/contrib/hg-to-git/hg-to-git.py
+++ b/contrib/hg-to-git/hg-to-git.py
@@ -42,7 +42,7 @@ hgnewcsets = 0
def usage():
- print """\
+ print("""\
%s: [OPTIONS] <hgprj>
options:
@@ -54,7 +54,7 @@ options:
required:
hgprj: name of the HG project to import (directory)
-""" % sys.argv[0]
+""" % sys.argv[0])
#------------------------------------------------------------------------------
@@ -104,22 +104,22 @@ os.chdir(hgprj)
if state:
if os.path.exists(state):
if verbose:
- print 'State does exist, reading'
+ print('State does exist, reading')
f = open(state, 'r')
hgvers = pickle.load(f)
else:
- print 'State does not exist, first run'
+ print('State does not exist, first run')
sock = os.popen('hg tip --template "{rev}"')
tip = sock.read()
if sock.close():
sys.exit(1)
if verbose:
- print 'tip is', tip
+ print('tip is', tip)
# Calculate the branches
if verbose:
- print 'analysing the branches...'
+ print('analysing the branches...')
hgchildren["0"] = ()
hgparents["0"] = (None, None)
hgbranch["0"] = "master"
@@ -154,15 +154,15 @@ for cset in range(1, int(tip) + 1):
else:
hgbranch[str(cset)] = "branch-" + str(cset)
-if not hgvers.has_key("0"):
- print 'creating repository'
+if "0" not in hgvers:
+ print('creating repository')
os.system('git init')
# loop through every hg changeset
for cset in range(int(tip) + 1):
# incremental, already seen
- if hgvers.has_key(str(cset)):
+ if str(cset) in hgvers:
continue
hgnewcsets += 1
@@ -180,27 +180,27 @@ for cset in range(int(tip) + 1):
os.write(fdcomment, csetcomment)
os.close(fdcomment)
- print '-----------------------------------------'
- print 'cset:', cset
- print 'branch:', hgbranch[str(cset)]
- print 'user:', user
- print 'date:', date
- print 'comment:', csetcomment
+ print('-----------------------------------------')
+ print('cset:', cset)
+ print('branch:', hgbranch[str(cset)])
+ print('user:', user)
+ print('date:', date)
+ print('comment:', csetcomment)
if parent:
- print 'parent:', parent
+ print('parent:', parent)
if mparent:
- print 'mparent:', mparent
+ print('mparent:', mparent)
if tag:
- print 'tag:', tag
- print '-----------------------------------------'
+ print('tag:', tag)
+ print('-----------------------------------------')
# checkout the parent if necessary
if cset != 0:
if hgbranch[str(cset)] == "branch-" + str(cset):
- print 'creating new branch', hgbranch[str(cset)]
+ print('creating new branch', hgbranch[str(cset)])
os.system('git checkout -b %s %s' % (hgbranch[str(cset)], hgvers[parent]))
else:
- print 'checking out branch', hgbranch[str(cset)]
+ print('checking out branch', hgbranch[str(cset)])
os.system('git checkout %s' % hgbranch[str(cset)])
# merge
@@ -209,7 +209,7 @@ for cset in range(int(tip) + 1):
otherbranch = hgbranch[mparent]
else:
otherbranch = hgbranch[parent]
- print 'merging', otherbranch, 'into', hgbranch[str(cset)]
+ print('merging', otherbranch, 'into', hgbranch[str(cset)])
os.system(getgitenv(user, date) + 'git merge --no-commit -s ours "" %s %s' % (hgbranch[str(cset)], otherbranch))
# remove everything except .git and .hg directories
@@ -233,12 +233,12 @@ for cset in range(int(tip) + 1):
# delete branch if not used anymore...
if mparent and len(hgchildren[str(cset)]):
- print "Deleting unused branch:", otherbranch
+ print("Deleting unused branch:", otherbranch)
os.system('git branch -d %s' % otherbranch)
# retrieve and record the version
vvv = os.popen('git show --quiet --pretty=format:%H').read()
- print 'record', cset, '->', vvv
+ print('record', cset, '->', vvv)
hgvers[str(cset)] = vvv
if hgnewcsets >= opt_nrepack and opt_nrepack != -1:
@@ -247,7 +247,7 @@ if hgnewcsets >= opt_nrepack and opt_nrepack != -1:
# write the state for incrementals
if state:
if verbose:
- print 'Writing state'
+ print('Writing state')
f = open(state, 'w')
pickle.dump(hgvers, f)
diff --git a/contrib/svn-fe/svnrdump_sim.py b/contrib/svn-fe/svnrdump_sim.py
index 11ac6f6..50c6a4f 100755
--- a/contrib/svn-fe/svnrdump_sim.py
+++ b/contrib/svn-fe/svnrdump_sim.py
@@ -1,4 +1,4 @@
-#!/usr/bin/python
+#!/usr/bin/env python
"""
Simulates svnrdump by replaying an existing dump from a file, taking care
of the specified revision range.
diff --git a/convert.c b/convert.c
index deb6f71..25ac525 100644
--- a/convert.c
+++ b/convert.c
@@ -290,8 +290,8 @@ static int validate_encoding(const char *path, const char *enc,
const char *stripped = NULL;
char *upper = xstrdup_toupper(enc);
upper[strlen(upper)-2] = '\0';
- if (!skip_prefix(upper, "UTF-", &stripped))
- skip_prefix(stripped, "UTF", &stripped);
+ if (skip_prefix(upper, "UTF", &stripped))
+ skip_prefix(stripped, "-", &stripped);
advise(advise_msg, path, stripped);
free(upper);
if (die_on_error)
@@ -310,8 +310,8 @@ static int validate_encoding(const char *path, const char *enc,
"working-tree-encoding.");
const char *stripped = NULL;
char *upper = xstrdup_toupper(enc);
- if (!skip_prefix(upper, "UTF-", &stripped))
- skip_prefix(stripped, "UTF", &stripped);
+ if (skip_prefix(upper, "UTF", &stripped))
+ skip_prefix(stripped, "-", &stripped);
advise(advise_msg, path, stripped, stripped);
free(upper);
if (die_on_error)
diff --git a/date.c b/date.c
index 8126146..041db7d 100644
--- a/date.c
+++ b/date.c
@@ -128,16 +128,17 @@ static void get_time(struct timeval *now)
gettimeofday(now, NULL);
}
-void show_date_relative(timestamp_t time,
- const struct timeval *now,
- struct strbuf *timebuf)
+void show_date_relative(timestamp_t time, struct strbuf *timebuf)
{
+ struct timeval now;
timestamp_t diff;
- if (now->tv_sec < time) {
+
+ get_time(&now);
+ if (now.tv_sec < time) {
strbuf_addstr(timebuf, _("in the future"));
return;
}
- diff = now->tv_sec - time;
+ diff = now.tv_sec - time;
if (diff < 90) {
strbuf_addf(timebuf,
Q_("%"PRItime" second ago", "%"PRItime" seconds ago", diff), diff);
@@ -240,9 +241,7 @@ static void show_date_normal(struct strbuf *buf, timestamp_t time, struct tm *tm
/* Show "today" times as just relative times */
if (hide.wday) {
- struct timeval now;
- get_time(&now);
- show_date_relative(time, &now, buf);
+ show_date_relative(time, buf);
return;
}
@@ -313,11 +312,8 @@ const char *show_date(timestamp_t time, int tz, const struct date_mode *mode)
}
if (mode->type == DATE_RELATIVE) {
- struct timeval now;
-
strbuf_reset(&timebuf);
- get_time(&now);
- show_date_relative(time, &now, &timebuf);
+ show_date_relative(time, &timebuf);
return timebuf.buf;
}
@@ -1288,15 +1284,18 @@ static timestamp_t approxidate_str(const char *date,
return (timestamp_t)update_tm(&tm, &now, 0);
}
-timestamp_t approxidate_relative(const char *date, const struct timeval *tv)
+timestamp_t approxidate_relative(const char *date)
{
+ struct timeval tv;
timestamp_t timestamp;
int offset;
int errors = 0;
if (!parse_date_basic(date, &timestamp, &offset))
return timestamp;
- return approxidate_str(date, tv, &errors);
+
+ get_time(&tv);
+ return approxidate_str(date, (const struct timeval *) &tv, &errors);
}
timestamp_t approxidate_careful(const char *date, int *error_ret)
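
With the date.c changes above, the relative-date helpers obtain the current time themselves; a sketch (function name invented) of a caller after the conversion:

#include "cache.h"

static void example_print_relative(timestamp_t t)
{
	struct strbuf buf = STRBUF_INIT;

	show_date_relative(t, &buf);	/* e.g. "3 days ago" */
	fprintf(stderr, "%s\n", buf.buf);
	strbuf_release(&buf);

	/* approxidate_relative() likewise dropped its timeval argument */
	fprintf(stderr, "%"PRItime"\n", approxidate_relative("2 weeks ago"));
}
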
diff --git a/diff.c b/diff.c
index 6db6927..afe4400 100644
--- a/diff.c
+++ b/diff.c
@@ -933,16 +933,18 @@ static int cmp_in_block_with_wsd(const struct diff_options *o,
}
static int moved_entry_cmp(const void *hashmap_cmp_fn_data,
- const void *entry,
- const void *entry_or_key,
+ const struct hashmap_entry *eptr,
+ const struct hashmap_entry *entry_or_key,
const void *keydata)
{
const struct diff_options *diffopt = hashmap_cmp_fn_data;
- const struct moved_entry *a = entry;
- const struct moved_entry *b = entry_or_key;
+ const struct moved_entry *a, *b;
unsigned flags = diffopt->color_moved_ws_handling
& XDF_WHITESPACE_FLAGS;
+ a = container_of(eptr, const struct moved_entry, ent);
+ b = container_of(entry_or_key, const struct moved_entry, ent);
+
if (diffopt->color_moved_ws_handling &
COLOR_MOVED_WS_ALLOW_INDENTATION_CHANGE)
/*
@@ -964,8 +966,9 @@ static struct moved_entry *prepare_entry(struct diff_options *o,
struct moved_entry *ret = xmalloc(sizeof(*ret));
struct emitted_diff_symbol *l = &o->emitted_symbols->buf[line_no];
unsigned flags = o->color_moved_ws_handling & XDF_WHITESPACE_FLAGS;
+ unsigned int hash = xdiff_hash_string(l->line, l->len, flags);
- ret->ent.hash = xdiff_hash_string(l->line, l->len, flags);
+ hashmap_entry_init(&ret->ent, hash);
ret->es = l;
ret->next_line = NULL;
@@ -1002,7 +1005,7 @@ static void add_lines_to_move_detection(struct diff_options *o,
if (prev_line && prev_line->es->s == o->emitted_symbols->buf[n].s)
prev_line->next_line = key;
- hashmap_add(hm, key);
+ hashmap_add(hm, &key->ent);
prev_line = key;
}
}
@@ -1018,7 +1021,7 @@ static void pmb_advance_or_null(struct diff_options *o,
struct moved_entry *prev = pmb[i].match;
struct moved_entry *cur = (prev && prev->next_line) ?
prev->next_line : NULL;
- if (cur && !hm->cmpfn(o, cur, match, NULL)) {
+ if (cur && !hm->cmpfn(o, &cur->ent, &match->ent, NULL)) {
pmb[i].match = cur;
} else {
pmb[i].match = NULL;
@@ -1035,7 +1038,7 @@ static void pmb_advance_or_null_multi_match(struct diff_options *o,
int i;
char *got_match = xcalloc(1, pmb_nr);
- for (; match; match = hashmap_get_next(hm, match)) {
+ hashmap_for_each_entry_from(hm, match, ent) {
for (i = 0; i < pmb_nr; i++) {
struct moved_entry *prev = pmb[i].match;
struct moved_entry *cur = (prev && prev->next_line) ?
@@ -1143,13 +1146,13 @@ static void mark_color_as_moved(struct diff_options *o,
case DIFF_SYMBOL_PLUS:
hm = del_lines;
key = prepare_entry(o, n);
- match = hashmap_get(hm, key, NULL);
+ match = hashmap_get_entry(hm, key, ent, NULL);
free(key);
break;
case DIFF_SYMBOL_MINUS:
hm = add_lines;
key = prepare_entry(o, n);
- match = hashmap_get(hm, key, NULL);
+ match = hashmap_get_entry(hm, key, ent, NULL);
free(key);
break;
default:
@@ -1188,7 +1191,7 @@ static void mark_color_as_moved(struct diff_options *o,
* The current line is the start of a new block.
* Setup the set of potential blocks.
*/
- for (; match; match = hashmap_get_next(hm, match)) {
+ hashmap_for_each_entry_from(hm, match, ent) {
ALLOC_GROW(pmb, pmb_nr + 1, pmb_alloc);
if (o->color_moved_ws_handling &
COLOR_MOVED_WS_ALLOW_INDENTATION_CHANGE) {
@@ -5978,7 +5981,7 @@ static void diff_summary(struct diff_options *opt, struct diff_filepair *p)
}
struct patch_id_t {
- git_SHA_CTX *ctx;
+ git_hash_ctx *ctx;
int patchlen;
};
@@ -5995,16 +5998,16 @@ static int remove_space(char *line, int len)
return dst - line;
}
-void flush_one_hunk(struct object_id *result, git_SHA_CTX *ctx)
+void flush_one_hunk(struct object_id *result, git_hash_ctx *ctx)
{
unsigned char hash[GIT_MAX_RAWSZ];
unsigned short carry = 0;
int i;
- git_SHA1_Final(hash, ctx);
- git_SHA1_Init(ctx);
+ the_hash_algo->final_fn(hash, ctx);
+ the_hash_algo->init_fn(ctx);
/* 20-byte sum, with carry */
- for (i = 0; i < GIT_SHA1_RAWSZ; ++i) {
+ for (i = 0; i < the_hash_algo->rawsz; ++i) {
carry += result->hash[i] + hash[i];
result->hash[i] = carry;
carry >>= 8;
@@ -6018,21 +6021,21 @@ static void patch_id_consume(void *priv, char *line, unsigned long len)
new_len = remove_space(line, len);
- git_SHA1_Update(data->ctx, line, new_len);
+ the_hash_algo->update_fn(data->ctx, line, new_len);
data->patchlen += new_len;
}
-static void patch_id_add_string(git_SHA_CTX *ctx, const char *str)
+static void patch_id_add_string(git_hash_ctx *ctx, const char *str)
{
- git_SHA1_Update(ctx, str, strlen(str));
+ the_hash_algo->update_fn(ctx, str, strlen(str));
}
-static void patch_id_add_mode(git_SHA_CTX *ctx, unsigned mode)
+static void patch_id_add_mode(git_hash_ctx *ctx, unsigned mode)
{
/* large enough for 2^32 in octal */
char buf[12];
int len = xsnprintf(buf, sizeof(buf), "%06o", mode);
- git_SHA1_Update(ctx, buf, len);
+ the_hash_algo->update_fn(ctx, buf, len);
}
/* returns 0 upon success, and writes result into oid */
@@ -6040,10 +6043,10 @@ static int diff_get_patch_id(struct diff_options *options, struct object_id *oid
{
struct diff_queue_struct *q = &diff_queued_diff;
int i;
- git_SHA_CTX ctx;
+ git_hash_ctx ctx;
struct patch_id_t data;
- git_SHA1_Init(&ctx);
+ the_hash_algo->init_fn(&ctx);
memset(&data, 0, sizeof(struct patch_id_t));
data.ctx = &ctx;
oidclr(oid);
@@ -6076,27 +6079,27 @@ static int diff_get_patch_id(struct diff_options *options, struct object_id *oid
len2 = remove_space(p->two->path, strlen(p->two->path));
patch_id_add_string(&ctx, "diff--git");
patch_id_add_string(&ctx, "a/");
- git_SHA1_Update(&ctx, p->one->path, len1);
+ the_hash_algo->update_fn(&ctx, p->one->path, len1);
patch_id_add_string(&ctx, "b/");
- git_SHA1_Update(&ctx, p->two->path, len2);
+ the_hash_algo->update_fn(&ctx, p->two->path, len2);
if (p->one->mode == 0) {
patch_id_add_string(&ctx, "newfilemode");
patch_id_add_mode(&ctx, p->two->mode);
patch_id_add_string(&ctx, "---/dev/null");
patch_id_add_string(&ctx, "+++b/");
- git_SHA1_Update(&ctx, p->two->path, len2);
+ the_hash_algo->update_fn(&ctx, p->two->path, len2);
} else if (p->two->mode == 0) {
patch_id_add_string(&ctx, "deletedfilemode");
patch_id_add_mode(&ctx, p->one->mode);
patch_id_add_string(&ctx, "---a/");
- git_SHA1_Update(&ctx, p->one->path, len1);
+ the_hash_algo->update_fn(&ctx, p->one->path, len1);
patch_id_add_string(&ctx, "+++/dev/null");
} else {
patch_id_add_string(&ctx, "---a/");
- git_SHA1_Update(&ctx, p->one->path, len1);
+ the_hash_algo->update_fn(&ctx, p->one->path, len1);
patch_id_add_string(&ctx, "+++b/");
- git_SHA1_Update(&ctx, p->two->path, len2);
+ the_hash_algo->update_fn(&ctx, p->two->path, len2);
}
if (diff_header_only)
@@ -6108,10 +6111,10 @@ static int diff_get_patch_id(struct diff_options *options, struct object_id *oid
if (diff_filespec_is_binary(options->repo, p->one) ||
diff_filespec_is_binary(options->repo, p->two)) {
- git_SHA1_Update(&ctx, oid_to_hex(&p->one->oid),
- GIT_SHA1_HEXSZ);
- git_SHA1_Update(&ctx, oid_to_hex(&p->two->oid),
- GIT_SHA1_HEXSZ);
+ the_hash_algo->update_fn(&ctx, oid_to_hex(&p->one->oid),
+ the_hash_algo->hexsz);
+ the_hash_algo->update_fn(&ctx, oid_to_hex(&p->two->oid),
+ the_hash_algo->hexsz);
continue;
}
@@ -6128,7 +6131,7 @@ static int diff_get_patch_id(struct diff_options *options, struct object_id *oid
}
if (!stable)
- git_SHA1_Final(oid->hash, &ctx);
+ the_hash_algo->final_fn(oid->hash, &ctx);
return 0;
}
@@ -6230,8 +6233,10 @@ static void diff_flush_patch_all_file_pairs(struct diff_options *o)
if (o->color_moved == COLOR_MOVED_ZEBRA_DIM)
dim_moved_lines(o);
- hashmap_free(&add_lines, 1);
- hashmap_free(&del_lines, 1);
+ hashmap_free_entries(&add_lines, struct moved_entry,
+ ent);
+ hashmap_free_entries(&del_lines, struct moved_entry,
+ ent);
}
for (i = 0; i < esm.nr; i++)
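
The patch-id hunks above replace the SHA-1-only git_SHA1_*() calls with the hash-algorithm vtable; a sketch (function name invented) of the generic pattern:

#include "cache.h"

static void example_hash_buffer(const char *buf, size_t len,
				struct object_id *oid)
{
	git_hash_ctx ctx;

	the_hash_algo->init_fn(&ctx);
	the_hash_algo->update_fn(&ctx, buf, len);
	the_hash_algo->final_fn(oid->hash, &ctx);	/* rawsz bytes written */
}
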
diff --git a/diff.h b/diff.h
index c2c3056..7f8f024 100644
--- a/diff.h
+++ b/diff.h
@@ -438,7 +438,7 @@ int run_diff_index(struct rev_info *revs, int cached);
int do_diff_cache(const struct object_id *, struct diff_options *);
int diff_flush_patch_id(struct diff_options *, struct object_id *, int, int);
-void flush_one_hunk(struct object_id *, git_SHA_CTX *);
+void flush_one_hunk(struct object_id *result, git_hash_ctx *ctx);
int diff_result_code(struct diff_options *, int);
diff --git a/diffcore-break.c b/diffcore-break.c
index 875aefd..9d20a6a 100644
--- a/diffcore-break.c
+++ b/diffcore-break.c
@@ -286,17 +286,17 @@ void diffcore_merge_broken(void)
/* Peer survived. Merge them */
merge_broken(p, pp, &outq);
q->queue[j] = NULL;
- break;
+ goto next;
}
}
- if (q->nr <= j)
- /* The peer did not survive, so we keep
- * it in the output.
- */
- diff_q(&outq, p);
+ /* The peer did not survive, so we keep
+ * it in the output.
+ */
+ diff_q(&outq, p);
}
else
diff_q(&outq, p);
+next:;
}
free(q->queue);
*q = outq;
diff --git a/diffcore-rename.c b/diffcore-rename.c
index 9624864..531d7ad 100644
--- a/diffcore-rename.c
+++ b/diffcore-rename.c
@@ -274,18 +274,17 @@ static int find_identical_files(struct hashmap *srcs,
struct diff_options *options)
{
int renames = 0;
-
struct diff_filespec *target = rename_dst[dst_index].two;
struct file_similarity *p, *best = NULL;
int i = 100, best_score = -1;
+ unsigned int hash = hash_filespec(options->repo, target);
/*
* Find the best source match for specified destination.
*/
- p = hashmap_get_from_hash(srcs,
- hash_filespec(options->repo, target),
- NULL);
- for (; p; p = hashmap_get_next(srcs, p)) {
+ p = hashmap_get_entry_from_hash(srcs, hash, NULL,
+ struct file_similarity, entry);
+ hashmap_for_each_entry_from(srcs, p, entry) {
int score;
struct diff_filespec *source = p->filespec;
@@ -329,8 +328,8 @@ static void insert_file_table(struct repository *r,
entry->index = index;
entry->filespec = filespec;
- hashmap_entry_init(entry, hash_filespec(r, filespec));
- hashmap_add(table, entry);
+ hashmap_entry_init(&entry->entry, hash_filespec(r, filespec));
+ hashmap_add(table, &entry->entry);
}
/*
@@ -359,7 +358,7 @@ static int find_exact_renames(struct diff_options *options)
renames += find_identical_files(&file_table, i, options);
/* Free the hash data structure and entries */
- hashmap_free(&file_table, 1);
+ hashmap_free_entries(&file_table, struct file_similarity, entry);
return renames;
}
@@ -585,7 +584,7 @@ void diffcore_rename(struct diff_options *options)
stop_progress(&progress);
/* cost matrix sorted by most to least similar pair */
- QSORT(mx, dst_cnt * NUM_CANDIDATE_PER_DST, score_compare);
+ STABLE_QSORT(mx, dst_cnt * NUM_CANDIDATE_PER_DST, score_compare);
rename_count += find_renames(mx, dst_cnt, minimum_score, 0);
if (detect_rename == DIFF_DETECT_COPY)
diff --git a/dir.c b/dir.c
index cab9c2a..61f559f9 100644
--- a/dir.c
+++ b/dir.c
@@ -139,7 +139,7 @@ static size_t common_prefix_len(const struct pathspec *pathspec)
* ":(icase)path" is treated as a pathspec full of
* wildcard. In other words, only prefix is considered common
* prefix. If the pathspec is abc/foo abc/bar, running in
- * subdir xyz, the common prefix is still xyz, not xuz/abc as
+ * subdir xyz, the common prefix is still xyz, not xyz/abc as
* in non-:(icase).
*/
GUARD_PATHSPEC(pathspec,
@@ -273,19 +273,30 @@ static int do_read_blob(const struct object_id *oid, struct oid_stat *oid_stat,
#define DO_MATCH_EXCLUDE (1<<0)
#define DO_MATCH_DIRECTORY (1<<1)
-#define DO_MATCH_SUBMODULE (1<<2)
+#define DO_MATCH_LEADING_PATHSPEC (1<<2)
/*
- * Does 'match' match the given name?
- * A match is found if
+ * Does the given pathspec match the given name? A match is found if
*
- * (1) the 'match' string is leading directory of 'name', or
- * (2) the 'match' string is a wildcard and matches 'name', or
- * (3) the 'match' string is exactly the same as 'name'.
+ * (1) the pathspec string is leading directory of 'name' ("RECURSIVELY"), or
+ * (2) the pathspec string has a leading part matching 'name' ("LEADING"), or
+ * (3) the pathspec string is a wildcard and matches 'name' ("WILDCARD"), or
+ * (4) the pathspec string is exactly the same as 'name' ("EXACT").
*
- * and the return value tells which case it was.
+ * Return value tells which case it was (1-4), or 0 when there is no match.
*
- * It returns 0 when there is no match.
+ * It may be instructive to look at a small table of concrete examples
+ * to understand the differences between 1, 2, and 4:
+ *
+ * Pathspecs
+ * | a/b | a/b/ | a/b/c
+ * ------+-----------+-----------+------------
+ * a/b | EXACT | EXACT[1] | LEADING[2]
+ * Names a/b/ | RECURSIVE | EXACT | LEADING[2]
+ * a/b/c | RECURSIVE | RECURSIVE | EXACT
+ *
+ * [1] Only if DO_MATCH_DIRECTORY is passed; otherwise, this is NOT a match.
+ * [2] Only if DO_MATCH_LEADING_PATHSPEC is passed; otherwise, not a match.
*/
static int match_pathspec_item(const struct index_state *istate,
const struct pathspec_item *item, int prefix,
@@ -353,13 +364,14 @@ static int match_pathspec_item(const struct index_state *istate,
item->nowildcard_len - prefix))
return MATCHED_FNMATCH;
- /* Perform checks to see if "name" is a super set of the pathspec */
- if (flags & DO_MATCH_SUBMODULE) {
+ /* Perform checks to see if "name" is a leading string of the pathspec */
+ if (flags & DO_MATCH_LEADING_PATHSPEC) {
/* name is a literal prefix of the pathspec */
+ int offset = name[namelen-1] == '/' ? 1 : 0;
if ((namelen < matchlen) &&
- (match[namelen] == '/') &&
+ (match[namelen-offset] == '/') &&
!ps_strncmp(item, match, name, namelen))
- return MATCHED_RECURSIVELY;
+ return MATCHED_RECURSIVELY_LEADING_PATHSPEC;
/* name" doesn't match up to the first wild character */
if (item->nowildcard_len < item->len &&
@@ -376,7 +388,7 @@ static int match_pathspec_item(const struct index_state *istate,
* The submodules themselves will be able to perform more
* accurate matching to determine if the pathspec matches.
*/
- return MATCHED_RECURSIVELY;
+ return MATCHED_RECURSIVELY_LEADING_PATHSPEC;
}
return 0;
@@ -497,7 +509,7 @@ int submodule_path_match(const struct index_state *istate,
strlen(submodule_name),
0, seen,
DO_MATCH_DIRECTORY |
- DO_MATCH_SUBMODULE);
+ DO_MATCH_LEADING_PATHSPEC);
return matched;
}
@@ -1451,6 +1463,16 @@ static enum path_treatment treat_directory(struct dir_struct *dir,
return path_none;
case index_nonexistent:
+ if (dir->flags & DIR_SKIP_NESTED_GIT) {
+ int nested_repo;
+ struct strbuf sb = STRBUF_INIT;
+ strbuf_addstr(&sb, dirname);
+ nested_repo = is_nonbare_repository_dir(&sb);
+ strbuf_release(&sb);
+ if (nested_repo)
+ return path_none;
+ }
+
if (dir->flags & DIR_SHOW_OTHER_DIRECTORIES)
break;
if (exclude &&
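With the new DIR_SKIP_NESTED_GIT flag set, an untracked directory that turns out to be a non-bare repository of its own is reported as path_none, i.e. dropped from the listing entirely. A minimal sketch of how a caller might request this before a directory walk; the repository and pathspec are assumed to be set up elsewhere, and which builtins actually set the flag is outside this hunk:

    struct dir_struct dir;

    memset(&dir, 0, sizeof(dir));
    dir.flags = DIR_SHOW_OTHER_DIRECTORIES | DIR_SKIP_NESTED_GIT;
    /* nested repositories are now skipped rather than listed as untracked */
    fill_directory(&dir, the_repository->index, &pathspec);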
@@ -1950,8 +1972,11 @@ static enum path_treatment read_directory_recursive(struct dir_struct *dir,
/* recurse into subdir if instructed by treat_path */
if ((state == path_recurse) ||
((state == path_untracked) &&
- (dir->flags & DIR_SHOW_IGNORED_TOO) &&
- (get_dtype(cdir.de, istate, path.buf, path.len) == DT_DIR))) {
+ (get_dtype(cdir.de, istate, path.buf, path.len) == DT_DIR) &&
+ ((dir->flags & DIR_SHOW_IGNORED_TOO) ||
+ (pathspec &&
+ do_match_pathspec(istate, pathspec, path.buf, path.len,
+ baselen, NULL, DO_MATCH_LEADING_PATHSPEC) == MATCHED_RECURSIVELY_LEADING_PATHSPEC)))) {
struct untracked_cache_dir *ud;
ud = lookup_untracked(dir->untracked, untracked,
path.buf + baselen,
@@ -1962,6 +1987,12 @@ static enum path_treatment read_directory_recursive(struct dir_struct *dir,
check_only, stop_at_first_file, pathspec);
if (subdir_state > dir_state)
dir_state = subdir_state;
+
+ if (pathspec &&
+ !match_pathspec(istate, pathspec, path.buf, path.len,
+ 0 /* prefix */, NULL,
+ 0 /* do NOT special case dirs */))
+ state = path_none;
}
if (check_only) {
diff --git a/dir.h b/dir.h
index 608696c..2fbdef0 100644
--- a/dir.h
+++ b/dir.h
@@ -156,7 +156,8 @@ struct dir_struct {
DIR_SHOW_IGNORED_TOO = 1<<5,
DIR_COLLECT_KILLED_ONLY = 1<<6,
DIR_KEEP_UNTRACKED_CONTENTS = 1<<7,
- DIR_SHOW_IGNORED_TOO_MODE_MATCHING = 1<<8
+ DIR_SHOW_IGNORED_TOO_MODE_MATCHING = 1<<8,
+ DIR_SKIP_NESTED_GIT = 1<<9
} flags;
struct dir_entry **entries;
struct dir_entry **ignored;
@@ -211,8 +212,9 @@ int count_slashes(const char *s);
* when populating the seen[] array.
*/
#define MATCHED_RECURSIVELY 1
-#define MATCHED_FNMATCH 2
-#define MATCHED_EXACTLY 3
+#define MATCHED_RECURSIVELY_LEADING_PATHSPEC 2
+#define MATCHED_FNMATCH 3
+#define MATCHED_EXACTLY 4
int simple_length(const char *match);
int no_wildcard(const char *string);
char *common_prefix(const struct pathspec *pathspec);
diff --git a/fast-import.c b/fast-import.c
index 1f9160b..9503d08 100644
--- a/fast-import.c
+++ b/fast-import.c
@@ -2489,18 +2489,14 @@ static void parse_from_existing(struct branch *b)
}
}
-static int parse_from(struct branch *b)
+static int parse_objectish(struct branch *b, const char *objectish)
{
- const char *from;
struct branch *s;
struct object_id oid;
- if (!skip_prefix(command_buf.buf, "from ", &from))
- return 0;
-
oidcpy(&oid, &b->branch_tree.versions[1].oid);
- s = lookup_branch(from);
+ s = lookup_branch(objectish);
if (b == s)
die("Can't create a branch from itself: %s", b->name);
else if (s) {
@@ -2508,8 +2504,8 @@ static int parse_from(struct branch *b)
oidcpy(&b->oid, &s->oid);
oidcpy(&b->branch_tree.versions[0].oid, t);
oidcpy(&b->branch_tree.versions[1].oid, t);
- } else if (*from == ':') {
- uintmax_t idnum = parse_mark_ref_eol(from);
+ } else if (*objectish == ':') {
+ uintmax_t idnum = parse_mark_ref_eol(objectish);
struct object_entry *oe = find_mark(idnum);
if (oe->type != OBJ_COMMIT)
die("Mark :%" PRIuMAX " not a commit", idnum);
@@ -2523,13 +2519,13 @@ static int parse_from(struct branch *b)
} else
parse_from_existing(b);
}
- } else if (!get_oid(from, &b->oid)) {
+ } else if (!get_oid(objectish, &b->oid)) {
parse_from_existing(b);
if (is_null_oid(&b->oid))
b->delete = 1;
}
else
- die("Invalid ref name or SHA1 expression: %s", from);
+ die("Invalid ref name or SHA1 expression: %s", objectish);
if (b->branch_tree.tree && !oideq(&oid, &b->branch_tree.versions[1].oid)) {
release_tree_content_recursive(b->branch_tree.tree);
@@ -2540,6 +2536,26 @@ static int parse_from(struct branch *b)
return 1;
}
+static int parse_from(struct branch *b)
+{
+ const char *from;
+
+ if (!skip_prefix(command_buf.buf, "from ", &from))
+ return 0;
+
+ return parse_objectish(b, from);
+}
+
+static int parse_objectish_with_prefix(struct branch *b, const char *prefix)
+{
+ const char *base;
+
+ if (!skip_prefix(command_buf.buf, prefix, &base))
+ return 0;
+
+ return parse_objectish(b, base);
+}
+
static struct hash_list *parse_merge(unsigned int *count)
{
struct hash_list *list = NULL, **tail = &list, *n;
@@ -2714,6 +2730,7 @@ static void parse_new_tag(const char *arg)
first_tag = t;
last_tag = t;
read_next_command();
+ parse_mark();
/* from ... */
if (!skip_prefix(command_buf.buf, "from ", &from))
@@ -2770,7 +2787,7 @@ static void parse_new_tag(const char *arg)
strbuf_addbuf(&new_data, &msg);
free(tagger);
- if (store_object(OBJ_TAG, &new_data, NULL, &t->oid, 0))
+ if (store_object(OBJ_TAG, &new_data, NULL, &t->oid, next_mark))
t->pack_id = MAX_PACK_ID;
else
t->pack_id = pack_id;
@@ -2779,6 +2796,7 @@ static void parse_new_tag(const char *arg)
static void parse_reset_branch(const char *arg)
{
struct branch *b;
+ const char *tag_name;
b = lookup_branch(arg);
if (b) {
@@ -2794,6 +2812,32 @@ static void parse_reset_branch(const char *arg)
b = new_branch(arg);
read_next_command();
parse_from(b);
+ if (b->delete && skip_prefix(b->name, "refs/tags/", &tag_name)) {
+ /*
+ * Elsewhere, we call dump_branches() before dump_tags(),
+ * and dump_branches() will handle ref deletions first, so
+ * in order to make sure the deletion actually takes effect,
+ * we need to remove the tag from our list of tags to update.
+ *
+ * NEEDSWORK: replace list of tags with hashmap for faster
+ * deletion?
+ */
+ struct tag *t, *prev = NULL;
+ for (t = first_tag; t; t = t->next_tag) {
+ if (!strcmp(t->name, tag_name))
+ break;
+ prev = t;
+ }
+ if (t) {
+ if (prev)
+ prev->next_tag = t->next_tag;
+ else
+ first_tag = t->next_tag;
+ if (!t->next_tag)
+ last_tag = prev;
+ /* There is no mem_pool_free(t) function to call. */
+ }
+ }
if (command_buf.len > 0)
unread_command_buf = 1;
}
@@ -3060,6 +3104,28 @@ static void parse_progress(void)
skip_optional_lf();
}
+static void parse_alias(void)
+{
+ struct object_entry *e;
+ struct branch b;
+
+ skip_optional_lf();
+ read_next_command();
+
+ /* mark ... */
+ parse_mark();
+ if (!next_mark)
+ die(_("Expected 'mark' command, got %s"), command_buf.buf);
+
+ /* to ... */
+ memset(&b, 0, sizeof(b));
+ if (!parse_objectish_with_prefix(&b, "to "))
+ die(_("Expected 'to' command, got %s"), command_buf.buf);
+ e = find_object(&b.oid);
+ assert(e);
+ insert_mark(next_mark, e);
+}
+
static char* make_fast_import_path(const char *path)
{
if (!relative_marks_paths || is_absolute_path(path))
@@ -3187,6 +3253,8 @@ static int parse_one_feature(const char *feature, int from_stream)
option_import_marks(arg, from_stream, 1);
} else if (skip_prefix(feature, "export-marks=", &arg)) {
option_export_marks(arg);
+ } else if (!strcmp(feature, "alias")) {
+ ; /* Don't die - this feature is supported */
} else if (!strcmp(feature, "get-mark")) {
; /* Don't die - this feature is supported */
} else if (!strcmp(feature, "cat-blob")) {
@@ -3343,6 +3411,8 @@ int cmd_main(int argc, const char **argv)
parse_checkpoint();
else if (!strcmp("done", command_buf.buf))
break;
+ else if (!strcmp("alias", command_buf.buf))
+ parse_alias();
else if (starts_with(command_buf.buf, "progress "))
parse_progress();
else if (skip_prefix(command_buf.buf, "feature ", &v))
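Putting the new fast-import pieces together: 'feature alias' announces the capability, and the 'alias' command itself requires a mark line followed by a 'to ' line naming any object-ish the stream has already produced. Based purely on the parser above, a fragment of an input stream could look like this (the mark numbers are made up):

    feature alias

    alias
    mark :2
    to :1

Here :1 is assumed to refer to an object created earlier in the same stream; after the alias, :2 can be used wherever :1 could, without writing any new object, since insert_mark() only records the existing object entry under the new mark.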
diff --git a/fetch-pack.c b/fetch-pack.c
index c016eea..0130b44 100644
--- a/fetch-pack.c
+++ b/fetch-pack.c
@@ -167,16 +167,16 @@ static enum ack_type get_ack(struct packet_reader *reader,
if (!strcmp(reader->line, "NAK"))
return NAK;
if (skip_prefix(reader->line, "ACK ", &arg)) {
- if (!get_oid_hex(arg, result_oid)) {
- arg += 40;
- len -= arg - reader->line;
+ const char *p;
+ if (!parse_oid_hex(arg, result_oid, &p)) {
+ len -= p - reader->line;
if (len < 1)
return ACK;
- if (strstr(arg, "continue"))
+ if (strstr(p, "continue"))
return ACK_continue;
- if (strstr(arg, "common"))
+ if (strstr(p, "common"))
return ACK_common;
- if (strstr(arg, "ready"))
+ if (strstr(p, "ready"))
return ACK_ready;
return ACK;
}
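The rewrite above is what makes get_ack() hash-agnostic: parse_oid_hex() consumes however many hex digits the current hash algorithm needs and reports where they end, so the old hard-coded 'arg += 40' goes away. A condensed sketch of the same pattern; parse_ack_line() is a made-up helper and assumes git's own headers for object_id, skip_prefix() and parse_oid_hex():

    /* Returns 0 and sets *hint to the text after the oid ("", " common", ...). */
    static int parse_ack_line(const char *line, struct object_id *oid,
                              const char **hint)
    {
            const char *arg, *end;

            if (!skip_prefix(line, "ACK ", &arg) ||
                parse_oid_hex(arg, oid, &end))
                    return -1;
            *hint = end;
            return 0;
    }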
diff --git a/git-add--interactive.perl b/git-add--interactive.perl
index c20ae9e..52659bb 100755
--- a/git-add--interactive.perl
+++ b/git-add--interactive.perl
@@ -1541,7 +1541,7 @@ sub patch_update_file {
for (@{$hunk[$ix]{DISPLAY}}) {
print;
}
- print colored $prompt_color,
+ print colored $prompt_color, "(", ($ix+1), "/$num) ",
sprintf(__($patch_update_prompt_modes{$patch_mode}{$hunk[$ix]{TYPE}}), $other);
my $line = prompt_single_character;
diff --git a/git-compat-util.h b/git-compat-util.h
index f0d13e4..8b8b29a 100644
--- a/git-compat-util.h
+++ b/git-compat-util.h
@@ -344,6 +344,7 @@ typedef uintmax_t timestamp_t;
#define PRItime PRIuMAX
#define parse_timestamp strtoumax
#define TIME_MAX UINTMAX_MAX
+#define TIME_MIN 0
#ifndef PATH_SEP
#define PATH_SEP ':'
@@ -1091,10 +1092,10 @@ static inline int strtol_i(char const *s, int base, int *result)
return 0;
}
+void git_stable_qsort(void *base, size_t nmemb, size_t size,
+ int(*compar)(const void *, const void *));
#ifdef INTERNAL_QSORT
-void git_qsort(void *base, size_t nmemb, size_t size,
- int(*compar)(const void *, const void *));
-#define qsort git_qsort
+#define qsort git_stable_qsort
#endif
#define QSORT(base, n, compar) sane_qsort((base), (n), sizeof(*(base)), compar)
@@ -1105,6 +1106,9 @@ static inline void sane_qsort(void *base, size_t nmemb, size_t size,
qsort(base, nmemb, size, compar);
}
+#define STABLE_QSORT(base, n, compar) \
+ git_stable_qsort((base), (n), sizeof(*(base)), compar)
+
#ifndef HAVE_ISO_QSORT_S
int git_qsort_s(void *base, size_t nmemb, size_t size,
int (*compar)(const void *, const void *, void *), void *ctx);
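STABLE_QSORT() is used exactly like QSORT(): it takes the array, an element count and a comparator, and derives the element size itself, but it routes through git_stable_qsort() so that elements which compare equal keep their relative input order. A small illustrative use; the struct and comparator are made up:

    struct line {
            int score;
            int input_pos;  /* only here to make the tie order visible */
    };

    static int cmp_score(const void *a_, const void *b_)
    {
            const struct line *a = a_, *b = b_;
            return (a->score > b->score) - (a->score < b->score);
    }

    /* ... given struct line lines[n] ... */
    STABLE_QSORT(lines, n, cmp_score);
    /* entries with equal .score now stay in their original order,
       which plain QSORT() does not guarantee */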
@@ -1309,4 +1313,42 @@ void unleak_memory(const void *ptr, size_t len);
*/
#include "banned.h"
+/*
+ * container_of - Get the address of an object containing a field.
+ *
+ * @ptr: pointer to the field.
+ * @type: type of the object.
+ * @member: name of the field within the object.
+ */
+#define container_of(ptr, type, member) \
+ ((type *) ((char *)(ptr) - offsetof(type, member)))
+
+/*
+ * helper function for `container_of_or_null' to avoid multiple
+ * evaluation of @ptr
+ */
+static inline void *container_of_or_null_offset(void *ptr, size_t offset)
+{
+ return ptr ? (char *)ptr - offset : NULL;
+}
+
+/*
+ * like `container_of', but allows returned value to be NULL
+ */
+#define container_of_or_null(ptr, type, member) \
+ (type *)container_of_or_null_offset(ptr, offsetof(type, member))
+
+/*
+ * like offsetof(), but takes a pointer to a variable of a type which
+ * contains @member, instead of a specified type.
+ * @ptr is subject to multiple evaluation since we can't rely on __typeof__
+ * everywhere.
+ */
+#if defined(__GNUC__) /* clang sets this, too */
+#define OFFSETOF_VAR(ptr, member) offsetof(__typeof__(*ptr), member)
+#else /* !__GNUC__ */
+#define OFFSETOF_VAR(ptr, member) \
+ ((uintptr_t)&(ptr)->member - (uintptr_t)(ptr))
+#endif /* !__GNUC__ */
+
#endif
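A small standalone illustration of the helpers just added; the macro is repeated from above so the sketch compiles on its own, and the struct names are made up:

    #include <stddef.h>
    #include <stdio.h>

    #define container_of(ptr, type, member) \
            ((type *) ((char *)(ptr) - offsetof(type, member)))

    struct node {
            struct node *next;
    };

    struct widget {
            int id;
            struct node link;       /* embedded entry; not the first member */
    };

    int main(void)
    {
            struct widget w = { 17, { NULL } };
            struct node *n = &w.link;

            /* walk back from the embedded field to its containing struct */
            struct widget *back = container_of(n, struct widget, link);
            printf("%d\n", back->id);       /* prints 17 */
            return 0;
    }

container_of_or_null() does the same but maps a NULL field pointer to a NULL result, and OFFSETOF_VAR() lets macros such as the hashmap helpers below compute that offset from a pointer variable rather than a spelled-out type.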
diff --git a/git-p4.py b/git-p4.py
index 3991e7d..60c73b6 100755
--- a/git-p4.py
+++ b/git-p4.py
@@ -1160,13 +1160,11 @@ class LargeFileSystem(object):
if contentsSize <= gitConfigInt('git-p4.largeFileCompressedThreshold'):
return False
contentTempFile = self.generateTempFile(contents)
- compressedContentFile = tempfile.NamedTemporaryFile(prefix='git-p4-large-file', delete=False)
- zf = zipfile.ZipFile(compressedContentFile.name, mode='w')
- zf.write(contentTempFile, compress_type=zipfile.ZIP_DEFLATED)
- zf.close()
- compressedContentsSize = zf.infolist()[0].compress_size
+ compressedContentFile = tempfile.NamedTemporaryFile(prefix='git-p4-large-file', delete=True)
+ with zipfile.ZipFile(compressedContentFile, mode='w') as zf:
+ zf.write(contentTempFile, compress_type=zipfile.ZIP_DEFLATED)
+ compressedContentsSize = zf.infolist()[0].compress_size
os.remove(contentTempFile)
- os.remove(compressedContentFile.name)
if compressedContentsSize > gitConfigInt('git-p4.largeFileCompressedThreshold'):
return True
return False
@@ -3525,8 +3523,9 @@ class P4Sync(Command, P4UserMap):
self.updateOptionDict(details)
try:
self.commit(details, self.extractFilesFromCommit(details), self.branch)
- except IOError:
+ except IOError as err:
print("IO error with git fast-import. Is your git version recent enough?")
+ print("IO error details: {}".format(err))
print(self.gitError.read())
def openStreams(self):
diff --git a/git.c b/git.c
index c1ee712..ce6ab0e 100644
--- a/git.c
+++ b/git.c
@@ -369,8 +369,7 @@ static int handle_alias(int *argcp, const char ***argv)
die(_("alias '%s' changes environment variables.\n"
"You can use '!git' in the alias to do this"),
alias_command);
- memmove(new_argv - option_count, new_argv,
- count * sizeof(char *));
+ MOVE_ARRAY(new_argv - option_count, new_argv, count);
new_argv -= option_count;
if (count < 1)
@@ -385,7 +384,7 @@ static int handle_alias(int *argcp, const char ***argv)
REALLOC_ARRAY(new_argv, count + *argcp);
/* insert after command name */
- memcpy(new_argv + count, *argv + 1, sizeof(char *) * *argcp);
+ COPY_ARRAY(new_argv + count, *argv + 1, *argcp);
trace2_cmd_alias(alias_command, new_argv);
trace2_cmd_list_config();
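COPY_ARRAY() and MOVE_ARRAY() express the copy as "n elements of the destination's type" instead of the hand-computed byte count that the memcpy()/memmove() calls above needed. A tiny sketch of the difference with plain arrays; the macros also include a compile-time check that source and destination elements have the same size:

    int src[4] = { 1, 2, 3, 4 };
    int dst[4];

    COPY_ARRAY(dst, src, 4);                /* copy 4 elements           */
    memcpy(dst, src, 4 * sizeof(*dst));     /* the same, spelled by hand */
    memcpy(dst, src, 4);                    /* classic bug: 4 bytes      */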
diff --git a/grep.c b/grep.c
index cd952ef..0bb4cbd 100644
--- a/grep.c
+++ b/grep.c
@@ -368,31 +368,20 @@ static int is_fixed(const char *s, size_t len)
return 1;
}
-static int has_null(const char *s, size_t len)
-{
- /*
- * regcomp cannot accept patterns with NULs so when using it
- * we consider any pattern containing a NUL fixed.
- */
- if (memchr(s, 0, len))
- return 1;
-
- return 0;
-}
-
#ifdef USE_LIBPCRE1
static void compile_pcre1_regexp(struct grep_pat *p, const struct grep_opt *opt)
{
const char *error;
int erroffset;
int options = PCRE_MULTILINE;
+ int study_options = 0;
if (opt->ignore_case) {
- if (has_non_ascii(p->pattern))
+ if (!opt->ignore_locale && has_non_ascii(p->pattern))
p->pcre1_tables = pcre_maketables();
options |= PCRE_CASELESS;
}
- if (is_utf8_locale() && has_non_ascii(p->pattern))
+ if (!opt->ignore_locale && is_utf8_locale() && has_non_ascii(p->pattern))
options |= PCRE_UTF8;
p->pcre1_regexp = pcre_compile(p->pattern, options, &error, &erroffset,
@@ -400,44 +389,31 @@ static void compile_pcre1_regexp(struct grep_pat *p, const struct grep_opt *opt)
if (!p->pcre1_regexp)
compile_regexp_failed(p, error);
- p->pcre1_extra_info = pcre_study(p->pcre1_regexp, GIT_PCRE_STUDY_JIT_COMPILE, &error);
- if (!p->pcre1_extra_info && error)
- die("%s", error);
-
-#ifdef GIT_PCRE1_USE_JIT
+#if defined(PCRE_CONFIG_JIT) && !defined(NO_LIBPCRE1_JIT)
pcre_config(PCRE_CONFIG_JIT, &p->pcre1_jit_on);
- if (p->pcre1_jit_on == 1) {
- p->pcre1_jit_stack = pcre_jit_stack_alloc(1, 1024 * 1024);
- if (!p->pcre1_jit_stack)
- die("Couldn't allocate PCRE JIT stack");
- pcre_assign_jit_stack(p->pcre1_extra_info, NULL, p->pcre1_jit_stack);
- } else if (p->pcre1_jit_on != 0) {
- BUG("The pcre1_jit_on variable should be 0 or 1, not %d",
- p->pcre1_jit_on);
- }
+ if (opt->debug)
+ fprintf(stderr, "pcre1_jit_on=%d\n", p->pcre1_jit_on);
+
+ if (p->pcre1_jit_on)
+ study_options = PCRE_STUDY_JIT_COMPILE;
#endif
+
+ p->pcre1_extra_info = pcre_study(p->pcre1_regexp, study_options, &error);
+ if (!p->pcre1_extra_info && error)
+ die("%s", error);
}
static int pcre1match(struct grep_pat *p, const char *line, const char *eol,
regmatch_t *match, int eflags)
{
- int ovector[30], ret, flags = 0;
+ int ovector[30], ret, flags = PCRE_NO_UTF8_CHECK;
if (eflags & REG_NOTBOL)
flags |= PCRE_NOTBOL;
-#ifdef GIT_PCRE1_USE_JIT
- if (p->pcre1_jit_on) {
- ret = pcre_jit_exec(p->pcre1_regexp, p->pcre1_extra_info, line,
- eol - line, 0, flags, ovector,
- ARRAY_SIZE(ovector), p->pcre1_jit_stack);
- } else
-#endif
- {
- ret = pcre_exec(p->pcre1_regexp, p->pcre1_extra_info, line,
- eol - line, 0, flags, ovector,
- ARRAY_SIZE(ovector));
- }
+ ret = pcre_exec(p->pcre1_regexp, p->pcre1_extra_info, line,
+ eol - line, 0, flags, ovector,
+ ARRAY_SIZE(ovector));
if (ret < 0 && ret != PCRE_ERROR_NOMATCH)
die("pcre_exec failed with error code %d", ret);
@@ -453,15 +429,12 @@ static int pcre1match(struct grep_pat *p, const char *line, const char *eol,
static void free_pcre1_regexp(struct grep_pat *p)
{
pcre_free(p->pcre1_regexp);
-#ifdef GIT_PCRE1_USE_JIT
- if (p->pcre1_jit_on) {
+#ifdef PCRE_CONFIG_JIT
+ if (p->pcre1_jit_on)
pcre_free_study(p->pcre1_extra_info);
- pcre_jit_stack_free(p->pcre1_jit_stack);
- } else
+ else
#endif
- {
pcre_free(p->pcre1_extra_info);
- }
pcre_free((void *)p->pcre1_tables);
}
#else /* !USE_LIBPCRE1 */
@@ -498,14 +471,15 @@ static void compile_pcre2_pattern(struct grep_pat *p, const struct grep_opt *opt
p->pcre2_compile_context = NULL;
if (opt->ignore_case) {
- if (has_non_ascii(p->pattern)) {
+ if (!opt->ignore_locale && has_non_ascii(p->pattern)) {
character_tables = pcre2_maketables(NULL);
p->pcre2_compile_context = pcre2_compile_context_create(NULL);
pcre2_set_character_tables(p->pcre2_compile_context, character_tables);
}
options |= PCRE2_CASELESS;
}
- if (is_utf8_locale() && has_non_ascii(p->pattern))
+ if (!opt->ignore_locale && is_utf8_locale() && has_non_ascii(p->pattern) &&
+ !(!opt->ignore_case && (p->fixed || p->is_fixed)))
options |= PCRE2_UTF;
p->pcre2_pattern = pcre2_compile((PCRE2_SPTR)p->pattern,
@@ -522,7 +496,9 @@ static void compile_pcre2_pattern(struct grep_pat *p, const struct grep_opt *opt
}
pcre2_config(PCRE2_CONFIG_JIT, &p->pcre2_jit_on);
- if (p->pcre2_jit_on == 1) {
+ if (opt->debug)
+ fprintf(stderr, "pcre2_jit_on=%d\n", p->pcre2_jit_on);
+ if (p->pcre2_jit_on) {
jitret = pcre2_jit_compile(p->pcre2_pattern, PCRE2_JIT_COMPLETE);
if (jitret)
die("Couldn't JIT the PCRE2 pattern '%s', got '%d'\n", p->pattern, jitret);
@@ -547,19 +523,11 @@ static void compile_pcre2_pattern(struct grep_pat *p, const struct grep_opt *opt
BUG("pcre2_pattern_info() failed: %d", patinforet);
if (jitsizearg == 0) {
p->pcre2_jit_on = 0;
+ if (opt->debug)
+ fprintf(stderr, "pcre2_jit_on=%d: (*NO_JIT) in regex\n",
+ p->pcre2_jit_on);
return;
}
-
- p->pcre2_jit_stack = pcre2_jit_stack_create(1, 1024 * 1024, NULL);
- if (!p->pcre2_jit_stack)
- die("Couldn't allocate PCRE2 JIT stack");
- p->pcre2_match_context = pcre2_match_context_create(NULL);
- if (!p->pcre2_match_context)
- die("Couldn't allocate PCRE2 match context");
- pcre2_jit_stack_assign(p->pcre2_match_context, NULL, p->pcre2_jit_stack);
- } else if (p->pcre2_jit_on != 0) {
- BUG("The pcre2_jit_on variable should be 0 or 1, not %d",
- p->pcre2_jit_on);
}
}
@@ -603,8 +571,6 @@ static void free_pcre2_pattern(struct grep_pat *p)
pcre2_compile_context_free(p->pcre2_compile_context);
pcre2_code_free(p->pcre2_pattern);
pcre2_match_data_free(p->pcre2_match_data);
- pcre2_jit_stack_free(p->pcre2_jit_stack);
- pcre2_match_context_free(p->pcre2_match_context);
}
#else /* !USE_LIBPCRE2 */
static void compile_pcre2_pattern(struct grep_pat *p, const struct grep_opt *opt)
@@ -626,7 +592,6 @@ static int pcre2match(struct grep_pat *p, const char *line, const char *eol,
static void free_pcre2_pattern(struct grep_pat *p)
{
}
-#endif /* !USE_LIBPCRE2 */
static void compile_fixed_regexp(struct grep_pat *p, struct grep_opt *opt)
{
@@ -647,46 +612,66 @@ static void compile_fixed_regexp(struct grep_pat *p, struct grep_opt *opt)
compile_regexp_failed(p, errbuf);
}
}
+#endif /* !USE_LIBPCRE2 */
static void compile_regexp(struct grep_pat *p, struct grep_opt *opt)
{
- int ascii_only;
int err;
int regflags = REG_NEWLINE;
p->word_regexp = opt->word_regexp;
p->ignore_case = opt->ignore_case;
- ascii_only = !has_non_ascii(p->pattern);
+ p->fixed = opt->fixed;
- /*
- * Even when -F (fixed) asks us to do a non-regexp search, we
- * may not be able to correctly case-fold when -i
- * (ignore-case) is asked (in which case, we'll synthesize a
- * regexp to match the pattern that matches regexp special
- * characters literally, while ignoring case differences). On
- * the other hand, even without -F, if the pattern does not
- * have any regexp special characters and there is no need for
- * case-folding search, we can internally turn it into a
- * simple string match using kws. p->fixed tells us if we
- * want to use kws.
- */
- if (opt->fixed ||
- has_null(p->pattern, p->patternlen) ||
- is_fixed(p->pattern, p->patternlen))
- p->fixed = !p->ignore_case || ascii_only;
-
- if (p->fixed) {
- p->kws = kwsalloc(p->ignore_case ? tolower_trans_tbl : NULL);
- kwsincr(p->kws, p->pattern, p->patternlen);
- kwsprep(p->kws);
- return;
- } else if (opt->fixed) {
- /*
- * We come here when the pattern has the non-ascii
- * characters we cannot case-fold, and asked to
- * ignore-case.
- */
+ if (memchr(p->pattern, 0, p->patternlen) && !opt->pcre2)
+ die(_("given pattern contains NULL byte (via -f <file>). This is only supported with -P under PCRE v2"));
+
+ p->is_fixed = is_fixed(p->pattern, p->patternlen);
+#ifdef USE_LIBPCRE2
+ if (!p->fixed && !p->is_fixed) {
+ const char *no_jit = "(*NO_JIT)";
+ const int no_jit_len = strlen(no_jit);
+ if (starts_with(p->pattern, no_jit) &&
+ is_fixed(p->pattern + no_jit_len,
+ p->patternlen - no_jit_len))
+ p->is_fixed = 1;
+ }
+#endif
+ if (p->fixed || p->is_fixed) {
+#ifdef USE_LIBPCRE2
+ opt->pcre2 = 1;
+ if (p->is_fixed) {
+ compile_pcre2_pattern(p, opt);
+ } else {
+ /*
+ * E.g. t7811-grep-open.sh relies on the
+ * pattern being restored.
+ */
+ char *old_pattern = p->pattern;
+ size_t old_patternlen = p->patternlen;
+ struct strbuf sb = STRBUF_INIT;
+
+ /*
+ * There is the PCRE2_LITERAL flag, but it's
+ * only in PCRE v2 10.30 and later. Needing to
+ * ifdef our way around that and dealing with
+ * it + PCRE2_MULTILINE being an error is more
+ * complex than just quoting this ourselves.
+ */
+ strbuf_add(&sb, "\\Q", 2);
+ strbuf_add(&sb, p->pattern, p->patternlen);
+ strbuf_add(&sb, "\\E", 2);
+
+ p->pattern = sb.buf;
+ p->patternlen = sb.len;
+ compile_pcre2_pattern(p, opt);
+ p->pattern = old_pattern;
+ p->patternlen = old_patternlen;
+ strbuf_release(&sb);
+ }
+#else /* !USE_LIBPCRE2 */
compile_fixed_regexp(p, opt);
+#endif /* !USE_LIBPCRE2 */
return;
}
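For fixed-string patterns the code above now leans on PCRE2's \Q ... \E quoting: everything between the two markers is matched literally, so regex metacharacters in the user's pattern lose their meaning. A minimal standalone illustration of the transformation; quote_fixed() is made up, while grep.c builds the same string with a strbuf and then restores the original pattern:

    #include <stdlib.h>
    #include <string.h>

    /* wrap a fixed pattern so a regex engine treats it literally */
    static char *quote_fixed(const char *pattern)
    {
            size_t len = strlen(pattern);
            char *out = malloc(len + 5);    /* "\Q" + pattern + "\E" + NUL */

            if (!out)
                    return NULL;
            memcpy(out, "\\Q", 2);
            memcpy(out + 2, pattern, len);
            memcpy(out + 2 + len, "\\E", 3);        /* copies the NUL too */
            return out;
    }

    /* quote_fixed("foo.bar") yields "\Qfoo.bar\E": the '.' only matches a dot */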
@@ -1053,9 +1038,7 @@ void free_grep_patterns(struct grep_opt *opt)
case GREP_PATTERN: /* atom */
case GREP_PATTERN_HEAD:
case GREP_PATTERN_BODY:
- if (p->kws)
- kwsfree(p->kws);
- else if (p->pcre1_regexp)
+ if (p->pcre1_regexp)
free_pcre1_regexp(p);
else if (p->pcre2_pattern)
free_pcre2_pattern(p);
@@ -1115,29 +1098,12 @@ static void show_name(struct grep_opt *opt, const char *name)
opt->output(opt, opt->null_following_name ? "\0" : "\n", 1);
}
-static int fixmatch(struct grep_pat *p, char *line, char *eol,
- regmatch_t *match)
-{
- struct kwsmatch kwsm;
- size_t offset = kwsexec(p->kws, line, eol - line, &kwsm);
- if (offset == -1) {
- match->rm_so = match->rm_eo = -1;
- return REG_NOMATCH;
- } else {
- match->rm_so = offset;
- match->rm_eo = match->rm_so + kwsm.size[0];
- return 0;
- }
-}
-
static int patmatch(struct grep_pat *p, char *line, char *eol,
regmatch_t *match, int eflags)
{
int hit;
- if (p->fixed)
- hit = !fixmatch(p, line, eol, match);
- else if (p->pcre1_regexp)
+ if (p->pcre1_regexp)
hit = !pcre1match(p, line, eol, match, eflags);
else if (p->pcre2_pattern)
hit = !pcre2match(p, line, eol, match, eflags);
diff --git a/grep.h b/grep.h
index 1875880..05dc1bb 100644
--- a/grep.h
+++ b/grep.h
@@ -3,24 +3,12 @@
#include "color.h"
#ifdef USE_LIBPCRE1
#include <pcre.h>
-#ifdef PCRE_CONFIG_JIT
-#if PCRE_MAJOR >= 8 && PCRE_MINOR >= 32
-#ifndef NO_LIBPCRE1_JIT
-#define GIT_PCRE1_USE_JIT
-#define GIT_PCRE_STUDY_JIT_COMPILE PCRE_STUDY_JIT_COMPILE
-#endif
-#endif
-#endif
-#ifndef GIT_PCRE_STUDY_JIT_COMPILE
-#define GIT_PCRE_STUDY_JIT_COMPILE 0
-#endif
-#if PCRE_MAJOR <= 8 && PCRE_MINOR < 20
-typedef int pcre_jit_stack;
+#ifndef PCRE_NO_UTF8_CHECK
+#define PCRE_NO_UTF8_CHECK 0
#endif
#else
typedef int pcre;
typedef int pcre_extra;
-typedef int pcre_jit_stack;
#endif
#ifdef USE_LIBPCRE2
#define PCRE2_CODE_UNIT_WIDTH 8
@@ -29,10 +17,7 @@ typedef int pcre_jit_stack;
typedef int pcre2_code;
typedef int pcre2_match_data;
typedef int pcre2_compile_context;
-typedef int pcre2_match_context;
-typedef int pcre2_jit_stack;
#endif
-#include "kwset.h"
#include "thread-utils.h"
#include "userdiff.h"
@@ -88,17 +73,14 @@ struct grep_pat {
regex_t regexp;
pcre *pcre1_regexp;
pcre_extra *pcre1_extra_info;
- pcre_jit_stack *pcre1_jit_stack;
const unsigned char *pcre1_tables;
int pcre1_jit_on;
pcre2_code *pcre2_pattern;
pcre2_match_data *pcre2_match_data;
pcre2_compile_context *pcre2_compile_context;
- pcre2_match_context *pcre2_match_context;
- pcre2_jit_stack *pcre2_jit_stack;
uint32_t pcre2_jit_on;
- kwset_t kws;
unsigned fixed:1;
+ unsigned is_fixed:1;
unsigned ignore_case:1;
unsigned word_regexp:1;
};
@@ -173,6 +155,7 @@ struct grep_opt {
int funcbody;
int extended_regexp_option;
int pattern_type_option;
+ int ignore_locale;
char colors[NR_GREP_COLORS][COLOR_MAXLEN];
unsigned pre_context;
unsigned post_context;
diff --git a/hashmap.c b/hashmap.c
index d42f01f..39c1311 100644
--- a/hashmap.c
+++ b/hashmap.c
@@ -140,8 +140,8 @@ static inline struct hashmap_entry **find_entry_ptr(const struct hashmap *map,
}
static int always_equal(const void *unused_cmp_data,
- const void *unused1,
- const void *unused2,
+ const struct hashmap_entry *unused1,
+ const struct hashmap_entry *unused2,
const void *unused_keydata)
{
return 0;
@@ -171,41 +171,49 @@ void hashmap_init(struct hashmap *map, hashmap_cmp_fn equals_function,
map->do_count_items = 1;
}
-void hashmap_free(struct hashmap *map, int free_entries)
+void hashmap_free_(struct hashmap *map, ssize_t entry_offset)
{
if (!map || !map->table)
return;
- if (free_entries) {
+ if (entry_offset >= 0) { /* called by hashmap_free_entries */
struct hashmap_iter iter;
struct hashmap_entry *e;
+
hashmap_iter_init(map, &iter);
while ((e = hashmap_iter_next(&iter)))
- free(e);
+ /*
+ * like container_of, but using caller-calculated
+ * offset (caller being hashmap_free_entries)
+ */
+ free((char *)e - entry_offset);
}
free(map->table);
memset(map, 0, sizeof(*map));
}
-void *hashmap_get(const struct hashmap *map, const void *key, const void *keydata)
+struct hashmap_entry *hashmap_get(const struct hashmap *map,
+ const struct hashmap_entry *key,
+ const void *keydata)
{
return *find_entry_ptr(map, key, keydata);
}
-void *hashmap_get_next(const struct hashmap *map, const void *entry)
+struct hashmap_entry *hashmap_get_next(const struct hashmap *map,
+ const struct hashmap_entry *entry)
{
- struct hashmap_entry *e = ((struct hashmap_entry *) entry)->next;
+ struct hashmap_entry *e = entry->next;
for (; e; e = e->next)
if (entry_equals(map, entry, e, NULL))
return e;
return NULL;
}
-void hashmap_add(struct hashmap *map, void *entry)
+void hashmap_add(struct hashmap *map, struct hashmap_entry *entry)
{
unsigned int b = bucket(map, entry);
/* add entry */
- ((struct hashmap_entry *) entry)->next = map->table[b];
+ entry->next = map->table[b];
map->table[b] = entry;
/* fix size and rehash if appropriate */
@@ -216,7 +224,9 @@ void hashmap_add(struct hashmap *map, void *entry)
}
}
-void *hashmap_remove(struct hashmap *map, const void *key, const void *keydata)
+struct hashmap_entry *hashmap_remove(struct hashmap *map,
+ const struct hashmap_entry *key,
+ const void *keydata)
{
struct hashmap_entry *old;
struct hashmap_entry **e = find_entry_ptr(map, key, keydata);
@@ -238,7 +248,8 @@ void *hashmap_remove(struct hashmap *map, const void *key, const void *keydata)
return old;
}
-void *hashmap_put(struct hashmap *map, void *entry)
+struct hashmap_entry *hashmap_put(struct hashmap *map,
+ struct hashmap_entry *entry)
{
struct hashmap_entry *old = hashmap_remove(map, entry, NULL);
hashmap_add(map, entry);
@@ -252,7 +263,7 @@ void hashmap_iter_init(struct hashmap *map, struct hashmap_iter *iter)
iter->next = NULL;
}
-void *hashmap_iter_next(struct hashmap_iter *iter)
+struct hashmap_entry *hashmap_iter_next(struct hashmap_iter *iter)
{
struct hashmap_entry *current = iter->next;
for (;;) {
@@ -275,10 +286,15 @@ struct pool_entry {
};
static int pool_entry_cmp(const void *unused_cmp_data,
- const struct pool_entry *e1,
- const struct pool_entry *e2,
- const unsigned char *keydata)
+ const struct hashmap_entry *eptr,
+ const struct hashmap_entry *entry_or_key,
+ const void *keydata)
{
+ const struct pool_entry *e1, *e2;
+
+ e1 = container_of(eptr, const struct pool_entry, ent);
+ e2 = container_of(entry_or_key, const struct pool_entry, ent);
+
return e1->data != keydata &&
(e1->len != e2->len || memcmp(e1->data, keydata, e1->len));
}
@@ -290,18 +306,18 @@ const void *memintern(const void *data, size_t len)
/* initialize string pool hashmap */
if (!map.tablesize)
- hashmap_init(&map, (hashmap_cmp_fn) pool_entry_cmp, NULL, 0);
+ hashmap_init(&map, pool_entry_cmp, NULL, 0);
/* lookup interned string in pool */
- hashmap_entry_init(&key, memhash(data, len));
+ hashmap_entry_init(&key.ent, memhash(data, len));
key.len = len;
- e = hashmap_get(&map, &key, data);
+ e = hashmap_get_entry(&map, &key, ent, data);
if (!e) {
/* not found: create it */
FLEX_ALLOC_MEM(e, data, data, len);
- hashmap_entry_init(e, key.ent.hash);
+ hashmap_entry_init(&e->ent, key.ent.hash);
e->len = len;
- hashmap_add(&map, e);
+ hashmap_add(&map, &e->ent);
}
return e->data;
}
diff --git a/hashmap.h b/hashmap.h
index 8424911..bd27015 100644
--- a/hashmap.h
+++ b/hashmap.h
@@ -13,7 +13,7 @@
*
* struct hashmap map;
* struct long2string {
- * struct hashmap_entry ent; // must be the first member!
+ * struct hashmap_entry ent;
* long key;
* char value[FLEX_ARRAY]; // be careful with allocating on stack!
* };
@@ -21,12 +21,16 @@
* #define COMPARE_VALUE 1
*
* static int long2string_cmp(const void *hashmap_cmp_fn_data,
- * const struct long2string *e1,
- * const struct long2string *e2,
+ * const struct hashmap_entry *eptr,
+ * const struct hashmap_entry *entry_or_key,
* const void *keydata)
* {
* const char *string = keydata;
* unsigned flags = *(unsigned *)hashmap_cmp_fn_data;
+ * const struct long2string *e1, *e2;
+ *
+ * e1 = container_of(eptr, const struct long2string, ent);
+ * e2 = container_of(entry_or_key, const struct long2string, ent);
*
* if (flags & COMPARE_VALUE)
* return e1->key != e2->key ||
@@ -41,54 +45,58 @@
* char value[255], action[32];
* unsigned flags = 0;
*
- * hashmap_init(&map, (hashmap_cmp_fn) long2string_cmp, &flags, 0);
+ * hashmap_init(&map, long2string_cmp, &flags, 0);
*
* while (scanf("%s %ld %s", action, &key, value)) {
*
* if (!strcmp("add", action)) {
* struct long2string *e;
* FLEX_ALLOC_STR(e, value, value);
- * hashmap_entry_init(e, memhash(&key, sizeof(long)));
+ * hashmap_entry_init(&e->ent, memhash(&key, sizeof(long)));
* e->key = key;
- * hashmap_add(&map, e);
+ * hashmap_add(&map, &e->ent);
* }
*
* if (!strcmp("print_all_by_key", action)) {
* struct long2string k, *e;
- * hashmap_entry_init(&k, memhash(&key, sizeof(long)));
+ * hashmap_entry_init(&k.ent, memhash(&key, sizeof(long)));
* k.key = key;
*
* flags &= ~COMPARE_VALUE;
- * e = hashmap_get(&map, &k, NULL);
+ * e = hashmap_get_entry(&map, &k, ent, NULL);
* if (e) {
* printf("first: %ld %s\n", e->key, e->value);
- * while ((e = hashmap_get_next(&map, e)))
+ * while ((e = hashmap_get_next_entry(&map, e,
+ * struct long2string, ent))) {
* printf("found more: %ld %s\n", e->key, e->value);
+ * }
* }
* }
*
* if (!strcmp("has_exact_match", action)) {
* struct long2string *e;
* FLEX_ALLOC_STR(e, value, value);
- * hashmap_entry_init(e, memhash(&key, sizeof(long)));
+ * hashmap_entry_init(&e->ent, memhash(&key, sizeof(long)));
* e->key = key;
*
* flags |= COMPARE_VALUE;
- * printf("%sfound\n", hashmap_get(&map, e, NULL) ? "" : "not ");
+ * printf("%sfound\n",
+ * hashmap_get(&map, &e->ent, NULL) ? "" : "not ");
* free(e);
* }
*
* if (!strcmp("has_exact_match_no_heap_alloc", action)) {
* struct long2string k;
- * hashmap_entry_init(&k, memhash(&key, sizeof(long)));
+ * hashmap_entry_init(&k.ent, memhash(&key, sizeof(long)));
* k.key = key;
*
* flags |= COMPARE_VALUE;
- * printf("%sfound\n", hashmap_get(&map, &k, value) ? "" : "not ");
+ * printf("%sfound\n",
+ * hashmap_get(&map, &k.ent, value) ? "" : "not ");
* }
*
* if (!strcmp("end", action)) {
- * hashmap_free(&map, 1);
+ * hashmap_free_entries(&map, struct long2string, ent);
* break;
* }
* }
@@ -133,7 +141,7 @@ static inline unsigned int oidhash(const struct object_id *oid)
/*
* struct hashmap_entry is an opaque structure representing an entry in the
- * hash table, which must be used as first member of user data structures.
+ * hash table.
* Ideally it should be followed by an int-sized member to prevent unused
* memory on 64-bit systems due to alignment.
*/
@@ -168,7 +176,8 @@ struct hashmap_entry {
* The `hashmap_cmp_fn_data` entry is the pointer given in the init function.
*/
typedef int (*hashmap_cmp_fn)(const void *hashmap_cmp_fn_data,
- const void *entry, const void *entry_or_key,
+ const struct hashmap_entry *entry,
+ const struct hashmap_entry *entry_or_key,
const void *keydata);
/*
@@ -223,13 +232,20 @@ void hashmap_init(struct hashmap *map,
const void *equals_function_data,
size_t initial_size);
+/* internal function for freeing hashmap */
+void hashmap_free_(struct hashmap *map, ssize_t offset);
+
/*
- * Frees a hashmap structure and allocated memory.
- *
- * If `free_entries` is true, each hashmap_entry in the map is freed as well
- * using stdlibs free().
+ * Frees a hashmap structure and its allocated memory; the entries themselves are left undisturbed.
*/
-void hashmap_free(struct hashmap *map, int free_entries);
+#define hashmap_free(map) hashmap_free_(map, -1)
+
+/*
+ * Frees @map and all entries. @type is the struct type of the entry
+ * where @member is the hashmap_entry struct used to associate with @map
+ */
+#define hashmap_free_entries(map, type, member) \
+ hashmap_free_(map, offsetof(type, member))
/* hashmap_entry functions */
@@ -244,9 +260,9 @@ void hashmap_free(struct hashmap *map, int free_entries);
* your structure was allocated with xmalloc(), you can just free(3) it,
* and if it is on stack, you can just let it go out of scope).
*/
-static inline void hashmap_entry_init(void *entry, unsigned int hash)
+static inline void hashmap_entry_init(struct hashmap_entry *e,
+ unsigned int hash)
{
- struct hashmap_entry *e = entry;
e->hash = hash;
e->next = NULL;
}
@@ -286,8 +302,9 @@ static inline unsigned int hashmap_get_size(struct hashmap *map)
* If an entry with matching hash code is found, `key` and `keydata` are passed
* to `hashmap_cmp_fn` to decide whether the entry matches the key.
*/
-void *hashmap_get(const struct hashmap *map, const void *key,
- const void *keydata);
+struct hashmap_entry *hashmap_get(const struct hashmap *map,
+ const struct hashmap_entry *key,
+ const void *keydata);
/*
* Returns the hashmap entry for the specified hash code and key data,
@@ -301,9 +318,10 @@ void *hashmap_get(const struct hashmap *map, const void *key,
* `entry_or_key` parameter of `hashmap_cmp_fn` points to a hashmap_entry
* structure that should not be used in the comparison.
*/
-static inline void *hashmap_get_from_hash(const struct hashmap *map,
- unsigned int hash,
- const void *keydata)
+static inline struct hashmap_entry *hashmap_get_from_hash(
+ const struct hashmap *map,
+ unsigned int hash,
+ const void *keydata)
{
struct hashmap_entry key;
hashmap_entry_init(&key, hash);
@@ -318,7 +336,8 @@ static inline void *hashmap_get_from_hash(const struct hashmap *map,
* `entry` is the hashmap_entry to start the search from, obtained via a previous
* call to `hashmap_get` or `hashmap_get_next`.
*/
-void *hashmap_get_next(const struct hashmap *map, const void *entry);
+struct hashmap_entry *hashmap_get_next(const struct hashmap *map,
+ const struct hashmap_entry *entry);
/*
* Adds a hashmap entry. This allows to add duplicate entries (i.e.
@@ -327,7 +346,7 @@ void *hashmap_get_next(const struct hashmap *map, const void *entry);
* `map` is the hashmap structure.
* `entry` is the entry to add.
*/
-void hashmap_add(struct hashmap *map, void *entry);
+void hashmap_add(struct hashmap *map, struct hashmap_entry *entry);
/*
* Adds or replaces a hashmap entry. If the hashmap contains duplicate
@@ -337,7 +356,20 @@ void hashmap_add(struct hashmap *map, void *entry);
* `entry` is the entry to add or replace.
* Returns the replaced entry, or NULL if not found (i.e. the entry was added).
*/
-void *hashmap_put(struct hashmap *map, void *entry);
+struct hashmap_entry *hashmap_put(struct hashmap *map,
+ struct hashmap_entry *entry);
+
+/*
+ * Adds or replaces a hashmap entry contained within @keyvar,
+ * where @keyvar is a pointer to a struct containing a
+ * "struct hashmap_entry" @member.
+ *
+ * Returns the replaced pointer which is of the same type as @keyvar,
+ * or NULL if no entry was replaced (i.e. the entry was added).
+ */
+#define hashmap_put_entry(map, keyvar, member) \
+ container_of_or_null_offset(hashmap_put(map, &(keyvar)->member), \
+ OFFSETOF_VAR(keyvar, member))
/*
* Removes a hashmap entry matching the specified key. If the hashmap contains
@@ -346,8 +378,24 @@ void *hashmap_put(struct hashmap *map, void *entry);
*
* Argument explanation is the same as in `hashmap_get`.
*/
-void *hashmap_remove(struct hashmap *map, const void *key,
- const void *keydata);
+struct hashmap_entry *hashmap_remove(struct hashmap *map,
+ const struct hashmap_entry *key,
+ const void *keydata);
+
+/*
+ * Removes a hashmap entry contained within @keyvar,
+ * where @keyvar is a pointer to a struct containing a
+ * "struct hashmap_entry" @member.
+ *
+ * See `hashmap_get` for an explanation of @keydata
+ *
+ * Returns the removed pointer, which is of the same type as @keyvar,
+ * or NULL if not found.
+ */
+#define hashmap_remove_entry(map, keyvar, member, keydata) \
+ container_of_or_null_offset( \
+ hashmap_remove(map, &(keyvar)->member, keydata), \
+ OFFSETOF_VAR(keyvar, member))
/*
* Returns the `bucket` an entry is stored in.
@@ -370,10 +418,10 @@ struct hashmap_iter {
void hashmap_iter_init(struct hashmap *map, struct hashmap_iter *iter);
/* Returns the next hashmap_entry, or NULL if there are no more entries. */
-void *hashmap_iter_next(struct hashmap_iter *iter);
+struct hashmap_entry *hashmap_iter_next(struct hashmap_iter *iter);
/* Initializes the iterator and returns the first entry, if any. */
-static inline void *hashmap_iter_first(struct hashmap *map,
+static inline struct hashmap_entry *hashmap_iter_first(struct hashmap *map,
struct hashmap_iter *iter)
{
hashmap_iter_init(map, iter);
@@ -381,6 +429,64 @@ static inline void *hashmap_iter_first(struct hashmap *map,
}
/*
+ * returns the first entry in @map using @iter, where the entry is of
+ * @type (e.g. "struct foo") and @member is the name of the
+ * "struct hashmap_entry" in @type
+ */
+#define hashmap_iter_first_entry(map, iter, type, member) \
+ container_of_or_null(hashmap_iter_first(map, iter), type, member)
+
+/* internal macro for hashmap_for_each_entry */
+#define hashmap_iter_next_entry_offset(iter, offset) \
+ container_of_or_null_offset(hashmap_iter_next(iter), offset)
+
+/* internal macro for hashmap_for_each_entry */
+#define hashmap_iter_first_entry_offset(map, iter, offset) \
+ container_of_or_null_offset(hashmap_iter_first(map, iter), offset)
+
+/*
+ * iterate through @map using @iter, @var is a pointer to a type
+ * containing a @member which is a "struct hashmap_entry"
+ */
+#define hashmap_for_each_entry(map, iter, var, member) \
+ for (var = hashmap_iter_first_entry_offset(map, iter, \
+ OFFSETOF_VAR(var, member)); \
+ var; \
+ var = hashmap_iter_next_entry_offset(iter, \
+ OFFSETOF_VAR(var, member)))
+
+/*
+ * returns a pointer of type matching @keyvar, or NULL if nothing found.
+ * @keyvar is a pointer to a struct containing a
+ * "struct hashmap_entry" @member.
+ */
+#define hashmap_get_entry(map, keyvar, member, keydata) \
+ container_of_or_null_offset( \
+ hashmap_get(map, &(keyvar)->member, keydata), \
+ OFFSETOF_VAR(keyvar, member))
+
+#define hashmap_get_entry_from_hash(map, hash, keydata, type, member) \
+ container_of_or_null(hashmap_get_from_hash(map, hash, keydata), \
+ type, member)
+/*
+ * returns the next equal pointer to @var, or NULL if not found.
+ * @var is a pointer of any type containing "struct hashmap_entry"
+ * @member is the name of the "struct hashmap_entry" field
+ */
+#define hashmap_get_next_entry(map, var, member) \
+ container_of_or_null_offset(hashmap_get_next(map, &(var)->member), \
+ OFFSETOF_VAR(var, member))
+
+/*
+ * iterate @map starting from @var, where @var is a pointer of @type
+ * and @member is the name of the "struct hashmap_entry" field in @type
+ */
+#define hashmap_for_each_entry_from(map, var, member) \
+ for (; \
+ var; \
+ var = hashmap_get_next_entry(map, var, member))
+
+/*
* Disable item counting and automatic rehashing when adding/removing items.
*
* Normally, the hashmap keeps track of the number of items in the map
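To summarize the converted API in one place: callers now hand the hashmap functions the embedded hashmap_entry and use the *_entry macros (or container_of()) to get their own struct back, which is what allows the entry to live anywhere in the struct. A condensed sketch of the common calls after this change, modeled on the documentation block above; the struct and field names are made up, and git's hashmap.h, strhash() and the usual allocation wrappers are assumed:

    struct book {
            char *title;
            struct hashmap_entry ent;       /* no longer has to come first */
    };

    static int book_cmp(const void *cmp_data,
                        const struct hashmap_entry *eptr,
                        const struct hashmap_entry *entry_or_key,
                        const void *keydata)
    {
            const struct book *a = container_of(eptr, const struct book, ent);
            const struct book *b = container_of(entry_or_key, const struct book, ent);

            return strcmp(a->title, keydata ? keydata : b->title);
    }

    /* ... */
    struct hashmap map;
    struct hashmap_iter iter;
    struct book *b, key;

    hashmap_init(&map, book_cmp, NULL, 0);

    /* add: pass the embedded entry, not the struct itself */
    b = xmalloc(sizeof(*b));
    b->title = xstrdup("SICP");
    hashmap_entry_init(&b->ent, strhash(b->title));
    hashmap_add(&map, &b->ent);

    /* lookup: hashmap_get_entry() returns a struct book pointer again */
    hashmap_entry_init(&key.ent, strhash("SICP"));
    key.title = NULL;       /* compare against keydata instead */
    b = hashmap_get_entry(&map, &key, ent, "SICP");

    /* iterate without casts */
    hashmap_for_each_entry(&map, &iter, b, ent)
            puts(b->title);

    hashmap_free_entries(&map, struct book, ent);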
diff --git a/list-objects-filter-options.c b/list-objects-filter-options.c
index 4d88bfe..256bcfb 100644
--- a/list-objects-filter-options.c
+++ b/list-objects-filter-options.c
@@ -62,17 +62,7 @@ static int gently_parse_list_objects_filter(
return 0;
} else if (skip_prefix(arg, "sparse:oid=", &v0)) {
- struct object_context oc;
- struct object_id sparse_oid;
-
- /*
- * Try to parse <oid-expression> into an OID for the current
- * command, but DO NOT complain if we don't have the blob or
- * ref locally.
- */
- if (!get_oid_with_context(the_repository, v0, GET_OID_BLOB,
- &sparse_oid, &oc))
- filter_options->sparse_oid_value = oiddup(&sparse_oid);
+ filter_options->sparse_oid_name = xstrdup(v0);
filter_options->choice = LOFC_SPARSE_OID;
return 0;
@@ -320,7 +310,7 @@ void list_objects_filter_release(
if (!filter_options)
return;
string_list_clear(&filter_options->filter_spec, /*free_util=*/0);
- free(filter_options->sparse_oid_value);
+ free(filter_options->sparse_oid_name);
for (sub = 0; sub < filter_options->sub_nr; sub++)
list_objects_filter_release(&filter_options->sub[sub]);
free(filter_options->sub);
diff --git a/list-objects-filter-options.h b/list-objects-filter-options.h
index b63c5ee..2ffb392 100644
--- a/list-objects-filter-options.h
+++ b/list-objects-filter-options.h
@@ -45,7 +45,7 @@ struct list_objects_filter_options {
* some values will be defined for any given choice.
*/
- struct object_id *sparse_oid_value;
+ char *sparse_oid_name;
unsigned long blob_limit_value;
unsigned long tree_exclude_depth;
diff --git a/list-objects-filter.c b/list-objects-filter.c
index d624f1c..1e8d4e7 100644
--- a/list-objects-filter.c
+++ b/list-objects-filter.c
@@ -483,9 +483,17 @@ static void filter_sparse_oid__init(
struct filter *filter)
{
struct filter_sparse_data *d = xcalloc(1, sizeof(*d));
- if (add_patterns_from_blob_to_list(filter_options->sparse_oid_value,
- NULL, 0, &d->pl) < 0)
- die("could not load filter specification");
+ struct object_context oc;
+ struct object_id sparse_oid;
+
+ if (get_oid_with_context(the_repository,
+ filter_options->sparse_oid_name,
+ GET_OID_BLOB, &sparse_oid, &oc))
+ die(_("unable to access sparse blob in '%s'"),
+ filter_options->sparse_oid_name);
+ if (add_patterns_from_blob_to_list(&sparse_oid, "", 0, &d->pl) < 0)
+ die(_("unable to parse sparse filter data in %s"),
+ oid_to_hex(&sparse_oid));
ALLOC_GROW(d->array_frame, d->nr + 1, d->alloc);
d->array_frame[d->nr].default_match = 0; /* default to include */
diff --git a/list-objects.c b/list-objects.c
index 9307d91..e19589b 100644
--- a/list-objects.c
+++ b/list-objects.c
@@ -365,7 +365,9 @@ static void do_traverse(struct traversal_context *ctx)
* an uninteresting boundary commit may not have its tree
* parsed yet, but we are not going to show them anyway
*/
- if (get_commit_tree(commit)) {
+ if (!ctx->revs->tree_objects)
+ ; /* do not bother loading tree */
+ else if (get_commit_tree(commit)) {
struct tree *tree = get_commit_tree(commit);
tree->object.flags |= NOT_USER_GIVEN;
add_pending_tree(ctx->revs, tree);
diff --git a/log-tree.c b/log-tree.c
index 109c212..923a299 100644
--- a/log-tree.c
+++ b/log-tree.c
@@ -77,6 +77,7 @@ void add_name_decoration(enum decoration_type type, const char *name, struct obj
const struct name_decoration *get_name_decoration(const struct object *obj)
{
+ load_ref_decorations(NULL, DECORATE_SHORT_REFS);
return lookup_decoration(&name_decoration, obj);
}
diff --git a/merge-recursive.c b/merge-recursive.c
index 6b812d6..42be7c9 100644
--- a/merge-recursive.c
+++ b/merge-recursive.c
@@ -4,30 +4,40 @@
* The thieves were Alex Riesen and Johannes Schindelin, in June/July 2006
*/
#include "cache.h"
-#include "config.h"
+#include "merge-recursive.h"
+
#include "advice.h"
-#include "lockfile.h"
-#include "cache-tree.h"
-#include "object-store.h"
-#include "repository.h"
-#include "commit.h"
+#include "alloc.h"
+#include "attr.h"
#include "blob.h"
#include "builtin.h"
-#include "tree-walk.h"
+#include "cache-tree.h"
+#include "commit.h"
+#include "commit-reach.h"
+#include "config.h"
#include "diff.h"
#include "diffcore.h"
+#include "dir.h"
+#include "ll-merge.h"
+#include "lockfile.h"
+#include "object-store.h"
+#include "repository.h"
+#include "revision.h"
+#include "string-list.h"
+#include "submodule.h"
#include "tag.h"
-#include "alloc.h"
+#include "tree-walk.h"
#include "unpack-trees.h"
-#include "string-list.h"
#include "xdiff-interface.h"
-#include "ll-merge.h"
-#include "attr.h"
-#include "merge-recursive.h"
-#include "dir.h"
-#include "submodule.h"
-#include "revision.h"
-#include "commit-reach.h"
+
+struct merge_options_internal {
+ int call_depth;
+ int needed_rename_limit;
+ struct hashmap current_file_dir_set;
+ struct string_list df_conflict_file_set;
+ struct unpack_trees_options unpack_opts;
+ struct index_state orig_index;
+};
struct path_hashmap_entry {
struct hashmap_entry e;
@@ -35,14 +45,16 @@ struct path_hashmap_entry {
};
static int path_hashmap_cmp(const void *cmp_data,
- const void *entry,
- const void *entry_or_key,
+ const struct hashmap_entry *eptr,
+ const struct hashmap_entry *entry_or_key,
const void *keydata)
{
- const struct path_hashmap_entry *a = entry;
- const struct path_hashmap_entry *b = entry_or_key;
+ const struct path_hashmap_entry *a, *b;
const char *key = keydata;
+ a = container_of(eptr, const struct path_hashmap_entry, e);
+ b = container_of(entry_or_key, const struct path_hashmap_entry, e);
+
if (ignore_case)
return strcasecmp(a->path, key ? key : b->path);
else
@@ -54,6 +66,24 @@ static unsigned int path_hash(const char *path)
return ignore_case ? strihash(path) : strhash(path);
}
+/*
+ * For dir_rename_entry, directory names are stored as a full path from the
+ * toplevel of the repository and do not include a trailing '/'. Also:
+ *
+ * dir: original name of directory being renamed
+ * non_unique_new_dir: if true, could not determine new_dir
+ * new_dir: final name of directory being renamed
+ * possible_new_dirs: temporary used to help determine new_dir; see comments
+ * in get_directory_renames() for details
+ */
+struct dir_rename_entry {
+ struct hashmap_entry ent;
+ char *dir;
+ unsigned non_unique_new_dir:1;
+ struct strbuf new_dir;
+ struct string_list possible_new_dirs;
+};
+
static struct dir_rename_entry *dir_rename_find_entry(struct hashmap *hashmap,
char *dir)
{
@@ -61,18 +91,20 @@ static struct dir_rename_entry *dir_rename_find_entry(struct hashmap *hashmap,
if (dir == NULL)
return NULL;
- hashmap_entry_init(&key, strhash(dir));
+ hashmap_entry_init(&key.ent, strhash(dir));
key.dir = dir;
- return hashmap_get(hashmap, &key, NULL);
+ return hashmap_get_entry(hashmap, &key, ent, NULL);
}
static int dir_rename_cmp(const void *unused_cmp_data,
- const void *entry,
- const void *entry_or_key,
+ const struct hashmap_entry *eptr,
+ const struct hashmap_entry *entry_or_key,
const void *unused_keydata)
{
- const struct dir_rename_entry *e1 = entry;
- const struct dir_rename_entry *e2 = entry_or_key;
+ const struct dir_rename_entry *e1, *e2;
+
+ e1 = container_of(eptr, const struct dir_rename_entry, ent);
+ e2 = container_of(entry_or_key, const struct dir_rename_entry, ent);
return strcmp(e1->dir, e2->dir);
}
@@ -85,34 +117,46 @@ static void dir_rename_init(struct hashmap *map)
static void dir_rename_entry_init(struct dir_rename_entry *entry,
char *directory)
{
- hashmap_entry_init(entry, strhash(directory));
+ hashmap_entry_init(&entry->ent, strhash(directory));
entry->dir = directory;
entry->non_unique_new_dir = 0;
strbuf_init(&entry->new_dir, 0);
string_list_init(&entry->possible_new_dirs, 0);
}
+struct collision_entry {
+ struct hashmap_entry ent;
+ char *target_file;
+ struct string_list source_files;
+ unsigned reported_already:1;
+};
+
static struct collision_entry *collision_find_entry(struct hashmap *hashmap,
char *target_file)
{
struct collision_entry key;
- hashmap_entry_init(&key, strhash(target_file));
+ hashmap_entry_init(&key.ent, strhash(target_file));
key.target_file = target_file;
- return hashmap_get(hashmap, &key, NULL);
+ return hashmap_get_entry(hashmap, &key, ent, NULL);
}
-static int collision_cmp(void *unused_cmp_data,
- const struct collision_entry *e1,
- const struct collision_entry *e2,
+static int collision_cmp(const void *unused_cmp_data,
+ const struct hashmap_entry *eptr,
+ const struct hashmap_entry *entry_or_key,
const void *unused_keydata)
{
+ const struct collision_entry *e1, *e2;
+
+ e1 = container_of(eptr, const struct collision_entry, ent);
+ e2 = container_of(entry_or_key, const struct collision_entry, ent);
+
return strcmp(e1->target_file, e2->target_file);
}
static void collision_init(struct hashmap *map)
{
- hashmap_init(map, (hashmap_cmp_fn) collision_cmp, NULL, 0);
+ hashmap_init(map, collision_cmp, NULL, 0);
}
static void flush_output(struct merge_options *opt)
@@ -284,7 +328,8 @@ static inline void setup_rename_conflict_info(enum rename_type rename_type,
static int show(struct merge_options *opt, int v)
{
- return (!opt->call_depth && opt->verbosity >= v) || opt->verbosity >= 5;
+ return (!opt->priv->call_depth && opt->verbosity >= v) ||
+ opt->verbosity >= 5;
}
__attribute__((format (printf, 3, 4)))
@@ -295,7 +340,7 @@ static void output(struct merge_options *opt, int v, const char *fmt, ...)
if (!show(opt, v))
return;
- strbuf_addchars(&opt->obuf, ' ', opt->call_depth * 2);
+ strbuf_addchars(&opt->obuf, ' ', opt->priv->call_depth * 2);
va_start(ap, fmt);
strbuf_vaddf(&opt->obuf, fmt, ap);
@@ -310,7 +355,7 @@ static void output_commit_title(struct merge_options *opt, struct commit *commit
{
struct merge_remote_desc *desc;
- strbuf_addchars(&opt->obuf, ' ', opt->call_depth * 2);
+ strbuf_addchars(&opt->obuf, ' ', opt->priv->call_depth * 2);
desc = merge_remote_util(commit);
if (desc)
strbuf_addf(&opt->obuf, "virtual %s\n", desc->name);
@@ -358,6 +403,11 @@ static int add_cacheinfo(struct merge_options *opt,
return ret;
}
+static inline int merge_detect_rename(struct merge_options *opt)
+{
+ return (opt->detect_renames >= 0) ? opt->detect_renames : 1;
+}
+
static void init_tree_desc_from_tree(struct tree_desc *desc, struct tree *tree)
{
parse_tree(tree);
@@ -373,74 +423,43 @@ static int unpack_trees_start(struct merge_options *opt,
struct tree_desc t[3];
struct index_state tmp_index = { NULL };
- memset(&opt->unpack_opts, 0, sizeof(opt->unpack_opts));
- if (opt->call_depth)
- opt->unpack_opts.index_only = 1;
+ memset(&opt->priv->unpack_opts, 0, sizeof(opt->priv->unpack_opts));
+ if (opt->priv->call_depth)
+ opt->priv->unpack_opts.index_only = 1;
else
- opt->unpack_opts.update = 1;
- opt->unpack_opts.merge = 1;
- opt->unpack_opts.head_idx = 2;
- opt->unpack_opts.fn = threeway_merge;
- opt->unpack_opts.src_index = opt->repo->index;
- opt->unpack_opts.dst_index = &tmp_index;
- opt->unpack_opts.aggressive = !merge_detect_rename(opt);
- setup_unpack_trees_porcelain(&opt->unpack_opts, "merge");
+ opt->priv->unpack_opts.update = 1;
+ opt->priv->unpack_opts.merge = 1;
+ opt->priv->unpack_opts.head_idx = 2;
+ opt->priv->unpack_opts.fn = threeway_merge;
+ opt->priv->unpack_opts.src_index = opt->repo->index;
+ opt->priv->unpack_opts.dst_index = &tmp_index;
+ opt->priv->unpack_opts.aggressive = !merge_detect_rename(opt);
+ setup_unpack_trees_porcelain(&opt->priv->unpack_opts, "merge");
init_tree_desc_from_tree(t+0, common);
init_tree_desc_from_tree(t+1, head);
init_tree_desc_from_tree(t+2, merge);
- rc = unpack_trees(3, t, &opt->unpack_opts);
+ rc = unpack_trees(3, t, &opt->priv->unpack_opts);
cache_tree_free(&opt->repo->index->cache_tree);
/*
- * Update opt->repo->index to match the new results, AFTER saving a copy
- * in opt->orig_index. Update src_index to point to the saved copy.
- * (verify_uptodate() checks src_index, and the original index is
- * the one that had the necessary modification timestamps.)
+ * Update opt->repo->index to match the new results, AFTER saving a
+ * copy in opt->priv->orig_index. Update src_index to point to the
+ * saved copy. (verify_uptodate() checks src_index, and the original
+ * index is the one that had the necessary modification timestamps.)
*/
- opt->orig_index = *opt->repo->index;
+ opt->priv->orig_index = *opt->repo->index;
*opt->repo->index = tmp_index;
- opt->unpack_opts.src_index = &opt->orig_index;
+ opt->priv->unpack_opts.src_index = &opt->priv->orig_index;
return rc;
}
static void unpack_trees_finish(struct merge_options *opt)
{
- discard_index(&opt->orig_index);
- clear_unpack_trees_porcelain(&opt->unpack_opts);
-}
-
-struct tree *write_tree_from_memory(struct merge_options *opt)
-{
- struct tree *result = NULL;
- struct index_state *istate = opt->repo->index;
-
- if (unmerged_index(istate)) {
- int i;
- fprintf(stderr, "BUG: There are unmerged index entries:\n");
- for (i = 0; i < istate->cache_nr; i++) {
- const struct cache_entry *ce = istate->cache[i];
- if (ce_stage(ce))
- fprintf(stderr, "BUG: %d %.*s\n", ce_stage(ce),
- (int)ce_namelen(ce), ce->name);
- }
- BUG("unmerged index entries in merge-recursive.c");
- }
-
- if (!istate->cache_tree)
- istate->cache_tree = cache_tree();
-
- if (!cache_tree_fully_valid(istate->cache_tree) &&
- cache_tree_update(istate, 0) < 0) {
- err(opt, _("error building trees"));
- return NULL;
- }
-
- result = lookup_tree(opt->repo, &istate->cache_tree->oid);
-
- return result;
+ discard_index(&opt->priv->orig_index);
+ clear_unpack_trees_porcelain(&opt->priv->unpack_opts);
}
static int save_files_dirs(const struct object_id *oid,
@@ -454,8 +473,8 @@ static int save_files_dirs(const struct object_id *oid,
strbuf_addstr(base, path);
FLEX_ALLOC_MEM(entry, path, base->buf, base->len);
- hashmap_entry_init(entry, path_hash(entry->path));
- hashmap_add(&opt->current_file_dir_set, entry);
+ hashmap_entry_init(&entry->e, path_hash(entry->path));
+ hashmap_add(&opt->priv->current_file_dir_set, &entry->e);
strbuf_setlen(base, baselen);
return (S_ISDIR(mode) ? READ_TREE_RECURSIVE : 0);
@@ -586,7 +605,7 @@ static void record_df_conflict_files(struct merge_options *opt,
* If we're merging merge-bases, we don't want to bother with
* any working directory changes.
*/
- if (opt->call_depth)
+ if (opt->priv->call_depth)
return;
/* Ensure D/F conflicts are adjacent in the entries list. */
@@ -598,7 +617,7 @@ static void record_df_conflict_files(struct merge_options *opt,
df_sorted_entries.cmp = string_list_df_name_compare;
string_list_sort(&df_sorted_entries);
- string_list_clear(&opt->df_conflict_file_set, 1);
+ string_list_clear(&opt->priv->df_conflict_file_set, 1);
for (i = 0; i < df_sorted_entries.nr; i++) {
const char *path = df_sorted_entries.items[i].string;
int len = strlen(path);
@@ -614,7 +633,7 @@ static void record_df_conflict_files(struct merge_options *opt,
len > last_len &&
memcmp(path, last_file, last_len) == 0 &&
path[last_len] == '/') {
- string_list_insert(&opt->df_conflict_file_set, last_file);
+ string_list_insert(&opt->priv->df_conflict_file_set, last_file);
}
/*
@@ -681,8 +700,8 @@ static void update_entry(struct stage_data *entry,
static int remove_file(struct merge_options *opt, int clean,
const char *path, int no_wd)
{
- int update_cache = opt->call_depth || clean;
- int update_working_directory = !opt->call_depth && !no_wd;
+ int update_cache = opt->priv->call_depth || clean;
+ int update_working_directory = !opt->priv->call_depth && !no_wd;
if (update_cache) {
if (remove_file_from_index(opt->repo->index, path))
@@ -712,7 +731,9 @@ static void add_flattened_path(struct strbuf *out, const char *s)
out->buf[i] = '_';
}
-static char *unique_path(struct merge_options *opt, const char *path, const char *branch)
+static char *unique_path(struct merge_options *opt,
+ const char *path,
+ const char *branch)
{
struct path_hashmap_entry *entry;
struct strbuf newpath = STRBUF_INIT;
@@ -723,16 +744,16 @@ static char *unique_path(struct merge_options *opt, const char *path, const char
add_flattened_path(&newpath, branch);
base_len = newpath.len;
- while (hashmap_get_from_hash(&opt->current_file_dir_set,
+ while (hashmap_get_from_hash(&opt->priv->current_file_dir_set,
path_hash(newpath.buf), newpath.buf) ||
- (!opt->call_depth && file_exists(newpath.buf))) {
+ (!opt->priv->call_depth && file_exists(newpath.buf))) {
strbuf_setlen(&newpath, base_len);
strbuf_addf(&newpath, "_%d", suffix++);
}
FLEX_ALLOC_MEM(entry, path, newpath.buf, newpath.len);
- hashmap_entry_init(entry, path_hash(entry->path));
- hashmap_add(&opt->current_file_dir_set, entry);
+ hashmap_entry_init(&entry->e, path_hash(entry->path));
+ hashmap_add(&opt->priv->current_file_dir_set, &entry->e);
return strbuf_detach(&newpath, NULL);
}
@@ -764,7 +785,8 @@ static int dir_in_way(struct index_state *istate, const char *path,
strbuf_release(&dirpath);
return check_working_copy && !lstat(path, &st) && S_ISDIR(st.st_mode) &&
- !(empty_ok && is_empty_dir(path));
+ !(empty_ok && is_empty_dir(path)) &&
+ !has_symlink_leading_path(path, strlen(path));
}
/*
@@ -774,7 +796,7 @@ static int dir_in_way(struct index_state *istate, const char *path,
static int was_tracked_and_matches(struct merge_options *opt, const char *path,
const struct diff_filespec *blob)
{
- int pos = index_name_pos(&opt->orig_index, path, strlen(path));
+ int pos = index_name_pos(&opt->priv->orig_index, path, strlen(path));
struct cache_entry *ce;
if (0 > pos)
@@ -782,7 +804,7 @@ static int was_tracked_and_matches(struct merge_options *opt, const char *path,
return 0;
/* See if the file we were tracking before matches */
- ce = opt->orig_index.cache[pos];
+ ce = opt->priv->orig_index.cache[pos];
return (oid_eq(&ce->oid, &blob->oid) && ce->ce_mode == blob->mode);
}
@@ -791,7 +813,7 @@ static int was_tracked_and_matches(struct merge_options *opt, const char *path,
*/
static int was_tracked(struct merge_options *opt, const char *path)
{
- int pos = index_name_pos(&opt->orig_index, path, strlen(path));
+ int pos = index_name_pos(&opt->priv->orig_index, path, strlen(path));
if (0 <= pos)
/* we were tracking this path before the merge */
@@ -848,12 +870,12 @@ static int was_dirty(struct merge_options *opt, const char *path)
struct cache_entry *ce;
int dirty = 1;
- if (opt->call_depth || !was_tracked(opt, path))
+ if (opt->priv->call_depth || !was_tracked(opt, path))
return !dirty;
- ce = index_file_exists(opt->unpack_opts.src_index,
+ ce = index_file_exists(opt->priv->unpack_opts.src_index,
path, strlen(path), ignore_case);
- dirty = verify_uptodate(ce, &opt->unpack_opts) != 0;
+ dirty = verify_uptodate(ce, &opt->priv->unpack_opts) != 0;
return dirty;
}
@@ -863,8 +885,8 @@ static int make_room_for_path(struct merge_options *opt, const char *path)
const char *msg = _("failed to create path '%s'%s");
/* Unlink any D/F conflict files that are in the way */
- for (i = 0; i < opt->df_conflict_file_set.nr; i++) {
- const char *df_path = opt->df_conflict_file_set.items[i].string;
+ for (i = 0; i < opt->priv->df_conflict_file_set.nr; i++) {
+ const char *df_path = opt->priv->df_conflict_file_set.items[i].string;
size_t pathlen = strlen(path);
size_t df_pathlen = strlen(df_path);
if (df_pathlen < pathlen &&
@@ -874,7 +896,7 @@ static int make_room_for_path(struct merge_options *opt, const char *path)
_("Removing %s to make room for subdirectory\n"),
df_path);
unlink(df_path);
- unsorted_string_list_delete_item(&opt->df_conflict_file_set,
+ unsorted_string_list_delete_item(&opt->priv->df_conflict_file_set,
i, 0);
break;
}
@@ -915,7 +937,7 @@ static int update_file_flags(struct merge_options *opt,
{
int ret = 0;
- if (opt->call_depth)
+ if (opt->priv->call_depth)
update_wd = 0;
if (update_wd) {
@@ -934,9 +956,11 @@ static int update_file_flags(struct merge_options *opt,
}
buf = read_object_file(&contents->oid, &type, &size);
- if (!buf)
- return err(opt, _("cannot read object %s '%s'"),
- oid_to_hex(&contents->oid), path);
+ if (!buf) {
+ ret = err(opt, _("cannot read object %s '%s'"),
+ oid_to_hex(&contents->oid), path);
+ goto free_buf;
+ }
if (type != OBJ_BLOB) {
ret = err(opt, _("blob expected for %s '%s'"),
oid_to_hex(&contents->oid), path);
@@ -944,7 +968,8 @@ static int update_file_flags(struct merge_options *opt,
}
if (S_ISREG(contents->mode)) {
struct strbuf strbuf = STRBUF_INIT;
- if (convert_to_working_tree(opt->repo->index, path, buf, size, &strbuf)) {
+ if (convert_to_working_tree(opt->repo->index,
+ path, buf, size, &strbuf)) {
free(buf);
size = strbuf.len;
buf = strbuf_detach(&strbuf, NULL);
@@ -997,7 +1022,7 @@ static int update_file(struct merge_options *opt,
const char *path)
{
return update_file_flags(opt, contents, path,
- opt->call_depth || clean, !opt->call_depth);
+ opt->priv->call_depth || clean, !opt->priv->call_depth);
}
/* Low level file merging, update and removal */
@@ -1019,22 +1044,22 @@ static int merge_3way(struct merge_options *opt,
{
mmfile_t orig, src1, src2;
struct ll_merge_options ll_opts = {0};
- char *base_name, *name1, *name2;
+ char *base, *name1, *name2;
int merge_status;
ll_opts.renormalize = opt->renormalize;
ll_opts.extra_marker_size = extra_marker_size;
ll_opts.xdl_opts = opt->xdl_opts;
- if (opt->call_depth) {
+ if (opt->priv->call_depth) {
ll_opts.virtual_ancestor = 1;
ll_opts.variant = 0;
} else {
switch (opt->recursive_variant) {
- case MERGE_RECURSIVE_OURS:
+ case MERGE_VARIANT_OURS:
ll_opts.variant = XDL_MERGE_FAVOR_OURS;
break;
- case MERGE_RECURSIVE_THEIRS:
+ case MERGE_VARIANT_THEIRS:
ll_opts.variant = XDL_MERGE_FAVOR_THEIRS;
break;
default:
@@ -1043,16 +1068,13 @@ static int merge_3way(struct merge_options *opt,
}
}
- assert(a->path && b->path);
- if (strcmp(a->path, b->path) ||
- (opt->ancestor != NULL && strcmp(a->path, o->path) != 0)) {
- base_name = opt->ancestor == NULL ? NULL :
- mkpathdup("%s:%s", opt->ancestor, o->path);
+ assert(a->path && b->path && o->path && opt->ancestor);
+ if (strcmp(a->path, b->path) || strcmp(a->path, o->path) != 0) {
+ base = mkpathdup("%s:%s", opt->ancestor, o->path);
name1 = mkpathdup("%s:%s", branch1, a->path);
name2 = mkpathdup("%s:%s", branch2, b->path);
} else {
- base_name = opt->ancestor == NULL ? NULL :
- mkpathdup("%s", opt->ancestor);
+ base = mkpathdup("%s", opt->ancestor);
name1 = mkpathdup("%s", branch1);
name2 = mkpathdup("%s", branch2);
}
@@ -1061,11 +1083,11 @@ static int merge_3way(struct merge_options *opt,
read_mmblob(&src1, &a->oid);
read_mmblob(&src2, &b->oid);
- merge_status = ll_merge(result_buf, a->path, &orig, base_name,
+ merge_status = ll_merge(result_buf, a->path, &orig, base,
&src1, name1, &src2, name2,
opt->repo->index, &ll_opts);
- free(base_name);
+ free(base);
free(name1);
free(name2);
free(orig.ptr);
@@ -1160,7 +1182,7 @@ static int merge_submodule(struct merge_options *opt,
struct object_array merges;
int i;
- int search = !opt->call_depth;
+ int search = !opt->priv->call_depth;
/* store a in result in case we fail */
oidcpy(result, a);
@@ -1344,15 +1366,15 @@ static int merge_mode_and_contents(struct merge_options *opt,
&b->oid);
} else if (S_ISLNK(a->mode)) {
switch (opt->recursive_variant) {
- case MERGE_RECURSIVE_NORMAL:
+ case MERGE_VARIANT_NORMAL:
oidcpy(&result->blob.oid, &a->oid);
if (!oid_eq(&a->oid, &b->oid))
result->clean = 0;
break;
- case MERGE_RECURSIVE_OURS:
+ case MERGE_VARIANT_OURS:
oidcpy(&result->blob.oid, &a->oid);
break;
- case MERGE_RECURSIVE_THEIRS:
+ case MERGE_VARIANT_THEIRS:
oidcpy(&result->blob.oid, &b->oid);
break;
}
@@ -1378,10 +1400,11 @@ static int handle_rename_via_dir(struct merge_options *opt,
const struct rename *ren = ci->ren1;
const struct diff_filespec *dest = ren->pair->two;
char *file_path = dest->path;
- int mark_conflicted = (opt->detect_directory_renames == 1);
+ int mark_conflicted = (opt->detect_directory_renames ==
+ MERGE_DIRECTORY_RENAMES_CONFLICT);
assert(ren->dir_rename_original_dest);
- if (!opt->call_depth && would_lose_untracked(opt, dest->path)) {
+ if (!opt->priv->call_depth && would_lose_untracked(opt, dest->path)) {
mark_conflicted = 1;
file_path = unique_path(opt, dest->path, ren->branch);
output(opt, 1, _("Error: Refusing to lose untracked file at %s; "
@@ -1424,12 +1447,12 @@ static int handle_change_delete(struct merge_options *opt,
const char *update_path = path;
int ret = 0;
- if (dir_in_way(opt->repo->index, path, !opt->call_depth, 0) ||
- (!opt->call_depth && would_lose_untracked(opt, path))) {
+ if (dir_in_way(opt->repo->index, path, !opt->priv->call_depth, 0) ||
+ (!opt->priv->call_depth && would_lose_untracked(opt, path))) {
update_path = alt_path = unique_path(opt, path, change_branch);
}
- if (opt->call_depth) {
+ if (opt->priv->call_depth) {
/*
* We cannot arbitrarily accept either a_sha or b_sha as
* correct; since there is no true "middle point" between
@@ -1504,14 +1527,14 @@ static int handle_rename_delete(struct merge_options *opt,
opt->branch2 : opt->branch1);
if (handle_change_delete(opt,
- opt->call_depth ? orig->path : dest->path,
- opt->call_depth ? NULL : orig->path,
+ opt->priv->call_depth ? orig->path : dest->path,
+ opt->priv->call_depth ? NULL : orig->path,
orig, dest,
rename_branch, delete_branch,
_("rename"), _("renamed")))
return -1;
- if (opt->call_depth)
+ if (opt->priv->call_depth)
return remove_file_from_index(opt->repo->index, dest->path);
else
return update_stages(opt, dest->path, NULL,
@@ -1548,7 +1571,7 @@ static int handle_file_collision(struct merge_options *opt,
/*
* In the recursive case, we just opt to undo renames
*/
- if (opt->call_depth && (prev_path1 || prev_path2)) {
+ if (opt->priv->call_depth && (prev_path1 || prev_path2)) {
/* Put first file (a->oid, a->mode) in its original spot */
if (prev_path1) {
if (update_file(opt, 1, a, prev_path1))
@@ -1577,10 +1600,10 @@ static int handle_file_collision(struct merge_options *opt,
/* Remove rename sources if rename/add or rename/rename(2to1) */
if (prev_path1)
remove_file(opt, 1, prev_path1,
- opt->call_depth || would_lose_untracked(opt, prev_path1));
+ opt->priv->call_depth || would_lose_untracked(opt, prev_path1));
if (prev_path2)
remove_file(opt, 1, prev_path2,
- opt->call_depth || would_lose_untracked(opt, prev_path2));
+ opt->priv->call_depth || would_lose_untracked(opt, prev_path2));
/*
* Remove the collision path, if it wouldn't cause dirty contents
@@ -1622,12 +1645,12 @@ static int handle_file_collision(struct merge_options *opt,
null.mode = 0;
if (merge_mode_and_contents(opt, &null, a, b, collide_path,
- branch1, branch2, opt->call_depth * 2, &mfi))
+ branch1, branch2, opt->priv->call_depth * 2, &mfi))
return -1;
mfi.clean &= !alt_path;
if (update_file(opt, mfi.clean, &mfi.blob, update_path))
return -1;
- if (!mfi.clean && !opt->call_depth &&
+ if (!mfi.clean && !opt->priv->call_depth &&
update_stages(opt, collide_path, NULL, a, b))
return -1;
free(alt_path);
@@ -1667,7 +1690,7 @@ static int handle_rename_add(struct merge_options *opt,
&ci->ren1->src_entry->stages[other_stage],
prev_path_desc,
opt->branch1, opt->branch2,
- 1 + opt->call_depth * 2, &mfi))
+ 1 + opt->priv->call_depth * 2, &mfi))
return -1;
free(prev_path_desc);
@@ -1685,7 +1708,7 @@ static char *find_path_for_conflict(struct merge_options *opt,
const char *branch2)
{
char *new_path = NULL;
- if (dir_in_way(opt->repo->index, path, !opt->call_depth, 0)) {
+ if (dir_in_way(opt->repo->index, path, !opt->priv->call_depth, 0)) {
new_path = unique_path(opt, path, branch1);
output(opt, 1, _("%s is a directory in %s adding "
"as %s instead"),
@@ -1716,17 +1739,17 @@ static int handle_rename_rename_1to2(struct merge_options *opt,
"rename \"%s\"->\"%s\" in \"%s\"%s"),
o->path, a->path, ci->ren1->branch,
o->path, b->path, ci->ren2->branch,
- opt->call_depth ? _(" (left unresolved)") : "");
+ opt->priv->call_depth ? _(" (left unresolved)") : "");
path_desc = xstrfmt("%s and %s, both renamed from %s",
a->path, b->path, o->path);
if (merge_mode_and_contents(opt, o, a, b, path_desc,
ci->ren1->branch, ci->ren2->branch,
- opt->call_depth * 2, &mfi))
+ opt->priv->call_depth * 2, &mfi))
return -1;
free(path_desc);
- if (opt->call_depth) {
+ if (opt->priv->call_depth) {
/*
* FIXME: For rename/add-source conflicts (if we could detect
* such), this is wrong. We should instead find a unique
@@ -1841,12 +1864,12 @@ static int handle_rename_rename_2to1(struct merge_options *opt,
&ci->ren1->src_entry->stages[ostage1],
path_side_1_desc,
opt->branch1, opt->branch2,
- 1 + opt->call_depth * 2, &mfi_c1) ||
+ 1 + opt->priv->call_depth * 2, &mfi_c1) ||
merge_mode_and_contents(opt, b,
&ci->ren2->src_entry->stages[ostage2],
c2, path_side_2_desc,
opt->branch1, opt->branch2,
- 1 + opt->call_depth * 2, &mfi_c2))
+ 1 + opt->priv->call_depth * 2, &mfi_c2))
return -1;
free(path_side_1_desc);
free(path_side_2_desc);
@@ -1880,17 +1903,15 @@ static struct diff_queue_struct *get_diffpairs(struct merge_options *opt,
*/
if (opts.detect_rename > DIFF_DETECT_RENAME)
opts.detect_rename = DIFF_DETECT_RENAME;
- opts.rename_limit = opt->merge_rename_limit >= 0 ? opt->merge_rename_limit :
- opt->diff_rename_limit >= 0 ? opt->diff_rename_limit :
- 1000;
+ opts.rename_limit = (opt->rename_limit >= 0) ? opt->rename_limit : 1000;
opts.rename_score = opt->rename_score;
opts.show_rename_progress = opt->show_rename_progress;
opts.output_format = DIFF_FORMAT_NO_OUTPUT;
diff_setup_done(&opts);
diff_tree_oid(&o_tree->object.oid, &tree->object.oid, "", &opts);
diffcore_std(&opts);
- if (opts.needed_rename_limit > opt->needed_rename_limit)
- opt->needed_rename_limit = opts.needed_rename_limit;
+ if (opts.needed_rename_limit > opt->priv->needed_rename_limit)
+ opt->priv->needed_rename_limit = opts.needed_rename_limit;
ret = xmalloc(sizeof(*ret));
*ret = diff_queued_diff;
@@ -2001,7 +2022,7 @@ static void remove_hashmap_entries(struct hashmap *dir_renames,
for (i = 0; i < items_to_remove->nr; i++) {
entry = items_to_remove->items[i].util;
- hashmap_remove(dir_renames, entry, NULL);
+ hashmap_remove(dir_renames, &entry->ent, NULL);
}
string_list_clear(items_to_remove, 0);
}
@@ -2124,8 +2145,8 @@ static void handle_directory_level_conflicts(struct merge_options *opt,
struct string_list remove_from_head = STRING_LIST_INIT_NODUP;
struct string_list remove_from_merge = STRING_LIST_INIT_NODUP;
- hashmap_iter_init(dir_re_head, &iter);
- while ((head_ent = hashmap_iter_next(&iter))) {
+ hashmap_for_each_entry(dir_re_head, &iter, head_ent,
+ ent /* member name */) {
merge_ent = dir_rename_find_entry(dir_re_merge, head_ent->dir);
if (merge_ent &&
!head_ent->non_unique_new_dir &&
@@ -2149,8 +2170,8 @@ static void handle_directory_level_conflicts(struct merge_options *opt,
remove_hashmap_entries(dir_re_head, &remove_from_head);
remove_hashmap_entries(dir_re_merge, &remove_from_merge);
- hashmap_iter_init(dir_re_merge, &iter);
- while ((merge_ent = hashmap_iter_next(&iter))) {
+ hashmap_for_each_entry(dir_re_merge, &iter, merge_ent,
+ ent /* member name */) {
head_ent = dir_rename_find_entry(dir_re_head, merge_ent->dir);
if (tree_has_path(opt->repo, merge, merge_ent->dir)) {
/* 2. This wasn't a directory rename after all */
@@ -2229,7 +2250,7 @@ static struct hashmap *get_directory_renames(struct diff_queue_struct *pairs)
if (!entry) {
entry = xmalloc(sizeof(*entry));
dir_rename_entry_init(entry, old_dir);
- hashmap_put(dir_renames, entry);
+ hashmap_put(dir_renames, &entry->ent);
} else {
free(old_dir);
}
@@ -2254,8 +2275,8 @@ static struct hashmap *get_directory_renames(struct diff_queue_struct *pairs)
* we set non_unique_new_dir. Once we've determined the winner (or
* that there is no winner), we no longer need possible_new_dirs.
*/
- hashmap_iter_init(dir_renames, &iter);
- while ((entry = hashmap_iter_next(&iter))) {
+ hashmap_for_each_entry(dir_renames, &iter, entry,
+ ent /* member name */) {
int max = 0;
int bad_max = 0;
char *best = NULL;
@@ -2358,8 +2379,9 @@ static void compute_collisions(struct hashmap *collisions,
if (!collision_ent) {
collision_ent = xcalloc(1,
sizeof(struct collision_entry));
- hashmap_entry_init(collision_ent, strhash(new_path));
- hashmap_put(collisions, collision_ent);
+ hashmap_entry_init(&collision_ent->ent,
+ strhash(new_path));
+ hashmap_put(collisions, &collision_ent->ent);
collision_ent->target_file = new_path;
} else {
free(new_path);
@@ -2612,12 +2634,12 @@ static struct string_list *get_renames(struct merge_options *opt,
entries);
}
- hashmap_iter_init(&collisions, &iter);
- while ((e = hashmap_iter_next(&iter))) {
+ hashmap_for_each_entry(&collisions, &iter, e,
+ ent /* member name */) {
free(e->target_file);
string_list_clear(&e->source_files, 0);
}
- hashmap_free(&collisions, 1);
+ hashmap_free_entries(&collisions, struct collision_entry, ent);
return renames;
}
@@ -2830,13 +2852,13 @@ static void initial_cleanup_rename(struct diff_queue_struct *pairs,
struct hashmap_iter iter;
struct dir_rename_entry *e;
- hashmap_iter_init(dir_renames, &iter);
- while ((e = hashmap_iter_next(&iter))) {
+ hashmap_for_each_entry(dir_renames, &iter, e,
+ ent /* member name */) {
free(e->dir);
strbuf_release(&e->new_dir);
/* possible_new_dirs already cleared in get_directory_renames */
}
- hashmap_free(dir_renames, 1);
+ hashmap_free_entries(dir_renames, struct dir_rename_entry, ent);
free(dir_renames);
free(pairs->queue);
@@ -2863,8 +2885,9 @@ static int detect_and_process_renames(struct merge_options *opt,
head_pairs = get_diffpairs(opt, common, head);
merge_pairs = get_diffpairs(opt, common, merge);
- if ((opt->detect_directory_renames == 2) ||
- (opt->detect_directory_renames == 1 && !opt->call_depth)) {
+ if ((opt->detect_directory_renames == MERGE_DIRECTORY_RENAMES_TRUE) ||
+ (opt->detect_directory_renames == MERGE_DIRECTORY_RENAMES_CONFLICT &&
+ !opt->priv->call_depth)) {
dir_re_head = get_directory_renames(head_pairs);
dir_re_merge = get_directory_renames(merge_pairs);
@@ -3021,13 +3044,13 @@ static int handle_content_merge(struct merge_file_info *mfi,
reason = _("add/add");
assert(o->path && a->path && b->path);
- if (ci && dir_in_way(opt->repo->index, path, !opt->call_depth,
+ if (ci && dir_in_way(opt->repo->index, path, !opt->priv->call_depth,
S_ISGITLINK(ci->ren1->pair->two->mode)))
df_conflict_remains = 1;
if (merge_mode_and_contents(opt, o, a, b, path,
opt->branch1, opt->branch2,
- opt->call_depth * 2, mfi))
+ opt->priv->call_depth * 2, mfi))
return -1;
/*
@@ -3043,7 +3066,7 @@ static int handle_content_merge(struct merge_file_info *mfi,
output(opt, 3, _("Skipped %s (merged same as existing)"), path);
if (add_cacheinfo(opt, &mfi->blob, path,
- 0, (!opt->call_depth && !is_dirty), 0))
+ 0, (!opt->priv->call_depth && !is_dirty), 0))
return -1;
/*
* However, add_cacheinfo() will delete the old cache entry
@@ -3051,8 +3074,8 @@ static int handle_content_merge(struct merge_file_info *mfi,
* flag to avoid making the file appear as if it were
* deleted by the user.
*/
- pos = index_name_pos(&opt->orig_index, path, strlen(path));
- ce = opt->orig_index.cache[pos];
+ pos = index_name_pos(&opt->priv->orig_index, path, strlen(path));
+ ce = opt->priv->orig_index.cache[pos];
if (ce_skip_worktree(ce)) {
pos = index_name_pos(opt->repo->index, path, strlen(path));
ce = opt->repo->index->cache[pos];
@@ -3073,7 +3096,7 @@ static int handle_content_merge(struct merge_file_info *mfi,
if (df_conflict_remains || is_dirty) {
char *new_path;
- if (opt->call_depth) {
+ if (opt->priv->call_depth) {
remove_file_from_index(opt->repo->index, path);
} else {
if (!mfi->clean) {
@@ -3122,7 +3145,8 @@ static int handle_rename_normal(struct merge_options *opt,
clean = handle_content_merge(&mfi, opt, path, was_dirty(opt, path),
o, a, b, ci);
- if (clean && opt->detect_directory_renames == 1 &&
+ if (clean &&
+ opt->detect_directory_renames == MERGE_DIRECTORY_RENAMES_CONFLICT &&
ren->dir_rename_original_dest) {
if (update_stages(opt, path,
NULL,
@@ -3167,12 +3191,12 @@ static int warn_about_dir_renamed_entries(struct merge_options *opt,
return clean;
/* Sanity checks */
- assert(opt->detect_directory_renames > 0);
+ assert(opt->detect_directory_renames > MERGE_DIRECTORY_RENAMES_NONE);
assert(ren->dir_rename_original_type == 'A' ||
ren->dir_rename_original_type == 'R');
/* Check whether to treat directory renames as a conflict */
- clean = (opt->detect_directory_renames == 2);
+ clean = (opt->detect_directory_renames == MERGE_DIRECTORY_RENAMES_TRUE);
is_add = (ren->dir_rename_original_type == 'A');
if (ren->dir_rename_original_type == 'A' && clean) {
@@ -3331,7 +3355,7 @@ static int process_entry(struct merge_options *opt,
conf = _("directory/file");
}
if (dir_in_way(opt->repo->index, path,
- !opt->call_depth && !S_ISGITLINK(a->mode),
+ !opt->priv->call_depth && !S_ISGITLINK(a->mode),
0)) {
char *new_path = unique_path(opt, path, add_branch);
clean_merge = 0;
@@ -3340,7 +3364,7 @@ static int process_entry(struct merge_options *opt,
conf, path, other_branch, path, new_path);
if (update_file(opt, 0, contents, new_path))
clean_merge = -1;
- else if (opt->call_depth)
+ else if (opt->priv->call_depth)
remove_file_from_index(opt->repo->index, path);
free(new_path);
} else {
@@ -3380,37 +3404,32 @@ static int process_entry(struct merge_options *opt,
return clean_merge;
}
-int merge_trees(struct merge_options *opt,
- struct tree *head,
- struct tree *merge,
- struct tree *common,
- struct tree **result)
+static int merge_trees_internal(struct merge_options *opt,
+ struct tree *head,
+ struct tree *merge,
+ struct tree *merge_base,
+ struct tree **result)
{
struct index_state *istate = opt->repo->index;
int code, clean;
- struct strbuf sb = STRBUF_INIT;
-
- if (!opt->call_depth && repo_index_has_changes(opt->repo, head, &sb)) {
- err(opt, _("Your local changes to the following files would be overwritten by merge:\n %s"),
- sb.buf);
- return -1;
- }
if (opt->subtree_shift) {
- merge = shift_tree_object(opt->repo, head, merge, opt->subtree_shift);
- common = shift_tree_object(opt->repo, head, common, opt->subtree_shift);
+ merge = shift_tree_object(opt->repo, head, merge,
+ opt->subtree_shift);
+ merge_base = shift_tree_object(opt->repo, head, merge_base,
+ opt->subtree_shift);
}
- if (oid_eq(&common->object.oid, &merge->object.oid)) {
+ if (oid_eq(&merge_base->object.oid, &merge->object.oid)) {
output(opt, 0, _("Already up to date!"));
*result = head;
return 1;
}
- code = unpack_trees_start(opt, common, head, merge);
+ code = unpack_trees_start(opt, merge_base, head, merge);
if (code != 0) {
- if (show(opt, 4) || opt->call_depth)
+ if (show(opt, 4) || opt->priv->call_depth)
err(opt, _("merging of trees %s and %s failed"),
oid_to_hex(&head->object.oid),
oid_to_hex(&merge->object.oid));
@@ -3429,12 +3448,13 @@ int merge_trees(struct merge_options *opt,
 * opposed to declaring a local hashmap is for convenience
 * so that we don't have to pass it around.
*/
- hashmap_init(&opt->current_file_dir_set, path_hashmap_cmp, NULL, 512);
+ hashmap_init(&opt->priv->current_file_dir_set, path_hashmap_cmp,
+ NULL, 512);
get_files_dirs(opt, head);
get_files_dirs(opt, merge);
entries = get_unmerged(opt->repo->index);
- clean = detect_and_process_renames(opt, common, head, merge,
+ clean = detect_and_process_renames(opt, merge_base, head, merge,
entries, &re_info);
record_df_conflict_files(opt, entries);
if (clean < 0)
@@ -3465,7 +3485,8 @@ int merge_trees(struct merge_options *opt,
string_list_clear(entries, 1);
free(entries);
- hashmap_free(&opt->current_file_dir_set, 1);
+ hashmap_free_entries(&opt->priv->current_file_dir_set,
+ struct path_hashmap_entry, e);
if (clean < 0) {
unpack_trees_finish(opt);
@@ -3477,7 +3498,8 @@ int merge_trees(struct merge_options *opt,
unpack_trees_finish(opt);
- if (opt->call_depth && !(*result = write_tree_from_memory(opt)))
+ if (opt->priv->call_depth &&
+ !(*result = write_in_core_index_as_tree(opt->repo)))
return -1;
return clean;
@@ -3498,16 +3520,18 @@ static struct commit_list *reverse_commit_list(struct commit_list *list)
* Merge the commits h1 and h2, return the resulting virtual
* commit object and a flag indicating the cleanness of the merge.
*/
-int merge_recursive(struct merge_options *opt,
- struct commit *h1,
- struct commit *h2,
- struct commit_list *ca,
- struct commit **result)
+static int merge_recursive_internal(struct merge_options *opt,
+ struct commit *h1,
+ struct commit *h2,
+ struct commit_list *merge_bases,
+ struct commit **result)
{
struct commit_list *iter;
- struct commit *merged_common_ancestors;
- struct tree *mrtree;
+ struct commit *merged_merge_bases;
+ struct tree *result_tree;
int clean;
+ const char *ancestor_name;
+ struct strbuf merge_base_abbrev = STRBUF_INIT;
if (show(opt, 4)) {
output(opt, 4, _("Merging:"));
@@ -3515,32 +3539,43 @@ int merge_recursive(struct merge_options *opt,
output_commit_title(opt, h2);
}
- if (!ca) {
- ca = get_merge_bases(h1, h2);
- ca = reverse_commit_list(ca);
+ if (!merge_bases) {
+ merge_bases = get_merge_bases(h1, h2);
+ merge_bases = reverse_commit_list(merge_bases);
}
if (show(opt, 5)) {
- unsigned cnt = commit_list_count(ca);
+ unsigned cnt = commit_list_count(merge_bases);
output(opt, 5, Q_("found %u common ancestor:",
"found %u common ancestors:", cnt), cnt);
- for (iter = ca; iter; iter = iter->next)
+ for (iter = merge_bases; iter; iter = iter->next)
output_commit_title(opt, iter->item);
}
- merged_common_ancestors = pop_commit(&ca);
- if (merged_common_ancestors == NULL) {
+ merged_merge_bases = pop_commit(&merge_bases);
+ if (merged_merge_bases == NULL) {
/* if there is no common ancestor, use an empty tree */
struct tree *tree;
tree = lookup_tree(opt->repo, opt->repo->hash_algo->empty_tree);
- merged_common_ancestors = make_virtual_commit(opt->repo, tree, "ancestor");
+ merged_merge_bases = make_virtual_commit(opt->repo, tree,
+ "ancestor");
+ ancestor_name = "empty tree";
+ } else if (opt->ancestor && !opt->priv->call_depth) {
+ ancestor_name = opt->ancestor;
+ } else if (merge_bases) {
+ ancestor_name = "merged common ancestors";
+ } else {
+ strbuf_add_unique_abbrev(&merge_base_abbrev,
+ &merged_merge_bases->object.oid,
+ DEFAULT_ABBREV);
+ ancestor_name = merge_base_abbrev.buf;
}
- for (iter = ca; iter; iter = iter->next) {
+ for (iter = merge_bases; iter; iter = iter->next) {
const char *saved_b1, *saved_b2;
- opt->call_depth++;
+ opt->priv->call_depth++;
/*
* When the merge fails, the result contains files
* with conflict markers. The cleanness flag is
@@ -3554,45 +3589,134 @@ int merge_recursive(struct merge_options *opt,
saved_b2 = opt->branch2;
opt->branch1 = "Temporary merge branch 1";
opt->branch2 = "Temporary merge branch 2";
- if (merge_recursive(opt, merged_common_ancestors, iter->item,
- NULL, &merged_common_ancestors) < 0)
+ if (merge_recursive_internal(opt, merged_merge_bases, iter->item,
+ NULL, &merged_merge_bases) < 0)
return -1;
opt->branch1 = saved_b1;
opt->branch2 = saved_b2;
- opt->call_depth--;
+ opt->priv->call_depth--;
- if (!merged_common_ancestors)
+ if (!merged_merge_bases)
return err(opt, _("merge returned no commit"));
}
discard_index(opt->repo->index);
- if (!opt->call_depth)
+ if (!opt->priv->call_depth)
repo_read_index(opt->repo);
- opt->ancestor = "merged common ancestors";
- clean = merge_trees(opt, get_commit_tree(h1), get_commit_tree(h2),
- get_commit_tree(merged_common_ancestors),
- &mrtree);
+ opt->ancestor = ancestor_name;
+ clean = merge_trees_internal(opt,
+ repo_get_commit_tree(opt->repo, h1),
+ repo_get_commit_tree(opt->repo, h2),
+ repo_get_commit_tree(opt->repo,
+ merged_merge_bases),
+ &result_tree);
+ strbuf_release(&merge_base_abbrev);
+ opt->ancestor = NULL; /* avoid accidental re-use of opt->ancestor */
if (clean < 0) {
flush_output(opt);
return clean;
}
- if (opt->call_depth) {
- *result = make_virtual_commit(opt->repo, mrtree, "merged tree");
+ if (opt->priv->call_depth) {
+ *result = make_virtual_commit(opt->repo, result_tree,
+ "merged tree");
commit_list_insert(h1, &(*result)->parents);
commit_list_insert(h2, &(*result)->parents->next);
}
+ return clean;
+}
+
+static int merge_start(struct merge_options *opt, struct tree *head)
+{
+ struct strbuf sb = STRBUF_INIT;
+
+ /* Sanity checks on opt */
+ assert(opt->repo);
+
+ assert(opt->branch1 && opt->branch2);
+
+ assert(opt->detect_renames >= -1 &&
+ opt->detect_renames <= DIFF_DETECT_COPY);
+ assert(opt->detect_directory_renames >= MERGE_DIRECTORY_RENAMES_NONE &&
+ opt->detect_directory_renames <= MERGE_DIRECTORY_RENAMES_TRUE);
+ assert(opt->rename_limit >= -1);
+ assert(opt->rename_score >= 0 && opt->rename_score <= MAX_SCORE);
+ assert(opt->show_rename_progress >= 0 && opt->show_rename_progress <= 1);
+
+ assert(opt->xdl_opts >= 0);
+ assert(opt->recursive_variant >= MERGE_VARIANT_NORMAL &&
+ opt->recursive_variant <= MERGE_VARIANT_THEIRS);
+
+ assert(opt->verbosity >= 0 && opt->verbosity <= 5);
+ assert(opt->buffer_output <= 2);
+ assert(opt->obuf.len == 0);
+
+ assert(opt->priv == NULL);
+
+ /* Sanity check on repo state; index must match head */
+ if (repo_index_has_changes(opt->repo, head, &sb)) {
+ err(opt, _("Your local changes to the following files would be overwritten by merge:\n %s"),
+ sb.buf);
+ strbuf_release(&sb);
+ return -1;
+ }
+
+ opt->priv = xcalloc(1, sizeof(*opt->priv));
+ string_list_init(&opt->priv->df_conflict_file_set, 1);
+ return 0;
+}
+
+static void merge_finalize(struct merge_options *opt)
+{
flush_output(opt);
- if (!opt->call_depth && opt->buffer_output < 2)
+ if (!opt->priv->call_depth && opt->buffer_output < 2)
strbuf_release(&opt->obuf);
if (show(opt, 2))
diff_warn_rename_limit("merge.renamelimit",
- opt->needed_rename_limit, 0);
+ opt->priv->needed_rename_limit, 0);
+ FREE_AND_NULL(opt->priv);
+}
+
+int merge_trees(struct merge_options *opt,
+ struct tree *head,
+ struct tree *merge,
+ struct tree *merge_base)
+{
+ int clean;
+ struct tree *ignored;
+
+ assert(opt->ancestor != NULL);
+
+ if (merge_start(opt, head))
+ return -1;
+ clean = merge_trees_internal(opt, head, merge, merge_base, &ignored);
+ merge_finalize(opt);
+
return clean;
}
-static struct commit *get_ref(struct repository *repo, const struct object_id *oid,
+int merge_recursive(struct merge_options *opt,
+ struct commit *h1,
+ struct commit *h2,
+ struct commit_list *merge_bases,
+ struct commit **result)
+{
+ int clean;
+
+ assert(opt->ancestor == NULL ||
+ !strcmp(opt->ancestor, "constructed merge base"));
+
+ if (merge_start(opt, repo_get_commit_tree(opt->repo, h1)))
+ return -1;
+ clean = merge_recursive_internal(opt, h1, h2, merge_bases, result);
+ merge_finalize(opt);
+
+ return clean;
+}
+
+static struct commit *get_ref(struct repository *repo,
+ const struct object_id *oid,
const char *name)
{
struct object *object;
@@ -3613,8 +3737,8 @@ static struct commit *get_ref(struct repository *repo, const struct object_id *o
int merge_recursive_generic(struct merge_options *opt,
const struct object_id *head,
const struct object_id *merge,
- int num_base_list,
- const struct object_id **base_list,
+ int num_merge_bases,
+ const struct object_id **merge_bases,
struct commit **result)
{
int clean;
@@ -3623,15 +3747,18 @@ int merge_recursive_generic(struct merge_options *opt,
struct commit *next_commit = get_ref(opt->repo, merge, opt->branch2);
struct commit_list *ca = NULL;
- if (base_list) {
+ if (merge_bases) {
int i;
- for (i = 0; i < num_base_list; ++i) {
+ for (i = 0; i < num_merge_bases; ++i) {
struct commit *base;
- if (!(base = get_ref(opt->repo, base_list[i], oid_to_hex(base_list[i]))))
+ if (!(base = get_ref(opt->repo, merge_bases[i],
+ oid_to_hex(merge_bases[i]))))
return err(opt, _("Could not parse object '%s'"),
- oid_to_hex(base_list[i]));
+ oid_to_hex(merge_bases[i]));
commit_list_insert(base, &ca);
}
+ if (num_merge_bases == 1)
+ opt->ancestor = "constructed merge base";
}
repo_hold_locked_index(opt->repo, &lock, LOCK_DIE_ON_ERROR);
@@ -3653,22 +3780,25 @@ static void merge_recursive_config(struct merge_options *opt)
{
char *value = NULL;
git_config_get_int("merge.verbosity", &opt->verbosity);
- git_config_get_int("diff.renamelimit", &opt->diff_rename_limit);
- git_config_get_int("merge.renamelimit", &opt->merge_rename_limit);
+ git_config_get_int("diff.renamelimit", &opt->rename_limit);
+ git_config_get_int("merge.renamelimit", &opt->rename_limit);
if (!git_config_get_string("diff.renames", &value)) {
- opt->diff_detect_rename = git_config_rename("diff.renames", value);
+ opt->detect_renames = git_config_rename("diff.renames", value);
free(value);
}
if (!git_config_get_string("merge.renames", &value)) {
- opt->merge_detect_rename = git_config_rename("merge.renames", value);
+ opt->detect_renames = git_config_rename("merge.renames", value);
free(value);
}
if (!git_config_get_string("merge.directoryrenames", &value)) {
int boolval = git_parse_maybe_bool(value);
if (0 <= boolval) {
- opt->detect_directory_renames = boolval ? 2 : 0;
+ opt->detect_directory_renames = boolval ?
+ MERGE_DIRECTORY_RENAMES_TRUE :
+ MERGE_DIRECTORY_RENAMES_NONE;
} else if (!strcasecmp(value, "conflict")) {
- opt->detect_directory_renames = 1;
+ opt->detect_directory_renames =
+ MERGE_DIRECTORY_RENAMES_CONFLICT;
} /* avoid erroring on values from future versions of git */
free(value);
}
@@ -3680,23 +3810,25 @@ void init_merge_options(struct merge_options *opt,
{
const char *merge_verbosity;
memset(opt, 0, sizeof(struct merge_options));
+
opt->repo = repo;
+
+ opt->detect_renames = -1;
+ opt->detect_directory_renames = MERGE_DIRECTORY_RENAMES_CONFLICT;
+ opt->rename_limit = -1;
+
opt->verbosity = 2;
opt->buffer_output = 1;
- opt->diff_rename_limit = -1;
- opt->merge_rename_limit = -1;
+ strbuf_init(&opt->obuf, 0);
+
opt->renormalize = 0;
- opt->diff_detect_rename = -1;
- opt->merge_detect_rename = -1;
- opt->detect_directory_renames = 1;
+
merge_recursive_config(opt);
merge_verbosity = getenv("GIT_MERGE_VERBOSITY");
if (merge_verbosity)
opt->verbosity = strtol(merge_verbosity, NULL, 10);
if (opt->verbosity >= 5)
opt->buffer_output = 0;
- strbuf_init(&opt->obuf, 0);
- string_list_init(&opt->df_conflict_file_set, 1);
}
int parse_merge_opt(struct merge_options *opt, const char *s)
@@ -3706,9 +3838,9 @@ int parse_merge_opt(struct merge_options *opt, const char *s)
if (!s || !*s)
return -1;
if (!strcmp(s, "ours"))
- opt->recursive_variant = MERGE_RECURSIVE_OURS;
+ opt->recursive_variant = MERGE_VARIANT_OURS;
else if (!strcmp(s, "theirs"))
- opt->recursive_variant = MERGE_RECURSIVE_THEIRS;
+ opt->recursive_variant = MERGE_VARIANT_THEIRS;
else if (!strcmp(s, "subtree"))
opt->subtree_shift = "";
else if (skip_prefix(s, "subtree=", &arg))
@@ -3739,16 +3871,16 @@ int parse_merge_opt(struct merge_options *opt, const char *s)
else if (!strcmp(s, "no-renormalize"))
opt->renormalize = 0;
else if (!strcmp(s, "no-renames"))
- opt->merge_detect_rename = 0;
+ opt->detect_renames = 0;
else if (!strcmp(s, "find-renames")) {
- opt->merge_detect_rename = 1;
+ opt->detect_renames = 1;
opt->rename_score = 0;
}
else if (skip_prefix(s, "find-renames=", &arg) ||
skip_prefix(s, "rename-threshold=", &arg)) {
if ((opt->rename_score = parse_rename_score(&arg)) == -1 || *arg != 0)
return -1;
- opt->merge_detect_rename = 1;
+ opt->detect_renames = 1;
}
/*
* Please update $__git_merge_strategy_options in
diff --git a/merge-recursive.h b/merge-recursive.h
index c2b7bb6..978847e 100644
--- a/merge-recursive.h
+++ b/merge-recursive.h
@@ -1,104 +1,124 @@
#ifndef MERGE_RECURSIVE_H
#define MERGE_RECURSIVE_H
-#include "string-list.h"
-#include "unpack-trees.h"
+#include "strbuf.h"
struct commit;
-
+struct commit_list;
+struct object_id;
struct repository;
+struct tree;
+struct merge_options_internal;
struct merge_options {
+ struct repository *repo;
+
+ /* ref names used in console messages and conflict markers */
const char *ancestor;
const char *branch1;
const char *branch2;
+
+ /* rename related options */
+ int detect_renames;
enum {
- MERGE_RECURSIVE_NORMAL = 0,
- MERGE_RECURSIVE_OURS,
- MERGE_RECURSIVE_THEIRS
+ MERGE_DIRECTORY_RENAMES_NONE = 0,
+ MERGE_DIRECTORY_RENAMES_CONFLICT = 1,
+ MERGE_DIRECTORY_RENAMES_TRUE = 2
+ } detect_directory_renames;
+ int rename_limit;
+ int rename_score;
+ int show_rename_progress;
+
+ /* xdiff-related options (patience, ignore whitespace, ours/theirs) */
+ long xdl_opts;
+ enum {
+ MERGE_VARIANT_NORMAL = 0,
+ MERGE_VARIANT_OURS,
+ MERGE_VARIANT_THEIRS
} recursive_variant;
- const char *subtree_shift;
+
+ /* console output related options */
+ int verbosity;
unsigned buffer_output; /* 1: output at end, 2: keep buffered */
+ struct strbuf obuf; /* output buffer; if buffer_output == 2, caller
+ * must handle and call strbuf_release */
+
+ /* miscellaneous control options */
+ const char *subtree_shift;
unsigned renormalize : 1;
- long xdl_opts;
- int verbosity;
- int detect_directory_renames;
- int diff_detect_rename;
- int merge_detect_rename;
- int diff_rename_limit;
- int merge_rename_limit;
- int rename_score;
- int needed_rename_limit;
- int show_rename_progress;
- int call_depth;
- struct strbuf obuf;
- struct hashmap current_file_dir_set;
- struct string_list df_conflict_file_set;
- struct unpack_trees_options unpack_opts;
- struct index_state orig_index;
- struct repository *repo;
+
+ /* internal fields used by the implementation */
+ struct merge_options_internal *priv;
};
+void init_merge_options(struct merge_options *opt, struct repository *repo);
+
+/* parse the option in s and update the relevant field of opt */
+int parse_merge_opt(struct merge_options *opt, const char *s);
+
/*
- * For dir_rename_entry, directory names are stored as a full path from the
- * toplevel of the repository and do not include a trailing '/'. Also:
- *
- * dir: original name of directory being renamed
- * non_unique_new_dir: if true, could not determine new_dir
- * new_dir: final name of directory being renamed
- * possible_new_dirs: temporary used to help determine new_dir; see comments
- * in get_directory_renames() for details
+ * RETURN VALUES: All the merge_* functions below return a value as follows:
+ * > 0 Merge was clean
+ * = 0 Merge had conflicts
+ * < 0 Merge hit an unexpected and unrecoverable problem (e.g. disk
+ * full) and aborted merge part-way through.
*/
-struct dir_rename_entry {
- struct hashmap_entry ent; /* must be the first member! */
- char *dir;
- unsigned non_unique_new_dir:1;
- struct strbuf new_dir;
- struct string_list possible_new_dirs;
-};
-
-struct collision_entry {
- struct hashmap_entry ent; /* must be the first member! */
- char *target_file;
- struct string_list source_files;
- unsigned reported_already:1;
-};
-static inline int merge_detect_rename(struct merge_options *o)
-{
- return o->merge_detect_rename >= 0 ? o->merge_detect_rename :
- o->diff_detect_rename >= 0 ? o->diff_detect_rename : 1;
-}
+/*
+ * rename-detecting three-way merge, no recursion.
+ *
+ * Outputs:
+ * - See RETURN VALUES above
+ * - No commit is created
+ * - opt->repo->index has the new index
+ * - $GIT_INDEX_FILE is not updated
+ * - The working tree is updated with results of the merge
+ */
+int merge_trees(struct merge_options *opt,
+ struct tree *head,
+ struct tree *merge,
+ struct tree *merge_base);
-/* merge_trees() but with recursive ancestor consolidation */
-int merge_recursive(struct merge_options *o,
+/*
+ * merge_recursive is like merge_trees() but with recursive ancestor
+ * consolidation and, if the commit is clean, creation of a commit.
+ *
+ * NOTE: empirically, about a decade ago it was determined that with more
+ * than two merge bases, optimal behavior was found when the
+ * merge_bases were passed in the order of oldest commit to newest
+ * commit. Also, merge_bases will be consumed (emptied) so make a
+ * copy if you need it.
+ *
+ * Outputs:
+ * - See RETURN VALUES above
+ * - If merge is clean, a commit is created and its address written to *result
+ * - opt->repo->index has the new index
+ * - $GIT_INDEX_FILE is not updated
+ * - The working tree is updated with results of the merge
+ */
+int merge_recursive(struct merge_options *opt,
struct commit *h1,
struct commit *h2,
- struct commit_list *ancestors,
+ struct commit_list *merge_bases,
struct commit **result);
-/* rename-detecting three-way merge, no recursion */
-int merge_trees(struct merge_options *o,
- struct tree *head,
- struct tree *merge,
- struct tree *common,
- struct tree **result);
-
/*
- * "git-merge-recursive" can be fed trees; wrap them into
- * virtual commits and call merge_recursive() proper.
+ * merge_recursive_generic can operate on trees instead of commits, by
+ * wrapping the trees into virtual commits, and calling merge_recursive().
+ * It also writes out the in-memory index to disk if the merge is successful.
+ *
+ * Outputs:
+ * - See RETURN VALUES above
+ * - If merge is clean, a commit is created and its address written to *result
+ * - opt->repo->index has the new index
+ * - $GIT_INDEX_FILE is updated
+ * - The working tree is updated with results of the merge
*/
-int merge_recursive_generic(struct merge_options *o,
+int merge_recursive_generic(struct merge_options *opt,
const struct object_id *head,
const struct object_id *merge,
- int num_ca,
- const struct object_id **ca,
+ int num_merge_bases,
+ const struct object_id **merge_bases,
struct commit **result);
-void init_merge_options(struct merge_options *o,
- struct repository *repo);
-struct tree *write_tree_from_memory(struct merge_options *o);
-
-int parse_merge_opt(struct merge_options *out, const char *s);
-
#endif
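To make the header comments above concrete, here is a minimal caller-side sketch (not part of the patch) of the reworked entry points, assuming the head, merge, and merge-base trees have already been parsed; the function name demo_merge_trees and the branch labels are illustrative only. It shows the options-only public struct, the now-mandatory opt.ancestor label for merge_trees(), and the tri-state return value documented under RETURN VALUES.

/* Illustrative sketch only -- not part of this patch. */
#include "cache.h"
#include "merge-recursive.h"

static int demo_merge_trees(struct repository *repo,
			    struct tree *head,
			    struct tree *merge,
			    struct tree *merge_base)
{
	struct merge_options opt;
	int clean;

	init_merge_options(&opt, repo);
	opt.branch1 = "HEAD";		/* used in messages and conflict markers */
	opt.branch2 = "topic";
	opt.ancestor = "merge base";	/* merge_trees() now asserts this is set */

	clean = merge_trees(&opt, head, merge, merge_base);
	if (clean < 0)
		return -1;	/* unrecoverable error (see RETURN VALUES) */
	return clean;		/* 1: clean merge, 0: conflicts left in the index */
}

All tuning knobs (rename detection, verbosity, the ours/theirs variants) now live in the public half of merge_options, while the index, call depth, and other working state sit behind opt->priv and are set up and torn down by merge_start()/merge_finalize().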
diff --git a/midx.c b/midx.c
index d649644..f29afc0 100644
--- a/midx.c
+++ b/midx.c
@@ -19,8 +19,7 @@
#define MIDX_BYTE_NUM_PACKS 8
#define MIDX_HASH_VERSION 1
#define MIDX_HEADER_SIZE 12
-#define MIDX_HASH_LEN 20
-#define MIDX_MIN_SIZE (MIDX_HEADER_SIZE + MIDX_HASH_LEN)
+#define MIDX_MIN_SIZE (MIDX_HEADER_SIZE + the_hash_algo->rawsz)
#define MIDX_MAX_CHUNKS 5
#define MIDX_CHUNK_ALIGNMENT 4
@@ -93,7 +92,7 @@ struct multi_pack_index *load_multi_pack_index(const char *object_dir, int local
hash_version = m->data[MIDX_BYTE_HASH_VERSION];
if (hash_version != MIDX_HASH_VERSION)
die(_("hash version %u does not match"), hash_version);
- m->hash_len = MIDX_HASH_LEN;
+ m->hash_len = the_hash_algo->rawsz;
m->num_chunks = m->data[MIDX_BYTE_NUM_CHUNKS];
@@ -234,7 +233,7 @@ int prepare_midx_pack(struct repository *r, struct multi_pack_index *m, uint32_t
int bsearch_midx(const struct object_id *oid, struct multi_pack_index *m, uint32_t *result)
{
return bsearch_hash(oid->hash, m->chunk_oid_fanout, m->chunk_oid_lookup,
- MIDX_HASH_LEN, result);
+ the_hash_algo->rawsz, result);
}
struct object_id *nth_midxed_object_oid(struct object_id *oid,
@@ -928,7 +927,7 @@ static int write_midx_internal(const char *object_dir, struct multi_pack_index *
cur_chunk++;
chunk_ids[cur_chunk] = MIDX_CHUNKID_OBJECTOFFSETS;
- chunk_offsets[cur_chunk] = chunk_offsets[cur_chunk - 1] + nr_entries * MIDX_HASH_LEN;
+ chunk_offsets[cur_chunk] = chunk_offsets[cur_chunk - 1] + nr_entries * the_hash_algo->rawsz;
cur_chunk++;
chunk_offsets[cur_chunk] = chunk_offsets[cur_chunk - 1] + nr_entries * MIDX_CHUNK_OFFSET_WIDTH;
@@ -976,7 +975,7 @@ static int write_midx_internal(const char *object_dir, struct multi_pack_index *
break;
case MIDX_CHUNKID_OIDLOOKUP:
- written += write_midx_oid_lookup(f, MIDX_HASH_LEN, entries, nr_entries);
+ written += write_midx_oid_lookup(f, the_hash_algo->rawsz, entries, nr_entries);
break;
case MIDX_CHUNKID_OBJECTOFFSETS:
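The midx.c hunks above (and the pack-write.c/packfile.c ones below) follow the same hash-transition pattern: hard-coded SHA-1 widths and sha1_to_hex() give way to the_hash_algo and hash_to_hex(). A hedged sketch of that pattern, with a hypothetical helper name, looks like this:

/* Illustrative sketch only -- not part of this patch. */
#include "cache.h"

static void describe_hash(struct strbuf *buf, const unsigned char *hash)
{
	/*
	 * the_hash_algo->rawsz and hash_to_hex() adapt to the repository's
	 * object format instead of assuming a 20-byte / 40-hex SHA-1.
	 */
	strbuf_addf(buf, "%s (%d raw bytes)\n",
		    hash_to_hex(hash), (int)the_hash_algo->rawsz);
}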
diff --git a/name-hash.c b/name-hash.c
index 6959086..ceb1d7b 100644
--- a/name-hash.c
+++ b/name-hash.c
@@ -17,14 +17,16 @@ struct dir_entry {
};
static int dir_entry_cmp(const void *unused_cmp_data,
- const void *entry,
- const void *entry_or_key,
+ const struct hashmap_entry *eptr,
+ const struct hashmap_entry *entry_or_key,
const void *keydata)
{
- const struct dir_entry *e1 = entry;
- const struct dir_entry *e2 = entry_or_key;
+ const struct dir_entry *e1, *e2;
const char *name = keydata;
+ e1 = container_of(eptr, const struct dir_entry, ent);
+ e2 = container_of(entry_or_key, const struct dir_entry, ent);
+
return e1->namelen != e2->namelen || strncasecmp(e1->name,
name ? name : e2->name, e1->namelen);
}
@@ -33,9 +35,9 @@ static struct dir_entry *find_dir_entry__hash(struct index_state *istate,
const char *name, unsigned int namelen, unsigned int hash)
{
struct dir_entry key;
- hashmap_entry_init(&key, hash);
+ hashmap_entry_init(&key.ent, hash);
key.namelen = namelen;
- return hashmap_get(&istate->dir_hash, &key, name);
+ return hashmap_get_entry(&istate->dir_hash, &key, ent, name);
}
static struct dir_entry *find_dir_entry(struct index_state *istate,
@@ -68,9 +70,9 @@ static struct dir_entry *hash_dir_entry(struct index_state *istate,
if (!dir) {
/* not found, create it and add to hash table */
FLEX_ALLOC_MEM(dir, name, ce->name, namelen);
- hashmap_entry_init(dir, memihash(ce->name, namelen));
+ hashmap_entry_init(&dir->ent, memihash(ce->name, namelen));
dir->namelen = namelen;
- hashmap_add(&istate->dir_hash, dir);
+ hashmap_add(&istate->dir_hash, &dir->ent);
/* recursively add missing parent directories */
dir->parent = hash_dir_entry(istate, ce, namelen);
@@ -95,7 +97,7 @@ static void remove_dir_entry(struct index_state *istate, struct cache_entry *ce)
struct dir_entry *dir = hash_dir_entry(istate, ce, ce_namelen(ce));
while (dir && !(--dir->nr)) {
struct dir_entry *parent = dir->parent;
- hashmap_remove(&istate->dir_hash, dir, NULL);
+ hashmap_remove(&istate->dir_hash, &dir->ent, NULL);
free(dir);
dir = parent;
}
@@ -106,20 +108,23 @@ static void hash_index_entry(struct index_state *istate, struct cache_entry *ce)
if (ce->ce_flags & CE_HASHED)
return;
ce->ce_flags |= CE_HASHED;
- hashmap_entry_init(ce, memihash(ce->name, ce_namelen(ce)));
- hashmap_add(&istate->name_hash, ce);
+ hashmap_entry_init(&ce->ent, memihash(ce->name, ce_namelen(ce)));
+ hashmap_add(&istate->name_hash, &ce->ent);
if (ignore_case)
add_dir_entry(istate, ce);
}
static int cache_entry_cmp(const void *unused_cmp_data,
- const void *entry,
- const void *entry_or_key,
+ const struct hashmap_entry *eptr,
+ const struct hashmap_entry *entry_or_key,
const void *remove)
{
- const struct cache_entry *ce1 = entry;
- const struct cache_entry *ce2 = entry_or_key;
+ const struct cache_entry *ce1, *ce2;
+
+ ce1 = container_of(eptr, const struct cache_entry, ent);
+ ce2 = container_of(entry_or_key, const struct cache_entry, ent);
+
/*
* For remove_name_hash, find the exact entry (pointer equality); for
* index_file_exists, find all entries with matching hash code and
@@ -280,10 +285,10 @@ static struct dir_entry *hash_dir_entry_with_parent_and_prefix(
dir = find_dir_entry__hash(istate, prefix->buf, prefix->len, hash);
if (!dir) {
FLEX_ALLOC_MEM(dir, name, prefix->buf, prefix->len);
- hashmap_entry_init(dir, hash);
+ hashmap_entry_init(&dir->ent, hash);
dir->namelen = prefix->len;
dir->parent = parent;
- hashmap_add(&istate->dir_hash, dir);
+ hashmap_add(&istate->dir_hash, &dir->ent);
if (parent) {
unlock_dir_mutex(lock_nr);
@@ -472,8 +477,8 @@ static void *lazy_name_thread_proc(void *_data)
for (k = 0; k < d->istate->cache_nr; k++) {
struct cache_entry *ce_k = d->istate->cache[k];
ce_k->ce_flags |= CE_HASHED;
- hashmap_entry_init(ce_k, d->lazy_entries[k].hash_name);
- hashmap_add(&d->istate->name_hash, ce_k);
+ hashmap_entry_init(&ce_k->ent, d->lazy_entries[k].hash_name);
+ hashmap_add(&d->istate->name_hash, &ce_k->ent);
}
return NULL;
@@ -625,7 +630,7 @@ void remove_name_hash(struct index_state *istate, struct cache_entry *ce)
if (!istate->name_hash_initialized || !(ce->ce_flags & CE_HASHED))
return;
ce->ce_flags &= ~CE_HASHED;
- hashmap_remove(&istate->name_hash, ce, ce);
+ hashmap_remove(&istate->name_hash, &ce->ent, ce);
if (ignore_case)
remove_dir_entry(istate, ce);
@@ -702,15 +707,15 @@ void adjust_dirname_case(struct index_state *istate, char *name)
struct cache_entry *index_file_exists(struct index_state *istate, const char *name, int namelen, int icase)
{
struct cache_entry *ce;
+ unsigned int hash = memihash(name, namelen);
lazy_init_name_hash(istate);
- ce = hashmap_get_from_hash(&istate->name_hash,
- memihash(name, namelen), NULL);
- while (ce) {
+ ce = hashmap_get_entry_from_hash(&istate->name_hash, hash, NULL,
+ struct cache_entry, ent);
+ hashmap_for_each_entry_from(&istate->name_hash, ce, ent) {
if (same_name(ce, name, namelen, icase))
return ce;
- ce = hashmap_get_next(&istate->name_hash, ce);
}
return NULL;
}
@@ -721,6 +726,6 @@ void free_name_hash(struct index_state *istate)
return;
istate->name_hash_initialized = 0;
- hashmap_free(&istate->name_hash, 0);
- hashmap_free(&istate->dir_hash, 1);
+ hashmap_free(&istate->name_hash);
+ hashmap_free_entries(&istate->dir_hash, struct dir_entry, ent);
}
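The hashmap conversions throughout this patch (merge-recursive.c, name-hash.c, packfile.c, patch-ids.c, oidmap.c) all move to one calling convention: the embedded hashmap_entry is addressed explicitly, comparison callbacks receive hashmap_entry pointers and recover the container with container_of(), iteration uses hashmap_for_each_entry(), and teardown uses hashmap_free_entries(). A self-contained sketch of that convention, using a hypothetical word_entry type, might look like this:

/* Illustrative sketch only -- not part of this patch. */
#include "cache.h"
#include "hashmap.h"

struct word_entry {
	struct hashmap_entry ent;	/* embedded; no longer assumed to be first */
	int count;
	char word[FLEX_ARRAY];
};

static int word_entry_cmp(const void *unused_cmp_data,
			  const struct hashmap_entry *eptr,
			  const struct hashmap_entry *entry_or_key,
			  const void *keydata)
{
	const struct word_entry *a, *b;

	a = container_of(eptr, const struct word_entry, ent);
	b = container_of(entry_or_key, const struct word_entry, ent);
	/* non-zero means "not equal", matching the other cmp callbacks */
	return strcmp(a->word, keydata ? keydata : b->word);
}

static void count_word(struct hashmap *map, const char *word)
{
	struct word_entry key, *e;

	hashmap_entry_init(&key.ent, strhash(word));
	e = hashmap_get_entry(map, &key, ent, word);
	if (!e) {
		FLEX_ALLOC_STR(e, word, word);
		hashmap_entry_init(&e->ent, strhash(word));
		hashmap_add(map, &e->ent);
	}
	e->count++;
}

static void count_and_dump(void)
{
	struct hashmap map;
	struct hashmap_iter iter;
	struct word_entry *e;

	hashmap_init(&map, word_entry_cmp, NULL, 0);
	count_word(&map, "merge");
	count_word(&map, "merge");

	hashmap_for_each_entry(&map, &iter, e, ent /* member name */)
		printf("%d %s\n", e->count, e->word);

	hashmap_free_entries(&map, struct word_entry, ent);
}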
diff --git a/object.c b/object.c
index 07bdd5b..3b8b8c5 100644
--- a/object.c
+++ b/object.c
@@ -7,7 +7,6 @@
#include "commit.h"
#include "tag.h"
#include "alloc.h"
-#include "object-store.h"
#include "packfile.h"
#include "commit-graph.h"
diff --git a/oidmap.c b/oidmap.c
index 6d6e840..423aa01 100644
--- a/oidmap.c
+++ b/oidmap.c
@@ -2,14 +2,18 @@
#include "oidmap.h"
static int oidmap_neq(const void *hashmap_cmp_fn_data,
- const void *entry, const void *entry_or_key,
+ const struct hashmap_entry *e1,
+ const struct hashmap_entry *e2,
const void *keydata)
{
- const struct oidmap_entry *entry_ = entry;
+ const struct oidmap_entry *a, *b;
+
+ a = container_of(e1, const struct oidmap_entry, internal_entry);
+ b = container_of(e2, const struct oidmap_entry, internal_entry);
+
if (keydata)
- return !oideq(&entry_->oid, (const struct object_id *) keydata);
- return !oideq(&entry_->oid,
- &((const struct oidmap_entry *) entry_or_key)->oid);
+ return !oideq(&a->oid, (const struct object_id *) keydata);
+ return !oideq(&a->oid, &b->oid);
}
void oidmap_init(struct oidmap *map, size_t initial_size)
@@ -21,7 +25,9 @@ void oidmap_free(struct oidmap *map, int free_entries)
{
if (!map)
return;
- hashmap_free(&map->map, free_entries);
+
+ /* TODO: make oidmap itself not depend on struct layouts */
+ hashmap_free_(&map->map, free_entries ? 0 : -1);
}
void *oidmap_get(const struct oidmap *map, const struct object_id *key)
@@ -51,5 +57,5 @@ void *oidmap_put(struct oidmap *map, void *entry)
oidmap_init(map, 0);
hashmap_entry_init(&to_put->internal_entry, oidhash(&to_put->oid));
- return hashmap_put(&map->map, to_put);
+ return hashmap_put(&map->map, &to_put->internal_entry);
}
diff --git a/oidmap.h b/oidmap.h
index 7a93946..c66a83a 100644
--- a/oidmap.h
+++ b/oidmap.h
@@ -78,14 +78,16 @@ static inline void oidmap_iter_init(struct oidmap *map, struct oidmap_iter *iter
static inline void *oidmap_iter_next(struct oidmap_iter *iter)
{
- return hashmap_iter_next(&iter->h_iter);
+ /* TODO: this API could be reworked to do compile-time type checks */
+ return (void *)hashmap_iter_next(&iter->h_iter);
}
static inline void *oidmap_iter_first(struct oidmap *map,
struct oidmap_iter *iter)
{
oidmap_iter_init(map, iter);
- return oidmap_iter_next(iter);
+ /* TODO: this API could be reworked to do compile-time type checks */
+ return (void *)oidmap_iter_next(iter);
}
#endif
diff --git a/pack-bitmap.h b/pack-bitmap.h
index 00de3ec..466c5af 100644
--- a/pack-bitmap.h
+++ b/pack-bitmap.h
@@ -9,16 +9,16 @@ struct commit;
struct repository;
struct rev_info;
+static const char BITMAP_IDX_SIGNATURE[] = {'B', 'I', 'T', 'M'};
+
struct bitmap_disk_header {
- char magic[4];
+ char magic[ARRAY_SIZE(BITMAP_IDX_SIGNATURE)];
uint16_t version;
uint16_t options;
uint32_t entry_count;
unsigned char checksum[GIT_MAX_RAWSZ];
};
-static const char BITMAP_IDX_SIGNATURE[] = {'B', 'I', 'T', 'M'};
-
#define NEEDS_BITMAP (1u<<22)
enum pack_bitmap_opts {
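Hoisting BITMAP_IDX_SIGNATURE above the struct is what allows the magic[] member to be sized from it: ARRAY_SIZE() needs the array definition in scope at the point of use. A minimal sketch under hypothetical names:

/* Illustrative sketch only -- not part of this patch. */
#include "git-compat-util.h"	/* ARRAY_SIZE() */

static const char DEMO_SIGNATURE[] = {'D', 'E', 'M', 'O'};

struct demo_header {
	char magic[ARRAY_SIZE(DEMO_SIGNATURE)];	/* stays in sync with the signature */
	uint16_t version;
};

static int demo_check_signature(const struct demo_header *h)
{
	return !memcmp(h->magic, DEMO_SIGNATURE, sizeof(DEMO_SIGNATURE));
}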
diff --git a/pack-write.c b/pack-write.c
index 29d17a9..f0017be 100644
--- a/pack-write.c
+++ b/pack-write.c
@@ -349,7 +349,7 @@ void finish_tmp_packfile(struct strbuf *name_buffer,
struct pack_idx_entry **written_list,
uint32_t nr_written,
struct pack_idx_option *pack_idx_opts,
- unsigned char sha1[])
+ unsigned char hash[])
{
const char *idx_tmp_name;
int basename_len = name_buffer->len;
@@ -358,18 +358,18 @@ void finish_tmp_packfile(struct strbuf *name_buffer,
die_errno("unable to make temporary pack file readable");
idx_tmp_name = write_idx_file(NULL, written_list, nr_written,
- pack_idx_opts, sha1);
+ pack_idx_opts, hash);
if (adjust_shared_perm(idx_tmp_name))
die_errno("unable to make temporary index file readable");
- strbuf_addf(name_buffer, "%s.pack", sha1_to_hex(sha1));
+ strbuf_addf(name_buffer, "%s.pack", hash_to_hex(hash));
if (rename(pack_tmp_name, name_buffer->buf))
die_errno("unable to rename temporary pack file");
strbuf_setlen(name_buffer, basename_len);
- strbuf_addf(name_buffer, "%s.idx", sha1_to_hex(sha1));
+ strbuf_addf(name_buffer, "%s.idx", hash_to_hex(hash));
if (rename(idx_tmp_name, name_buffer->buf))
die_errno("unable to rename temporary index file");
diff --git a/packfile.c b/packfile.c
index f3f962a..355066d 100644
--- a/packfile.c
+++ b/packfile.c
@@ -6,7 +6,6 @@
#include "mergesort.h"
#include "packfile.h"
#include "delta.h"
-#include "list.h"
#include "streaming.h"
#include "sha1-lookup.h"
#include "commit.h"
@@ -20,12 +19,12 @@
#include "promisor-remote.h"
char *odb_pack_name(struct strbuf *buf,
- const unsigned char *sha1,
+ const unsigned char *hash,
const char *ext)
{
strbuf_reset(buf);
strbuf_addf(buf, "%s/pack/pack-%s.%s", get_object_directory(),
- sha1_to_hex(sha1), ext);
+ hash_to_hex(hash), ext);
return buf->buf;
}
@@ -1344,7 +1343,7 @@ struct delta_base_cache_key {
};
struct delta_base_cache_entry {
- struct hashmap hash;
+ struct hashmap_entry ent;
struct delta_base_cache_key key;
struct list_head lru;
void *data;
@@ -1364,7 +1363,7 @@ static unsigned int pack_entry_hash(struct packed_git *p, off_t base_offset)
static struct delta_base_cache_entry *
get_delta_base_cache_entry(struct packed_git *p, off_t base_offset)
{
- struct hashmap_entry entry;
+ struct hashmap_entry entry, *e;
struct delta_base_cache_key key;
if (!delta_base_cache.cmpfn)
@@ -1373,7 +1372,8 @@ get_delta_base_cache_entry(struct packed_git *p, off_t base_offset)
hashmap_entry_init(&entry, pack_entry_hash(p, base_offset));
key.p = p;
key.base_offset = base_offset;
- return hashmap_get(&delta_base_cache, &entry, &key);
+ e = hashmap_get(&delta_base_cache, &entry, &key);
+ return e ? container_of(e, struct delta_base_cache_entry, ent) : NULL;
}
static int delta_base_cache_key_eq(const struct delta_base_cache_key *a,
@@ -1383,11 +1383,16 @@ static int delta_base_cache_key_eq(const struct delta_base_cache_key *a,
}
static int delta_base_cache_hash_cmp(const void *unused_cmp_data,
- const void *va, const void *vb,
+ const struct hashmap_entry *va,
+ const struct hashmap_entry *vb,
const void *vkey)
{
- const struct delta_base_cache_entry *a = va, *b = vb;
+ const struct delta_base_cache_entry *a, *b;
const struct delta_base_cache_key *key = vkey;
+
+ a = container_of(va, const struct delta_base_cache_entry, ent);
+ b = container_of(vb, const struct delta_base_cache_entry, ent);
+
if (key)
return !delta_base_cache_key_eq(&a->key, key);
else
@@ -1406,7 +1411,7 @@ static int in_delta_base_cache(struct packed_git *p, off_t base_offset)
*/
static void detach_delta_base_cache_entry(struct delta_base_cache_entry *ent)
{
- hashmap_remove(&delta_base_cache, ent, &ent->key);
+ hashmap_remove(&delta_base_cache, &ent->ent, &ent->key);
list_del(&ent->lru);
delta_base_cached -= ent->size;
free(ent);
@@ -1470,8 +1475,8 @@ static void add_delta_base_cache(struct packed_git *p, off_t base_offset,
if (!delta_base_cache.cmpfn)
hashmap_init(&delta_base_cache, delta_base_cache_hash_cmp, NULL, 0);
- hashmap_entry_init(ent, pack_entry_hash(p, base_offset));
- hashmap_add(&delta_base_cache, ent);
+ hashmap_entry_init(&ent->ent, pack_entry_hash(p, base_offset));
+ hashmap_add(&delta_base_cache, &ent->ent);
}
int packed_object_info(struct repository *r, struct packed_git *p,
diff --git a/patch-ids.c b/patch-ids.c
index e8c150d..12aa6d4 100644
--- a/patch-ids.c
+++ b/patch-ids.c
@@ -36,14 +36,16 @@ int commit_patch_id(struct commit *commit, struct diff_options *options,
* any significance; only that it is non-zero matters.
*/
static int patch_id_neq(const void *cmpfn_data,
- const void *entry,
- const void *entry_or_key,
+ const struct hashmap_entry *eptr,
+ const struct hashmap_entry *entry_or_key,
const void *unused_keydata)
{
/* NEEDSWORK: const correctness? */
struct diff_options *opt = (void *)cmpfn_data;
- struct patch_id *a = (void *)entry;
- struct patch_id *b = (void *)entry_or_key;
+ struct patch_id *a, *b;
+
+ a = container_of(eptr, struct patch_id, ent);
+ b = container_of(entry_or_key, struct patch_id, ent);
if (is_null_oid(&a->patch_id) &&
commit_patch_id(a->commit, opt, &a->patch_id, 0, 0))
@@ -69,7 +71,7 @@ int init_patch_ids(struct repository *r, struct patch_ids *ids)
int free_patch_ids(struct patch_ids *ids)
{
- hashmap_free(&ids->patches, 1);
+ hashmap_free_entries(&ids->patches, struct patch_id, ent);
return 0;
}
@@ -83,7 +85,7 @@ static int init_patch_id_entry(struct patch_id *patch,
if (commit_patch_id(commit, &ids->diffopts, &header_only_patch_id, 1, 0))
return -1;
- hashmap_entry_init(patch, oidhash(&header_only_patch_id));
+ hashmap_entry_init(&patch->ent, oidhash(&header_only_patch_id));
return 0;
}
@@ -99,7 +101,7 @@ struct patch_id *has_commit_patch_id(struct commit *commit,
if (init_patch_id_entry(&patch, commit, ids))
return NULL;
- return hashmap_get(&ids->patches, &patch, NULL);
+ return hashmap_get_entry(&ids->patches, &patch, ent, NULL);
}
struct patch_id *add_commit_patch_id(struct commit *commit,
@@ -116,6 +118,6 @@ struct patch_id *add_commit_patch_id(struct commit *commit,
return NULL;
}
- hashmap_add(&ids->patches, key);
+ hashmap_add(&ids->patches, &key->ent);
return key;
}
diff --git a/perl/Git/SVN.pm b/perl/Git/SVN.pm
index 76b2965..4b28b87 100644
--- a/perl/Git/SVN.pm
+++ b/perl/Git/SVN.pm
@@ -1491,6 +1491,10 @@ sub call_authors_prog {
sub check_author {
my ($author) = @_;
+ if (defined $author) {
+ $author =~ s/^\s+//g;
+ $author =~ s/\s+$//g;
+ }
if (!defined $author || length $author == 0) {
$author = '(no author)';
}
diff --git a/pretty.c b/pretty.c
index e4ed14e..b32f036 100644
--- a/pretty.c
+++ b/pretty.c
@@ -1239,11 +1239,9 @@ static size_t format_commit_one(struct strbuf *sb, /* in UTF-8 */
strbuf_addstr(sb, get_revision_mark(NULL, commit));
return 1;
case 'd':
- load_ref_decorations(NULL, DECORATE_SHORT_REFS);
format_decorations(sb, commit, c->auto_color);
return 1;
case 'D':
- load_ref_decorations(NULL, DECORATE_SHORT_REFS);
format_decorations_extended(sb, commit, c->auto_color, "", ", ", "");
return 1;
case 'S': /* tag/branch like --source */
diff --git a/progress.c b/progress.c
index 277db8a..0063559 100644
--- a/progress.c
+++ b/progress.c
@@ -45,6 +45,19 @@ struct progress {
static volatile sig_atomic_t progress_update;
+/*
+ * These are only intended for testing the progress output, i.e. exclusively
+ * for 'test-tool progress'.
+ */
+int progress_testing;
+uint64_t progress_test_ns = 0;
+void progress_test_force_update(void); /* To silence -Wmissing-prototypes */
+void progress_test_force_update(void)
+{
+ progress_update = 1;
+}
+
+
static void progress_interval(int signum)
{
progress_update = 1;
@@ -55,6 +68,9 @@ static void set_progress_signal(void)
struct sigaction sa;
struct itimerval v;
+ if (progress_testing)
+ return;
+
progress_update = 0;
memset(&sa, 0, sizeof(sa));
@@ -72,6 +88,10 @@ static void set_progress_signal(void)
static void clear_progress_signal(void)
{
struct itimerval v = {{0,},};
+
+ if (progress_testing)
+ return;
+
setitimer(ITIMER_REAL, &v, NULL);
signal(SIGALRM, SIG_IGN);
progress_update = 0;
@@ -88,6 +108,7 @@ static void display(struct progress *progress, uint64_t n, const char *done)
const char *tp;
struct strbuf *counters_sb = &progress->counters_sb;
int show_update = 0;
+ int last_count_len = counters_sb->len;
if (progress->delay && (!progress_update || --progress->delay))
return;
@@ -115,21 +136,27 @@ static void display(struct progress *progress, uint64_t n, const char *done)
if (show_update) {
if (is_foreground_fd(fileno(stderr)) || done) {
const char *eol = done ? done : "\r";
+ size_t clear_len = counters_sb->len < last_count_len ?
+ last_count_len - counters_sb->len + 1 :
+ 0;
+ /* The "+ 2" accounts for the ": ". */
+ size_t progress_line_len = progress->title_len +
+ counters_sb->len + 2;
+ int cols = term_columns();
- term_clear_line();
if (progress->split) {
- fprintf(stderr, " %s%s", counters_sb->buf,
- eol);
- } else if (!done &&
- /* The "+ 2" accounts for the ": ". */
- term_columns() < progress->title_len +
- counters_sb->len + 2) {
- fprintf(stderr, "%s:\n %s%s",
- progress->title, counters_sb->buf, eol);
+ fprintf(stderr, " %s%*s", counters_sb->buf,
+ (int) clear_len, eol);
+ } else if (!done && cols < progress_line_len) {
+ clear_len = progress->title_len + 1 < cols ?
+ cols - progress->title_len - 1 : 0;
+ fprintf(stderr, "%s:%*s\n %s%s",
+ progress->title, (int) clear_len, "",
+ counters_sb->buf, eol);
progress->split = 1;
} else {
- fprintf(stderr, "%s: %s%s", progress->title,
- counters_sb->buf, eol);
+ fprintf(stderr, "%s: %s%*s", progress->title,
+ counters_sb->buf, (int) clear_len, eol);
}
fflush(stderr);
}
@@ -147,6 +174,14 @@ static void throughput_string(struct strbuf *buf, uint64_t total,
strbuf_humanise_rate(buf, rate * 1024);
}
+static uint64_t progress_getnanotime(struct progress *progress)
+{
+ if (progress_testing)
+ return progress->start_ns + progress_test_ns;
+ else
+ return getnanotime();
+}
+
void display_throughput(struct progress *progress, uint64_t total)
{
struct throughput *tp;
@@ -157,7 +192,7 @@ void display_throughput(struct progress *progress, uint64_t total)
return;
tp = progress->throughput;
- now_ns = getnanotime();
+ now_ns = progress_getnanotime(progress);
if (!tp) {
progress->throughput = tp = xcalloc(1, sizeof(*tp));
@@ -289,7 +324,7 @@ void stop_progress_msg(struct progress **p_progress, const char *msg)
struct throughput *tp = progress->throughput;
if (tp) {
- uint64_t now_ns = getnanotime();
+ uint64_t now_ns = progress_getnanotime(progress);
unsigned int misecs, rate;
misecs = ((now_ns - progress->start_ns) * 4398) >> 32;
rate = tp->curr_total / (misecs ? misecs : 1);
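
The display() change above stops clearing the whole terminal line and instead pads the new output with enough spaces to cover whatever the previous, longer update left behind. The stand-alone sketch below shows the same `%*s` padding idea under simplified assumptions (a single counter string, no title splitting); it is not the real progress code.

#include <stdio.h>
#include <string.h>

/* print one progress update in place, padding over the previous one */
static void show(const char *title, const char *counters, size_t *last_len)
{
	size_t len = strlen(counters);
	/* spaces needed to blank out leftovers from the last, longer update */
	size_t clear_len = len < *last_len ? *last_len - len : 0;

	fprintf(stderr, "%s: %s%*s\r", title, counters, (int)clear_len, "");
	*last_len = len;
}

int main(void)
{
	size_t last = 0;

	show("Counting objects", "100 (10%)", &last);
	show("Counting objects", "1000 (100%)", &last);
	show("Counting objects", "done", &last);	/* shorter, gets padded */
	fputc('\n', stderr);
	return 0;
}
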
diff --git a/promisor-remote.c b/promisor-remote.c
index 9bc296c..9bd5b79 100644
--- a/promisor-remote.c
+++ b/promisor-remote.c
@@ -89,6 +89,9 @@ static struct promisor_remote *promisor_remote_lookup(const char *remote_name,
static void promisor_remote_move_to_tail(struct promisor_remote *r,
struct promisor_remote *previous)
{
+ if (r->next == NULL)
+ return;
+
if (previous)
previous->next = r->next;
else
diff --git a/promisor-remote.h b/promisor-remote.h
index 8200dfc..737bac3 100644
--- a/promisor-remote.h
+++ b/promisor-remote.h
@@ -1,6 +1,8 @@
#ifndef PROMISOR_REMOTE_H
#define PROMISOR_REMOTE_H
+#include "repository.h"
+
struct object_id;
/*
@@ -15,17 +17,17 @@ struct promisor_remote {
const char name[FLEX_ARRAY];
};
-extern void promisor_remote_reinit(void);
-extern struct promisor_remote *promisor_remote_find(const char *remote_name);
-extern int has_promisor_remote(void);
-extern int promisor_remote_get_direct(struct repository *repo,
- const struct object_id *oids,
- int oid_nr);
+void promisor_remote_reinit(void);
+struct promisor_remote *promisor_remote_find(const char *remote_name);
+int has_promisor_remote(void);
+int promisor_remote_get_direct(struct repository *repo,
+ const struct object_id *oids,
+ int oid_nr);
/*
* This should be used only once from setup.c to set the value we got
* from the extensions.partialclone config option.
*/
-extern void set_repository_format_partial_clone(char *partial_clone);
+void set_repository_format_partial_clone(char *partial_clone);
#endif /* PROMISOR_REMOTE_H */
diff --git a/quote.c b/quote.c
index c8ba6b3..24a58ba 100644
--- a/quote.c
+++ b/quote.c
@@ -48,6 +48,12 @@ void sq_quote_buf_pretty(struct strbuf *dst, const char *src)
static const char ok_punct[] = "+,-./:=@_^";
const char *p;
+ /* Avoid losing a zero-length string by adding '' */
+ if (!*src) {
+ strbuf_addstr(dst, "''");
+ return;
+ }
+
for (p = src; *p; p++) {
if (!isalpha(*p) && !isdigit(*p) && !strchr(ok_punct, *p)) {
sq_quote_buf(dst, src);
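
The quote.c hunk keeps a zero-length argument visible by emitting `''` instead of silently dropping it from the pretty-quoted command line. A tiny illustration of that special case, with the rest of the quoting logic elided; print_pretty_quoted() is a stand-in, not Git's sq_quote_buf_pretty():

#include <stdio.h>

/* stand-in for sq_quote_buf_pretty(); only the empty-string case is shown */
static void print_pretty_quoted(const char *src)
{
	if (!*src) {
		fputs("''", stdout);	/* keep the empty argument visible */
		return;
	}
	fputs(src, stdout);		/* the real code also quotes unsafe chars */
}

int main(void)
{
	fputs("git foo ", stdout);
	print_pretty_quoted("");
	putchar('\n');			/* prints: git foo '' */
	return 0;
}
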
diff --git a/range-diff.c b/range-diff.c
index ba1e9a4..7fed5a3 100644
--- a/range-diff.c
+++ b/range-diff.c
@@ -52,6 +52,7 @@ static int read_patches(const char *range, struct string_list *list)
argv_array_pushl(&cp.args, "log", "--no-color", "-p", "--no-merges",
"--reverse", "--date-order", "--decorate=no",
+ "--no-prefix",
/*
* Choose indicators that are not used anywhere
* else in diffs, but still look reasonable
@@ -111,7 +112,7 @@ static int read_patches(const char *range, struct string_list *list)
if (!util->diff_offset)
util->diff_offset = buf.len;
line[len - 1] = '\n';
- len = parse_git_diff_header(&root, &linenr, 1, line,
+ len = parse_git_diff_header(&root, &linenr, 0, line,
len, size, &patch);
if (len < 0)
die(_("could not parse git header '%.*s'"), (int)len, line);
@@ -217,8 +218,8 @@ static void find_exact_matches(struct string_list *a, struct string_list *b)
util->i = i;
util->patch = a->items[i].string;
util->diff = util->patch + util->diff_offset;
- hashmap_entry_init(util, strhash(util->diff));
- hashmap_add(&map, util);
+ hashmap_entry_init(&util->e, strhash(util->diff));
+ hashmap_add(&map, &util->e);
}
/* Now try to find exact matches in b */
@@ -228,8 +229,8 @@ static void find_exact_matches(struct string_list *a, struct string_list *b)
util->i = i;
util->patch = b->items[i].string;
util->diff = util->patch + util->diff_offset;
- hashmap_entry_init(util, strhash(util->diff));
- other = hashmap_remove(&map, util, NULL);
+ hashmap_entry_init(&util->e, strhash(util->diff));
+ other = hashmap_remove_entry(&map, util, e, NULL);
if (other) {
if (other->matching >= 0)
BUG("already assigned!");
@@ -239,7 +240,7 @@ static void find_exact_matches(struct string_list *a, struct string_list *b)
}
}
- hashmap_free(&map, 0);
+ hashmap_free(&map);
}
static void diffsize_consume(void *data, char *line, unsigned long len)
diff --git a/read-cache.c b/read-cache.c
index cff1280..133f790 100644
--- a/read-cache.c
+++ b/read-cache.c
@@ -1276,7 +1276,7 @@ static int add_index_entry_with_check(struct index_state *istate, struct cache_e
*/
if (istate->cache_nr > 0 &&
strcmp(ce->name, istate->cache[istate->cache_nr - 1]->name) > 0)
- pos = -istate->cache_nr - 1;
+ pos = index_pos_to_insert_pos(istate->cache_nr);
else
pos = index_name_stage_pos(istate, ce->name, ce_namelen(ce), ce_stage(ce));
@@ -1472,6 +1472,27 @@ static void show_file(const char * fmt, const char * name, int in_porcelain,
printf(fmt, name);
}
+int repo_refresh_and_write_index(struct repository *repo,
+ unsigned int refresh_flags,
+ unsigned int write_flags,
+ int gentle,
+ const struct pathspec *pathspec,
+ char *seen, const char *header_msg)
+{
+ struct lock_file lock_file = LOCK_INIT;
+ int fd, ret = 0;
+
+ fd = repo_hold_locked_index(repo, &lock_file, 0);
+ if (!gentle && fd < 0)
+ return -1;
+ if (refresh_index(repo->index, refresh_flags, pathspec, seen, header_msg))
+ ret = 1;
+ if (0 <= fd && write_locked_index(repo->index, &lock_file, COMMIT_LOCK | write_flags))
+ ret = -1;
+ return ret;
+}
+
+
int refresh_index(struct index_state *istate, unsigned int flags,
const struct pathspec *pathspec,
char *seen, const char *header_msg)
@@ -1894,7 +1915,7 @@ static size_t estimate_cache_size(size_t ondisk_size, unsigned int entries)
/*
* Account for potential alignment differences.
*/
- per_entry += align_padding_size(sizeof(struct cache_entry), -sizeof(struct ondisk_cache_entry));
+ per_entry += align_padding_size(per_entry, 0);
return ondisk_size + entries * per_entry;
}
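
repo_refresh_and_write_index() above wraps the usual lock, refresh, write-back sequence and encodes the outcome in its return value. The sketch below mirrors that flow with placeholder stubs; none of the helpers here are Git's real functions.

#include <stdio.h>

/* placeholder stubs; Git's real helpers take a repository, lock file, ... */
static int hold_lock(void)	{ return 3; }	/* pretend fd, <0 on failure */
static int refresh(void)	{ return 0; }	/* non-zero: something to report */
static int write_index(int fd)	{ (void)fd; return 0; }	/* non-zero: error */

static int refresh_and_write(int gentle)
{
	int fd = hold_lock(), ret = 0;

	if (!gentle && fd < 0)
		return -1;		/* caller insisted on taking the lock */
	if (refresh())
		ret = 1;		/* refresh found something to report */
	if (0 <= fd && write_index(fd))
		ret = -1;		/* writing the result back failed */
	return ret;
}

int main(void)
{
	printf("result: %d\n", refresh_and_write(0));
	return 0;
}
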
diff --git a/ref-filter.c b/ref-filter.c
index 220e9bd..6867e33 100644
--- a/ref-filter.c
+++ b/ref-filter.c
@@ -79,17 +79,20 @@ static struct expand_data {
} oi, oi_deref;
struct ref_to_worktree_entry {
- struct hashmap_entry ent; /* must be the first member! */
+ struct hashmap_entry ent;
struct worktree *wt; /* key is wt->head_ref */
};
static int ref_to_worktree_map_cmpfnc(const void *unused_lookupdata,
- const void *existing_hashmap_entry_to_test,
- const void *key,
+ const struct hashmap_entry *eptr,
+ const struct hashmap_entry *kptr,
const void *keydata_aka_refname)
{
- const struct ref_to_worktree_entry *e = existing_hashmap_entry_to_test;
- const struct ref_to_worktree_entry *k = key;
+ const struct ref_to_worktree_entry *e, *k;
+
+ e = container_of(eptr, const struct ref_to_worktree_entry, ent);
+ k = container_of(kptr, const struct ref_to_worktree_entry, ent);
+
return strcmp(e->wt->head_ref,
keydata_aka_refname ? keydata_aka_refname : k->wt->head_ref);
}
@@ -1565,9 +1568,10 @@ static void populate_worktree_map(struct hashmap *map, struct worktree **worktre
struct ref_to_worktree_entry *entry;
entry = xmalloc(sizeof(*entry));
entry->wt = worktrees[i];
- hashmap_entry_init(entry, strhash(worktrees[i]->head_ref));
+ hashmap_entry_init(&entry->ent,
+ strhash(worktrees[i]->head_ref));
- hashmap_add(map, entry);
+ hashmap_add(map, &entry->ent);
}
}
}
@@ -1584,18 +1588,20 @@ static void lazy_init_worktree_map(void)
static char *get_worktree_path(const struct used_atom *atom, const struct ref_array_item *ref)
{
- struct hashmap_entry entry;
+ struct hashmap_entry entry, *e;
struct ref_to_worktree_entry *lookup_result;
lazy_init_worktree_map();
hashmap_entry_init(&entry, strhash(ref->refname));
- lookup_result = hashmap_get(&(ref_to_worktree_map.map), &entry, ref->refname);
+ e = hashmap_get(&(ref_to_worktree_map.map), &entry, ref->refname);
- if (lookup_result)
- return xstrdup(lookup_result->wt->path);
- else
+ if (!e)
return xstrdup("");
+
+ lookup_result = container_of(e, struct ref_to_worktree_entry, ent);
+
+ return xstrdup(lookup_result->wt->path);
}
/*
@@ -2166,7 +2172,8 @@ void ref_array_clear(struct ref_array *array)
used_atom_cnt = 0;
if (ref_to_worktree_map.worktrees) {
- hashmap_free(&(ref_to_worktree_map.map), 1);
+ hashmap_free_entries(&(ref_to_worktree_map.map),
+ struct ref_to_worktree_entry, ent);
free_worktrees(ref_to_worktree_map.worktrees);
ref_to_worktree_map.worktrees = NULL;
}
diff --git a/refs.c b/refs.c
index cd297ee..1ab0bb5 100644
--- a/refs.c
+++ b/refs.c
@@ -1772,7 +1772,7 @@ int resolve_gitlink_ref(const char *submodule, const char *refname,
struct ref_store_hash_entry
{
- struct hashmap_entry ent; /* must be the first member! */
+ struct hashmap_entry ent;
struct ref_store *refs;
@@ -1781,11 +1781,16 @@ struct ref_store_hash_entry
};
static int ref_store_hash_cmp(const void *unused_cmp_data,
- const void *entry, const void *entry_or_key,
+ const struct hashmap_entry *eptr,
+ const struct hashmap_entry *entry_or_key,
const void *keydata)
{
- const struct ref_store_hash_entry *e1 = entry, *e2 = entry_or_key;
- const char *name = keydata ? keydata : e2->name;
+ const struct ref_store_hash_entry *e1, *e2;
+ const char *name;
+
+ e1 = container_of(eptr, const struct ref_store_hash_entry, ent);
+ e2 = container_of(entry_or_key, const struct ref_store_hash_entry, ent);
+ name = keydata ? keydata : e2->name;
return strcmp(e1->name, name);
}
@@ -1796,7 +1801,7 @@ static struct ref_store_hash_entry *alloc_ref_store_hash_entry(
struct ref_store_hash_entry *entry;
FLEX_ALLOC_STR(entry, name, name);
- hashmap_entry_init(entry, strhash(name));
+ hashmap_entry_init(&entry->ent, strhash(name));
entry->refs = refs;
return entry;
}
@@ -1815,12 +1820,15 @@ static struct ref_store *lookup_ref_store_map(struct hashmap *map,
const char *name)
{
struct ref_store_hash_entry *entry;
+ unsigned int hash;
if (!map->tablesize)
/* It's initialized on demand in register_ref_store(). */
return NULL;
- entry = hashmap_get_from_hash(map, strhash(name), name);
+ hash = strhash(name);
+ entry = hashmap_get_entry_from_hash(map, hash, name,
+ struct ref_store_hash_entry, ent);
return entry ? entry->refs : NULL;
}
@@ -1863,10 +1871,13 @@ static void register_ref_store_map(struct hashmap *map,
struct ref_store *refs,
const char *name)
{
+ struct ref_store_hash_entry *entry;
+
if (!map->tablesize)
hashmap_init(map, ref_store_hash_cmp, NULL, 0);
- if (hashmap_put(map, alloc_ref_store_hash_entry(name, refs)))
+ entry = alloc_ref_store_hash_entry(name, refs);
+ if (hashmap_put(map, &entry->ent))
BUG("%s ref_store '%s' initialized twice", type, name);
}
diff --git a/remote.c b/remote.c
index e50f760..5c4666b 100644
--- a/remote.c
+++ b/remote.c
@@ -111,14 +111,16 @@ struct remotes_hash_key {
};
static int remotes_hash_cmp(const void *unused_cmp_data,
- const void *entry,
- const void *entry_or_key,
+ const struct hashmap_entry *eptr,
+ const struct hashmap_entry *entry_or_key,
const void *keydata)
{
- const struct remote *a = entry;
- const struct remote *b = entry_or_key;
+ const struct remote *a, *b;
const struct remotes_hash_key *key = keydata;
+ a = container_of(eptr, const struct remote, ent);
+ b = container_of(entry_or_key, const struct remote, ent);
+
if (key)
return strncmp(a->name, key->str, key->len) || a->name[key->len];
else
@@ -135,7 +137,7 @@ static struct remote *make_remote(const char *name, int len)
{
struct remote *ret, *replaced;
struct remotes_hash_key lookup;
- struct hashmap_entry lookup_entry;
+ struct hashmap_entry lookup_entry, *e;
if (!len)
len = strlen(name);
@@ -145,8 +147,9 @@ static struct remote *make_remote(const char *name, int len)
lookup.len = len;
hashmap_entry_init(&lookup_entry, memhash(name, len));
- if ((ret = hashmap_get(&remotes_hash, &lookup_entry, &lookup)) != NULL)
- return ret;
+ e = hashmap_get(&remotes_hash, &lookup_entry, &lookup);
+ if (e)
+ return container_of(e, struct remote, ent);
ret = xcalloc(1, sizeof(struct remote));
ret->prune = -1; /* unspecified */
@@ -158,8 +161,8 @@ static struct remote *make_remote(const char *name, int len)
ALLOC_GROW(remotes, remotes_nr + 1, remotes_alloc);
remotes[remotes_nr++] = ret;
- hashmap_entry_init(ret, lookup_entry.hash);
- replaced = hashmap_put(&remotes_hash, ret);
+ hashmap_entry_init(&ret->ent, lookup_entry.hash);
+ replaced = hashmap_put_entry(&remotes_hash, ret, ent);
assert(replaced == NULL); /* no previous entry overwritten */
return ret;
}
diff --git a/remote.h b/remote.h
index 83e8856..0e1d2b2 100644
--- a/remote.h
+++ b/remote.h
@@ -14,7 +14,7 @@ enum {
};
struct remote {
- struct hashmap_entry ent; /* must be first */
+ struct hashmap_entry ent;
const char *name;
int origin, configured_in_repo;
diff --git a/repository.h b/repository.h
index fe0b5f5..fe42197 100644
--- a/repository.h
+++ b/repository.h
@@ -125,6 +125,9 @@ struct repository {
/* A unique-id for tracing purposes. */
int trace2_repo_id;
+ /* True if commit-graph has been disabled within this process. */
+ int commit_graph_disabled;
+
/* Configurations */
/* Indicate if a repository has a different 'commondir' from 'gitdir' */
diff --git a/rerere.c b/rerere.c
index 17abb47..3e51fdf 100644
--- a/rerere.c
+++ b/rerere.c
@@ -52,7 +52,7 @@ static void free_rerere_id(struct string_list_item *item)
static const char *rerere_id_hex(const struct rerere_id *id)
{
- return sha1_to_hex(id->collection->hash);
+ return hash_to_hex(id->collection->hash);
}
static void fit_variant(struct rerere_dir *rr_dir, int variant)
@@ -115,7 +115,7 @@ static int is_rr_file(const char *name, const char *filename, int *variant)
static void scan_rerere_dir(struct rerere_dir *rr_dir)
{
struct dirent *de;
- DIR *dir = opendir(git_path("rr-cache/%s", sha1_to_hex(rr_dir->hash)));
+ DIR *dir = opendir(git_path("rr-cache/%s", hash_to_hex(rr_dir->hash)));
if (!dir)
return;
@@ -186,9 +186,9 @@ static struct rerere_id *new_rerere_id_hex(char *hex)
return id;
}
-static struct rerere_id *new_rerere_id(unsigned char *sha1)
+static struct rerere_id *new_rerere_id(unsigned char *hash)
{
- return new_rerere_id_hex(sha1_to_hex(sha1));
+ return new_rerere_id_hex(hash_to_hex(hash));
}
/*
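
The rerere.c hunks swap sha1_to_hex() for the hash-algorithm-agnostic hash_to_hex(), whose output length follows the active algorithm. A generic binary-to-hex sketch of that idea, with rawsz supplied by the caller rather than hard-coded to SHA-1's 20 bytes; this is illustrative, not Git's implementation:

#include <stdio.h>

/* format rawsz bytes as lowercase hex; rawsz comes from the hash algorithm */
static void bin_to_hex(char *out, const unsigned char *bin, size_t rawsz)
{
	static const char hex[] = "0123456789abcdef";
	size_t i;

	for (i = 0; i < rawsz; i++) {
		out[2 * i] = hex[bin[i] >> 4];
		out[2 * i + 1] = hex[bin[i] & 0xf];
	}
	out[2 * rawsz] = '\0';
}

int main(void)
{
	unsigned char raw[4] = { 0xde, 0xad, 0xbe, 0xef };
	char out[2 * sizeof(raw) + 1];

	bin_to_hex(out, raw, sizeof(raw));
	puts(out);			/* deadbeef */
	return 0;
}
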
diff --git a/revision.c b/revision.c
index a2406c4..0e39b2b 100644
--- a/revision.c
+++ b/revision.c
@@ -28,6 +28,7 @@
#include "commit-graph.h"
#include "prio-queue.h"
#include "hashmap.h"
+#include "utf8.h"
volatile show_early_output_fn_t show_early_output;
@@ -107,30 +108,34 @@ struct path_and_oids_entry {
};
static int path_and_oids_cmp(const void *hashmap_cmp_fn_data,
- const struct path_and_oids_entry *e1,
- const struct path_and_oids_entry *e2,
+ const struct hashmap_entry *eptr,
+ const struct hashmap_entry *entry_or_key,
const void *keydata)
{
+ const struct path_and_oids_entry *e1, *e2;
+
+ e1 = container_of(eptr, const struct path_and_oids_entry, ent);
+ e2 = container_of(entry_or_key, const struct path_and_oids_entry, ent);
+
return strcmp(e1->path, e2->path);
}
static void paths_and_oids_init(struct hashmap *map)
{
- hashmap_init(map, (hashmap_cmp_fn) path_and_oids_cmp, NULL, 0);
+ hashmap_init(map, path_and_oids_cmp, NULL, 0);
}
static void paths_and_oids_clear(struct hashmap *map)
{
struct hashmap_iter iter;
struct path_and_oids_entry *entry;
- hashmap_iter_init(map, &iter);
- while ((entry = (struct path_and_oids_entry *)hashmap_iter_next(&iter))) {
+ hashmap_for_each_entry(map, &iter, entry, ent /* member name */) {
oidset_clear(&entry->trees);
free(entry->path);
}
- hashmap_free(map, 1);
+ hashmap_free_entries(map, struct path_and_oids_entry, ent);
}
static void paths_and_oids_insert(struct hashmap *map,
@@ -141,18 +146,19 @@ static void paths_and_oids_insert(struct hashmap *map,
struct path_and_oids_entry key;
struct path_and_oids_entry *entry;
- hashmap_entry_init(&key, hash);
+ hashmap_entry_init(&key.ent, hash);
/* use a shallow copy for the lookup */
key.path = (char *)path;
oidset_init(&key.trees, 0);
- if (!(entry = (struct path_and_oids_entry *)hashmap_get(map, &key, NULL))) {
+ entry = hashmap_get_entry(map, &key, ent, NULL);
+ if (!entry) {
entry = xcalloc(1, sizeof(struct path_and_oids_entry));
- hashmap_entry_init(entry, hash);
+ hashmap_entry_init(&entry->ent, hash);
entry->path = xstrdup(key.path);
oidset_init(&entry->trees, 16);
- hashmap_put(map, entry);
+ hashmap_put(map, &entry->ent);
}
oidset_insert(&entry->trees, oid);
@@ -235,8 +241,7 @@ void mark_trees_uninteresting_sparse(struct repository *r,
add_children_by_path(r, tree, &map);
}
- hashmap_iter_init(&map, &map_iter);
- while ((entry = hashmap_iter_next(&map_iter)))
+ hashmap_for_each_entry(&map, &map_iter, entry, ent /* member name */)
mark_trees_uninteresting_sparse(r, &entry->trees);
paths_and_oids_clear(&map);
@@ -2061,7 +2066,6 @@ static int handle_revision_opt(struct rev_info *revs, int argc, const char **arg
revs->simplify_by_decoration = 1;
revs->limited = 1;
revs->prune = 1;
- load_ref_decorations(NULL, DECORATE_SHORT_REFS);
} else if (!strcmp(arg, "--date-order")) {
revs->sort_order = REV_SORT_BY_COMMIT_DATE;
revs->topo_order = 1;
@@ -2688,6 +2692,8 @@ int setup_revisions(int argc, const char **argv, struct rev_info *revs, struct s
grep_commit_pattern_type(GREP_PATTERN_TYPE_UNSPECIFIED,
&revs->grep_filter);
+ if (!is_encoding_utf8(get_log_output_encoding()))
+ revs->grep_filter.ignore_locale = 1;
compile_grep_patterns(&revs->grep_filter);
if (revs->reverse && revs->reflog_info)
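
The setup_revisions() hunk disables locale-dependent grep matching when the log output encoding is not UTF-8. The helper below only approximates is_encoding_utf8() to show the shape of that check; it is not Git's implementation.

#include <stdio.h>
#include <strings.h>

/* rough approximation of is_encoding_utf8(); NULL means "default, UTF-8" */
static int is_utf8_name(const char *enc)
{
	return !enc || !strcasecmp(enc, "utf-8") || !strcasecmp(enc, "utf8");
}

int main(void)
{
	const char *encodings[] = { "UTF-8", "ISO-8859-1" };
	int i;

	for (i = 0; i < 2; i++)
		printf("%s -> ignore_locale=%d\n",
		       encodings[i], !is_utf8_name(encodings[i]));
	return 0;
}
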
diff --git a/send-pack.c b/send-pack.c
index 6dc16c3..34c77cb 100644
--- a/send-pack.c
+++ b/send-pack.c
@@ -40,7 +40,8 @@ int option_parse_push_signed(const struct option *opt,
static void feed_object(const struct object_id *oid, FILE *fh, int negative)
{
- if (negative && !has_object_file(oid))
+ if (negative &&
+ !has_object_file_with_flags(oid, OBJECT_INFO_SKIP_FETCH_OBJECT))
return;
if (negative)
diff --git a/sequencer.c b/sequencer.c
index d648aaf..9d5964f 100644
--- a/sequencer.c
+++ b/sequencer.c
@@ -586,7 +586,7 @@ static int do_recursive_merge(struct repository *r,
struct replay_opts *opts)
{
struct merge_options o;
- struct tree *result, *next_tree, *base_tree, *head_tree;
+ struct tree *next_tree, *base_tree, *head_tree;
int clean;
char **xopt;
struct lock_file index_lock = LOCK_INIT;
@@ -613,11 +613,10 @@ static int do_recursive_merge(struct repository *r,
clean = merge_trees(&o,
head_tree,
- next_tree, base_tree, &result);
+ next_tree, base_tree);
if (is_rebase_i(opts) && clean <= 0)
fputs(o.obuf.buf, stdout);
strbuf_release(&o.obuf);
- diff_warn_rename_limit("merge.renamelimit", o.needed_rename_limit, 0);
if (clean < 0) {
rollback_lock_file(&index_lock);
return clean;
@@ -869,34 +868,6 @@ static char *get_author(const char *message)
return NULL;
}
-/* Read author-script and return an ident line (author <email> timestamp) */
-static const char *read_author_ident(struct strbuf *buf)
-{
- struct strbuf out = STRBUF_INIT;
- char *name, *email, *date;
-
- if (read_author_script(rebase_path_author_script(),
- &name, &email, &date, 0))
- return NULL;
-
- /* validate date since fmt_ident() will die() on bad value */
- if (parse_date(date, &out)){
- warning(_("invalid date format '%s' in '%s'"),
- date, rebase_path_author_script());
- strbuf_release(&out);
- return NULL;
- }
-
- strbuf_reset(&out);
- strbuf_addstr(&out, fmt_ident(name, email, WANT_AUTHOR_IDENT, date, 0));
- strbuf_swap(buf, &out);
- strbuf_release(&out);
- free(name);
- free(email);
- free(date);
- return buf->buf;
-}
-
static const char staged_changes_advice[] =
N_("you have staged changes in your working tree\n"
"If these changes are meant to be squashed into the previous commit, run:\n"
@@ -954,47 +925,6 @@ static int run_git_commit(struct repository *r,
{
struct child_process cmd = CHILD_PROCESS_INIT;
- if ((flags & CREATE_ROOT_COMMIT) && !(flags & AMEND_MSG)) {
- struct strbuf msg = STRBUF_INIT, script = STRBUF_INIT;
- const char *author = NULL;
- struct object_id root_commit, *cache_tree_oid;
- int res = 0;
-
- if (is_rebase_i(opts)) {
- author = read_author_ident(&script);
- if (!author) {
- strbuf_release(&script);
- return -1;
- }
- }
-
- if (!defmsg)
- BUG("root commit without message");
-
- if (!(cache_tree_oid = get_cache_tree_oid(r->index)))
- res = -1;
-
- if (!res)
- res = strbuf_read_file(&msg, defmsg, 0);
-
- if (res <= 0)
- res = error_errno(_("could not read '%s'"), defmsg);
- else
- res = commit_tree(msg.buf, msg.len, cache_tree_oid,
- NULL, &root_commit, author,
- opts->gpg_sign);
-
- strbuf_release(&msg);
- strbuf_release(&script);
- if (!res) {
- update_ref(NULL, "CHERRY_PICK_HEAD", &root_commit, NULL,
- REF_NO_DEREF, UPDATE_REFS_MSG_ON_ERR);
- res = update_ref(NULL, "HEAD", &root_commit, NULL, 0,
- UPDATE_REFS_MSG_ON_ERR);
- }
- return res < 0 ? error(_("writing root commit")) : 0;
- }
-
cmd.git_cmd = 1;
if (is_rebase_i(opts) && read_env_script(&cmd.env_array)) {
@@ -1378,7 +1308,7 @@ static int try_to_commit(struct repository *r,
struct object_id *oid)
{
struct object_id tree;
- struct commit *current_head;
+ struct commit *current_head = NULL;
struct commit_list *parents = NULL;
struct commit_extra_header *extra = NULL;
struct strbuf err = STRBUF_INIT;
@@ -1413,7 +1343,8 @@ static int try_to_commit(struct repository *r,
}
parents = copy_commit_list(current_head->parents);
extra = read_commit_extra_headers(current_head, exclude_gpgsig);
- } else if (current_head) {
+ } else if (current_head &&
+ (!(flags & CREATE_ROOT_COMMIT) || (flags & AMEND_MSG))) {
commit_list_insert(current_head, &parents);
}
@@ -1490,8 +1421,7 @@ static int do_commit(struct repository *r,
{
int res = 1;
- if (!(flags & EDIT_MSG) && !(flags & VERIFY_MSG) &&
- !(flags & CREATE_ROOT_COMMIT)) {
+ if (!(flags & EDIT_MSG) && !(flags & VERIFY_MSG)) {
struct object_id oid;
struct strbuf sb = STRBUF_INIT;
@@ -1775,7 +1705,7 @@ static int do_pick_commit(struct repository *r,
enum todo_command command,
struct commit *commit,
struct replay_opts *opts,
- int final_fixup)
+ int final_fixup, int *check_todo)
{
unsigned int flags = opts->edit ? EDIT_MSG : 0;
const char *msg_file = opts->edit ? NULL : git_path_merge_msg(r);
@@ -1785,7 +1715,7 @@ static int do_pick_commit(struct repository *r,
char *author = NULL;
struct commit_message msg = { NULL, NULL, NULL, NULL };
struct strbuf msgbuf = STRBUF_INIT;
- int res, unborn = 0, allow;
+ int res, unborn = 0, reword = 0, allow;
if (opts->no_commit) {
/*
@@ -1855,7 +1785,7 @@ static int do_pick_commit(struct repository *r,
opts);
if (res || command != TODO_REWORD)
goto leave;
- flags |= EDIT_MSG | AMEND_MSG | VERIFY_MSG;
+ reword = 1;
msg_file = NULL;
goto fast_forward_edit;
}
@@ -1913,7 +1843,7 @@ static int do_pick_commit(struct repository *r,
}
if (command == TODO_REWORD)
- flags |= EDIT_MSG | VERIFY_MSG;
+ reword = 1;
else if (is_fixup(command)) {
if (update_squash_messages(r, command, commit, opts))
return -1;
@@ -1997,13 +1927,21 @@ static int do_pick_commit(struct repository *r,
} else if (allow)
flags |= ALLOW_EMPTY;
if (!opts->no_commit) {
-fast_forward_edit:
if (author || command == TODO_REVERT || (flags & AMEND_MSG))
res = do_commit(r, msg_file, author, opts, flags);
else
res = error(_("unable to parse commit author"));
+ *check_todo = !!(flags & EDIT_MSG);
+ if (!res && reword) {
+fast_forward_edit:
+ res = run_git_commit(r, NULL, opts, EDIT_MSG |
+ VERIFY_MSG | AMEND_MSG |
+ (flags & ALLOW_EMPTY));
+ *check_todo = 1;
+ }
}
+
if (!res && final_fixup) {
unlink(rebase_path_fixup_msg());
unlink(rebase_path_squash_msg());
@@ -3578,7 +3516,7 @@ static int do_merge(struct repository *r,
goto leave_merge;
}
- write_message(oid_to_hex(&merge_commit->object.oid), GIT_SHA1_HEXSZ,
+ write_message(oid_to_hex(&merge_commit->object.oid), the_hash_algo->hexsz,
git_path_merge_head(r), 0);
write_message("no-ff", 5, git_path_merge_mode(r), 0);
@@ -3828,6 +3766,7 @@ static int pick_commits(struct repository *r,
while (todo_list->current < todo_list->nr) {
struct todo_item *item = todo_list->items + todo_list->current;
const char *arg = todo_item_get_arg(todo_list, item);
+ int check_todo = 0;
if (save_todo(todo_list, opts))
return -1;
@@ -3866,7 +3805,8 @@ static int pick_commits(struct repository *r,
command_to_string(item->command), NULL),
1);
res = do_pick_commit(r, item->command, item->commit,
- opts, is_final_fixup(todo_list));
+ opts, is_final_fixup(todo_list),
+ &check_todo);
if (is_rebase_i(opts) && res < 0) {
/* Reschedule */
advise(_(rescheduled_advice),
@@ -3923,7 +3863,6 @@ static int pick_commits(struct repository *r,
} else if (item->command == TODO_EXEC) {
char *end_of_arg = (char *)(arg + item->arg_len);
int saved = *end_of_arg;
- struct stat st;
if (!opts->verbose)
term_clear_line();
@@ -3934,17 +3873,8 @@ static int pick_commits(struct repository *r,
if (res) {
if (opts->reschedule_failed_exec)
reschedule = 1;
- } else if (stat(get_todo_path(opts), &st))
- res = error_errno(_("could not stat '%s'"),
- get_todo_path(opts));
- else if (match_stat_data(&todo_list->stat, &st)) {
- /* Reread the todo file if it has changed. */
- todo_list_release(todo_list);
- if (read_populate_todo(r, todo_list, opts))
- res = -1; /* message was printed */
- /* `current` will be incremented below */
- todo_list->current = -1;
}
+ check_todo = 1;
} else if (item->command == TODO_LABEL) {
if ((res = do_label(r, arg, item->arg_len)))
reschedule = 1;
@@ -3980,6 +3910,20 @@ static int pick_commits(struct repository *r,
item->commit,
arg, item->arg_len,
opts, res, 0);
+ } else if (check_todo && !res) {
+ struct stat st;
+
+ if (stat(get_todo_path(opts), &st)) {
+ res = error_errno(_("could not stat '%s'"),
+ get_todo_path(opts));
+ } else if (match_stat_data(&todo_list->stat, &st)) {
+ /* Reread the todo file if it has changed. */
+ todo_list_release(todo_list);
+ if (read_populate_todo(r, todo_list, opts))
+ res = -1; /* message was printed */
+ /* `current` will be incremented below */
+ todo_list->current = -1;
+ }
}
todo_list->current++;
@@ -4306,9 +4250,12 @@ static int single_pick(struct repository *r,
struct commit *cmit,
struct replay_opts *opts)
{
+ int check_todo;
+
setenv(GIT_REFLOG_ACTION, action_name(opts), 0);
return do_pick_commit(r, opts->action == REPLAY_PICK ?
- TODO_PICK : TODO_REVERT, cmit, opts, 0);
+ TODO_PICK : TODO_REVERT, cmit, opts, 0,
+ &check_todo);
}
int sequencer_pick_revisions(struct repository *r,
@@ -4450,9 +4397,14 @@ struct labels_entry {
char label[FLEX_ARRAY];
};
-static int labels_cmp(const void *fndata, const struct labels_entry *a,
- const struct labels_entry *b, const void *key)
+static int labels_cmp(const void *fndata, const struct hashmap_entry *eptr,
+ const struct hashmap_entry *entry_or_key, const void *key)
{
+ const struct labels_entry *a, *b;
+
+ a = container_of(eptr, const struct labels_entry, entry);
+ b = container_of(entry_or_key, const struct labels_entry, entry);
+
return key ? strcmp(a->label, key) : strcmp(a->label, b->label);
}
@@ -4497,7 +4449,7 @@ static const char *label_oid(struct object_id *oid, const char *label,
char *p;
strbuf_reset(&state->buf);
- strbuf_grow(&state->buf, GIT_SHA1_HEXSZ);
+ strbuf_grow(&state->buf, GIT_MAX_HEXSZ);
label = p = state->buf.buf;
find_unique_abbrev_r(p, oid, default_abbrev);
@@ -4510,7 +4462,7 @@ static const char *label_oid(struct object_id *oid, const char *label,
size_t i = strlen(p) + 1;
oid_to_hex_r(p, oid);
- for (; i < GIT_SHA1_HEXSZ; i++) {
+ for (; i < the_hash_algo->hexsz; i++) {
char save = p[i];
p[i] = '\0';
if (!hashmap_get_from_hash(&state->labels,
@@ -4548,8 +4500,8 @@ static const char *label_oid(struct object_id *oid, const char *label,
}
FLEX_ALLOC_STR(labels_entry, label, label);
- hashmap_entry_init(labels_entry, strihash(label));
- hashmap_add(&state->labels, labels_entry);
+ hashmap_entry_init(&labels_entry->entry, strihash(label));
+ hashmap_add(&state->labels, &labels_entry->entry);
FLEX_ALLOC_STR(string_entry, string, label);
oidcpy(&string_entry->entry.oid, oid);
@@ -4584,7 +4536,7 @@ static int make_script_with_merges(struct pretty_print_context *pp,
oidmap_init(&commit2todo, 0);
oidmap_init(&state.commit2label, 0);
- hashmap_init(&state.labels, (hashmap_cmp_fn) labels_cmp, NULL, 0);
+ hashmap_init(&state.labels, labels_cmp, NULL, 0);
strbuf_init(&state.buf, 32);
if (revs->cmdline.nr && (revs->cmdline.rev[0].flags & BOTTOM)) {
@@ -4779,7 +4731,7 @@ static int make_script_with_merges(struct pretty_print_context *pp,
oidmap_free(&commit2todo, 1);
oidmap_free(&state.commit2label, 1);
- hashmap_free(&state.labels, 1);
+ hashmap_free_entries(&state.labels, struct labels_entry, entry);
strbuf_release(&state.buf);
return 0;
@@ -5150,9 +5102,15 @@ struct subject2item_entry {
};
static int subject2item_cmp(const void *fndata,
- const struct subject2item_entry *a,
- const struct subject2item_entry *b, const void *key)
+ const struct hashmap_entry *eptr,
+ const struct hashmap_entry *entry_or_key,
+ const void *key)
{
+ const struct subject2item_entry *a, *b;
+
+ a = container_of(eptr, const struct subject2item_entry, entry);
+ b = container_of(entry_or_key, const struct subject2item_entry, entry);
+
return key ? strcmp(a->subject, key) : strcmp(a->subject, b->subject);
}
@@ -5185,8 +5143,7 @@ int todo_list_rearrange_squash(struct todo_list *todo_list)
* In that case, last[i] will indicate the index of the latest item to
* be moved to appear after the i'th.
*/
- hashmap_init(&subject2item, (hashmap_cmp_fn) subject2item_cmp,
- NULL, todo_list->nr);
+ hashmap_init(&subject2item, subject2item_cmp, NULL, todo_list->nr);
ALLOC_ARRAY(next, todo_list->nr);
ALLOC_ARRAY(tail, todo_list->nr);
ALLOC_ARRAY(subjects, todo_list->nr);
@@ -5229,8 +5186,11 @@ int todo_list_rearrange_squash(struct todo_list *todo_list)
break;
}
- if ((entry = hashmap_get_from_hash(&subject2item,
- strhash(p), p)))
+ entry = hashmap_get_entry_from_hash(&subject2item,
+ strhash(p), p,
+ struct subject2item_entry,
+ entry);
+ if (entry)
/* found by title */
i2 = entry->i;
else if (!strchr(p, ' ') &&
@@ -5264,8 +5224,9 @@ int todo_list_rearrange_squash(struct todo_list *todo_list)
strhash(subject), subject)) {
FLEX_ALLOC_MEM(entry, subject, subject, subject_len);
entry->i = i;
- hashmap_entry_init(entry, strhash(entry->subject));
- hashmap_put(&subject2item, entry);
+ hashmap_entry_init(&entry->entry,
+ strhash(entry->subject));
+ hashmap_put(&subject2item, &entry->entry);
}
}
@@ -5299,7 +5260,7 @@ int todo_list_rearrange_squash(struct todo_list *todo_list)
for (i = 0; i < todo_list->nr; i++)
free(subjects[i]);
free(subjects);
- hashmap_free(&subject2item, 1);
+ hashmap_free_entries(&subject2item, struct subject2item_entry, entry);
clear_commit_todo_item(&commit_todo);
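
With the pick_commits() changes above, the todo list is re-read only when a step sets check_todo and the file's stat data no longer matches what was saved. A stand-alone sketch of that stat-compare pattern; the path and helpers are hypothetical, and match_stat_data() in Git compares more fields than this:

#include <stdio.h>
#include <sys/stat.h>

/* crude stand-in for match_stat_data(): did the file change underneath us? */
static int stat_changed(const struct stat *old, const struct stat *now)
{
	return old->st_mtime != now->st_mtime || old->st_size != now->st_size;
}

int main(int argc, char **argv)
{
	const char *todo_path = argc > 1 ? argv[1] : "todo.txt";	/* hypothetical */
	struct stat saved, now;

	if (stat(todo_path, &saved)) {
		fprintf(stderr, "nothing to watch at %s\n", todo_path);
		return 0;
	}
	/* ... run a step that might rewrite the todo list ... */
	if (!stat(todo_path, &now) && stat_changed(&saved, &now))
		puts("todo list changed on disk, re-reading it");
	return 0;
}
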
diff --git a/sha1-file.c b/sha1-file.c
index e85f249..188de57 100644
--- a/sha1-file.c
+++ b/sha1-file.c
@@ -55,7 +55,6 @@
"\x6f\xe1\x41\xf7\x74\x91\x20\xa3\x03\x72" \
"\x18\x13"
-const unsigned char null_sha1[GIT_MAX_RAWSZ];
const struct object_id null_oid;
static const struct object_id empty_tree_oid = {
EMPTY_TREE_SHA1_BIN_LITERAL
diff --git a/sha1-lookup.c b/sha1-lookup.c
index 796ab68..2918584 100644
--- a/sha1-lookup.c
+++ b/sha1-lookup.c
@@ -50,7 +50,7 @@ static uint32_t take2(const unsigned char *sha1)
* The sha1 of element i (between 0 and nr - 1) should be returned
* by "fn(i, table)".
*/
-int sha1_pos(const unsigned char *sha1, void *table, size_t nr,
+int sha1_pos(const unsigned char *hash, void *table, size_t nr,
sha1_access_fn fn)
{
size_t hi = nr;
@@ -63,14 +63,14 @@ int sha1_pos(const unsigned char *sha1, void *table, size_t nr,
if (nr != 1) {
size_t lov, hiv, miv, ofs;
- for (ofs = 0; ofs < 18; ofs += 2) {
+ for (ofs = 0; ofs < the_hash_algo->rawsz - 2; ofs += 2) {
lov = take2(fn(0, table) + ofs);
hiv = take2(fn(nr - 1, table) + ofs);
- miv = take2(sha1 + ofs);
+ miv = take2(hash + ofs);
if (miv < lov)
return -1;
if (hiv < miv)
- return -1 - nr;
+ return index_pos_to_insert_pos(nr);
if (lov != hiv) {
/*
* At this point miv could be equal
@@ -88,7 +88,7 @@ int sha1_pos(const unsigned char *sha1, void *table, size_t nr,
do {
int cmp;
- cmp = hashcmp(fn(mi, table), sha1);
+ cmp = hashcmp(fn(mi, table), hash);
if (!cmp)
return mi;
if (cmp > 0)
@@ -97,7 +97,7 @@ int sha1_pos(const unsigned char *sha1, void *table, size_t nr,
lo = mi + 1;
mi = lo + (hi - lo) / 2;
} while (lo < hi);
- return -lo-1;
+ return index_pos_to_insert_pos(lo);
}
int bsearch_hash(const unsigned char *sha1, const uint32_t *fanout_nbo,
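
sha1_pos() now reports "not found" through index_pos_to_insert_pos() instead of open-coding -lo-1. The self-contained binary search below shows the same "-1 - position" encoding and how a caller decodes it; it is a sketch of the convention, not Git's lookup code.

#include <stdio.h>

/* binary search that encodes "not found" as -1 - insertion_position */
static int lookup(const int *table, int nr, int key)
{
	int lo = 0, hi = nr;

	while (lo < hi) {
		int mi = lo + (hi - lo) / 2;

		if (table[mi] == key)
			return mi;
		if (table[mi] < key)
			lo = mi + 1;
		else
			hi = mi;
	}
	return -1 - lo;
}

int main(void)
{
	int table[] = { 2, 4, 8, 16 };
	int pos = lookup(table, 4, 5);

	if (pos < 0)
		printf("not found, would insert at %d\n", -1 - pos);	/* 2 */
	return 0;
}
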
diff --git a/sha1-name.c b/sha1-name.c
index c665e3f..200eb37 100644
--- a/sha1-name.c
+++ b/sha1-name.c
@@ -1160,13 +1160,22 @@ static enum get_oid_result get_oid_1(struct repository *r,
}
if (has_suffix) {
- int num = 0;
+ unsigned int num = 0;
int len1 = cp - name;
cp++;
- while (cp < name + len)
- num = num * 10 + *cp++ - '0';
+ while (cp < name + len) {
+ unsigned int digit = *cp++ - '0';
+ if (unsigned_mult_overflows(num, 10))
+ return MISSING_OBJECT;
+ num *= 10;
+ if (unsigned_add_overflows(num, digit))
+ return MISSING_OBJECT;
+ num += digit;
+ }
if (!num && len1 == len - 1)
num = 1;
+ else if (num > INT_MAX)
+ return MISSING_OBJECT;
if (has_suffix == '^')
return get_parent(r, name, len1, oid, num);
/* else if (has_suffix == '~') -- goes without saying */
@@ -1286,7 +1295,7 @@ static int get_oid_oneline(struct repository *r,
struct grab_nth_branch_switch_cbdata {
int remaining;
- struct strbuf buf;
+ struct strbuf *sb;
};
static int grab_nth_branch_switch(struct object_id *ooid, struct object_id *noid,
@@ -1304,8 +1313,8 @@ static int grab_nth_branch_switch(struct object_id *ooid, struct object_id *noid
return 0;
if (--(cb->remaining) == 0) {
len = target - match;
- strbuf_reset(&cb->buf);
- strbuf_add(&cb->buf, match, len);
+ strbuf_reset(cb->sb);
+ strbuf_add(cb->sb, match, len);
return 1; /* we are done */
}
return 0;
@@ -1338,18 +1347,15 @@ static int interpret_nth_prior_checkout(struct repository *r,
if (nth <= 0)
return -1;
cb.remaining = nth;
- strbuf_init(&cb.buf, 20);
+ cb.sb = buf;
retval = refs_for_each_reflog_ent_reverse(get_main_ref_store(r),
"HEAD", grab_nth_branch_switch, &cb);
if (0 < retval) {
- strbuf_reset(buf);
- strbuf_addbuf(buf, &cb.buf);
retval = brace - name + 1;
} else
retval = 0;
- strbuf_release(&cb.buf);
return retval;
}
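
The get_oid_1() hunk rejects rev~<N>/rev^<N> suffixes whose numbers would overflow while being parsed. Below is a simplified, stand-alone version of that overflow-checked parsing; the macros are reduced equivalents of unsigned_mult_overflows()/unsigned_add_overflows(), not the real ones.

#include <limits.h>
#include <stdio.h>

/* simplified equivalents of the overflow-check macros in git-compat-util.h */
#define mult_overflows(a, b) ((b) && (a) > UINT_MAX / (b))
#define add_overflows(a, b) ((a) > UINT_MAX - (b))

/* parse a decimal suffix, refusing values that would wrap around */
static int parse_num(const char *s, unsigned int *out)
{
	unsigned int num = 0;

	for (; *s >= '0' && *s <= '9'; s++) {
		unsigned int digit = *s - '0';

		if (mult_overflows(num, 10u))
			return -1;
		num *= 10;
		if (add_overflows(num, digit))
			return -1;
		num += digit;
	}
	if (*s)
		return -1;	/* reject trailing garbage */
	*out = num;
	return 0;
}

int main(void)
{
	unsigned int n;

	printf("%d\n", parse_num("123", &n));		/* 0, n == 123 */
	printf("%d\n", parse_num("99999999999", &n));	/* -1: would overflow */
	return 0;
}
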
diff --git a/shallow.c b/shallow.c
index 5fa2b15..7fd04af 100644
--- a/shallow.c
+++ b/shallow.c
@@ -12,10 +12,7 @@
#include "diff.h"
#include "revision.h"
#include "commit-slab.h"
-#include "revision.h"
#include "list-objects.h"
-#include "commit-slab.h"
-#include "repository.h"
#include "commit-reach.h"
void set_alternate_shallow_file(struct repository *r, const char *path, int override)
@@ -156,6 +153,8 @@ struct commit_list *get_shallow_commits(struct object_array *heads, int depth,
for (i = 0; i < depths.slab_count; i++) {
int j;
+ if (!depths.slab[i])
+ continue;
for (j = 0; j < depths.slab_size; j++)
free(depths.slab[i][j]);
}
diff --git a/compat/qsort.c b/stable-qsort.c
index 7d071af..6cbaf39 100644
--- a/compat/qsort.c
+++ b/stable-qsort.c
@@ -1,4 +1,4 @@
-#include "../git-compat-util.h"
+#include "git-compat-util.h"
/*
* A merge sort implementation, simplified from the qsort implementation
@@ -44,8 +44,8 @@ static void msort_with_tmp(void *b, size_t n, size_t s,
memcpy(b, t, (n - n2) * s);
}
-void git_qsort(void *b, size_t n, size_t s,
- int (*cmp)(const void *, const void *))
+void git_stable_qsort(void *b, size_t n, size_t s,
+ int (*cmp)(const void *, const void *))
{
const size_t size = st_mult(n, s);
char buf[1024];
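
Renaming git_qsort() to git_stable_qsort() advertises the property callers actually rely on: elements that compare equal keep their input order. The toy program below demonstrates what that stability means; the insertion sort is only a compact stand-in for the real merge sort.

#include <stdio.h>

struct rec { int key; char tag; };

/* insertion sort is stable: strict '>' never reorders equal keys */
static void stable_sort(struct rec *a, int n)
{
	int i, j;

	for (i = 1; i < n; i++) {
		struct rec tmp = a[i];

		for (j = i; j > 0 && a[j - 1].key > tmp.key; j--)
			a[j] = a[j - 1];
		a[j] = tmp;
	}
}

int main(void)
{
	struct rec r[] = { { 2, 'a' }, { 1, 'b' }, { 2, 'c' }, { 1, 'd' } };
	int i;

	stable_sort(r, 4);
	for (i = 0; i < 4; i++)
		printf("%d%c ", r[i].key, r[i].tag);	/* 1b 1d 2a 2c */
	printf("\n");
	return 0;
}
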
diff --git a/sub-process.c b/sub-process.c
index 3f4af93..1b1af9d 100644
--- a/sub-process.c
+++ b/sub-process.c
@@ -6,12 +6,14 @@
#include "pkt-line.h"
int cmd2process_cmp(const void *unused_cmp_data,
- const void *entry,
- const void *entry_or_key,
+ const struct hashmap_entry *eptr,
+ const struct hashmap_entry *entry_or_key,
const void *unused_keydata)
{
- const struct subprocess_entry *e1 = entry;
- const struct subprocess_entry *e2 = entry_or_key;
+ const struct subprocess_entry *e1, *e2;
+
+ e1 = container_of(eptr, const struct subprocess_entry, ent);
+ e2 = container_of(entry_or_key, const struct subprocess_entry, ent);
return strcmp(e1->cmd, e2->cmd);
}
@@ -20,9 +22,9 @@ struct subprocess_entry *subprocess_find_entry(struct hashmap *hashmap, const ch
{
struct subprocess_entry key;
- hashmap_entry_init(&key, strhash(cmd));
+ hashmap_entry_init(&key.ent, strhash(cmd));
key.cmd = cmd;
- return hashmap_get(hashmap, &key, NULL);
+ return hashmap_get_entry(hashmap, &key, ent, NULL);
}
int subprocess_read_status(int fd, struct strbuf *status)
@@ -58,7 +60,7 @@ void subprocess_stop(struct hashmap *hashmap, struct subprocess_entry *entry)
kill(entry->process.pid, SIGTERM);
finish_command(&entry->process);
- hashmap_remove(hashmap, entry, NULL);
+ hashmap_remove(hashmap, &entry->ent, NULL);
}
static void subprocess_exit_handler(struct child_process *process)
@@ -96,7 +98,7 @@ int subprocess_start(struct hashmap *hashmap, struct subprocess_entry *entry, co
return err;
}
- hashmap_entry_init(entry, strhash(cmd));
+ hashmap_entry_init(&entry->ent, strhash(cmd));
err = startfn(entry);
if (err) {
@@ -105,7 +107,7 @@ int subprocess_start(struct hashmap *hashmap, struct subprocess_entry *entry, co
return err;
}
- hashmap_add(hashmap, entry);
+ hashmap_add(hashmap, &entry->ent);
return 0;
}
diff --git a/sub-process.h b/sub-process.h
index 5c182fa..e85f21f 100644
--- a/sub-process.h
+++ b/sub-process.h
@@ -24,7 +24,7 @@
/* Members should not be accessed directly. */
struct subprocess_entry {
- struct hashmap_entry ent; /* must be the first member! */
+ struct hashmap_entry ent;
const char *cmd;
struct child_process process;
};
@@ -43,8 +43,8 @@ struct subprocess_capability {
/* Function to test two subprocess hashmap entries for equality. */
int cmd2process_cmp(const void *unused_cmp_data,
- const void *e1,
- const void *e2,
+ const struct hashmap_entry *e,
+ const struct hashmap_entry *entry_or_key,
const void *unused_keydata);
/*
diff --git a/submodule-config.c b/submodule-config.c
index 4264ee2..401a9b2 100644
--- a/submodule-config.c
+++ b/submodule-config.c
@@ -38,24 +38,28 @@ enum lookup_type {
};
static int config_path_cmp(const void *unused_cmp_data,
- const void *entry,
- const void *entry_or_key,
+ const struct hashmap_entry *eptr,
+ const struct hashmap_entry *entry_or_key,
const void *unused_keydata)
{
- const struct submodule_entry *a = entry;
- const struct submodule_entry *b = entry_or_key;
+ const struct submodule_entry *a, *b;
+
+ a = container_of(eptr, const struct submodule_entry, ent);
+ b = container_of(entry_or_key, const struct submodule_entry, ent);
return strcmp(a->config->path, b->config->path) ||
!oideq(&a->config->gitmodules_oid, &b->config->gitmodules_oid);
}
static int config_name_cmp(const void *unused_cmp_data,
- const void *entry,
- const void *entry_or_key,
+ const struct hashmap_entry *eptr,
+ const struct hashmap_entry *entry_or_key,
const void *unused_keydata)
{
- const struct submodule_entry *a = entry;
- const struct submodule_entry *b = entry_or_key;
+ const struct submodule_entry *a, *b;
+
+ a = container_of(eptr, const struct submodule_entry, ent);
+ b = container_of(entry_or_key, const struct submodule_entry, ent);
return strcmp(a->config->name, b->config->name) ||
!oideq(&a->config->gitmodules_oid, &b->config->gitmodules_oid);
@@ -95,12 +99,12 @@ static void submodule_cache_clear(struct submodule_cache *cache)
* allocation of struct submodule entries. Each is allocated by
* their .gitmodules blob sha1 and submodule name.
*/
- hashmap_iter_init(&cache->for_name, &iter);
- while ((entry = hashmap_iter_next(&iter)))
+ hashmap_for_each_entry(&cache->for_name, &iter, entry,
+ ent /* member name */)
free_one_config(entry);
- hashmap_free(&cache->for_path, 1);
- hashmap_free(&cache->for_name, 1);
+ hashmap_free_entries(&cache->for_path, struct submodule_entry, ent);
+ hashmap_free_entries(&cache->for_name, struct submodule_entry, ent);
cache->initialized = 0;
cache->gitmodules_read = 0;
}
@@ -123,9 +127,9 @@ static void cache_put_path(struct submodule_cache *cache,
unsigned int hash = hash_oid_string(&submodule->gitmodules_oid,
submodule->path);
struct submodule_entry *e = xmalloc(sizeof(*e));
- hashmap_entry_init(e, hash);
+ hashmap_entry_init(&e->ent, hash);
e->config = submodule;
- hashmap_put(&cache->for_path, e);
+ hashmap_put(&cache->for_path, &e->ent);
}
static void cache_remove_path(struct submodule_cache *cache,
@@ -135,9 +139,9 @@ static void cache_remove_path(struct submodule_cache *cache,
submodule->path);
struct submodule_entry e;
struct submodule_entry *removed;
- hashmap_entry_init(&e, hash);
+ hashmap_entry_init(&e.ent, hash);
e.config = submodule;
- removed = hashmap_remove(&cache->for_path, &e, NULL);
+ removed = hashmap_remove_entry(&cache->for_path, &e, ent, NULL);
free(removed);
}
@@ -147,9 +151,9 @@ static void cache_add(struct submodule_cache *cache,
unsigned int hash = hash_oid_string(&submodule->gitmodules_oid,
submodule->name);
struct submodule_entry *e = xmalloc(sizeof(*e));
- hashmap_entry_init(e, hash);
+ hashmap_entry_init(&e->ent, hash);
e->config = submodule;
- hashmap_add(&cache->for_name, e);
+ hashmap_add(&cache->for_name, &e->ent);
}
static const struct submodule *cache_lookup_path(struct submodule_cache *cache,
@@ -163,10 +167,10 @@ static const struct submodule *cache_lookup_path(struct submodule_cache *cache,
oidcpy(&key_config.gitmodules_oid, gitmodules_oid);
key_config.path = path;
- hashmap_entry_init(&key, hash);
+ hashmap_entry_init(&key.ent, hash);
key.config = &key_config;
- entry = hashmap_get(&cache->for_path, &key, NULL);
+ entry = hashmap_get_entry(&cache->for_path, &key, ent, NULL);
if (entry)
return entry->config;
return NULL;
@@ -183,10 +187,10 @@ static struct submodule *cache_lookup_name(struct submodule_cache *cache,
oidcpy(&key_config.gitmodules_oid, gitmodules_oid);
key_config.name = name;
- hashmap_entry_init(&key, hash);
+ hashmap_entry_init(&key.ent, hash);
key.config = &key_config;
- entry = hashmap_get(&cache->for_name, &key, NULL);
+ entry = hashmap_get_entry(&cache->for_name, &key, ent, NULL);
if (entry)
return entry->config;
return NULL;
@@ -550,7 +554,9 @@ static const struct submodule *config_from(struct submodule_cache *cache,
struct hashmap_iter iter;
struct submodule_entry *entry;
- entry = hashmap_iter_first(&cache->for_name, &iter);
+ entry = hashmap_iter_first_entry(&cache->for_name, &iter,
+ struct submodule_entry,
+ ent /* member name */);
if (!entry)
return NULL;
return entry->config;
diff --git a/t/helper/.gitignore b/t/helper/.gitignore
index 2bad28a..48c7bb0 100644
--- a/t/helper/.gitignore
+++ b/t/helper/.gitignore
@@ -1,5 +1,4 @@
-*
-!*.sh
-!*.[ch]
-!*.gitignore
-
+/test-tool
+/test-fake-ssh
+/test-line-buffer
+/test-svn-fe
diff --git a/t/helper/test-date.c b/t/helper/test-date.c
index 585347e..099eff4 100644
--- a/t/helper/test-date.c
+++ b/t/helper/test-date.c
@@ -12,13 +12,13 @@ static const char *usage_msg = "\n"
" test-tool date is64bit\n"
" test-tool date time_t-is64bit\n";
-static void show_relative_dates(const char **argv, struct timeval *now)
+static void show_relative_dates(const char **argv)
{
struct strbuf buf = STRBUF_INIT;
for (; *argv; argv++) {
time_t t = atoi(*argv);
- show_date_relative(t, now, &buf);
+ show_date_relative(t, &buf);
printf("%s -> %s\n", *argv, buf.buf);
}
strbuf_release(&buf);
@@ -74,20 +74,20 @@ static void parse_dates(const char **argv)
strbuf_release(&result);
}
-static void parse_approxidate(const char **argv, struct timeval *now)
+static void parse_approxidate(const char **argv)
{
for (; *argv; argv++) {
timestamp_t t;
- t = approxidate_relative(*argv, now);
+ t = approxidate_relative(*argv);
printf("%s -> %s\n", *argv, show_date(t, 0, DATE_MODE(ISO8601)));
}
}
-static void parse_approx_timestamp(const char **argv, struct timeval *now)
+static void parse_approx_timestamp(const char **argv)
{
for (; *argv; argv++) {
timestamp_t t;
- t = approxidate_relative(*argv, now);
+ t = approxidate_relative(*argv);
printf("%s -> %"PRItime"\n", *argv, t);
}
}
@@ -103,22 +103,13 @@ static void getnanos(const char **argv)
int cmd__date(int argc, const char **argv)
{
- struct timeval now;
const char *x;
- x = getenv("GIT_TEST_DATE_NOW");
- if (x) {
- now.tv_sec = atoi(x);
- now.tv_usec = 0;
- }
- else
- gettimeofday(&now, NULL);
-
argv++;
if (!*argv)
usage(usage_msg);
if (!strcmp(*argv, "relative"))
- show_relative_dates(argv+1, &now);
+ show_relative_dates(argv+1);
else if (!strcmp(*argv, "human"))
show_human_dates(argv+1);
else if (skip_prefix(*argv, "show:", &x))
@@ -126,9 +117,9 @@ int cmd__date(int argc, const char **argv)
else if (!strcmp(*argv, "parse"))
parse_dates(argv+1);
else if (!strcmp(*argv, "approxidate"))
- parse_approxidate(argv+1, &now);
+ parse_approxidate(argv+1);
else if (!strcmp(*argv, "timestamp"))
- parse_approx_timestamp(argv+1, &now);
+ parse_approx_timestamp(argv+1);
else if (!strcmp(*argv, "getnanos"))
getnanos(argv+1);
else if (!strcmp(*argv, "is64bit"))
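
test-date no longer reads GIT_TEST_DATE_NOW or threads a struct timeval through each helper; the test-only "now" override appears to live with the date code itself elsewhere in this series. The sketch below only illustrates that environment-override pattern and is not Git's date.c.

#include <stdio.h>
#include <stdlib.h>
#include <time.h>

/* return a deterministic "now" for tests, otherwise the real clock */
static time_t get_now(void)
{
	const char *x = getenv("GIT_TEST_DATE_NOW");

	if (x)
		return (time_t)atol(x);
	return time(NULL);
}

int main(void)
{
	printf("now = %ld\n", (long)get_now());
	return 0;
}
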
diff --git a/t/helper/test-hashmap.c b/t/helper/test-hashmap.c
index aaf17b0..f387062 100644
--- a/t/helper/test-hashmap.c
+++ b/t/helper/test-hashmap.c
@@ -5,6 +5,7 @@
struct test_entry
{
+ int padding; /* hashmap entry no longer needs to be the first member */
struct hashmap_entry ent;
/* key and value as two \0-terminated strings */
char key[FLEX_ARRAY];
@@ -16,15 +17,17 @@ static const char *get_value(const struct test_entry *e)
}
static int test_entry_cmp(const void *cmp_data,
- const void *entry,
- const void *entry_or_key,
+ const struct hashmap_entry *eptr,
+ const struct hashmap_entry *entry_or_key,
const void *keydata)
{
const int ignore_case = cmp_data ? *((int *)cmp_data) : 0;
- const struct test_entry *e1 = entry;
- const struct test_entry *e2 = entry_or_key;
+ const struct test_entry *e1, *e2;
const char *key = keydata;
+ e1 = container_of(eptr, const struct test_entry, ent);
+ e2 = container_of(entry_or_key, const struct test_entry, ent);
+
if (ignore_case)
return strcasecmp(e1->key, key ? key : e2->key);
else
@@ -37,7 +40,7 @@ static struct test_entry *alloc_test_entry(unsigned int hash,
size_t klen = strlen(key);
size_t vlen = strlen(value);
struct test_entry *entry = xmalloc(st_add4(sizeof(*entry), klen, vlen, 2));
- hashmap_entry_init(entry, hash);
+ hashmap_entry_init(&entry->ent, hash);
memcpy(entry->key, key, klen + 1);
memcpy(entry->key + klen + 1, value, vlen + 1);
return entry;
@@ -103,11 +106,11 @@ static void perf_hashmap(unsigned int method, unsigned int rounds)
/* add entries */
for (i = 0; i < TEST_SIZE; i++) {
- hashmap_entry_init(entries[i], hashes[i]);
- hashmap_add(&map, entries[i]);
+ hashmap_entry_init(&entries[i]->ent, hashes[i]);
+ hashmap_add(&map, &entries[i]->ent);
}
- hashmap_free(&map, 0);
+ hashmap_free(&map);
}
} else {
/* test map lookups */
@@ -116,8 +119,8 @@ static void perf_hashmap(unsigned int method, unsigned int rounds)
/* fill the map (sparsely if specified) */
j = (method & TEST_SPARSE) ? TEST_SIZE / 10 : TEST_SIZE;
for (i = 0; i < j; i++) {
- hashmap_entry_init(entries[i], hashes[i]);
- hashmap_add(&map, entries[i]);
+ hashmap_entry_init(&entries[i]->ent, hashes[i]);
+ hashmap_add(&map, &entries[i]->ent);
}
for (j = 0; j < rounds; j++) {
@@ -127,7 +130,7 @@ static void perf_hashmap(unsigned int method, unsigned int rounds)
}
}
- hashmap_free(&map, 0);
+ hashmap_free(&map);
}
}
@@ -179,7 +182,7 @@ int cmd__hashmap(int argc, const char **argv)
entry = alloc_test_entry(hash, p1, p2);
/* add to hashmap */
- hashmap_add(&map, entry);
+ hashmap_add(&map, &entry->ent);
} else if (!strcmp("put", cmd) && p1 && p2) {
@@ -187,43 +190,44 @@ int cmd__hashmap(int argc, const char **argv)
entry = alloc_test_entry(hash, p1, p2);
/* add / replace entry */
- entry = hashmap_put(&map, entry);
+ entry = hashmap_put_entry(&map, entry, ent);
/* print and free replaced entry, if any */
puts(entry ? get_value(entry) : "NULL");
free(entry);
} else if (!strcmp("get", cmd) && p1) {
-
/* lookup entry in hashmap */
- entry = hashmap_get_from_hash(&map, hash, p1);
+ entry = hashmap_get_entry_from_hash(&map, hash, p1,
+ struct test_entry, ent);
/* print result */
if (!entry)
puts("NULL");
- while (entry) {
+ hashmap_for_each_entry_from(&map, entry, ent)
puts(get_value(entry));
- entry = hashmap_get_next(&map, entry);
- }
} else if (!strcmp("remove", cmd) && p1) {
/* setup static key */
struct hashmap_entry key;
+ struct hashmap_entry *rm;
hashmap_entry_init(&key, hash);
/* remove entry from hashmap */
- entry = hashmap_remove(&map, &key, p1);
+ rm = hashmap_remove(&map, &key, p1);
+ entry = rm ? container_of(rm, struct test_entry, ent)
+ : NULL;
/* print result and free entry*/
puts(entry ? get_value(entry) : "NULL");
free(entry);
} else if (!strcmp("iterate", cmd)) {
-
struct hashmap_iter iter;
- hashmap_iter_init(&map, &iter);
- while ((entry = hashmap_iter_next(&iter)))
+
+ hashmap_for_each_entry(&map, &iter, entry,
+ ent /* member name */)
printf("%s %s\n", entry->key, get_value(entry));
} else if (!strcmp("size", cmd)) {
@@ -258,6 +262,6 @@ int cmd__hashmap(int argc, const char **argv)
}
strbuf_release(&line);
- hashmap_free(&map, 1);
+ hashmap_free_entries(&map, struct test_entry, ent);
return 0;
}
diff --git a/t/helper/test-lazy-init-name-hash.c b/t/helper/test-lazy-init-name-hash.c
index b99a370..cd1b4c9 100644
--- a/t/helper/test-lazy-init-name-hash.c
+++ b/t/helper/test-lazy-init-name-hash.c
@@ -41,17 +41,13 @@ static void dump_run(void)
die("non-threaded code path used");
}
- dir = hashmap_iter_first(&the_index.dir_hash, &iter_dir);
- while (dir) {
+ hashmap_for_each_entry(&the_index.dir_hash, &iter_dir, dir,
+ ent /* member name */)
printf("dir %08x %7d %s\n", dir->ent.hash, dir->nr, dir->name);
- dir = hashmap_iter_next(&iter_dir);
- }
- ce = hashmap_iter_first(&the_index.name_hash, &iter_cache);
- while (ce) {
+ hashmap_for_each_entry(&the_index.name_hash, &iter_cache, ce,
+ ent /* member name */)
printf("name %08x %s\n", ce->ent.hash, ce->name);
- ce = hashmap_iter_next(&iter_cache);
- }
discard_cache();
}
diff --git a/t/helper/test-progress.c b/t/helper/test-progress.c
new file mode 100644
index 0000000..4e9f7fa
--- /dev/null
+++ b/t/helper/test-progress.c
@@ -0,0 +1,81 @@
+/*
+ * A test helper to exercise the progress display.
+ *
+ * Reads instructions from standard input, one instruction per line:
+ *
+ * "progress <items>" - Call display_progress() with the given item count
+ * as parameter.
+ * "throughput <bytes> <millis> - Call display_throughput() with the given
+ * byte count as parameter. The 'millis'
+ * specify the time elapsed since the
+ * start_progress() call.
+ * "update" - Set the 'progress_update' flag.
+ *
+ * See 't0500-progress-display.sh' for examples.
+ */
+#include "test-tool.h"
+#include "gettext.h"
+#include "parse-options.h"
+#include "progress.h"
+#include "strbuf.h"
+
+/*
+ * These are defined in 'progress.c', but are not exposed in 'progress.h',
+ * because they are exclusively for testing.
+ */
+extern int progress_testing;
+extern uint64_t progress_test_ns;
+void progress_test_force_update(void);
+
+int cmd__progress(int argc, const char **argv)
+{
+ uint64_t total = 0;
+ const char *title;
+ struct strbuf line = STRBUF_INIT;
+ struct progress *progress;
+
+ const char *usage[] = {
+ "test-tool progress [--total=<n>] <progress-title>",
+ NULL
+ };
+ struct option options[] = {
+ OPT_INTEGER(0, "total", &total, "total number of items"),
+ OPT_END(),
+ };
+
+ argc = parse_options(argc, argv, NULL, options, usage, 0);
+ if (argc != 1)
+ die("need a title for the progress output");
+ title = argv[0];
+
+ progress_testing = 1;
+ progress = start_progress(title, total);
+ while (strbuf_getline(&line, stdin) != EOF) {
+ char *end;
+
+ if (skip_prefix(line.buf, "progress ", (const char **) &end)) {
+ uint64_t item_count = strtoull(end, &end, 10);
+ if (*end != '\0')
+ die("invalid input: '%s'\n", line.buf);
+ display_progress(progress, item_count);
+ } else if (skip_prefix(line.buf, "throughput ",
+ (const char **) &end)) {
+ uint64_t byte_count, test_ms;
+
+ byte_count = strtoull(end, &end, 10);
+ if (*end != ' ')
+ die("invalid input: '%s'\n", line.buf);
+ test_ms = strtoull(end + 1, &end, 10);
+ if (*end != '\0')
+ die("invalid input: '%s'\n", line.buf);
+ progress_test_ns = test_ms * 1000 * 1000;
+ display_throughput(progress, byte_count);
+ } else if (!strcmp(line.buf, "update"))
+ progress_test_force_update();
+ else
+ die("invalid input: '%s'\n", line.buf);
+ }
+ stop_progress(&progress);
+
+ return 0;
+}
diff --git a/t/helper/test-run-command.c b/t/helper/test-run-command.c
index 2cc93bb..ead6dc6 100644
--- a/t/helper/test-run-command.c
+++ b/t/helper/test-run-command.c
@@ -10,9 +10,14 @@
#include "test-tool.h"
#include "git-compat-util.h"
+#include "cache.h"
#include "run-command.h"
#include "argv-array.h"
#include "strbuf.h"
+#include "parse-options.h"
+#include "string-list.h"
+#include "thread-utils.h"
+#include "wildmatch.h"
#include <string.h>
#include <errno.h>
@@ -50,11 +55,159 @@ static int task_finished(int result,
return 1;
}
+struct testsuite {
+ struct string_list tests, failed;
+ int next;
+ int quiet, immediate, verbose, verbose_log, trace, write_junit_xml;
+};
+#define TESTSUITE_INIT \
+ { STRING_LIST_INIT_DUP, STRING_LIST_INIT_DUP, -1, 0, 0, 0, 0, 0, 0 }
+
+static int next_test(struct child_process *cp, struct strbuf *err, void *cb,
+ void **task_cb)
+{
+ struct testsuite *suite = cb;
+ const char *test;
+ if (suite->next >= suite->tests.nr)
+ return 0;
+
+ test = suite->tests.items[suite->next++].string;
+ argv_array_pushl(&cp->args, "sh", test, NULL);
+ if (suite->quiet)
+ argv_array_push(&cp->args, "--quiet");
+ if (suite->immediate)
+ argv_array_push(&cp->args, "-i");
+ if (suite->verbose)
+ argv_array_push(&cp->args, "-v");
+ if (suite->verbose_log)
+ argv_array_push(&cp->args, "-V");
+ if (suite->trace)
+ argv_array_push(&cp->args, "-x");
+ if (suite->write_junit_xml)
+ argv_array_push(&cp->args, "--write-junit-xml");
+
+ strbuf_addf(err, "Output of '%s':\n", test);
+ *task_cb = (void *)test;
+
+ return 1;
+}
+
+static int test_finished(int result, struct strbuf *err, void *cb,
+ void *task_cb)
+{
+ struct testsuite *suite = cb;
+ const char *name = (const char *)task_cb;
+
+ if (result)
+ string_list_append(&suite->failed, name);
+
+ strbuf_addf(err, "%s: '%s'\n", result ? "FAIL" : "SUCCESS", name);
+
+ return 0;
+}
+
+static int test_failed(struct strbuf *out, void *cb, void *task_cb)
+{
+ struct testsuite *suite = cb;
+ const char *name = (const char *)task_cb;
+
+ string_list_append(&suite->failed, name);
+ strbuf_addf(out, "FAILED TO START: '%s'\n", name);
+
+ return 0;
+}
+
+static const char * const testsuite_usage[] = {
+ "test-run-command testsuite [<options>] [<pattern>...]",
+ NULL
+};
+
+static int testsuite(int argc, const char **argv)
+{
+ struct testsuite suite = TESTSUITE_INIT;
+ int max_jobs = 1, i, ret;
+ DIR *dir;
+ struct dirent *d;
+ struct option options[] = {
+ OPT_BOOL('i', "immediate", &suite.immediate,
+ "stop at first failed test case(s)"),
+ OPT_INTEGER('j', "jobs", &max_jobs, "run <N> jobs in parallel"),
+ OPT_BOOL('q', "quiet", &suite.quiet, "be terse"),
+ OPT_BOOL('v', "verbose", &suite.verbose, "be verbose"),
+ OPT_BOOL('V', "verbose-log", &suite.verbose_log,
+ "be verbose, redirected to a file"),
+ OPT_BOOL('x', "trace", &suite.trace, "trace shell commands"),
+ OPT_BOOL(0, "write-junit-xml", &suite.write_junit_xml,
+ "write JUnit-style XML files"),
+ OPT_END()
+ };
+
+ memset(&suite, 0, sizeof(suite));
+ suite.tests.strdup_strings = suite.failed.strdup_strings = 1;
+
+ argc = parse_options(argc, argv, NULL, options,
+ testsuite_usage, PARSE_OPT_STOP_AT_NON_OPTION);
+
+ if (max_jobs <= 0)
+ max_jobs = online_cpus();
+
+ dir = opendir(".");
+ if (!dir)
+ die("Could not open the current directory");
+ while ((d = readdir(dir))) {
+ const char *p = d->d_name;
+
+ if (*p != 't' || !isdigit(p[1]) || !isdigit(p[2]) ||
+ !isdigit(p[3]) || !isdigit(p[4]) || p[5] != '-' ||
+ !ends_with(p, ".sh"))
+ continue;
+
+ /* No pattern: match all */
+ if (!argc) {
+ string_list_append(&suite.tests, p);
+ continue;
+ }
+
+ for (i = 0; i < argc; i++)
+ if (!wildmatch(argv[i], p, 0)) {
+ string_list_append(&suite.tests, p);
+ break;
+ }
+ }
+ closedir(dir);
+
+ if (!suite.tests.nr)
+ die("No tests match!");
+ if (max_jobs > suite.tests.nr)
+ max_jobs = suite.tests.nr;
+
+ fprintf(stderr, "Running %d tests (%d at a time)\n",
+ suite.tests.nr, max_jobs);
+
+ ret = run_processes_parallel(max_jobs, next_test, test_failed,
+ test_finished, &suite);
+
+ if (suite.failed.nr > 0) {
+ ret = 1;
+ fprintf(stderr, "%d tests failed:\n\n", suite.failed.nr);
+ for (i = 0; i < suite.failed.nr; i++)
+ fprintf(stderr, "\t%s\n", suite.failed.items[i].string);
+ }
+
+ string_list_clear(&suite.tests, 0);
+ string_list_clear(&suite.failed, 0);
+
+ return !!ret;
+}
+
int cmd__run_command(int argc, const char **argv)
{
struct child_process proc = CHILD_PROCESS_INIT;
int jobs;
+ if (argc > 1 && !strcmp(argv[1], "testsuite"))
+ exit(testsuite(argc - 1, argv + 1));
+
if (argc < 3)
return 1;
while (!strcmp(argv[1], "env")) {
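
The new 'testsuite' subcommand doubles as a compact example of the run_processes_parallel() contract used above: the get-next-task callback fills in the child's argv and returns 1 (optionally stashing a per-task cookie in *task_cb), or returns 0 when the work runs out; the start-failure and task-finished callbacks get that cookie back. A stripped-down sketch of the same wiring, with the task list and child command hypothetical:

    /* Sketch only; mirrors the callback signatures used by testsuite() above. */
    static int get_next(struct child_process *cp, struct strbuf *err,
                        void *cb, void **task_cb)
    {
            struct string_list *tasks = cb;
            static int next;

            if (next >= tasks->nr)
                    return 0;               /* no more work */
            argv_array_pushl(&cp->args, "sh", tasks->items[next].string, NULL);
            *task_cb = tasks->items[next++].string;  /* cookie handed back below */
            return 1;
    }

    static int finished(int result, struct strbuf *err, void *cb, void *task_cb)
    {
            strbuf_addf(err, "%s: '%s'\n", result ? "FAIL" : "SUCCESS",
                        (const char *)task_cb);
            return 0;
    }

    /* start-failure callback passed as NULL, assuming the default handler suffices */
    run_processes_parallel(jobs, get_next, NULL, finished, &tasks);
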
diff --git a/t/helper/test-tool.c b/t/helper/test-tool.c
index ce7e890..19ee26d 100644
--- a/t/helper/test-tool.c
+++ b/t/helper/test-tool.c
@@ -42,6 +42,7 @@ static struct test_cmd cmds[] = {
{ "path-utils", cmd__path_utils },
{ "pkt-line", cmd__pkt_line },
{ "prio-queue", cmd__prio_queue },
+ { "progress", cmd__progress },
{ "reach", cmd__reach },
{ "read-cache", cmd__read_cache },
{ "read-midx", cmd__read_midx },
diff --git a/t/helper/test-tool.h b/t/helper/test-tool.h
index f805bb3..c2aa56e 100644
--- a/t/helper/test-tool.h
+++ b/t/helper/test-tool.h
@@ -32,6 +32,7 @@ int cmd__parse_options(int argc, const char **argv);
int cmd__path_utils(int argc, const char **argv);
int cmd__pkt_line(int argc, const char **argv);
int cmd__prio_queue(int argc, const char **argv);
+int cmd__progress(int argc, const char **argv);
int cmd__reach(int argc, const char **argv);
int cmd__read_cache(int argc, const char **argv);
int cmd__read_midx(int argc, const char **argv);
diff --git a/t/t0000-basic.sh b/t/t0000-basic.sh
index 4c01f60..4d3f7ba 100755
--- a/t/t0000-basic.sh
+++ b/t/t0000-basic.sh
@@ -391,6 +391,44 @@ test_expect_success 'GIT_SKIP_TESTS sh pattern' "
)
"
+test_expect_success 'GIT_SKIP_TESTS entire suite' "
+ (
+ GIT_SKIP_TESTS='git' && export GIT_SKIP_TESTS &&
+ run_sub_test_lib_test git-skip-tests-entire-suite \
+ 'GIT_SKIP_TESTS entire suite' <<-\\EOF &&
+ for i in 1 2 3
+ do
+ test_expect_success \"passing test #\$i\" 'true'
+ done
+ test_done
+ EOF
+ check_sub_test_lib_test git-skip-tests-entire-suite <<-\\EOF
+ > 1..0 # SKIP skip all tests in git
+ EOF
+ )
+"
+
+test_expect_success 'GIT_SKIP_TESTS does not skip unmatched suite' "
+ (
+ GIT_SKIP_TESTS='notgit' && export GIT_SKIP_TESTS &&
+ run_sub_test_lib_test git-skip-tests-unmatched-suite \
+ 'GIT_SKIP_TESTS does not skip unmatched suite' <<-\\EOF &&
+ for i in 1 2 3
+ do
+ test_expect_success \"passing test #\$i\" 'true'
+ done
+ test_done
+ EOF
+ check_sub_test_lib_test git-skip-tests-unmatched-suite <<-\\EOF
+ > ok 1 - passing test #1
+ > ok 2 - passing test #2
+ > ok 3 - passing test #3
+ > # passed all 3 test(s)
+ > 1..3
+ EOF
+ )
+"
+
test_expect_success '--run basic' "
run_sub_test_lib_test run-basic \
'--run basic' --run='1 3 5' <<-\\EOF &&
diff --git a/t/t0014-alias.sh b/t/t0014-alias.sh
index a070e64..2694c81 100755
--- a/t/t0014-alias.sh
+++ b/t/t0014-alias.sh
@@ -37,4 +37,11 @@ test_expect_success 'looping aliases - internal execution' '
# test_i18ngrep "^fatal: alias loop detected: expansion of" output
#'
+test_expect_success 'run-command formats empty args properly' '
+ GIT_TRACE=1 git frotz a "" b " " c 2>&1 |
+ sed -ne "/run_command:/s/.*trace: run_command: //p" >actual &&
+ echo "git-frotz a '\'''\'' b '\'' '\'' c" >expect &&
+ test_cmp expect actual
+'
+
test_done
diff --git a/t/t0028-working-tree-encoding.sh b/t/t0028-working-tree-encoding.sh
index 1090e65..7aa0945 100755
--- a/t/t0028-working-tree-encoding.sh
+++ b/t/t0028-working-tree-encoding.sh
@@ -40,7 +40,7 @@ test_expect_success 'setup test files' '
printf "$text" | write_utf16 >test.utf16.raw &&
printf "$text" | write_utf32 >test.utf32.raw &&
printf "\377\376" >test.utf16lebom.raw &&
- printf "$text" | iconv -f UTF-8 -t UTF-32LE >>test.utf16lebom.raw &&
+ printf "$text" | iconv -f UTF-8 -t UTF-16LE >>test.utf16lebom.raw &&
# Line ending tests
printf "one\ntwo\nthree\n" >lf.utf8.raw &&
@@ -280,4 +280,43 @@ test_expect_success ICONV_SHIFT_JIS 'check roundtrip encoding' '
git reset
'
+# $1: checkout encoding
+# $2: test string
+# $3: binary test string in checkout encoding
+test_commit_utf8_checkout_other () {
+ encoding="$1"
+ orig_string="$2"
+ expect_bytes="$3"
+
+ test_expect_success "Commit UTF-8, checkout $encoding" '
+ test_when_finished "git checkout HEAD -- .gitattributes" &&
+
+ test_ext="commit_utf8_checkout_$encoding" &&
+ test_file="test.$test_ext" &&
+
+ # Commit as UTF-8
+ echo "*.$test_ext text working-tree-encoding=UTF-8" >.gitattributes &&
+ printf "$orig_string" >$test_file &&
+ git add $test_file &&
+ git commit -m "Test data" &&
+
+ # Checkout in tested encoding
+ rm $test_file &&
+ echo "*.$test_ext text working-tree-encoding=$encoding" >.gitattributes &&
+ git checkout HEAD -- $test_file &&
+
+ # Test
+ printf $expect_bytes >$test_file.raw &&
+ test_cmp_bin $test_file.raw $test_file
+ '
+}
+
+test_commit_utf8_checkout_other "UTF-8" "Test Тест" "\124\145\163\164\040\320\242\320\265\321\201\321\202"
+test_commit_utf8_checkout_other "UTF-16LE" "Test Тест" "\124\000\145\000\163\000\164\000\040\000\042\004\065\004\101\004\102\004"
+test_commit_utf8_checkout_other "UTF-16BE" "Test Тест" "\000\124\000\145\000\163\000\164\000\040\004\042\004\065\004\101\004\102"
+test_commit_utf8_checkout_other "UTF-16LE-BOM" "Test Тест" "\377\376\124\000\145\000\163\000\164\000\040\000\042\004\065\004\101\004\102\004"
+test_commit_utf8_checkout_other "UTF-16BE-BOM" "Test Тест" "\376\377\000\124\000\145\000\163\000\164\000\040\004\042\004\065\004\101\004\102"
+test_commit_utf8_checkout_other "UTF-32LE" "Test Тест" "\124\000\000\000\145\000\000\000\163\000\000\000\164\000\000\000\040\000\000\000\042\004\000\000\065\004\000\000\101\004\000\000\102\004\000\000"
+test_commit_utf8_checkout_other "UTF-32BE" "Test Тест" "\000\000\000\124\000\000\000\145\000\000\000\163\000\000\000\164\000\000\000\040\000\000\004\042\000\000\004\065\000\000\004\101\000\000\004\102"
+
test_done
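
A quick way to read the octal strings passed above: 'T' (U+0054) becomes the bytes 0x54 0x00 in UTF-16LE, i.e. \124\000, and the Cyrillic 'Т' (U+0422) becomes 0x22 0x04, i.e. \042\004; the BE variants swap the byte order, the UTF-32 variants pad each code point to four bytes, and the -BOM variants prepend \377\376 (FF FE) or \376\377 (FE FF).
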
diff --git a/t/t0050-filesystem.sh b/t/t0050-filesystem.sh
index 192c94e..608673f 100755
--- a/t/t0050-filesystem.sh
+++ b/t/t0050-filesystem.sh
@@ -131,4 +131,24 @@ $test_unicode 'merge (silent unicode normalization)' '
git merge topic
'
+test_expect_success CASE_INSENSITIVE_FS 'checkout with no pathspec and a case insensitive fs' '
+ git init repo &&
+ (
+ cd repo &&
+
+ >Gitweb &&
+ git add Gitweb &&
+ git commit -m "add Gitweb" &&
+
+ git checkout --orphan todo &&
+ git reset --hard &&
+ mkdir -p gitweb/subdir &&
+ >gitweb/subdir/file &&
+ git add gitweb &&
+ git commit -m "add gitweb/subdir/file" &&
+
+ git checkout master
+ )
+'
+
test_done
diff --git a/t/t0061-run-command.sh b/t/t0061-run-command.sh
index 015fac8..17c9c0f 100755
--- a/t/t0061-run-command.sh
+++ b/t/t0061-run-command.sh
@@ -210,10 +210,23 @@ test_expect_success MINGW 'verify curlies are quoted properly' '
test_cmp expect actual
'
-test_expect_success MINGW 'can spawn with argv[0] containing spaces' '
- cp "$GIT_BUILD_DIR/t/helper/test-fake-ssh$X" ./ &&
- test_must_fail "$PWD/test-fake-ssh$X" 2>err &&
- grep TRASH_DIRECTORY err
+test_expect_success MINGW 'can spawn .bat with argv[0] containing spaces' '
+ bat="$TRASH_DIRECTORY/bat with spaces in name.bat" &&
+
+ # Every .bat invocation will log its arguments to file "out"
+ rm -f out &&
+ echo "echo %* >>out" >"$bat" &&
+
+ # Ask git to invoke .bat; clone will fail due to fake SSH helper
+ test_must_fail env GIT_SSH="$bat" git clone myhost:src ssh-clone &&
+
+ # Spawning .bat can fail if there are two quoted cmd.exe arguments.
+ # .bat itself is first (due to spaces in name), so just one more is
+ # needed to verify. GIT_SSH will invoke .bat multiple times:
+ # 1) -G myhost
+ # 2) myhost "git-upload-pack src"
+ # First invocation will always succeed. Test the second one.
+ grep "git-upload-pack" out
'
test_done
diff --git a/t/t0212-trace2-event.sh b/t/t0212-trace2-event.sh
index ff5b9cc..7065a1b 100755
--- a/t/t0212-trace2-event.sh
+++ b/t/t0212-trace2-event.sh
@@ -265,4 +265,23 @@ test_expect_success JSON_PP 'using global config, event stream, error event' '
test_cmp expect actual
'
+test_expect_success 'discard traces when there are too many files' '
+ mkdir trace_target_dir &&
+ test_when_finished "rm -r trace_target_dir" &&
+ (
+ GIT_TRACE2_MAX_FILES=5 &&
+ export GIT_TRACE2_MAX_FILES &&
+ cd trace_target_dir &&
+ test_seq $GIT_TRACE2_MAX_FILES >../expected_filenames.txt &&
+ xargs touch <../expected_filenames.txt &&
+ cd .. &&
+ GIT_TRACE2_EVENT="$(pwd)/trace_target_dir" test-tool trace2 001return 0
+ ) &&
+ echo git-trace2-discard >>expected_filenames.txt &&
+ ls trace_target_dir >ls_output.txt &&
+ test_cmp expected_filenames.txt ls_output.txt &&
+ head -n1 trace_target_dir/git-trace2-discard | grep \"event\":\"version\" &&
+ head -n2 trace_target_dir/git-trace2-discard | tail -n1 | grep \"event\":\"too_many_files\"
+'
+
test_done
diff --git a/t/t0410-partial-clone.sh b/t/t0410-partial-clone.sh
index d4b7e53..a3988bd 100755
--- a/t/t0410-partial-clone.sh
+++ b/t/t0410-partial-clone.sh
@@ -429,6 +429,19 @@ test_expect_success 'rev-list dies for missing objects on cmd line' '
done
'
+test_expect_success 'single promisor remote can be re-initialized gracefully' '
+ # ensure one promisor is in the promisors list
+ rm -rf repo &&
+ test_create_repo repo &&
+ test_create_repo other &&
+ git -C repo remote add foo "file://$(pwd)/other" &&
+ git -C repo config remote.foo.promisor true &&
+ git -C repo config extensions.partialclone foo &&
+
+ # reinitialize the promisors list
+ git -C repo fetch --filter=blob:none foo
+'
+
test_expect_success 'gc repacks promisor objects separately from non-promisor objects' '
rm -rf repo &&
test_create_repo repo &&
@@ -540,6 +553,20 @@ test_expect_success 'gc stops traversal when a missing but promised object is re
! grep "$TREE_HASH" out
'
+test_expect_success 'do not fetch when checking existence of tree we construct ourselves' '
+ rm -rf repo &&
+ test_create_repo repo &&
+ test_commit -C repo base &&
+ test_commit -C repo side1 &&
+ git -C repo checkout base &&
+ test_commit -C repo side2 &&
+
+ git -C repo config core.repositoryformatversion 1 &&
+ git -C repo config extensions.partialclone "arbitrary string" &&
+
+ git -C repo cherry-pick side1
+'
+
. "$TEST_DIRECTORY"/lib-httpd.sh
start_httpd
diff --git a/t/t0500-progress-display.sh b/t/t0500-progress-display.sh
new file mode 100755
index 0000000..24ccbd8
--- /dev/null
+++ b/t/t0500-progress-display.sh
@@ -0,0 +1,286 @@
+#!/bin/sh
+
+test_description='progress display'
+
+. ./test-lib.sh
+
+show_cr () {
+ tr '\015' Q | sed -e "s/Q/<CR>\\$LF/g"
+}
+
+test_expect_success 'simple progress display' '
+ cat >expect <<-\EOF &&
+ Working hard: 1<CR>
+ Working hard: 2<CR>
+ Working hard: 5<CR>
+ Working hard: 5, done.
+ EOF
+
+ cat >in <<-\EOF &&
+ update
+ progress 1
+ update
+ progress 2
+ progress 3
+ progress 4
+ update
+ progress 5
+ EOF
+ test-tool progress "Working hard" <in 2>stderr &&
+
+ show_cr <stderr >out &&
+ test_i18ncmp expect out
+'
+
+test_expect_success 'progress display with total' '
+ cat >expect <<-\EOF &&
+ Working hard: 33% (1/3)<CR>
+ Working hard: 66% (2/3)<CR>
+ Working hard: 100% (3/3)<CR>
+ Working hard: 100% (3/3), done.
+ EOF
+
+ cat >in <<-\EOF &&
+ progress 1
+ progress 2
+ progress 3
+ EOF
+ test-tool progress --total=3 "Working hard" <in 2>stderr &&
+
+ show_cr <stderr >out &&
+ test_i18ncmp expect out
+'
+
+test_expect_success 'progress display breaks long lines #1' '
+ sed -e "s/Z$//" >expect <<\EOF &&
+Working hard.......2.........3.........4.........5.........6: 0% (100/100000)<CR>
+Working hard.......2.........3.........4.........5.........6: 1% (1000/100000)<CR>
+Working hard.......2.........3.........4.........5.........6: Z
+ 10% (10000/100000)<CR>
+ 100% (100000/100000)<CR>
+ 100% (100000/100000), done.
+EOF
+
+ cat >in <<-\EOF &&
+ progress 100
+ progress 1000
+ progress 10000
+ progress 100000
+ EOF
+ test-tool progress --total=100000 \
+ "Working hard.......2.........3.........4.........5.........6" \
+ <in 2>stderr &&
+
+ show_cr <stderr >out &&
+ test_i18ncmp expect out
+'
+
+test_expect_success 'progress display breaks long lines #2' '
+ # Note: we do not need that many spaces after the title to cover up
+ # the last line before breaking the progress line.
+ sed -e "s/Z$//" >expect <<\EOF &&
+Working hard.......2.........3.........4.........5.........6: 0% (1/100000)<CR>
+Working hard.......2.........3.........4.........5.........6: 0% (2/100000)<CR>
+Working hard.......2.........3.........4.........5.........6: Z
+ 10% (10000/100000)<CR>
+ 100% (100000/100000)<CR>
+ 100% (100000/100000), done.
+EOF
+
+ cat >in <<-\EOF &&
+ update
+ progress 1
+ update
+ progress 2
+ progress 10000
+ progress 100000
+ EOF
+ test-tool progress --total=100000 \
+ "Working hard.......2.........3.........4.........5.........6" \
+ <in 2>stderr &&
+
+ show_cr <stderr >out &&
+ test_i18ncmp expect out
+'
+
+test_expect_success 'progress display breaks long lines #3 - even the first is too long' '
+ # Note: we do not actually need any spaces at the end of the title
+ # line, because there is no previous progress line to cover up.
+ sed -e "s/Z$//" >expect <<\EOF &&
+Working hard.......2.........3.........4.........5.........6: Z
+ 25% (25000/100000)<CR>
+ 50% (50000/100000)<CR>
+ 75% (75000/100000)<CR>
+ 100% (100000/100000)<CR>
+ 100% (100000/100000), done.
+EOF
+
+ cat >in <<-\EOF &&
+ progress 25000
+ progress 50000
+ progress 75000
+ progress 100000
+ EOF
+ test-tool progress --total=100000 \
+ "Working hard.......2.........3.........4.........5.........6" \
+ <in 2>stderr &&
+
+ show_cr <stderr >out &&
+ test_i18ncmp expect out
+'
+
+test_expect_success 'progress display breaks long lines #4 - title line matches terminal width' '
+ cat >expect <<\EOF &&
+Working hard.......2.........3.........4.........5.........6.........7.........:
+ 25% (25000/100000)<CR>
+ 50% (50000/100000)<CR>
+ 75% (75000/100000)<CR>
+ 100% (100000/100000)<CR>
+ 100% (100000/100000), done.
+EOF
+
+ cat >in <<-\EOF &&
+ progress 25000
+ progress 50000
+ progress 75000
+ progress 100000
+ EOF
+ test-tool progress --total=100000 \
+ "Working hard.......2.........3.........4.........5.........6.........7........." \
+ <in 2>stderr &&
+
+ show_cr <stderr >out &&
+ test_i18ncmp expect out
+'
+
+# Progress counter goes backwards; this should not happen in practice.
+test_expect_success 'progress shortens - crazy caller' '
+ cat >expect <<-\EOF &&
+ Working hard: 10% (100/1000)<CR>
+ Working hard: 20% (200/1000)<CR>
+ Working hard: 0% (1/1000) <CR>
+ Working hard: 100% (1000/1000)<CR>
+ Working hard: 100% (1000/1000), done.
+ EOF
+
+ cat >in <<-\EOF &&
+ progress 100
+ progress 200
+ progress 1
+ progress 1000
+ EOF
+ test-tool progress --total=1000 "Working hard" <in 2>stderr &&
+
+ show_cr <stderr >out &&
+ test_i18ncmp expect out
+'
+
+test_expect_success 'progress display with throughput' '
+ cat >expect <<-\EOF &&
+ Working hard: 10<CR>
+ Working hard: 20, 200.00 KiB | 100.00 KiB/s<CR>
+ Working hard: 30, 300.00 KiB | 100.00 KiB/s<CR>
+ Working hard: 40, 400.00 KiB | 100.00 KiB/s<CR>
+ Working hard: 40, 400.00 KiB | 100.00 KiB/s, done.
+ EOF
+
+ cat >in <<-\EOF &&
+ throughput 102400 1000
+ update
+ progress 10
+ throughput 204800 2000
+ update
+ progress 20
+ throughput 307200 3000
+ update
+ progress 30
+ throughput 409600 4000
+ update
+ progress 40
+ EOF
+ test-tool progress "Working hard" <in 2>stderr &&
+
+ show_cr <stderr >out &&
+ test_i18ncmp expect out
+'
+
+test_expect_success 'progress display with throughput and total' '
+ cat >expect <<-\EOF &&
+ Working hard: 25% (10/40)<CR>
+ Working hard: 50% (20/40), 200.00 KiB | 100.00 KiB/s<CR>
+ Working hard: 75% (30/40), 300.00 KiB | 100.00 KiB/s<CR>
+ Working hard: 100% (40/40), 400.00 KiB | 100.00 KiB/s<CR>
+ Working hard: 100% (40/40), 400.00 KiB | 100.00 KiB/s, done.
+ EOF
+
+ cat >in <<-\EOF &&
+ throughput 102400 1000
+ progress 10
+ throughput 204800 2000
+ progress 20
+ throughput 307200 3000
+ progress 30
+ throughput 409600 4000
+ progress 40
+ EOF
+ test-tool progress --total=40 "Working hard" <in 2>stderr &&
+
+ show_cr <stderr >out &&
+ test_i18ncmp expect out
+'
+
+test_expect_success 'cover up after throughput shortens' '
+ cat >expect <<-\EOF &&
+ Working hard: 1<CR>
+ Working hard: 2, 800.00 KiB | 400.00 KiB/s<CR>
+ Working hard: 3, 1.17 MiB | 400.00 KiB/s <CR>
+ Working hard: 4, 1.56 MiB | 400.00 KiB/s<CR>
+ Working hard: 4, 1.56 MiB | 400.00 KiB/s, done.
+ EOF
+
+ cat >in <<-\EOF &&
+ throughput 409600 1000
+ update
+ progress 1
+ throughput 819200 2000
+ update
+ progress 2
+ throughput 1228800 3000
+ update
+ progress 3
+ throughput 1638400 4000
+ update
+ progress 4
+ EOF
+ test-tool progress "Working hard" <in 2>stderr &&
+
+ show_cr <stderr >out &&
+ test_i18ncmp expect out
+'
+
+test_expect_success 'cover up after throughput shortens a lot' '
+ cat >expect <<-\EOF &&
+ Working hard: 1<CR>
+ Working hard: 2, 1000.00 KiB | 1000.00 KiB/s<CR>
+ Working hard: 3, 3.00 MiB | 1.50 MiB/s <CR>
+ Working hard: 3, 3.00 MiB | 1024.00 KiB/s, done.
+ EOF
+
+ cat >in <<-\EOF &&
+ throughput 1 1000
+ update
+ progress 1
+ throughput 1024000 2000
+ update
+ progress 2
+ throughput 3145728 3000
+ update
+ progress 3
+ EOF
+ test-tool progress "Working hard" <in 2>stderr &&
+
+ show_cr <stderr >out &&
+ test_i18ncmp expect out
+'
+
+test_done
diff --git a/t/t1450-fsck.sh b/t/t1450-fsck.sh
index b36e052..50d28e6 100755
--- a/t/t1450-fsck.sh
+++ b/t/t1450-fsck.sh
@@ -70,7 +70,6 @@ test_expect_success 'object with bad sha1' '
test_when_finished "git update-ref -d refs/heads/bogus" &&
test_must_fail git fsck 2>out &&
- cat out &&
test_i18ngrep "$sha.*corrupt" out
'
@@ -78,7 +77,6 @@ test_expect_success 'branch pointing to non-commit' '
git rev-parse HEAD^{tree} >.git/refs/heads/invalid &&
test_when_finished "git update-ref -d refs/heads/invalid" &&
test_must_fail git fsck 2>out &&
- cat out &&
test_i18ngrep "not a commit" out
'
@@ -88,7 +86,6 @@ test_expect_success 'HEAD link pointing at a funny object' '
echo $ZERO_OID >.git/HEAD &&
# avoid corrupt/broken HEAD from interfering with repo discovery
test_must_fail env GIT_DIR=.git git fsck 2>out &&
- cat out &&
test_i18ngrep "detached HEAD points" out
'
@@ -98,7 +95,6 @@ test_expect_success 'HEAD link pointing at a funny place' '
echo "ref: refs/funny/place" >.git/HEAD &&
# avoid corrupt/broken HEAD from interfering with repo discovery
test_must_fail env GIT_DIR=.git git fsck 2>out &&
- cat out &&
test_i18ngrep "HEAD points to something strange" out
'
@@ -157,7 +153,6 @@ test_expect_success 'email with embedded > is not okay' '
git update-ref refs/heads/bogus "$new" &&
test_when_finished "git update-ref -d refs/heads/bogus" &&
test_must_fail git fsck 2>out &&
- cat out &&
test_i18ngrep "error in commit $new" out
'
@@ -169,7 +164,6 @@ test_expect_success 'missing < email delimiter is reported nicely' '
git update-ref refs/heads/bogus "$new" &&
test_when_finished "git update-ref -d refs/heads/bogus" &&
test_must_fail git fsck 2>out &&
- cat out &&
test_i18ngrep "error in commit $new.* - bad name" out
'
@@ -181,7 +175,6 @@ test_expect_success 'missing email is reported nicely' '
git update-ref refs/heads/bogus "$new" &&
test_when_finished "git update-ref -d refs/heads/bogus" &&
test_must_fail git fsck 2>out &&
- cat out &&
test_i18ngrep "error in commit $new.* - missing email" out
'
@@ -193,7 +186,6 @@ test_expect_success '> in name is reported' '
git update-ref refs/heads/bogus "$new" &&
test_when_finished "git update-ref -d refs/heads/bogus" &&
test_must_fail git fsck 2>out &&
- cat out &&
test_i18ngrep "error in commit $new" out
'
@@ -207,7 +199,6 @@ test_expect_success 'integer overflow in timestamps is reported' '
git update-ref refs/heads/bogus "$new" &&
test_when_finished "git update-ref -d refs/heads/bogus" &&
test_must_fail git fsck 2>out &&
- cat out &&
test_i18ngrep "error in commit $new.*integer overflow" out
'
@@ -219,7 +210,6 @@ test_expect_success 'commit with NUL in header' '
git update-ref refs/heads/bogus "$new" &&
test_when_finished "git update-ref -d refs/heads/bogus" &&
test_must_fail git fsck 2>out &&
- cat out &&
test_i18ngrep "error in commit $new.*unterminated header: NUL at offset" out
'
@@ -297,7 +287,6 @@ test_expect_success 'tag pointing to nonexistent' '
echo $tag >.git/refs/tags/invalid &&
test_when_finished "git update-ref -d refs/tags/invalid" &&
test_must_fail git fsck --tags >out &&
- cat out &&
test_i18ngrep "broken link" out
'
@@ -378,7 +367,6 @@ test_expect_success 'tag with NUL in header' '
echo $tag >.git/refs/tags/wrong &&
test_when_finished "git update-ref -d refs/tags/wrong" &&
test_must_fail git fsck --tags 2>out &&
- cat out &&
test_i18ngrep "error in tag $tag.*unterminated header: NUL at offset" out
'
@@ -409,7 +397,6 @@ test_expect_success 'rev-list --verify-objects with bad sha1' '
test_when_finished "git update-ref -d refs/heads/bogus" &&
test_might_fail git rev-list --verify-objects refs/heads/bogus >/dev/null 2>out &&
- cat out &&
test_i18ngrep -q "error: hash mismatch $(dirname $new)$(test_oid ff_2)" out
'
@@ -433,7 +420,6 @@ test_expect_success 'fsck notices blob entry pointing to null sha1' '
sha=$(printf "100644 file$_bz$_bzoid" |
git hash-object -w --stdin -t tree) &&
git fsck 2>out &&
- cat out &&
test_i18ngrep "warning.*null sha1" out
)
'
@@ -444,7 +430,6 @@ test_expect_success 'fsck notices submodule entry pointing to null sha1' '
sha=$(printf "160000 submodule$_bz$_bzoid" |
git hash-object -w --stdin -t tree) &&
git fsck 2>out &&
- cat out &&
test_i18ngrep "warning.*null sha1" out
)
'
@@ -465,7 +450,6 @@ while read name path pretty; do
printf "$mode $type %s\t%s" "$value" "$path" >bad &&
bad_tree=$(git mktree <bad) &&
git fsck 2>out &&
- cat out &&
test_i18ngrep "warning.*tree $bad_tree" out
)'
done <<-\EOF
diff --git a/t/t1506-rev-parse-diagnosis.sh b/t/t1506-rev-parse-diagnosis.sh
index 21a9c8f..624d0a5 100755
--- a/t/t1506-rev-parse-diagnosis.sh
+++ b/t/t1506-rev-parse-diagnosis.sh
@@ -214,4 +214,12 @@ test_expect_success 'arg before dashdash must be a revision (ambiguous)' '
test_cmp expect actual
'
+test_expect_success 'reject Nth parent if N is too high' '
+ test_must_fail git rev-parse HEAD^100000000000000000000000000000000
+'
+
+test_expect_success 'reject Nth ancestor if N is too high' '
+ test_must_fail git rev-parse HEAD~100000000000000000000000000000000
+'
+
test_done
diff --git a/t/t3030-merge-recursive.sh b/t/t3030-merge-recursive.sh
index ff641b3..2170758 100755
--- a/t/t3030-merge-recursive.sh
+++ b/t/t3030-merge-recursive.sh
@@ -452,6 +452,34 @@ test_expect_success 'merge-recursive d/f conflict result' '
'
+test_expect_success SYMLINKS 'dir in working tree with symlink ancestor does not produce d/f conflict' '
+ git init sym &&
+ (
+ cd sym &&
+ ln -s . foo &&
+ mkdir bar &&
+ >bar/file &&
+ git add foo bar/file &&
+ git commit -m "foo symlink" &&
+
+ git checkout -b branch1 &&
+ git commit --allow-empty -m "empty commit" &&
+
+ git checkout master &&
+ git rm foo &&
+ mkdir foo &&
+ >foo/bar &&
+ git add foo/bar &&
+ git commit -m "replace foo symlink with real foo dir and foo/bar file" &&
+
+ git checkout branch1 &&
+
+ git cherry-pick master &&
+ test_path_is_dir foo &&
+ test_path_is_file foo/bar
+ )
+'
+
test_expect_success 'reset and 3-way merge' '
git reset --hard "$c2" &&
@@ -667,15 +695,22 @@ test_expect_success 'merging with triple rename across D/F conflict' '
test_expect_success 'merge-recursive remembers the names of all base trees' '
git reset --hard HEAD &&
+ # make the index match $c1 so that merge-recursive below does not
+ # fail early
+ git diff --binary HEAD $c1 -- | git apply --cached &&
+
# more trees than static slots used by oid_to_hex()
for commit in $c0 $c2 $c4 $c5 $c6 $c7
do
git rev-parse "$commit^{tree}"
done >trees &&
- # ignore the return code -- it only fails because the input is weird
+ # ignore the return code; it only fails because the input is weird...
test_must_fail git -c merge.verbosity=5 merge-recursive $(cat trees) -- $c1 $c3 >out &&
+ # ...but make sure it fails in the expected way
+ test_i18ngrep CONFLICT.*rename/rename out &&
+
# merge-recursive prints in reverse order, but we do not care
sort <trees >expect &&
sed -n "s/^virtual //p" out | sort >actual &&
diff --git a/t/t3206-range-diff.sh b/t/t3206-range-diff.sh
index 0120f76..0579cd9 100755
--- a/t/t3206-range-diff.sh
+++ b/t/t3206-range-diff.sh
@@ -333,6 +333,46 @@ test_expect_success 'renamed file' '
test_cmp expected actual
'
+test_expect_success 'file with mode only change' '
+ git range-diff --no-color --submodule=log topic...mode-only-change >actual &&
+ sed s/Z/\ /g >expected <<-EOF &&
+ 1: fccce22 ! 1: 4d39cb3 s/4/A/
+ @@ Metadata
+ ZAuthor: Thomas Rast <trast@inf.ethz.ch>
+ Z
+ Z ## Commit message ##
+ - s/4/A/
+ + s/4/A/ + add other-file
+ Z
+ Z ## file ##
+ Z@@
+ @@ file
+ Z A
+ Z 6
+ Z 7
+ +
+ + ## other-file (new) ##
+ 2: 147e64e ! 2: 26c107f s/11/B/
+ @@ Metadata
+ ZAuthor: Thomas Rast <trast@inf.ethz.ch>
+ Z
+ Z ## Commit message ##
+ - s/11/B/
+ + s/11/B/ + mode change other-file
+ Z
+ Z ## file ##
+ Z@@ file: A
+ @@ file: A
+ Z 12
+ Z 13
+ Z 14
+ +
+ + ## other-file (mode change 100644 => 100755) ##
+ 3: a63e992 = 3: 4c1e0f5 s/12/B/
+ EOF
+ test_cmp expected actual
+'
+
test_expect_success 'file added and later removed' '
git range-diff --no-color --submodule=log topic...added-removed >actual &&
sed s/Z/\ /g >expected <<-EOF &&
@@ -461,4 +501,8 @@ test_expect_success 'format-patch --range-diff as commentary' '
grep "> 1: .* new message" 0001-*
'
+test_expect_success 'range-diff overrides diff.noprefix internally' '
+ git -c diff.noprefix=true range-diff HEAD^...
+'
+
test_done
diff --git a/t/t3206/history.export b/t/t3206/history.export
index 7bb3814..4c808e5 100644
--- a/t/t3206/history.export
+++ b/t/t3206/history.export
@@ -55,7 +55,7 @@ A
19
20
-commit refs/heads/topic
+commit refs/heads/mode-only-change
mark :4
author Thomas Rast <trast@inf.ethz.ch> 1374485014 +0200
committer Thomas Rast <trast@inf.ethz.ch> 1374485014 +0200
@@ -678,3 +678,32 @@ s/12/B/
from :55
M 100644 :9 renamed-file
+commit refs/heads/mode-only-change
+mark :57
+author Thomas Rast <trast@inf.ethz.ch> 1374485024 +0200
+committer Thomas Gummerer <t.gummerer@gmail.com> 1570473767 +0100
+data 24
+s/4/A/ + add other-file
+from :4
+M 100644 :5 file
+M 100644 :49 other-file
+
+commit refs/heads/mode-only-change
+mark :58
+author Thomas Rast <trast@inf.ethz.ch> 1374485036 +0200
+committer Thomas Gummerer <t.gummerer@gmail.com> 1570473768 +0100
+data 33
+s/11/B/ + mode change other-file
+from :57
+M 100644 :7 file
+M 100755 :49 other-file
+
+commit refs/heads/mode-only-change
+mark :59
+author Thomas Rast <trast@inf.ethz.ch> 1374485044 +0200
+committer Thomas Gummerer <t.gummerer@gmail.com> 1570473768 +0100
+data 8
+s/12/B/
+from :58
+M 100644 :9 file
+
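
In the fast-import stream added above, each 'data <n>' header gives the exact byte count of the commit message that follows, including its trailing newline: "s/4/A/ + add other-file" is 23 characters plus LF, hence 'data 24'; "s/11/B/ + mode change other-file" is 32 plus LF, hence 'data 33'; and "s/12/B/" is 7 plus LF, hence 'data 8'.
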
diff --git a/t/t3404-rebase-interactive.sh b/t/t3404-rebase-interactive.sh
index 29a3584..d2dfbe4 100755
--- a/t/t3404-rebase-interactive.sh
+++ b/t/t3404-rebase-interactive.sh
@@ -1014,9 +1014,9 @@ test_expect_success 'rebase -i --root fixup root commit' '
test 0 = $(git cat-file commit HEAD | grep -c ^parent\ )
'
-test_expect_success 'rebase -i --root reword root commit' '
+test_expect_success 'rebase -i --root reword original root commit' '
test_when_finished "test_might_fail git rebase --abort" &&
- git checkout -b reword-root-branch master &&
+ git checkout -b reword-original-root-branch master &&
set_fake_editor &&
FAKE_LINES="reword 1 2" FAKE_COMMIT_MESSAGE="A changed" \
git rebase -i --root &&
@@ -1024,6 +1024,16 @@ test_expect_success 'rebase -i --root reword root commit' '
test -z "$(git show -s --format=%p HEAD^)"
'
+test_expect_success 'rebase -i --root reword new root commit' '
+ test_when_finished "test_might_fail git rebase --abort" &&
+ git checkout -b reword-now-root-branch master &&
+ set_fake_editor &&
+ FAKE_LINES="reword 3 1" FAKE_COMMIT_MESSAGE="C changed" \
+ git rebase -i --root &&
+ git show HEAD^ | grep "C changed" &&
+ test -z "$(git show -s --format=%p HEAD^)"
+'
+
test_expect_success 'rebase -i --root when root has untracked file conflict' '
test_when_finished "reset_rebase" &&
git checkout -b failing-root-pick A &&
@@ -1052,7 +1062,7 @@ test_expect_success 'rebase -i --root reword root when root has untracked file c
'
test_expect_success C_LOCALE_OUTPUT 'rebase --edit-todo does not work on non-interactive rebase' '
- git checkout reword-root-branch &&
+ git checkout reword-original-root-branch &&
git reset --hard &&
git checkout conflict-branch &&
set_fake_editor &&
diff --git a/t/t3429-rebase-edit-todo.sh b/t/t3429-rebase-edit-todo.sh
index 76f6d30..8739cb6 100755
--- a/t/t3429-rebase-edit-todo.sh
+++ b/t/t3429-rebase-edit-todo.sh
@@ -3,9 +3,15 @@
test_description='rebase should reread the todo file if an exec modifies it'
. ./test-lib.sh
+. "$TEST_DIRECTORY"/lib-rebase.sh
+
+test_expect_success 'setup' '
+ test_commit first file &&
+ test_commit second file &&
+ test_commit third file
+'
test_expect_success 'rebase exec modifies rebase-todo' '
- test_commit initial &&
todo=.git/rebase-merge/git-rebase-todo &&
git rebase HEAD -x "echo exec touch F >>$todo" &&
test -e F
@@ -33,4 +39,17 @@ test_expect_success SHA1 'loose object cache vs re-reading todo list' '
git rebase HEAD -x "./append-todo.sh 5 6"
'
+test_expect_success 'todo is re-read after reword and squash' '
+ write_script reword-editor.sh <<-\EOS &&
+ GIT_SEQUENCE_EDITOR="echo \"exec echo $(cat file) >>actual\" >>" \
+ git rebase --edit-todo
+ EOS
+
+ test_write_lines first third >expected &&
+ set_fake_editor &&
+ GIT_SEQUENCE_EDITOR="$EDITOR" FAKE_LINES="reword 1 squash 2 fixup 3" \
+ GIT_EDITOR=./reword-editor.sh git rebase -i --root third &&
+ test_cmp expected actual
+'
+
test_done
diff --git a/t/t3701-add-interactive.sh b/t/t3701-add-interactive.sh
index 69991a3..d50e165 100755
--- a/t/t3701-add-interactive.sh
+++ b/t/t3701-add-interactive.sh
@@ -314,7 +314,7 @@ test_expect_success C_LOCALE_OUTPUT 'add first line works' '
git commit -am "clear local changes" &&
git apply patch &&
printf "%s\n" s y y | git add -p file 2>error |
- sed -n -e "s/^Stage this hunk[^@]*\(@@ .*\)/\1/" \
+ sed -n -e "s/^([1-2]\/[1-2]) Stage this hunk[^@]*\(@@ .*\)/\1/" \
-e "/^[-+@ \\\\]"/p >output &&
test_must_be_empty error &&
git diff --cached >diff &&
diff --git a/t/t3903-stash.sh b/t/t3903-stash.sh
index 820b350..580bfbd 100755
--- a/t/t3903-stash.sh
+++ b/t/t3903-stash.sh
@@ -1253,4 +1253,20 @@ test_expect_success 'stash --keep-index with file deleted in index does not resu
test_path_is_missing to-remove
'
+test_expect_success 'stash apply should succeed with unmodified file' '
+ echo base >file &&
+ git add file &&
+ git commit -m base &&
+
+ # now stash a modification
+ echo modified >file &&
+ git stash &&
+
+ # make the file stat dirty
+ cp file other &&
+ mv other file &&
+
+ git stash apply
+'
+
test_done
diff --git a/t/t3908-stash-in-worktree.sh b/t/t3908-stash-in-worktree.sh
new file mode 100755
index 0000000..2b2b366
--- /dev/null
+++ b/t/t3908-stash-in-worktree.sh
@@ -0,0 +1,27 @@
+#!/bin/sh
+#
+# Copyright (c) 2019 Johannes E Schindelin
+#
+
+test_description='Test git stash in a worktree'
+
+. ./test-lib.sh
+
+test_expect_success 'setup' '
+ test_commit initial &&
+ git worktree add wt &&
+ test_commit -C wt in-worktree
+'
+
+test_expect_success 'apply in subdirectory' '
+ mkdir wt/subdir &&
+ (
+ cd wt/subdir &&
+ echo modified >../initial.t &&
+ git stash &&
+ git stash apply >out
+ ) &&
+ grep "\.\.\/initial\.t" wt/subdir/out
+'
+
+test_done
diff --git a/t/t4014-format-patch.sh b/t/t4014-format-patch.sh
index 83f5261..72b0989 100755
--- a/t/t4014-format-patch.sh
+++ b/t/t4014-format-patch.sh
@@ -1610,8 +1610,9 @@ test_expect_success 'format-patch format.outputDirectory option' '
test_config format.outputDirectory patches &&
rm -fr patches &&
git format-patch master..side &&
- git rev-list master..side >list &&
- test_line_count = $(ls patches | wc -l) list
+ count=$(git rev-list --count master..side) &&
+ ls patches >list &&
+ test_line_count = $count list
'
test_expect_success 'format-patch -o overrides format.outputDirectory' '
diff --git a/t/t4038-diff-combined.sh b/t/t4038-diff-combined.sh
index d4afe12..b9d876e 100755
--- a/t/t4038-diff-combined.sh
+++ b/t/t4038-diff-combined.sh
@@ -509,7 +509,7 @@ test_expect_success FUNNYNAMES '--combined-all-paths and --raw and funny names'
test_expect_success FUNNYNAMES '--combined-all-paths and --raw -and -z and funny names' '
printf "aaf8087c3cbd4db8e185a2d074cf27c53cfb75d7\0::100644 100644 100644 f00c965d8307308469e537302baa73048488f162 088bd5d92c2a8e0203ca8e7e4c2a5c692f6ae3f7 333b9c62519f285e1854830ade0fe1ef1d40ee1b RR\0file\twith\ttabs\0i\tam\ttabbed\0fickle\tnaming\0" >expect &&
git diff-tree -c -M --raw --combined-all-paths -z HEAD >actual &&
- test_cmp -a expect actual
+ test_cmp expect actual
'
test_expect_success FUNNYNAMES '--combined-all-paths and --cc and funny names' '
diff --git a/t/t4202-log.sh b/t/t4202-log.sh
index e88ccb0..e803ba4 100755
--- a/t/t4202-log.sh
+++ b/t/t4202-log.sh
@@ -837,6 +837,21 @@ test_expect_success 'decorate-refs and decorate-refs-exclude' '
test_cmp expect.decorate actual
'
+test_expect_success 'decorate-refs-exclude and simplify-by-decoration' '
+ cat >expect.decorate <<-\EOF &&
+ Merge-tag-reach (HEAD -> master)
+ reach (tag: reach, reach)
+ seventh (tag: seventh)
+ Merge-branch-tangle
+ Merge-branch-side-early-part-into-tangle (tangle)
+ tangle-a (tag: tangle-a)
+ EOF
+ git log -n6 --decorate=short --pretty="tformat:%f%d" \
+ --decorate-refs-exclude="*octopus*" \
+ --simplify-by-decoration >actual &&
+ test_cmp expect.decorate actual
+'
+
test_expect_success 'log.decorate config parsing' '
git log --oneline --decorate=full >expect.full &&
git log --oneline --decorate=short >expect.short &&
diff --git a/t/t4210-log-i18n.sh b/t/t4210-log-i18n.sh
index 7c51943..6e61f57 100755
--- a/t/t4210-log-i18n.sh
+++ b/t/t4210-log-i18n.sh
@@ -1,12 +1,15 @@
#!/bin/sh
test_description='test log with i18n features'
-. ./test-lib.sh
+. ./lib-gettext.sh
# two forms of é
utf8_e=$(printf '\303\251')
latin1_e=$(printf '\351')
+# invalid UTF-8
+invalid_e=$(printf '\303\50)') # ")" at end to close opening "("
+
test_expect_success 'create commits in different encodings' '
test_tick &&
cat >msg <<-EOF &&
@@ -48,9 +51,43 @@ test_expect_success !MINGW 'log --grep does not find non-reencoded values (utf8)
test_must_be_empty actual
'
-test_expect_success 'log --grep does not find non-reencoded values (latin1)' '
+test_expect_success !MINGW 'log --grep does not find non-reencoded values (latin1)' '
git log --encoding=ISO-8859-1 --format=%s --grep=$utf8_e >actual &&
test_must_be_empty actual
'
+for engine in fixed basic extended perl
+do
+ prereq=
+ if test $engine = "perl"
+ then
+ prereq="PCRE"
+ else
+ prereq=""
+ fi
+ force_regex=
+ if test $engine != "fixed"
+ then
+ force_regex=.*
+ fi
+ test_expect_success !MINGW,GETTEXT_LOCALE,$prereq "-c grep.patternType=$engine log --grep searches in log output encoding (latin1 + locale)" "
+ cat >expect <<-\EOF &&
+ latin1
+ utf8
+ EOF
+ LC_ALL=\"$is_IS_locale\" git -c grep.patternType=$engine log --encoding=ISO-8859-1 --format=%s --grep=\"$force_regex$latin1_e\" >actual &&
+ test_cmp expect actual
+ "
+
+ test_expect_success !MINGW,GETTEXT_LOCALE,$prereq "-c grep.patternType=$engine log --grep does not find non-reencoded values (latin1 + locale)" "
+ LC_ALL=\"$is_IS_locale\" git -c grep.patternType=$engine log --encoding=ISO-8859-1 --format=%s --grep=\"$force_regex$utf8_e\" >actual &&
+ test_must_be_empty actual
+ "
+
+ test_expect_success !MINGW,GETTEXT_LOCALE,$prereq "-c grep.patternType=$engine log --grep does not die on invalid UTF-8 value (latin1 + locale + invalid needle)" "
+ LC_ALL=\"$is_IS_locale\" git -c grep.patternType=$engine log --encoding=ISO-8859-1 --format=%s --grep=\"$force_regex$invalid_e\" >actual &&
+ test_must_be_empty actual
+ "
+done
+
test_done
diff --git a/t/t4214-log-graph-octopus.sh b/t/t4214-log-graph-octopus.sh
index dab96c8..3ae8e51 100755
--- a/t/t4214-log-graph-octopus.sh
+++ b/t/t4214-log-graph-octopus.sh
@@ -5,6 +5,25 @@ test_description='git log --graph of skewed left octopus merge.'
. ./test-lib.sh
test_expect_success 'set up merge history' '
+ test_commit initial &&
+ for i in 1 2 3 4 ; do
+ git checkout master -b $i || return $?
+ # Make tag name different from branch name, to avoid
+ # ambiguity error when calling checkout.
+ test_commit $i $i $i tag$i || return $?
+ done &&
+ git checkout 1 -b merge &&
+ test_merge octopus-merge 1 2 3 4 &&
+ test_commit after-merge &&
+ git checkout 1 -b L &&
+ test_commit left &&
+ git checkout 4 -b crossover &&
+ test_commit after-4 &&
+ git checkout initial -b more-L &&
+ test_commit after-initial
+'
+
+test_expect_success 'log --graph with tricky octopus merge, no color' '
cat >expect.uncolored <<-\EOF &&
* left
| *---. octopus-merge
@@ -19,6 +38,13 @@ test_expect_success 'set up merge history' '
|/
* initial
EOF
+ git log --color=never --graph --date-order --pretty=tformat:%s left octopus-merge >actual.raw &&
+ sed "s/ *\$//" actual.raw >actual &&
+ test_cmp expect.uncolored actual
+'
+
+test_expect_success 'log --graph with tricky octopus merge with colors' '
+ test_config log.graphColors red,green,yellow,blue,magenta,cyan &&
cat >expect.colors <<-\EOF &&
* left
<RED>|<RESET> *<BLUE>-<RESET><BLUE>-<RESET><MAGENTA>-<RESET><MAGENTA>.<RESET> octopus-merge
@@ -33,33 +59,11 @@ test_expect_success 'set up merge history' '
<MAGENTA>|<RESET><MAGENTA>/<RESET>
* initial
EOF
- test_commit initial &&
- for i in 1 2 3 4 ; do
- git checkout master -b $i || return $?
- # Make tag name different from branch name, to avoid
- # ambiguity error when calling checkout.
- test_commit $i $i $i tag$i || return $?
- done &&
- git checkout 1 -b merge &&
- test_tick &&
- git merge -m octopus-merge 1 2 3 4 &&
- git checkout 1 -b L &&
- test_commit left
-'
-
-test_expect_success 'log --graph with tricky octopus merge with colors' '
- test_config log.graphColors red,green,yellow,blue,magenta,cyan &&
- git log --color=always --graph --date-order --pretty=tformat:%s --all >actual.colors.raw &&
+ git log --color=always --graph --date-order --pretty=tformat:%s left octopus-merge >actual.colors.raw &&
test_decode_color <actual.colors.raw | sed "s/ *\$//" >actual.colors &&
test_cmp expect.colors actual.colors
'
-test_expect_success 'log --graph with tricky octopus merge, no color' '
- git log --color=never --graph --date-order --pretty=tformat:%s --all >actual.raw &&
- sed "s/ *\$//" actual.raw >actual &&
- test_cmp expect.uncolored actual
-'
-
# Repeat the previous two tests with "normal" octopus merge (i.e.,
# without the first parent skewing to the "left" branch column).
@@ -76,7 +80,7 @@ test_expect_success 'log --graph with normal octopus merge, no color' '
|/
* initial
EOF
- git log --color=never --graph --date-order --pretty=tformat:%s merge >actual.raw &&
+ git log --color=never --graph --date-order --pretty=tformat:%s octopus-merge >actual.raw &&
sed "s/ *\$//" actual.raw >actual &&
test_cmp expect.uncolored actual
'
@@ -95,8 +99,283 @@ test_expect_success 'log --graph with normal octopus merge with colors' '
* initial
EOF
test_config log.graphColors red,green,yellow,blue,magenta,cyan &&
- git log --color=always --graph --date-order --pretty=tformat:%s merge >actual.colors.raw &&
+ git log --color=always --graph --date-order --pretty=tformat:%s octopus-merge >actual.colors.raw &&
+ test_decode_color <actual.colors.raw | sed "s/ *\$//" >actual.colors &&
+ test_cmp expect.colors actual.colors
+'
+
+test_expect_success 'log --graph with normal octopus merge and child, no color' '
+ cat >expect.uncolored <<-\EOF &&
+ * after-merge
+ *---. octopus-merge
+ |\ \ \
+ | | | * 4
+ | | * | 3
+ | | |/
+ | * | 2
+ | |/
+ * | 1
+ |/
+ * initial
+ EOF
+ git log --color=never --graph --date-order --pretty=tformat:%s after-merge >actual.raw &&
+ sed "s/ *\$//" actual.raw >actual &&
+ test_cmp expect.uncolored actual
+'
+
+test_expect_failure 'log --graph with normal octopus and child merge with colors' '
+ cat >expect.colors <<-\EOF &&
+ * after-merge
+ *<BLUE>-<RESET><BLUE>-<RESET><MAGENTA>-<RESET><MAGENTA>.<RESET> octopus-merge
+ <GREEN>|<RESET><YELLOW>\<RESET> <BLUE>\<RESET> <MAGENTA>\<RESET>
+ <GREEN>|<RESET> <YELLOW>|<RESET> <BLUE>|<RESET> * 4
+ <GREEN>|<RESET> <YELLOW>|<RESET> * <MAGENTA>|<RESET> 3
+ <GREEN>|<RESET> <YELLOW>|<RESET> <MAGENTA>|<RESET><MAGENTA>/<RESET>
+ <GREEN>|<RESET> * <MAGENTA>|<RESET> 2
+ <GREEN>|<RESET> <MAGENTA>|<RESET><MAGENTA>/<RESET>
+ * <MAGENTA>|<RESET> 1
+ <MAGENTA>|<RESET><MAGENTA>/<RESET>
+ * initial
+ EOF
+ test_config log.graphColors red,green,yellow,blue,magenta,cyan &&
+ git log --color=always --graph --date-order --pretty=tformat:%s after-merge >actual.colors.raw &&
+ test_decode_color <actual.colors.raw | sed "s/ *\$//" >actual.colors &&
+ test_cmp expect.colors actual.colors
+'
+
+test_expect_success 'log --graph with tricky octopus merge and its child, no color' '
+ cat >expect.uncolored <<-\EOF &&
+ * left
+ | * after-merge
+ | *---. octopus-merge
+ | |\ \ \
+ |/ / / /
+ | | | * 4
+ | | * | 3
+ | | |/
+ | * | 2
+ | |/
+ * | 1
+ |/
+ * initial
+ EOF
+ git log --color=never --graph --date-order --pretty=tformat:%s left after-merge >actual.raw &&
+ sed "s/ *\$//" actual.raw >actual &&
+ test_cmp expect.uncolored actual
+'
+
+test_expect_failure 'log --graph with tricky octopus merge and its child with colors' '
+ test_config log.graphColors red,green,yellow,blue,magenta,cyan &&
+ cat >expect.colors <<-\EOF &&
+ * left
+ <RED>|<RESET> * after-merge
+ <RED>|<RESET> *<MAGENTA>-<RESET><MAGENTA>-<RESET><CYAN>-<RESET><CYAN>.<RESET> octopus-merge
+ <RED>|<RESET> <RED>|<RESET><BLUE>\<RESET> <MAGENTA>\<RESET> <CYAN>\<RESET>
+ <RED>|<RESET><RED>/<RESET> <BLUE>/<RESET> <MAGENTA>/<RESET> <CYAN>/<RESET>
+ <RED>|<RESET> <BLUE>|<RESET> <MAGENTA>|<RESET> * 4
+ <RED>|<RESET> <BLUE>|<RESET> * <CYAN>|<RESET> 3
+ <RED>|<RESET> <BLUE>|<RESET> <CYAN>|<RESET><CYAN>/<RESET>
+ <RED>|<RESET> * <CYAN>|<RESET> 2
+ <RED>|<RESET> <CYAN>|<RESET><CYAN>/<RESET>
+ * <CYAN>|<RESET> 1
+ <CYAN>|<RESET><CYAN>/<RESET>
+ * initial
+ EOF
+ git log --color=always --graph --date-order --pretty=tformat:%s left after-merge >actual.colors.raw &&
+ test_decode_color <actual.colors.raw | sed "s/ *\$//" >actual.colors &&
+ test_cmp expect.colors actual.colors
+'
+
+test_expect_success 'log --graph with crossover in octopus merge, no color' '
+ cat >expect.uncolored <<-\EOF &&
+ * after-4
+ | *---. octopus-merge
+ | |\ \ \
+ | |_|_|/
+ |/| | |
+ * | | | 4
+ | | | * 3
+ | |_|/
+ |/| |
+ | | * 2
+ | |/
+ |/|
+ | * 1
+ |/
+ * initial
+ EOF
+ git log --color=never --graph --date-order --pretty=tformat:%s after-4 octopus-merge >actual.raw &&
+ sed "s/ *\$//" actual.raw >actual &&
+ test_cmp expect.uncolored actual
+'
+
+test_expect_failure 'log --graph with crossover in octopus merge with colors' '
+ test_config log.graphColors red,green,yellow,blue,magenta,cyan &&
+ cat >expect.colors <<-\EOF &&
+ * after-4
+ <RED>|<RESET> *<BLUE>-<RESET><BLUE>-<RESET><RED>-<RESET><RED>.<RESET> octopus-merge
+ <RED>|<RESET> <GREEN>|<RESET><YELLOW>\<RESET> <BLUE>\<RESET> <RED>\<RESET>
+ <RED>|<RESET> <GREEN>|<RESET><RED>_<RESET><YELLOW>|<RESET><RED>_<RESET><BLUE>|<RESET><RED>/<RESET>
+ <RED>|<RESET><RED>/<RESET><GREEN>|<RESET> <YELLOW>|<RESET> <BLUE>|<RESET>
+ * <GREEN>|<RESET> <YELLOW>|<RESET> <BLUE>|<RESET> 4
+ <MAGENTA>|<RESET> <GREEN>|<RESET> <YELLOW>|<RESET> * 3
+ <MAGENTA>|<RESET> <GREEN>|<RESET><MAGENTA>_<RESET><YELLOW>|<RESET><MAGENTA>/<RESET>
+ <MAGENTA>|<RESET><MAGENTA>/<RESET><GREEN>|<RESET> <YELLOW>|<RESET>
+ <MAGENTA>|<RESET> <GREEN>|<RESET> * 2
+ <MAGENTA>|<RESET> <GREEN>|<RESET><MAGENTA>/<RESET>
+ <MAGENTA>|<RESET><MAGENTA>/<RESET><GREEN>|<RESET>
+ <MAGENTA>|<RESET> * 1
+ <MAGENTA>|<RESET><MAGENTA>/<RESET>
+ * initial
+ EOF
+ git log --color=always --graph --date-order --pretty=tformat:%s after-4 octopus-merge >actual.colors.raw &&
+ test_decode_color <actual.colors.raw | sed "s/ *\$//" >actual.colors &&
+ test_cmp expect.colors actual.colors
+'
+
+test_expect_success 'log --graph with crossover in octopus merge and its child, no color' '
+ cat >expect.uncolored <<-\EOF &&
+ * after-4
+ | * after-merge
+ | *---. octopus-merge
+ | |\ \ \
+ | |_|_|/
+ |/| | |
+ * | | | 4
+ | | | * 3
+ | |_|/
+ |/| |
+ | | * 2
+ | |/
+ |/|
+ | * 1
+ |/
+ * initial
+ EOF
+ git log --color=never --graph --date-order --pretty=tformat:%s after-4 after-merge >actual.raw &&
+ sed "s/ *\$//" actual.raw >actual &&
+ test_cmp expect.uncolored actual
+'
+
+test_expect_failure 'log --graph with crossover in octopus merge and its child with colors' '
+ test_config log.graphColors red,green,yellow,blue,magenta,cyan &&
+ cat >expect.colors <<-\EOF &&
+ * after-4
+ <RED>|<RESET> * after-merge
+ <RED>|<RESET> *<MAGENTA>-<RESET><MAGENTA>-<RESET><RED>-<RESET><RED>.<RESET> octopus-merge
+ <RED>|<RESET> <YELLOW>|<RESET><BLUE>\<RESET> <MAGENTA>\<RESET> <RED>\<RESET>
+ <RED>|<RESET> <YELLOW>|<RESET><RED>_<RESET><BLUE>|<RESET><RED>_<RESET><MAGENTA>|<RESET><RED>/<RESET>
+ <RED>|<RESET><RED>/<RESET><YELLOW>|<RESET> <BLUE>|<RESET> <MAGENTA>|<RESET>
+ * <YELLOW>|<RESET> <BLUE>|<RESET> <MAGENTA>|<RESET> 4
+ <CYAN>|<RESET> <YELLOW>|<RESET> <BLUE>|<RESET> * 3
+ <CYAN>|<RESET> <YELLOW>|<RESET><CYAN>_<RESET><BLUE>|<RESET><CYAN>/<RESET>
+ <CYAN>|<RESET><CYAN>/<RESET><YELLOW>|<RESET> <BLUE>|<RESET>
+ <CYAN>|<RESET> <YELLOW>|<RESET> * 2
+ <CYAN>|<RESET> <YELLOW>|<RESET><CYAN>/<RESET>
+ <CYAN>|<RESET><CYAN>/<RESET><YELLOW>|<RESET>
+ <CYAN>|<RESET> * 1
+ <CYAN>|<RESET><CYAN>/<RESET>
+ * initial
+ EOF
+ git log --color=always --graph --date-order --pretty=tformat:%s after-4 after-merge >actual.colors.raw &&
+ test_decode_color <actual.colors.raw | sed "s/ *\$//" >actual.colors &&
+ test_cmp expect.colors actual.colors
+'
+
+test_expect_success 'log --graph with unrelated commit and octopus tip, no color' '
+ cat >expect.uncolored <<-\EOF &&
+ * after-initial
+ | *---. octopus-merge
+ | |\ \ \
+ | | | | * 4
+ | |_|_|/
+ |/| | |
+ | | | * 3
+ | |_|/
+ |/| |
+ | | * 2
+ | |/
+ |/|
+ | * 1
+ |/
+ * initial
+ EOF
+ git log --color=never --graph --date-order --pretty=tformat:%s after-initial octopus-merge >actual.raw &&
+ sed "s/ *\$//" actual.raw >actual &&
+ test_cmp expect.uncolored actual
+'
+
+test_expect_success 'log --graph with unrelated commit and octopus tip with colors' '
+ test_config log.graphColors red,green,yellow,blue,magenta,cyan &&
+ cat >expect.colors <<-\EOF &&
+ * after-initial
+ <RED>|<RESET> *<BLUE>-<RESET><BLUE>-<RESET><MAGENTA>-<RESET><MAGENTA>.<RESET> octopus-merge
+ <RED>|<RESET> <GREEN>|<RESET><YELLOW>\<RESET> <BLUE>\<RESET> <MAGENTA>\<RESET>
+ <RED>|<RESET> <GREEN>|<RESET> <YELLOW>|<RESET> <BLUE>|<RESET> * 4
+ <RED>|<RESET> <GREEN>|<RESET><RED>_<RESET><YELLOW>|<RESET><RED>_<RESET><BLUE>|<RESET><RED>/<RESET>
+ <RED>|<RESET><RED>/<RESET><GREEN>|<RESET> <YELLOW>|<RESET> <BLUE>|<RESET>
+ <RED>|<RESET> <GREEN>|<RESET> <YELLOW>|<RESET> * 3
+ <RED>|<RESET> <GREEN>|<RESET><RED>_<RESET><YELLOW>|<RESET><RED>/<RESET>
+ <RED>|<RESET><RED>/<RESET><GREEN>|<RESET> <YELLOW>|<RESET>
+ <RED>|<RESET> <GREEN>|<RESET> * 2
+ <RED>|<RESET> <GREEN>|<RESET><RED>/<RESET>
+ <RED>|<RESET><RED>/<RESET><GREEN>|<RESET>
+ <RED>|<RESET> * 1
+ <RED>|<RESET><RED>/<RESET>
+ * initial
+ EOF
+ git log --color=always --graph --date-order --pretty=tformat:%s after-initial octopus-merge >actual.colors.raw &&
+ test_decode_color <actual.colors.raw | sed "s/ *\$//" >actual.colors &&
+ test_cmp expect.colors actual.colors
+'
+
+test_expect_success 'log --graph with unrelated commit and octopus child, no color' '
+ cat >expect.uncolored <<-\EOF &&
+ * after-initial
+ | * after-merge
+ | *---. octopus-merge
+ | |\ \ \
+ | | | | * 4
+ | |_|_|/
+ |/| | |
+ | | | * 3
+ | |_|/
+ |/| |
+ | | * 2
+ | |/
+ |/|
+ | * 1
+ |/
+ * initial
+ EOF
+ git log --color=never --graph --date-order --pretty=tformat:%s after-initial after-merge >actual.raw &&
+ sed "s/ *\$//" actual.raw >actual &&
+ test_cmp expect.uncolored actual
+'
+
+test_expect_failure 'log --graph with unrelated commit and octopus child with colors' '
+ test_config log.graphColors red,green,yellow,blue,magenta,cyan &&
+ cat >expect.colors <<-\EOF &&
+ * after-initial
+ <RED>|<RESET> * after-merge
+ <RED>|<RESET> *<MAGENTA>-<RESET><MAGENTA>-<RESET><CYAN>-<RESET><CYAN>.<RESET> octopus-merge
+ <RED>|<RESET> <YELLOW>|<RESET><BLUE>\<RESET> <MAGENTA>\<RESET> <CYAN>\<RESET>
+ <RED>|<RESET> <YELLOW>|<RESET> <BLUE>|<RESET> <MAGENTA>|<RESET> * 4
+ <RED>|<RESET> <YELLOW>|<RESET><RED>_<RESET><BLUE>|<RESET><RED>_<RESET><MAGENTA>|<RESET><RED>/<RESET>
+ <RED>|<RESET><RED>/<RESET><YELLOW>|<RESET> <BLUE>|<RESET> <MAGENTA>|<RESET>
+ <RED>|<RESET> <YELLOW>|<RESET> <BLUE>|<RESET> * 3
+ <RED>|<RESET> <YELLOW>|<RESET><RED>_<RESET><BLUE>|<RESET><RED>/<RESET>
+ <RED>|<RESET><RED>/<RESET><YELLOW>|<RESET> <BLUE>|<RESET>
+ <RED>|<RESET> <YELLOW>|<RESET> * 2
+ <RED>|<RESET> <YELLOW>|<RESET><RED>/<RESET>
+ <RED>|<RESET><RED>/<RESET><YELLOW>|<RESET>
+ <RED>|<RESET> * 1
+ <RED>|<RESET><RED>/<RESET>
+ * initial
+ EOF
+ git log --color=always --graph --date-order --pretty=tformat:%s after-initial after-merge >actual.colors.raw &&
test_decode_color <actual.colors.raw | sed "s/ *\$//" >actual.colors &&
test_cmp expect.colors actual.colors
'
+
test_done
diff --git a/t/t5318-commit-graph.sh b/t/t5318-commit-graph.sh
index ab3eccf..d42b3ef 100755
--- a/t/t5318-commit-graph.sh
+++ b/t/t5318-commit-graph.sh
@@ -124,6 +124,42 @@ test_expect_success 'Add more commits' '
git repack
'
+test_expect_success 'commit-graph write progress off for redirected stderr' '
+ cd "$TRASH_DIRECTORY/full" &&
+ git commit-graph write 2>err &&
+ test_line_count = 0 err
+'
+
+test_expect_success 'commit-graph write force progress on for stderr' '
+ cd "$TRASH_DIRECTORY/full" &&
+ git commit-graph write --progress 2>err &&
+ test_file_not_empty err
+'
+
+test_expect_success 'commit-graph write with the --no-progress option' '
+ cd "$TRASH_DIRECTORY/full" &&
+ git commit-graph write --no-progress 2>err &&
+ test_line_count = 0 err
+'
+
+test_expect_success 'commit-graph verify progress off for redirected stderr' '
+ cd "$TRASH_DIRECTORY/full" &&
+ git commit-graph verify 2>err &&
+ test_line_count = 0 err
+'
+
+test_expect_success 'commit-graph verify force progress on for stderr' '
+ cd "$TRASH_DIRECTORY/full" &&
+ git commit-graph verify --progress 2>err &&
+ test_file_not_empty err
+'
+
+test_expect_success 'commit-graph verify with the --no-progress option' '
+ cd "$TRASH_DIRECTORY/full" &&
+ git commit-graph verify --no-progress 2>err &&
+ test_line_count = 0 err
+'
+
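
The six tests above pin down the usual behaviour: progress goes to stderr only when it is a terminal, unless --progress or --no-progress says otherwise. A sketch of how such a default is commonly wired up (the variable and option table here are illustrative, not the actual commit-graph option handling):

    /* Sketch only; not the actual builtin/commit-graph.c code. */
    int progress = isatty(2);   /* default: only when stderr is a terminal */
    struct option options[] = {
            OPT_BOOL(0, "progress", &progress, "force progress reporting"),
            OPT_END()
    };
    /* After parse_options(), progress is 1 for --progress, 0 for --no-progress,
     * and left at isatty(2) when neither option was given. */
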
# Current graph structure:
#
# __M3___
@@ -585,4 +621,47 @@ test_expect_success 'get_commit_tree_in_graph works for non-the_repository' '
test_cmp expect actual
'
+test_expect_success 'corrupt commit-graph write (broken parent)' '
+ rm -rf repo &&
+ git init repo &&
+ (
+ cd repo &&
+ empty="$(git mktree </dev/null)" &&
+ cat >broken <<-EOF &&
+ tree $empty
+ parent 0000000000000000000000000000000000000000
+ author whatever <whatever@example.com> 1234 -0000
+ committer whatever <whatever@example.com> 1234 -0000
+
+ broken commit
+ EOF
+ broken="$(git hash-object -w -t commit --literally broken)" &&
+ git commit-tree -p "$broken" -m "good commit" "$empty" >good &&
+ test_must_fail git commit-graph write --stdin-commits \
+ <good 2>test_err &&
+ test_i18ngrep "unable to parse commit" test_err
+ )
+'
+
+test_expect_success 'corrupt commit-graph write (missing tree)' '
+ rm -rf repo &&
+ git init repo &&
+ (
+ cd repo &&
+ tree="$(git mktree </dev/null)" &&
+ cat >broken <<-EOF &&
+ parent 0000000000000000000000000000000000000000
+ author whatever <whatever@example.com> 1234 -0000
+ committer whatever <whatever@example.com> 1234 -0000
+
+ broken commit
+ EOF
+ broken="$(git hash-object -w -t commit --literally broken)" &&
+ git commit-tree -p "$broken" -m "good" "$tree" >good &&
+ test_must_fail git commit-graph write --stdin-commits \
+ <good 2>test_err &&
+ test_i18ngrep "unable to get tree for" test_err
+ )
+'
+
test_done
diff --git a/t/t5324-split-commit-graph.sh b/t/t5324-split-commit-graph.sh
index e2c3953..115aabd 100755
--- a/t/t5324-split-commit-graph.sh
+++ b/t/t5324-split-commit-graph.sh
@@ -320,7 +320,7 @@ test_expect_success 'add octopus merge' '
git merge commits/3 commits/4 &&
git branch merge/octopus &&
git commit-graph write --reachable --split &&
- git commit-graph verify 2>err &&
+ git commit-graph verify --progress 2>err &&
test_line_count = 3 err &&
test_i18ngrep ! warning err &&
test_line_count = 3 $graphdir/commit-graph-chain
diff --git a/t/t5500-fetch-pack.sh b/t/t5500-fetch-pack.sh
index 5115711..6b97923 100755
--- a/t/t5500-fetch-pack.sh
+++ b/t/t5500-fetch-pack.sh
@@ -792,6 +792,44 @@ test_expect_success 'clone shallow since selects no commits' '
)
'
+# A few subtle things about the request in this test:
+#
+# - the server must have commit-graphs present and enabled
+#
+# - the history is such that our want/have share a common ancestor ("base"
+# here)
+#
+# - we send only a single have, which is fewer than a normal client would
+# send. This ensures that we don't parse "base" up front with
+# parse_object(), but rather traverse to it as a parent while deciding if we
+# can stop the "have" negotiation, and call parse_commit(). The former
+# sees the actual object data and so always loads the tree oid, whereas the
+# latter will try to load it lazily.
+#
+# - we must use protocol v2, because it handles the "have" negotiation before
+# processing the shallow directives
+#
+test_expect_success 'shallow since with commit graph and already-seen commit' '
+ test_create_repo shallow-since-graph &&
+ (
+ cd shallow-since-graph &&
+ test_commit base &&
+ test_commit master &&
+ git checkout -b other HEAD^ &&
+ test_commit other &&
+ git commit-graph write --reachable &&
+ git config core.commitGraph true &&
+
+ GIT_PROTOCOL=version=2 git upload-pack . <<-EOF >/dev/null
+ 0012command=fetch
+ 00010013deepen-since 1
+ 0032want $(git rev-parse other)
+ 0032have $(git rev-parse master)
+ 0000
+ EOF
+ )
+'
+
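The raw request in the test above is a protocol v2 pkt-line stream: each pkt-line starts with a four-hex-digit length that counts the prefix itself plus the payload (including the trailing newline the here-doc supplies), "0001" is the delimiter packet separating the command from its arguments, and "0000" is the flush packet ending the request. A minimal sketch of how those prefixes come about, using a hypothetical pkt_line helper rather than anything from the test suite:

    # Frame one pkt-line: 4 hex digits of total length + payload + LF.
    pkt_line () {
        payload="$1"
        printf "%04x%s\n" $((4 + ${#payload} + 1)) "$payload"
    }

    pkt_line "command=fetch"                  # emits 0012command=fetch
    printf "0001"                             # delimiter packet
    pkt_line "deepen-since 1"                 # emits 0013deepen-since 1
    pkt_line "want $(git rev-parse other)"    # emits 0032want <40-hex oid>
    pkt_line "have $(git rev-parse master)"   # emits 0032have <40-hex oid>
    printf "0000"                             # flush packet, end of request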
test_expect_success 'shallow clone exclude tag two' '
test_create_repo shallow-exclude &&
(
diff --git a/t/t5514-fetch-multiple.sh b/t/t5514-fetch-multiple.sh
index 5426d4b..de8e2f1 100755
--- a/t/t5514-fetch-multiple.sh
+++ b/t/t5514-fetch-multiple.sh
@@ -183,4 +183,15 @@ test_expect_success 'git fetch --all --tags' '
test_cmp expect test8/output
'
+test_expect_success 'parallel' '
+ git remote add one ./bogus1 &&
+ git remote add two ./bogus2 &&
+
+ test_must_fail env GIT_TRACE="$PWD/trace" \
+ git fetch --jobs=2 --multiple one two 2>err &&
+ grep "preparing to run up to 2 tasks" trace &&
+ test_i18ngrep "could not fetch .one.*128" err &&
+ test_i18ngrep "could not fetch .two.*128" err
+'
+
test_done
diff --git a/t/t5541-http-push-smart.sh b/t/t5541-http-push-smart.sh
index b86ddb6..92bac43 100755
--- a/t/t5541-http-push-smart.sh
+++ b/t/t5541-http-push-smart.sh
@@ -262,7 +262,7 @@ test_expect_success TTY 'push shows progress when stderr is a tty' '
cd "$ROOT_PATH"/test_repo_clone &&
test_commit noisy &&
test_terminal git push >output 2>&1 &&
- test_i18ngrep "Writing objects" output
+ test_i18ngrep "^Writing objects" output
'
test_expect_success TTY 'push --quiet silences status and progress' '
@@ -277,7 +277,7 @@ test_expect_success TTY 'push --no-progress silences progress but not status' '
test_commit no-progress &&
test_terminal git push --no-progress >output 2>&1 &&
test_i18ngrep "^To http" output &&
- test_i18ngrep ! "Writing objects" output
+ test_i18ngrep ! "^Writing objects" output
'
test_expect_success 'push --progress shows progress to non-tty' '
@@ -285,7 +285,7 @@ test_expect_success 'push --progress shows progress to non-tty' '
test_commit progress &&
git push --progress >output 2>&1 &&
test_i18ngrep "^To http" output &&
- test_i18ngrep "Writing objects" output
+ test_i18ngrep "^Writing objects" output
'
test_expect_success 'http push gives sane defaults to reflog' '
diff --git a/t/t5616-partial-clone.sh b/t/t5616-partial-clone.sh
index fc634a5..79f7b65 100755
--- a/t/t5616-partial-clone.sh
+++ b/t/t5616-partial-clone.sh
@@ -260,6 +260,42 @@ test_expect_success 'fetch what is specified on CLI even if already promised' '
! grep "?$(cat blob)" missing_after
'
+test_expect_success 'setup src repo for sparse filter' '
+ git init sparse-src &&
+ git -C sparse-src config --local uploadpack.allowfilter 1 &&
+ git -C sparse-src config --local uploadpack.allowanysha1inwant 1 &&
+ test_commit -C sparse-src one &&
+ test_commit -C sparse-src two &&
+ echo /one.t >sparse-src/only-one &&
+ git -C sparse-src add . &&
+ git -C sparse-src commit -m "add sparse checkout files"
+'
+
+test_expect_success 'partial clone with sparse filter succeeds' '
+ rm -rf dst.git &&
+ git clone --no-local --bare \
+ --filter=sparse:oid=master:only-one \
+ sparse-src dst.git &&
+ (
+ cd dst.git &&
+ git rev-list --objects --missing=print HEAD >out &&
+ grep "^$(git rev-parse HEAD:one.t)" out &&
+ grep "^?$(git rev-parse HEAD:two.t)" out
+ )
+'
+
+test_expect_success 'partial clone with unresolvable sparse filter fails cleanly' '
+ rm -rf dst.git &&
+ test_must_fail git clone --no-local --bare \
+ --filter=sparse:oid=master:no-such-name \
+ sparse-src dst.git 2>err &&
+ test_i18ngrep "unable to access sparse blob in .master:no-such-name" err &&
+ test_must_fail git clone --no-local --bare \
+ --filter=sparse:oid=master \
+ sparse-src dst.git 2>err &&
+ test_i18ngrep "unable to parse sparse filter data in" err
+'
+
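These two tests exercise the sparse filter spec end to end: --filter=sparse:oid=<rev>:<path> must resolve to a blob containing sparse-checkout patterns (the committed only-one file here), and anything that does not resolve to such a blob fails with the quoted messages instead of producing a broken clone. A rough usage sketch under the same assumptions, with illustrative names not taken from the tests:

    echo /wanted.t >patterns &&              # sparse-checkout style pattern blob
    git add patterns &&
    git commit -m "add filter patterns" &&
    git clone --no-local --bare \
        --filter=sparse:oid=master:patterns \
        . ../partial.git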
. "$TEST_DIRECTORY"/lib-httpd.sh
start_httpd
diff --git a/t/t5801-remote-helpers.sh b/t/t5801-remote-helpers.sh
index 2d6c4a2..121e5c6 100755
--- a/t/t5801-remote-helpers.sh
+++ b/t/t5801-remote-helpers.sh
@@ -247,7 +247,6 @@ clean_mark () {
test_expect_success 'proper failure checks for fetching' '
(cd local &&
test_must_fail env GIT_REMOTE_TESTGIT_FAILURE=1 git fetch 2>error &&
- cat error &&
test_i18ngrep -q "error while running fast-import" error
)
'
diff --git a/t/t6036-recursive-corner-cases.sh b/t/t6036-recursive-corner-cases.sh
index d23b948..7fddcc8 100755
--- a/t/t6036-recursive-corner-cases.sh
+++ b/t/t6036-recursive-corner-cases.sh
@@ -1562,6 +1562,7 @@ test_expect_success 'check nested conflicts' '
cd nested_conflicts &&
git clean -f &&
+ MASTER=$(git rev-parse --short master) &&
git checkout L2^0 &&
# Merge must fail; there is a conflict
@@ -1582,7 +1583,7 @@ test_expect_success 'check nested conflicts' '
git cat-file -p R1:a >theirs &&
test_must_fail git merge-file --diff3 \
-L "Temporary merge branch 1" \
- -L "merged common ancestors" \
+ -L "$MASTER" \
-L "Temporary merge branch 2" \
ours \
base \
@@ -1594,7 +1595,7 @@ test_expect_success 'check nested conflicts' '
git cat-file -p R1:b >theirs &&
test_must_fail git merge-file --diff3 \
-L "Temporary merge branch 1" \
- -L "merged common ancestors" \
+ -L "$MASTER" \
-L "Temporary merge branch 2" \
ours \
base \
@@ -1732,6 +1733,7 @@ test_expect_success 'check virtual merge base with nested conflicts' '
(
cd virtual_merge_base_has_nested_conflicts &&
+ MASTER=$(git rev-parse --short master) &&
git checkout L3^0 &&
# Merge must fail; there is a conflict
@@ -1760,7 +1762,7 @@ test_expect_success 'check virtual merge base with nested conflicts' '
cp left merged-once &&
test_must_fail git merge-file --diff3 \
-L "Temporary merge branch 1" \
- -L "merged common ancestors" \
+ -L "$MASTER" \
-L "Temporary merge branch 2" \
merged-once \
base \
diff --git a/t/t6047-diff3-conflict-markers.sh b/t/t6047-diff3-conflict-markers.sh
new file mode 100755
index 0000000..860542a
--- /dev/null
+++ b/t/t6047-diff3-conflict-markers.sh
@@ -0,0 +1,202 @@
+#!/bin/sh
+
+test_description='recursive merge diff3 style conflict markers'
+
+. ./test-lib.sh
+
+# Setup:
+# L1
+# \
+# ?
+# /
+# R1
+#
+# Where:
+# L1 and R1 both have a file named 'content' but have no common history
+#
+
+test_expect_success 'setup no merge base' '
+ test_create_repo no_merge_base &&
+ (
+ cd no_merge_base &&
+
+ git checkout -b L &&
+ test_commit A content A &&
+
+ git checkout --orphan R &&
+ test_commit B content B
+ )
+'
+
+test_expect_success 'check no merge base' '
+ (
+ cd no_merge_base &&
+
+ git checkout L^0 &&
+
+ test_must_fail git -c merge.conflictstyle=diff3 merge --allow-unrelated-histories -s recursive R^0 &&
+
+ grep "|||||| empty tree" content
+ )
+'
+
+# Setup:
+# L1
+# / \
+# master ?
+# \ /
+# R1
+#
+# Where:
+# L1 and R1 have modified the same file ('content') in conflicting ways
+#
+
+test_expect_success 'setup unique merge base' '
+ test_create_repo unique_merge_base &&
+ (
+ cd unique_merge_base &&
+
+ test_commit base content "1
+2
+3
+4
+5
+" &&
+
+ git branch L &&
+ git branch R &&
+
+ git checkout L &&
+ test_commit L content "1
+2
+3
+4
+5
+7" &&
+
+ git checkout R &&
+ git rm content &&
+ test_commit R renamed "1
+2
+3
+4
+5
+six"
+ )
+'
+
+test_expect_success 'check unique merge base' '
+ (
+ cd unique_merge_base &&
+
+ git checkout L^0 &&
+ MASTER=$(git rev-parse --short master) &&
+
+ test_must_fail git -c merge.conflictstyle=diff3 merge -s recursive R^0 &&
+
+ grep "|||||| $MASTER:content" renamed
+ )
+'
+
+# Setup:
+# L1---L2--L3
+# / \ / \
+# master X1 ?
+# \ / \ /
+# R1---R2--R3
+#
+# Where:
+# commits L1 and R1 have modified the same file in non-conflicting ways
+# X1 is an auto-generated merge-base used when merging L1 and R1
+# commits L2 and R2 are merges of R1 and L1 into L1 and R1, respectively
+# commits L3 and R3 both modify 'content' in conflicting ways
+#
+
+test_expect_success 'setup multiple merge bases' '
+ test_create_repo multiple_merge_bases &&
+ (
+ cd multiple_merge_bases &&
+
+ test_commit initial content "1
+2
+3
+4
+5" &&
+
+ git branch L &&
+ git branch R &&
+
+ # Create L1
+ git checkout L &&
+ test_commit L1 content "0
+1
+2
+3
+4
+5" &&
+
+ # Create R1
+ git checkout R &&
+ test_commit R1 content "1
+2
+3
+4
+5
+6" &&
+
+ # Create L2
+ git checkout L &&
+ git merge R1 &&
+
+ # Create R2
+ git checkout R &&
+ git merge L1 &&
+
+ # Create L3
+ git checkout L &&
+ test_commit L3 content "0
+1
+2
+3
+4
+5
+A" &&
+
+ # Create R3
+ git checkout R &&
+ git rm content &&
+ test_commit R3 renamed "0
+2
+3
+4
+5
+six"
+ )
+'
+
+test_expect_success 'check multiple merge bases' '
+ (
+ cd multiple_merge_bases &&
+
+ git checkout L^0 &&
+
+ test_must_fail git -c merge.conflictstyle=diff3 merge -s recursive R^0 &&
+
+ grep "|||||| merged common ancestors:content" renamed
+ )
+'
+
+test_expect_success 'rebase describes fake ancestor base' '
+ test_create_repo rebase &&
+ (
+ cd rebase &&
+ test_commit base file &&
+ test_commit master file &&
+ git checkout -b side HEAD^ &&
+ test_commit side file &&
+ test_must_fail git -c merge.conflictstyle=diff3 rebase master &&
+ grep "||||||| constructed merge base" file
+ )
+'
+
+test_done
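The greps in these tests look only at the base-label line, so for reference here is a sketch (illustrative, not copied from any of the repositories above) of the diff3-style conflict region such a merge leaves behind. The text after "|||||||" is what the code under test varies: the abbreviated merge-base commit plus path for a unique base, "merged common ancestors" for a recursively constructed virtual base, "empty tree" when there is no base at all, and "constructed merge base" during a rebase:

    <<<<<<< HEAD
    ...our side of the file...
    ||||||| <base label, e.g. a1b2c3d:content or merged common ancestors:content>
    ...the merge-base version...
    =======
    ...their side of the file...
    >>>>>>> R^0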
diff --git a/t/t6120-describe.sh b/t/t6120-describe.sh
index 2b883d8..45047d0 100755
--- a/t/t6120-describe.sh
+++ b/t/t6120-describe.sh
@@ -424,4 +424,19 @@ test_expect_success 'describe complains about missing object' '
test_must_fail git describe $ZERO_OID
'
+test_expect_success 'name-rev a rev shortly after epoch' '
+ test_when_finished "git checkout master" &&
+
+ git checkout --orphan no-timestamp-underflow &&
+ # Any date closer to epoch than the CUTOFF_DATE_SLOP constant
+ # in builtin/name-rev.c.
+ GIT_COMMITTER_DATE="@1234 +0000" \
+ git commit -m "committer date shortly after epoch" &&
+ old_commit_oid=$(git rev-parse HEAD) &&
+
+ echo "$old_commit_oid no-timestamp-underflow" >expect &&
+ git name-rev $old_commit_oid >actual &&
+ test_cmp expect actual
+'
+
test_done
diff --git a/t/t7009-filter-branch-null-sha1.sh b/t/t7008-filter-branch-null-sha1.sh
index 9ba9f24..9ba9f24 100755
--- a/t/t7009-filter-branch-null-sha1.sh
+++ b/t/t7008-filter-branch-null-sha1.sh
diff --git a/t/t7300-clean.sh b/t/t7300-clean.sh
index d01fd12..6e6d24c 100755
--- a/t/t7300-clean.sh
+++ b/t/t7300-clean.sh
@@ -117,6 +117,7 @@ test_expect_success C_LOCALE_OUTPUT 'git clean with relative prefix' '
would_clean=$(
cd docs &&
git clean -n ../src |
+ grep part3 |
sed -n -e "s|^Would remove ||p"
) &&
verbose test "$would_clean" = ../src/part3.c
@@ -129,6 +130,7 @@ test_expect_success C_LOCALE_OUTPUT 'git clean with absolute path' '
would_clean=$(
cd docs &&
git clean -n "$(pwd)/../src" |
+ grep part3 |
sed -n -e "s|^Would remove ||p"
) &&
verbose test "$would_clean" = ../src/part3.c
@@ -547,7 +549,7 @@ test_expect_failure 'nested (non-empty) bare repositories should be cleaned even
test_path_is_missing strange_bare
'
-test_expect_success 'giving path in nested git work tree will remove it' '
+test_expect_success 'giving path in nested git work tree will NOT remove it' '
rm -fr repo &&
mkdir repo &&
(
@@ -559,7 +561,7 @@ test_expect_success 'giving path in nested git work tree will remove it' '
git clean -f -d repo/bar/baz &&
test_path_is_file repo/.git/HEAD &&
test_path_is_dir repo/bar/ &&
- test_path_is_missing repo/bar/baz
+ test_path_is_file repo/bar/baz/hello.world
'
test_expect_success 'giving path to nested .git will not remove it' '
@@ -577,7 +579,7 @@ test_expect_success 'giving path to nested .git will not remove it' '
test_path_is_dir untracked/
'
-test_expect_success 'giving path to nested .git/ will remove contents' '
+test_expect_success 'giving path to nested .git/ will NOT remove contents' '
rm -fr repo untracked &&
mkdir repo untracked &&
(
@@ -587,7 +589,7 @@ test_expect_success 'giving path to nested .git/ will remove contents' '
) &&
git clean -f -d repo/.git/ &&
test_path_is_dir repo/.git &&
- test_dir_is_empty repo/.git &&
+ test_path_is_file repo/.git/HEAD &&
test_path_is_dir untracked/
'
@@ -669,7 +671,7 @@ test_expect_success 'git clean -d skips untracked dirs containing ignored files'
test_path_is_missing foo/b/bb
'
-test_expect_failure 'git clean -d skips nested repo containing ignored files' '
+test_expect_success 'git clean -d skips nested repo containing ignored files' '
test_when_finished "rm -rf nested-repo-with-ignored-file" &&
git init nested-repo-with-ignored-file &&
@@ -691,6 +693,38 @@ test_expect_failure 'git clean -d skips nested repo containing ignored files' '
test_path_is_file nested-repo-with-ignored-file/file
'
+test_expect_success 'git clean handles being told what to clean' '
+ mkdir -p d1 d2 &&
+ touch d1/ut d2/ut &&
+ git clean -f */ut &&
+ test_path_is_missing d1/ut &&
+ test_path_is_missing d2/ut
+'
+
+test_expect_success 'git clean handles being told what to clean, with -d' '
+ mkdir -p d1 d2 &&
+ touch d1/ut d2/ut &&
+ git clean -ffd */ut &&
+ test_path_is_missing d1/ut &&
+ test_path_is_missing d2/ut
+'
+
+test_expect_success 'git clean works if a glob is passed without -d' '
+ mkdir -p d1 d2 &&
+ touch d1/ut d2/ut &&
+ git clean -f "*ut" &&
+ test_path_is_missing d1/ut &&
+ test_path_is_missing d2/ut
+'
+
+test_expect_success 'git clean works if a glob is passed with -d' '
+ mkdir -p d1 d2 &&
+ touch d1/ut d2/ut &&
+ git clean -ffd "*ut" &&
+ test_path_is_missing d1/ut &&
+ test_path_is_missing d2/ut
+'
+
test_expect_success MINGW 'handle clean & core.longpaths = false nicely' '
test_config core.longpaths false &&
a50=aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa &&
diff --git a/t/t7400-submodule-basic.sh b/t/t7400-submodule-basic.sh
index a208cb2..691b5fc 100755
--- a/t/t7400-submodule-basic.sh
+++ b/t/t7400-submodule-basic.sh
@@ -527,7 +527,6 @@ test_expect_success 'update --init' '
test_must_fail git config submodule.example.url &&
git submodule update init 2> update.out &&
- cat update.out &&
test_i18ngrep "not initialized" update.out &&
test_must_fail git rev-parse --resolve-git-dir init/.git &&
@@ -545,7 +544,6 @@ test_expect_success 'update --init from subdirectory' '
(
cd sub &&
git submodule update ../init 2>update.out &&
- cat update.out &&
test_i18ngrep "not initialized" update.out &&
test_must_fail git rev-parse --resolve-git-dir ../init/.git &&
diff --git a/t/t7505-prepare-commit-msg-hook.sh b/t/t7505-prepare-commit-msg-hook.sh
index ba8bd1b..94f85cd 100755
--- a/t/t7505-prepare-commit-msg-hook.sh
+++ b/t/t7505-prepare-commit-msg-hook.sh
@@ -241,13 +241,7 @@ test_rebase () {
git add b &&
git rebase --continue
) &&
- if test "$mode" = -p # reword amended after pick
- then
- n=18
- else
- n=17
- fi &&
- git log --pretty=%s -g -n$n HEAD@{1} >actual &&
+ git log --pretty=%s -g -n18 HEAD@{1} >actual &&
test_cmp "$TEST_DIRECTORY/t7505/expected-rebase${mode:--i}" actual
'
}
diff --git a/t/t7505/expected-rebase-i b/t/t7505/expected-rebase-i
index c514bdb..93bada5 100644
--- a/t/t7505/expected-rebase-i
+++ b/t/t7505/expected-rebase-i
@@ -7,7 +7,8 @@ message (no editor) [edit rebase-10]
message [fixup rebase-9]
message (no editor) [fixup rebase-8]
message (no editor) [squash rebase-7]
-message [reword rebase-6]
+HEAD [reword rebase-6]
+message (no editor) [reword rebase-6]
message [squash rebase-5]
message (no editor) [fixup rebase-4]
message (no editor) [pick rebase-3]
diff --git a/t/t7812-grep-icase-non-ascii.sh b/t/t7812-grep-icase-non-ascii.sh
index 0c685d3..531eb59 100755
--- a/t/t7812-grep-icase-non-ascii.sh
+++ b/t/t7812-grep-icase-non-ascii.sh
@@ -53,4 +53,32 @@ test_expect_success REGEX_LOCALE 'pickaxe -i on non-ascii' '
test_cmp expected actual
'
+test_expect_success GETTEXT_LOCALE,LIBPCRE2 'PCRE v2: setup invalid UTF-8 data' '
+ printf "\\200\\n" >invalid-0x80 &&
+ echo "ævar" >expected &&
+ cat expected >>invalid-0x80 &&
+ git add invalid-0x80
+'
+
+test_expect_success GETTEXT_LOCALE,LIBPCRE2 'PCRE v2: grep ASCII from invalid UTF-8 data' '
+ git grep -h "var" invalid-0x80 >actual &&
+ test_cmp expected actual &&
+ git grep -h "(*NO_JIT)var" invalid-0x80 >actual &&
+ test_cmp expected actual
+'
+
+test_expect_success GETTEXT_LOCALE,LIBPCRE2 'PCRE v2: grep non-ASCII from invalid UTF-8 data' '
+ git grep -h "æ" invalid-0x80 >actual &&
+ test_cmp expected actual &&
+ git grep -h "(*NO_JIT)æ" invalid-0x80 >actual &&
+ test_cmp expected actual
+'
+
+test_expect_success GETTEXT_LOCALE,LIBPCRE2 'PCRE v2: grep non-ASCII from invalid UTF-8 data with -i' '
+ test_might_fail git grep -hi "Æ" invalid-0x80 >actual &&
+ test_cmp expected actual &&
+ test_must_fail git grep -hi "(*NO_JIT)Æ" invalid-0x80 &&
+ test_cmp expected actual
+'
+
test_done
diff --git a/t/t7008-grep-binary.sh b/t/t7815-grep-binary.sh
index 2d87c49..90ebb64 100755
--- a/t/t7008-grep-binary.sh
+++ b/t/t7815-grep-binary.sh
@@ -4,41 +4,6 @@ test_description='git grep in binary files'
. ./test-lib.sh
-nul_match () {
- matches=$1
- flags=$2
- pattern=$3
- pattern_human=$(echo "$pattern" | sed 's/Q/<NUL>/g')
-
- if test "$matches" = 1
- then
- test_expect_success "git grep -f f $flags '$pattern_human' a" "
- printf '$pattern' | q_to_nul >f &&
- git grep -f f $flags a
- "
- elif test "$matches" = 0
- then
- test_expect_success "git grep -f f $flags '$pattern_human' a" "
- printf '$pattern' | q_to_nul >f &&
- test_must_fail git grep -f f $flags a
- "
- elif test "$matches" = T1
- then
- test_expect_failure "git grep -f f $flags '$pattern_human' a" "
- printf '$pattern' | q_to_nul >f &&
- git grep -f f $flags a
- "
- elif test "$matches" = T0
- then
- test_expect_failure "git grep -f f $flags '$pattern_human' a" "
- printf '$pattern' | q_to_nul >f &&
- test_must_fail git grep -f f $flags a
- "
- else
- test_expect_success "PANIC: Test framework error. Unknown matches value $matches" 'false'
- fi
-}
-
test_expect_success 'setup' "
echo 'binaryQfileQm[*]cQ*æQð' | q_to_nul >a &&
git add a &&
@@ -102,72 +67,6 @@ test_expect_failure 'git grep .fi a' '
git grep .fi a
'
-nul_match 1 '-F' 'yQf'
-nul_match 0 '-F' 'yQx'
-nul_match 1 '-Fi' 'YQf'
-nul_match 0 '-Fi' 'YQx'
-nul_match 1 '' 'yQf'
-nul_match 0 '' 'yQx'
-nul_match 1 '' 'æQð'
-nul_match 1 '-F' 'eQm[*]c'
-nul_match 1 '-Fi' 'EQM[*]C'
-
-# Regex patterns that would match but shouldn't with -F
-nul_match 0 '-F' 'yQ[f]'
-nul_match 0 '-F' '[y]Qf'
-nul_match 0 '-Fi' 'YQ[F]'
-nul_match 0 '-Fi' '[Y]QF'
-nul_match 0 '-F' 'æQ[ð]'
-nul_match 0 '-F' '[æ]Qð'
-nul_match 0 '-Fi' 'ÆQ[Ð]'
-nul_match 0 '-Fi' '[Æ]QÐ'
-
-# kwset is disabled on -i & non-ASCII. No way to match non-ASCII \0
-# patterns case-insensitively.
-nul_match T1 '-i' 'ÆQÐ'
-
-# \0 implicitly disables regexes. This is an undocumented internal
-# limitation.
-nul_match T1 '' 'yQ[f]'
-nul_match T1 '' '[y]Qf'
-nul_match T1 '-i' 'YQ[F]'
-nul_match T1 '-i' '[Y]Qf'
-nul_match T1 '' 'æQ[ð]'
-nul_match T1 '' '[æ]Qð'
-nul_match T1 '-i' 'ÆQ[Ð]'
-
-# ... because of \0 implicitly disabling regexes regexes that
-# should/shouldn't match don't do the right thing.
-nul_match T1 '' 'eQm.*cQ'
-nul_match T1 '-i' 'EQM.*cQ'
-nul_match T0 '' 'eQm[*]c'
-nul_match T0 '-i' 'EQM[*]C'
-
-# Due to the REG_STARTEND extension when kwset() is disabled on -i &
-# non-ASCII the string will be matched in its entirety, but the
-# pattern will be cut off at the first \0.
-nul_match 0 '-i' 'NOMATCHQð'
-nul_match T0 '-i' '[Æ]QNOMATCH'
-nul_match T0 '-i' '[æ]QNOMATCH'
-# Matches, but for the wrong reasons, just stops at [æ]
-nul_match 1 '-i' '[Æ]Qð'
-nul_match 1 '-i' '[æ]Qð'
-
-# Ensure that the matcher doesn't regress to something that stops at
-# \0
-nul_match 0 '-F' 'yQ[f]'
-nul_match 0 '-Fi' 'YQ[F]'
-nul_match 0 '' 'yQNOMATCH'
-nul_match 0 '' 'QNOMATCH'
-nul_match 0 '-i' 'YQNOMATCH'
-nul_match 0 '-i' 'QNOMATCH'
-nul_match 0 '-F' 'æQ[ð]'
-nul_match 0 '-Fi' 'ÆQ[Ð]'
-nul_match 0 '' 'yQNÓMATCH'
-nul_match 0 '' 'QNÓMATCH'
-nul_match 0 '-i' 'YQNÓMATCH'
-nul_match 0 '-i' 'QNÓMATCH'
-
test_expect_success 'grep respects binary diff attribute' '
echo text >t &&
git add t &&
diff --git a/t/t7816-grep-binary-pattern.sh b/t/t7816-grep-binary-pattern.sh
new file mode 100755
index 0000000..60bab29
--- /dev/null
+++ b/t/t7816-grep-binary-pattern.sh
@@ -0,0 +1,127 @@
+#!/bin/sh
+
+test_description='git grep with binary pattern files'
+
+. ./lib-gettext.sh
+
+nul_match_internal () {
+ matches=$1
+ prereqs=$2
+ lc_all=$3
+ extra_flags=$4
+ flags=$5
+ pattern=$6
+ pattern_human=$(echo "$pattern" | sed 's/Q/<NUL>/g')
+
+ if test "$matches" = 1
+ then
+ test_expect_success $prereqs "LC_ALL='$lc_all' git grep $extra_flags -f f $flags '$pattern_human' a" "
+ printf '$pattern' | q_to_nul >f &&
+ LC_ALL='$lc_all' git grep $extra_flags -f f $flags a
+ "
+ elif test "$matches" = 0
+ then
+ test_expect_success $prereqs "LC_ALL='$lc_all' git grep $extra_flags -f f $flags '$pattern_human' a" "
+ >stderr &&
+ printf '$pattern' | q_to_nul >f &&
+ test_must_fail env LC_ALL=\"$lc_all\" git grep $extra_flags -f f $flags a 2>stderr &&
+ test_i18ngrep ! 'This is only supported with -P under PCRE v2' stderr
+ "
+ elif test "$matches" = P
+ then
+ test_expect_success $prereqs "error, PCRE v2 only: LC_ALL='$lc_all' git grep -f f $flags '$pattern_human' a" "
+ >stderr &&
+ printf '$pattern' | q_to_nul >f &&
+ test_must_fail env LC_ALL=\"$lc_all\" git grep -f f $flags a 2>stderr &&
+ test_i18ngrep 'This is only supported with -P under PCRE v2' stderr
+ "
+ else
+ test_expect_success "PANIC: Test framework error. Unknown matches value $matches" 'false'
+ fi
+}
+
+nul_match () {
+ matches=$1
+ matches_pcre2=$2
+ matches_pcre2_locale=$3
+ flags=$4
+ pattern=$5
+ pattern_human=$(echo "$pattern" | sed 's/Q/<NUL>/g')
+
+ nul_match_internal "$matches" "" "C" "" "$flags" "$pattern"
+ nul_match_internal "$matches_pcre2" "LIBPCRE2" "C" "-P" "$flags" "$pattern"
+ nul_match_internal "$matches_pcre2_locale" "LIBPCRE2,GETTEXT_LOCALE" "$is_IS_locale" "-P" "$flags" "$pattern"
+}
+
+test_expect_success 'setup' "
+ echo 'binaryQfileQm[*]cQ*æQð' | q_to_nul >a &&
+ git add a &&
+ git commit -m.
+"
+
+# Simple fixed-string matching that can use kwset (no -i && non-ASCII)
+nul_match P P P '-F' 'yQf'
+nul_match P P P '-F' 'yQx'
+nul_match P P P '-Fi' 'YQf'
+nul_match P P P '-Fi' 'YQx'
+nul_match P P 1 '' 'yQf'
+nul_match P P 0 '' 'yQx'
+nul_match P P 1 '' 'æQð'
+nul_match P P P '-F' 'eQm[*]c'
+nul_match P P P '-Fi' 'EQM[*]C'
+
+# Regex patterns that would match but shouldn't with -F
+nul_match P P P '-F' 'yQ[f]'
+nul_match P P P '-F' '[y]Qf'
+nul_match P P P '-Fi' 'YQ[F]'
+nul_match P P P '-Fi' '[Y]QF'
+nul_match P P P '-F' 'æQ[ð]'
+nul_match P P P '-F' '[æ]Qð'
+
+# The -F kwset codepath can't handle -i && non-ASCII...
+nul_match P 1 1 '-i' '[æ]Qð'
+
+# ...PCRE v2 only matches non-ASCII with -i casefolding under UTF-8
+# semantics
+nul_match P P P '-Fi' 'ÆQ[Ð]'
+nul_match P 0 1 '-i' 'ÆQ[Ð]'
+nul_match P 0 1 '-i' '[Æ]QÐ'
+nul_match P 0 1 '-i' '[Æ]Qð'
+nul_match P 0 1 '-i' 'ÆQÐ'
+
+# \0 in regexes can only work with -P & PCRE v2
+nul_match P P 1 '' 'yQ[f]'
+nul_match P P 1 '' '[y]Qf'
+nul_match P P 1 '-i' 'YQ[F]'
+nul_match P P 1 '-i' '[Y]Qf'
+nul_match P P 1 '' 'æQ[ð]'
+nul_match P P 1 '' '[æ]Qð'
+nul_match P P 1 '-i' 'ÆQ[Ð]'
+nul_match P P 1 '' 'eQm.*cQ'
+nul_match P P 1 '-i' 'EQM.*cQ'
+nul_match P P 0 '' 'eQm[*]c'
+nul_match P P 0 '-i' 'EQM[*]C'
+
+# Assert that we're using REG_STARTEND and the pattern doesn't match
+# just because it's cut off at the first \0.
+nul_match P P 0 '-i' 'NOMATCHQð'
+nul_match P P 0 '-i' '[Æ]QNOMATCH'
+nul_match P P 0 '-i' '[æ]QNOMATCH'
+
+# Ensure that the matcher doesn't regress to something that stops at
+# \0
+nul_match P P P '-F' 'yQ[f]'
+nul_match P P P '-Fi' 'YQ[F]'
+nul_match P P 0 '' 'yQNOMATCH'
+nul_match P P 0 '' 'QNOMATCH'
+nul_match P P 0 '-i' 'YQNOMATCH'
+nul_match P P 0 '-i' 'QNOMATCH'
+nul_match P P P '-F' 'æQ[ð]'
+nul_match P P P '-Fi' 'ÆQ[Ð]'
+nul_match P P 1 '-i' 'ÆQ[Ð]'
+nul_match P P 0 '' 'yQNÓMATCH'
+nul_match P P 0 '' 'QNÓMATCH'
+nul_match P P 0 '-i' 'YQNÓMATCH'
+nul_match P P 0 '-i' 'QNÓMATCH'
+
+test_done
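Everything in this file goes through nul_match because a NUL byte cannot appear in a command-line argument; the pattern is written to a file and fed with -f, and only -P (PCRE v2) is expected to accept NUL-bearing patterns at all. Done by hand, the probe looks roughly like this (a sketch assuming PCRE v2 support; q_to_nul is the test-lib helper already used in the setup above):

    printf 'binaryQfile' | q_to_nul >f &&   # each Q becomes a literal NUL byte
    git grep -P -f f a                      # -P is required for NUL in the pattern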
diff --git a/t/t9300-fast-import.sh b/t/t9300-fast-import.sh
index 141b7fa..e707fb8 100755
--- a/t/t9300-fast-import.sh
+++ b/t/t9300-fast-import.sh
@@ -85,6 +85,36 @@ test_expect_success 'A: create pack from stdin' '
An annotated tag that annotates a blob.
EOF
+ tag to-be-deleted
+ from :3
+ data <<EOF
+ Another annotated tag that annotates a blob.
+ EOF
+
+ reset refs/tags/to-be-deleted
+ from 0000000000000000000000000000000000000000
+
+ tag nested
+ mark :6
+ from :4
+ data <<EOF
+ Tag of our lovely commit
+ EOF
+
+ reset refs/tags/nested
+ from 0000000000000000000000000000000000000000
+
+ tag nested
+ mark :7
+ from :6
+ data <<EOF
+ Tag of tag of our lovely commit
+ EOF
+
+ alias
+ mark :8
+ to :5
+
INPUT_END
git fast-import --export-marks=marks.out <input &&
git whatchanged master
@@ -157,12 +187,19 @@ test_expect_success 'A: verify tag/series-A-blob' '
test_cmp expect actual
'
+test_expect_success 'A: verify tag deletion is successful' '
+ test_must_fail git rev-parse --verify refs/tags/to-be-deleted
+'
+
test_expect_success 'A: verify marks output' '
cat >expect <<-EOF &&
:2 $(git rev-parse --verify master:file2)
:3 $(git rev-parse --verify master:file3)
:4 $(git rev-parse --verify master:file4)
:5 $(git rev-parse --verify master^0)
+ :6 $(git cat-file tag nested | grep object | cut -d" " -f 2)
+ :7 $(git rev-parse --verify nested)
+ :8 $(git rev-parse --verify master^0)
EOF
test_cmp expect marks.out
'
@@ -2781,7 +2818,6 @@ test_expect_success 'S: filemodify with garbage after mark must fail' '
COMMIT
M 100644 :403x hello.c
EOF
- cat err &&
test_i18ngrep "space after mark" err
'
@@ -2798,7 +2834,6 @@ test_expect_success 'S: filemodify with garbage after inline must fail' '
inline
BLOB
EOF
- cat err &&
test_i18ngrep "nvalid dataref" err
'
@@ -2812,7 +2847,6 @@ test_expect_success 'S: filemodify with garbage after sha1 must fail' '
COMMIT
M 100644 ${sha1}x hello.c
EOF
- cat err &&
test_i18ngrep "space after SHA1" err
'
@@ -2828,7 +2862,6 @@ test_expect_success 'S: notemodify with garbage after mark dataref must fail' '
COMMIT
N :202x :302
EOF
- cat err &&
test_i18ngrep "space after mark" err
'
@@ -2844,7 +2877,6 @@ test_expect_success 'S: notemodify with garbage after inline dataref must fail'
note blob
BLOB
EOF
- cat err &&
test_i18ngrep "nvalid dataref" err
'
@@ -2858,7 +2890,6 @@ test_expect_success 'S: notemodify with garbage after sha1 dataref must fail' '
COMMIT
N ${sha1}x :302
EOF
- cat err &&
test_i18ngrep "space after SHA1" err
'
@@ -2874,7 +2905,6 @@ test_expect_success 'S: notemodify with garbage after mark commit-ish must fail'
COMMIT
N :202 :302x
EOF
- cat err &&
test_i18ngrep "after mark" err
'
@@ -2908,7 +2938,6 @@ test_expect_success 'S: from with garbage after mark must fail' '
EOF
# now evaluate the error
- cat err &&
test_i18ngrep "after mark" err
'
@@ -2928,7 +2957,6 @@ test_expect_success 'S: merge with garbage after mark must fail' '
merge :303x
M 100644 :403 hello.c
EOF
- cat err &&
test_i18ngrep "after mark" err
'
@@ -2944,7 +2972,6 @@ test_expect_success 'S: tag with garbage after mark must fail' '
tag S
TAG
EOF
- cat err &&
test_i18ngrep "after mark" err
'
@@ -2955,7 +2982,6 @@ test_expect_success 'S: cat-blob with garbage after mark must fail' '
test_must_fail git fast-import --import-marks=marks <<-EOF 2>err &&
cat-blob :403x
EOF
- cat err &&
test_i18ngrep "after mark" err
'
@@ -2966,7 +2992,6 @@ test_expect_success 'S: ls with garbage after mark must fail' '
test_must_fail git fast-import --import-marks=marks <<-EOF 2>err &&
ls :302x hello.c
EOF
- cat err &&
test_i18ngrep "space after mark" err
'
@@ -2975,7 +3000,6 @@ test_expect_success 'S: ls with garbage after sha1 must fail' '
test_must_fail git fast-import --import-marks=marks <<-EOF 2>err &&
ls ${sha1}x hello.c
EOF
- cat err &&
test_i18ngrep "space after tree-ish" err
'
diff --git a/t/t9350-fast-export.sh b/t/t9350-fast-export.sh
index b4004e0..2e4e214 100755
--- a/t/t9350-fast-export.sh
+++ b/t/t9350-fast-export.sh
@@ -53,6 +53,33 @@ test_expect_success 'fast-export | fast-import' '
'
+test_expect_success 'fast-export ^muss^{commit} muss' '
+ git fast-export --tag-of-filtered-object=rewrite ^muss^{commit} muss >actual &&
+ cat >expected <<-EOF &&
+ tag muss
+ from $(git rev-parse --verify muss^{commit})
+ $(git cat-file tag muss | grep tagger)
+ data 9
+ valentin
+
+ EOF
+ test_cmp expected actual
+'
+
+test_expect_success 'fast-export --mark-tags ^muss^{commit} muss' '
+ git fast-export --mark-tags --tag-of-filtered-object=rewrite ^muss^{commit} muss >actual &&
+ cat >expected <<-EOF &&
+ tag muss
+ mark :1
+ from $(git rev-parse --verify muss^{commit})
+ $(git cat-file tag muss | grep tagger)
+ data 9
+ valentin
+
+ EOF
+ test_cmp expected actual
+'
+
test_expect_success 'fast-export master~2..master' '
git fast-export master~2..master >actual &&
@@ -513,10 +540,41 @@ test_expect_success 'tree_tag' '
'
# NEEDSWORK: not just check return status, but validate the output
+# Note that these tests DO NOTHING other than print a warning that
+# they are omitting the one tag we asked them to export (because the
+# tags resolve to a tree). They exist just to make sure we do not
+# abort but instead just warn.
test_expect_success 'tree_tag-obj' 'git fast-export tree_tag-obj'
test_expect_success 'tag-obj_tag' 'git fast-export tag-obj_tag'
test_expect_success 'tag-obj_tag-obj' 'git fast-export tag-obj_tag-obj'
+test_expect_success 'handling tags of blobs' '
+ git tag -a -m "Tag of a blob" blobtag $(git rev-parse master:file) &&
+ git fast-export blobtag >actual &&
+ cat >expect <<-EOF &&
+ blob
+ mark :1
+ data 9
+ die Luft
+
+ tag blobtag
+ from :1
+ tagger $GIT_COMMITTER_NAME <$GIT_COMMITTER_EMAIL> $GIT_COMMITTER_DATE
+ data 14
+ Tag of a blob
+
+ EOF
+ test_cmp expect actual
+'
+
+test_expect_success 'handling nested tags' '
+ git tag -a -m "This is a nested tag" nested muss &&
+ git fast-export --mark-tags nested >output &&
+ grep "^from $ZERO_OID$" output &&
+ grep "^tag nested$" output >tag_lines &&
+ test_line_count = 2 tag_lines
+'
+
test_expect_success 'directory becomes symlink' '
git init dirtosymlink &&
git init result &&
@@ -567,17 +625,15 @@ test_expect_success 'fast-export quotes pathnames' '
'
test_expect_success 'test bidirectionality' '
- >marks-cur &&
- >marks-new &&
git init marks-test &&
- git fast-export --export-marks=marks-cur --import-marks=marks-cur --branches | \
- git --git-dir=marks-test/.git fast-import --export-marks=marks-new --import-marks=marks-new &&
+ git fast-export --export-marks=marks-cur --import-marks-if-exists=marks-cur --branches | \
+ git --git-dir=marks-test/.git fast-import --export-marks=marks-new --import-marks-if-exists=marks-new &&
(cd marks-test &&
git reset --hard &&
echo Wohlauf > file &&
git commit -a -m "back in time") &&
- git --git-dir=marks-test/.git fast-export --export-marks=marks-new --import-marks=marks-new --branches | \
- git fast-import --export-marks=marks-cur --import-marks=marks-cur
+ git --git-dir=marks-test/.git fast-export --export-marks=marks-new --import-marks-if-exists=marks-new --branches | \
+ git fast-import --export-marks=marks-cur --import-marks-if-exists=marks-cur
'
cat > expected << EOF
diff --git a/t/test-lib-functions.sh b/t/test-lib-functions.sh
index 87bf3a2..b299ecc 100644
--- a/t/test-lib-functions.sh
+++ b/t/test-lib-functions.sh
@@ -228,9 +228,11 @@ test_commit () {
# can be a tag pointing to the commit-to-merge.
test_merge () {
+ label="$1" &&
+ shift &&
test_tick &&
- git merge -m "$1" "$2" &&
- git tag "$1"
+ git merge -m "$label" "$@" &&
+ git tag "$label"
}
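With the label split out and "$@" forwarded, test_merge is no longer limited to a single other branch, so callers can set up octopus merges through the helper as well. A hypothetical invocation (branch names are illustrative):

    test_merge octopus-tag side1 side2    # runs: git merge -m octopus-tag side1 side2
                                          # and then: git tag octopus-tag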
# Efficiently create <nr> commits, each with a unique number (from 1 to <nr>
diff --git a/t/test-lib.sh b/t/test-lib.sh
index ee602c4..e06fa02 100644
--- a/t/test-lib.sh
+++ b/t/test-lib.sh
@@ -572,6 +572,7 @@ export TERM
error () {
say_color error "error: $*"
+ finalize_junit_xml
GIT_EXIT_OK=t
exit 1
}
@@ -700,7 +701,7 @@ test_failure_ () {
say_color error "not ok $test_count - $1"
shift
printf '%s\n' "$*" | sed -e 's/^/# /'
- test "$immediate" = "" || { GIT_EXIT_OK=t; exit 1; }
+ test "$immediate" = "" || { finalize_junit_xml; GIT_EXIT_OK=t; exit 1; }
}
test_known_broken_ok_ () {
@@ -1068,6 +1069,25 @@ write_junit_xml_testcase () {
junit_have_testcase=t
}
+finalize_junit_xml () {
+ if test -n "$write_junit_xml" && test -n "$junit_xml_path"
+ then
+ test -n "$junit_have_testcase" || {
+ junit_start=$(test-tool date getnanos)
+ write_junit_xml_testcase "all tests skipped"
+ }
+
+ # adjust the overall time
+ junit_time=$(test-tool date getnanos $junit_suite_start)
+ sed "s/<testsuite [^>]*/& time=\"$junit_time\"/" \
+ <"$junit_xml_path" >"$junit_xml_path.new"
+ mv "$junit_xml_path.new" "$junit_xml_path"
+
+ write_junit_xml " </testsuite>" "</testsuites>"
+ write_junit_xml=
+ fi
+}
+
test_atexit_cleanup=:
test_atexit_handler () {
# In a succeeding test script 'test_atexit_handler' is invoked
@@ -1090,21 +1110,7 @@ test_done () {
# removed, so the commands can access pidfiles and socket files.
test_atexit_handler
- if test -n "$write_junit_xml" && test -n "$junit_xml_path"
- then
- test -n "$junit_have_testcase" || {
- junit_start=$(test-tool date getnanos)
- write_junit_xml_testcase "all tests skipped"
- }
-
- # adjust the overall time
- junit_time=$(test-tool date getnanos $junit_suite_start)
- sed "s/<testsuite [^>]*/& time=\"$junit_time\"/" \
- <"$junit_xml_path" >"$junit_xml_path.new"
- mv "$junit_xml_path.new" "$junit_xml_path"
-
- write_junit_xml " </testsuite>" "</testsuites>"
- fi
+ finalize_junit_xml
if test -z "$HARNESS_ACTIVE"
then
diff --git a/trace2/tr2_dst.c b/trace2/tr2_dst.c
index c698575..ae052a0 100644
--- a/trace2/tr2_dst.c
+++ b/trace2/tr2_dst.c
@@ -8,6 +8,19 @@
*/
#define MAX_AUTO_ATTEMPTS 10
+/*
+ * Sentinel file used to detect when we should discard new traces to avoid
+ * writing too many trace files to a directory.
+ */
+#define DISCARD_SENTINEL_NAME "git-trace2-discard"
+
+/*
+ * When set to zero, disables directory file count checks. Otherwise, controls
+ * how many files we can write to a directory before entering discard mode.
+ * This can be overridden via the TR2_SYSENV_MAX_FILES setting.
+ */
+static int tr2env_max_files = 0;
+
static int tr2_dst_want_warning(void)
{
static int tr2env_dst_debug = -1;
@@ -32,9 +45,75 @@ void tr2_dst_trace_disable(struct tr2_dst *dst)
dst->need_close = 0;
}
+/*
+ * Check to make sure we're not overloading the target directory with too many
+ * files. First get the threshold (if present) from the config or envvar. If
+ * it's zero or unset, disable this check. Next check for the presence of a
+ * sentinel file, then check file count.
+ *
+ * Returns 0 if tracing should proceed as normal. Returns 1 if the sentinel file
+ * already exists, which means tracing should be disabled. Returns -1 if there
+ * are too many files but there was no sentinel file, which means we have
+ * created and should write traces to the sentinel file.
+ *
+ * We expect that some trace processing system is gradually collecting files
+ * from the target directory; after it removes the sentinel file we'll start
+ * writing traces again.
+ */
+static int tr2_dst_too_many_files(struct tr2_dst *dst, const char *tgt_prefix)
+{
+ int file_count = 0, max_files = 0, ret = 0;
+ const char *max_files_var;
+ DIR *dirp;
+ struct strbuf path = STRBUF_INIT, sentinel_path = STRBUF_INIT;
+ struct stat statbuf;
+
+ /* Get the config or envvar and decide if we should continue this check */
+ max_files_var = tr2_sysenv_get(TR2_SYSENV_MAX_FILES);
+ if (max_files_var && *max_files_var && ((max_files = atoi(max_files_var)) >= 0))
+ tr2env_max_files = max_files;
+
+ if (!tr2env_max_files) {
+ ret = 0;
+ goto cleanup;
+ }
+
+ strbuf_addstr(&path, tgt_prefix);
+ if (!is_dir_sep(path.buf[path.len - 1])) {
+ strbuf_addch(&path, '/');
+ }
+
+ /* check sentinel */
+ strbuf_addbuf(&sentinel_path, &path);
+ strbuf_addstr(&sentinel_path, DISCARD_SENTINEL_NAME);
+ if (!stat(sentinel_path.buf, &statbuf)) {
+ ret = 1;
+ goto cleanup;
+ }
+
+ /* check file count */
+ dirp = opendir(path.buf);
+ while (file_count < tr2env_max_files && dirp && readdir(dirp))
+ file_count++;
+ if (dirp)
+ closedir(dirp);
+
+ if (file_count >= tr2env_max_files) {
+ dst->too_many_files = 1;
+ dst->fd = open(sentinel_path.buf, O_WRONLY | O_CREAT | O_EXCL, 0666);
+ ret = -1;
+ goto cleanup;
+ }
+
+cleanup:
+ strbuf_release(&path);
+ strbuf_release(&sentinel_path);
+ return ret;
+}
+
static int tr2_dst_try_auto_path(struct tr2_dst *dst, const char *tgt_prefix)
{
- int fd;
+ int too_many_files;
const char *last_slash, *sid = tr2_sid_get();
struct strbuf path = STRBUF_INIT;
size_t base_path_len;
@@ -50,18 +129,29 @@ static int tr2_dst_try_auto_path(struct tr2_dst *dst, const char *tgt_prefix)
strbuf_addstr(&path, sid);
base_path_len = path.len;
- for (attempt_count = 0; attempt_count < MAX_AUTO_ATTEMPTS; attempt_count++) {
- if (attempt_count > 0) {
- strbuf_setlen(&path, base_path_len);
- strbuf_addf(&path, ".%d", attempt_count);
+ too_many_files = tr2_dst_too_many_files(dst, tgt_prefix);
+ if (!too_many_files) {
+ for (attempt_count = 0; attempt_count < MAX_AUTO_ATTEMPTS; attempt_count++) {
+ if (attempt_count > 0) {
+ strbuf_setlen(&path, base_path_len);
+ strbuf_addf(&path, ".%d", attempt_count);
+ }
+
+ dst->fd = open(path.buf, O_WRONLY | O_CREAT | O_EXCL, 0666);
+ if (dst->fd != -1)
+ break;
}
-
- fd = open(path.buf, O_WRONLY | O_CREAT | O_EXCL, 0666);
- if (fd != -1)
- break;
+ } else if (too_many_files == 1) {
+ strbuf_release(&path);
+ if (tr2_dst_want_warning())
+ warning("trace2: not opening %s trace file due to too "
+ "many files in target directory %s",
+ tr2_sysenv_display_name(dst->sysenv_var),
+ tgt_prefix);
+ return 0;
}
- if (fd == -1) {
+ if (dst->fd == -1) {
if (tr2_dst_want_warning())
warning("trace2: could not open '%.*s' for '%s' tracing: %s",
(int) base_path_len, path.buf,
@@ -75,7 +165,6 @@ static int tr2_dst_try_auto_path(struct tr2_dst *dst, const char *tgt_prefix)
strbuf_release(&path);
- dst->fd = fd;
dst->need_close = 1;
dst->initialized = 1;
diff --git a/trace2/tr2_dst.h b/trace2/tr2_dst.h
index 3adf3ba..b1a8c14 100644
--- a/trace2/tr2_dst.h
+++ b/trace2/tr2_dst.h
@@ -9,6 +9,7 @@ struct tr2_dst {
int fd;
unsigned int initialized : 1;
unsigned int need_close : 1;
+ unsigned int too_many_files : 1;
};
/*
diff --git a/trace2/tr2_sysenv.c b/trace2/tr2_sysenv.c
index 5958cfc..3c3792e 100644
--- a/trace2/tr2_sysenv.c
+++ b/trace2/tr2_sysenv.c
@@ -49,6 +49,9 @@ static struct tr2_sysenv_entry tr2_sysenv_settings[] = {
"trace2.perftarget" },
[TR2_SYSENV_PERF_BRIEF] = { "GIT_TRACE2_PERF_BRIEF",
"trace2.perfbrief" },
+
+ [TR2_SYSENV_MAX_FILES] = { "GIT_TRACE2_MAX_FILES",
+ "trace2.maxfiles" },
};
/* clang-format on */
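Together with the tr2_dst changes above, these two table entries wire the new threshold to both a config key and an environment variable. A usage sketch (the path and the limit of 100 are illustrative; GIT_TRACE2_EVENT is the usual event-target variable, and git-trace2-discard is the sentinel name defined in tr2_dst.c):

    git config trace2.maxfiles 100                  # or: export GIT_TRACE2_MAX_FILES=100
    GIT_TRACE2_EVENT=/var/log/git-traces/ git status

    # Once the directory's file count reaches the limit, the sentinel file is
    # created and new traces are discarded; the event target also records a
    # "too_many_files" event. Removing the sentinel after the directory has
    # been drained re-enables tracing.
    rm /var/log/git-traces/git-trace2-discard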
diff --git a/trace2/tr2_sysenv.h b/trace2/tr2_sysenv.h
index 8dd82a7..d4364a7 100644
--- a/trace2/tr2_sysenv.h
+++ b/trace2/tr2_sysenv.h
@@ -24,6 +24,8 @@ enum tr2_sysenv_variable {
TR2_SYSENV_PERF,
TR2_SYSENV_PERF_BRIEF,
+ TR2_SYSENV_MAX_FILES,
+
TR2_SYSENV_MUST_BE_LAST
};
diff --git a/trace2/tr2_tgt_event.c b/trace2/tr2_tgt_event.c
index 9bcac20..6353e8a 100644
--- a/trace2/tr2_tgt_event.c
+++ b/trace2/tr2_tgt_event.c
@@ -10,16 +10,17 @@
#include "trace2/tr2_tgt.h"
#include "trace2/tr2_tls.h"
-static struct tr2_dst tr2dst_event = { TR2_SYSENV_EVENT, 0, 0, 0 };
+static struct tr2_dst tr2dst_event = { TR2_SYSENV_EVENT, 0, 0, 0, 0 };
/*
- * The version number of the JSON data generated by the EVENT target
- * in this source file. Update this if you make a significant change
- * to the JSON fields or message structure. You probably do not need
- * to update this if you just add another call to one of the existing
- * TRACE2 API methods.
+ * The version number of the JSON data generated by the EVENT target in this
+ * source file. The version should be incremented if new event types are added,
+ * if existing fields are removed, or if there are significant changes in
+ * interpretation of existing events or fields. Smaller changes, such as adding
+ * a new field to an existing event, do not require an increment to the EVENT
+ * format version.
*/
-#define TR2_EVENT_VERSION "1"
+#define TR2_EVENT_VERSION "2"
/*
* Region nesting limit for messages written to the event target.
@@ -107,6 +108,19 @@ static void event_fmt_prepare(const char *event_name, const char *file,
jw_object_intmax(jw, "repo", repo->trace2_repo_id);
}
+static void fn_too_many_files_fl(const char *file, int line)
+{
+ const char *event_name = "too_many_files";
+ struct json_writer jw = JSON_WRITER_INIT;
+
+ jw_object_begin(&jw, 0);
+ event_fmt_prepare(event_name, file, line, NULL, &jw);
+ jw_end(&jw);
+
+ tr2_dst_write_line(&tr2dst_event, &jw.json);
+ jw_release(&jw);
+}
+
static void fn_version_fl(const char *file, int line)
{
const char *event_name = "version";
@@ -120,6 +134,9 @@ static void fn_version_fl(const char *file, int line)
tr2_dst_write_line(&tr2dst_event, &jw.json);
jw_release(&jw);
+
+ if (tr2dst_event.too_many_files)
+ fn_too_many_files_fl(file, line);
}
static void fn_start_fl(const char *file, int line,
diff --git a/trace2/tr2_tgt_normal.c b/trace2/tr2_tgt_normal.c
index 438ed05..31b602c 100644
--- a/trace2/tr2_tgt_normal.c
+++ b/trace2/tr2_tgt_normal.c
@@ -9,7 +9,7 @@
#include "trace2/tr2_tgt.h"
#include "trace2/tr2_tls.h"
-static struct tr2_dst tr2dst_normal = { TR2_SYSENV_NORMAL, 0, 0, 0 };
+static struct tr2_dst tr2dst_normal = { TR2_SYSENV_NORMAL, 0, 0, 0, 0 };
/*
* Use the TR2_SYSENV_NORMAL_BRIEF setting to omit the "<time> <file>:<line>"
diff --git a/trace2/tr2_tgt_perf.c b/trace2/tr2_tgt_perf.c
index fd979db..ffac802 100644
--- a/trace2/tr2_tgt_perf.c
+++ b/trace2/tr2_tgt_perf.c
@@ -11,7 +11,7 @@
#include "trace2/tr2_tgt.h"
#include "trace2/tr2_tls.h"
-static struct tr2_dst tr2dst_perf = { TR2_SYSENV_PERF, 0, 0, 0 };
+static struct tr2_dst tr2dst_perf = { TR2_SYSENV_PERF, 0, 0, 0, 0 };
/*
* Use TR2_SYSENV_PERF_BRIEF to omit the "<time> <file>:<line>"
diff --git a/unpack-trees.c b/unpack-trees.c
index f0f56d4..33ea781 100644
--- a/unpack-trees.c
+++ b/unpack-trees.c
@@ -11,7 +11,6 @@
#include "refs.h"
#include "attr.h"
#include "split-index.h"
-#include "dir.h"
#include "submodule.h"
#include "submodule-config.h"
#include "fsmonitor.h"
diff --git a/upload-pack.c b/upload-pack.c
index 875db92..a00d7ec 100644
--- a/upload-pack.c
+++ b/upload-pack.c
@@ -721,7 +721,7 @@ static void deepen_by_rev_list(struct packet_writer *writer, int ac,
{
struct commit_list *result;
- close_commit_graph(the_repository->objects);
+ disable_commit_graph(the_repository);
result = get_shallow_commits_by_rev_list(ac, av, SHALLOW, NOT_SHALLOW);
send_shallow(writer, result);
free_commit_list(result);
diff --git a/wrapper.c b/wrapper.c
index c55d772..e1eaef2 100644
--- a/wrapper.c
+++ b/wrapper.c
@@ -441,7 +441,9 @@ int git_mkstemps_mode(char *pattern, int suffix_len, int mode)
"abcdefghijklmnopqrstuvwxyz"
"ABCDEFGHIJKLMNOPQRSTUVWXYZ"
"0123456789";
- static const int num_letters = 62;
+ static const int num_letters = ARRAY_SIZE(letters) - 1;
+ static const char x_pattern[] = "XXXXXX";
+ static const int num_x = ARRAY_SIZE(x_pattern) - 1;
uint64_t value;
struct timeval tv;
char *filename_template;
@@ -450,12 +452,12 @@ int git_mkstemps_mode(char *pattern, int suffix_len, int mode)
len = strlen(pattern);
- if (len < 6 + suffix_len) {
+ if (len < num_x + suffix_len) {
errno = EINVAL;
return -1;
}
- if (strncmp(&pattern[len - 6 - suffix_len], "XXXXXX", 6)) {
+ if (strncmp(&pattern[len - num_x - suffix_len], x_pattern, num_x)) {
errno = EINVAL;
return -1;
}
@@ -466,16 +468,15 @@ int git_mkstemps_mode(char *pattern, int suffix_len, int mode)
*/
gettimeofday(&tv, NULL);
value = ((uint64_t)tv.tv_usec << 16) ^ tv.tv_sec ^ getpid();
- filename_template = &pattern[len - 6 - suffix_len];
+ filename_template = &pattern[len - num_x - suffix_len];
for (count = 0; count < TMP_MAX; ++count) {
uint64_t v = value;
+ int i;
/* Fill in the random bits. */
- filename_template[0] = letters[v % num_letters]; v /= num_letters;
- filename_template[1] = letters[v % num_letters]; v /= num_letters;
- filename_template[2] = letters[v % num_letters]; v /= num_letters;
- filename_template[3] = letters[v % num_letters]; v /= num_letters;
- filename_template[4] = letters[v % num_letters]; v /= num_letters;
- filename_template[5] = letters[v % num_letters]; v /= num_letters;
+ for (i = 0; i < num_x; i++) {
+ filename_template[i] = letters[v % num_letters];
+ v /= num_letters;
+ }
fd = open(pattern, O_CREAT | O_EXCL | O_RDWR, mode);
if (fd >= 0)
diff --git a/wt-status.c b/wt-status.c
index ad6282c..cc6f945 100644
--- a/wt-status.c
+++ b/wt-status.c
@@ -2029,7 +2029,7 @@ static void wt_porcelain_v2_print_tracking(struct wt_status *s)
char eol = s->null_termination ? '\0' : '\n';
fprintf(s->fp, "# branch.oid %s%c",
- (s->is_initial ? "(initial)" : sha1_to_hex(s->sha1_commit)),
+ (s->is_initial ? "(initial)" : oid_to_hex(&s->oid_commit)),
eol);
if (!s->branch)
diff --git a/wt-status.h b/wt-status.h
index 77dad5b..71c3f25 100644
--- a/wt-status.h
+++ b/wt-status.h
@@ -116,7 +116,7 @@ struct wt_status {
int rename_limit;
enum wt_status_format status_format;
struct wt_status_state state;
- unsigned char sha1_commit[GIT_MAX_RAWSZ]; /* when not Initial */
+ struct object_id oid_commit; /* when not Initial */
/* These are computed during processing of the individual sections */
int committable;
diff --git a/xdiff/xdiffi.c b/xdiff/xdiffi.c
index 1f1f4a3..bd03513 100644
--- a/xdiff/xdiffi.c
+++ b/xdiff/xdiffi.c
@@ -38,9 +38,9 @@ typedef struct s_xdpsplit {
* Basically considers a "box" (off1, off2, lim1, lim2) and scan from both
* the forward diagonal starting from (off1, off2) and the backward diagonal
* starting from (lim1, lim2). If the K values on the same diagonal crosses
- * returns the furthest point of reach. We might end up having to expensive
- * cases using this algorithm is full, so a little bit of heuristic is needed
- * to cut the search and to return a suboptimal point.
+ * returns the furthest point of reach. We might encounter expensive edge cases
+ * using this algorithm, so a little bit of heuristic is needed to cut the
+ * search and to return a suboptimal point.
*/
static long xdl_split(unsigned long const *ha1, long off1, long lim1,
unsigned long const *ha2, long off2, long lim2,
@@ -63,11 +63,13 @@ static long xdl_split(unsigned long const *ha1, long off1, long lim1,
int got_snake = 0;
/*
- * We need to extent the diagonal "domain" by one. If the next
+ * We need to extend the diagonal "domain" by one. If the next
* values exits the box boundaries we need to change it in the
- * opposite direction because (max - min) must be a power of two.
+ * opposite direction because (max - min) must be a power of
+ * two.
+ *
* Also we initialize the external K value to -1 so that we can
- * avoid extra conditions check inside the core loop.
+ * avoid extra conditions in the check inside the core loop.
*/
if (fmin > dmin)
kvdf[--fmin - 1] = -1;
@@ -98,11 +100,13 @@ static long xdl_split(unsigned long const *ha1, long off1, long lim1,
}
/*
- * We need to extent the diagonal "domain" by one. If the next
+ * We need to extend the diagonal "domain" by one. If the next
* values exits the box boundaries we need to change it in the
- * opposite direction because (max - min) must be a power of two.
+ * opposite direction because (max - min) must be a power of
+ * two.
+ *
* Also we initialize the external K value to -1 so that we can
- * avoid extra conditions check inside the core loop.
+ * avoid extra conditions in the check inside the core loop.
*/
if (bmin > dmin)
kvdb[--bmin - 1] = XDL_LINE_MAX;
@@ -138,7 +142,7 @@ static long xdl_split(unsigned long const *ha1, long off1, long lim1,
/*
* If the edit cost is above the heuristic trigger and if
* we got a good snake, we sample current diagonals to see
- * if some of the, have reached an "interesting" path. Our
+ * if some of them have reached an "interesting" path. Our
* measure is a function of the distance from the diagonal
* corner (i1 + i2) penalized with the distance from the
* mid diagonal itself. If this value is above the current
@@ -196,8 +200,9 @@ static long xdl_split(unsigned long const *ha1, long off1, long lim1,
}
/*
- * Enough is enough. We spent too much time here and now we collect
- * the furthest reaching path using the (i1 + i2) measure.
+ * Enough is enough. We spent too much time here and now we
+ * collect the furthest reaching path using the (i1 + i2)
+ * measure.
*/
if (ec >= xenv->mxcost) {
long fbest, fbest1, bbest, bbest1;
@@ -244,9 +249,9 @@ static long xdl_split(unsigned long const *ha1, long off1, long lim1,
/*
- * Rule: "Divide et Impera". Recursively split the box in sub-boxes by calling
- * the box splitting function. Note that the real job (marking changed lines)
- * is done in the two boundary reaching checks.
+ * Rule: "Divide et Impera" (divide & conquer). Recursively split the box in
+ * sub-boxes by calling the box splitting function. Note that the real job
+ * (marking changed lines) is done in the two boundary reaching checks.
*/
int xdl_recs_cmp(diffdata_t *dd1, long off1, long lim1,
diffdata_t *dd2, long off2, long lim2,
@@ -323,7 +328,9 @@ int xdl_do_diff(mmfile_t *mf1, mmfile_t *mf2, xpparam_t const *xpp,
}
/*
- * Allocate and setup K vectors to be used by the differential algorithm.
+ * Allocate and setup K vectors to be used by the differential
+ * algorithm.
+ *
* One is to store the forward path and one to store the backward path.
*/
ndiags = xe->xdf1.nreff + xe->xdf2.nreff + 3;
@@ -394,8 +401,8 @@ static int recs_match(xrecord_t *rec1, xrecord_t *rec2, long flags)
/*
* If a line is indented more than this, get_indent() just returns this value.
* This avoids having to do absurd amounts of work for data that are not
- * human-readable text, and also ensures that the output of get_indent fits within
- * an int.
+ * human-readable text, and also ensures that the output of get_indent fits
+ * within an int.
*/
#define MAX_INDENT 200
@@ -429,9 +436,9 @@ static int get_indent(xrecord_t *rec)
}
/*
- * If more than this number of consecutive blank rows are found, just return this
- * value. This avoids requiring O(N^2) work for pathological cases, and also
- * ensures that the output of score_split fits in an int.
+ * If more than this number of consecutive blank rows are found, just return
+ * this value. This avoids requiring O(N^2) work for pathological cases, and
+ * also ensures that the output of score_split fits in an int.
*/
#define MAX_BLANKS 20
@@ -443,8 +450,8 @@ struct split_measurement {
int end_of_file;
/*
- * How much is the line immediately following the split indented (or -1 if
- * the line is blank):
+ * How much is the line immediately following the split indented (or -1
+ * if the line is blank):
*/
int indent;
@@ -454,8 +461,8 @@ struct split_measurement {
int pre_blank;
/*
- * How much is the nearest non-blank line above the split indented (or -1
- * if there is no such line)?
+ * How much is the nearest non-blank line above the split indented (or
+ * -1 if there is no such line)?
*/
int pre_indent;
@@ -581,13 +588,13 @@ static void measure_split(const xdfile_t *xdf, long split,
/*
* Compute a badness score for the hypothetical split whose measurements are
- * stored in m. The weight factors were determined empirically using the tools and
- * corpus described in
+ * stored in m. The weight factors were determined empirically using the tools
+ * and corpus described in
*
* https://github.com/mhagger/diff-slider-tools
*
- * Also see that project if you want to improve the weights based on, for example,
- * a larger or more diverse corpus.
+ * Also see that project if you want to improve the weights based on, for
+ * example, a larger or more diverse corpus.
*/
static void score_add_split(const struct split_measurement *m, struct split_score *s)
{
@@ -809,13 +816,16 @@ int xdl_change_compact(xdfile_t *xdf, xdfile_t *xdfo, long flags) {
group_init(xdfo, &go);
while (1) {
- /* If the group is empty in the to-be-compacted file, skip it: */
+ /*
+ * If the group is empty in the to-be-compacted file, skip it:
+ */
if (g.end == g.start)
goto next;
/*
* Now shift the change up and then down as far as possible in
- * each direction. If it bumps into any other changes, merge them.
+ * each direction. If it bumps into any other changes, merge
+ * them.
*/
do {
groupsize = g.end - g.start;
@@ -858,17 +868,17 @@ int xdl_change_compact(xdfile_t *xdf, xdfile_t *xdfo, long flags) {
* If the group can be shifted, then we can possibly use this
* freedom to produce a more intuitive diff.
*
- * The group is currently shifted as far down as possible, so the
- * heuristics below only have to handle upwards shifts.
+ * The group is currently shifted as far down as possible, so
+ * the heuristics below only have to handle upwards shifts.
*/
if (g.end == earliest_end) {
/* no shifting was possible */
} else if (end_matching_other != -1) {
/*
- * Move the possibly merged group of changes back to line
- * up with the last group of changes from the other file
- * that it can align with.
+ * Move the possibly merged group of changes back to
+ * line up with the last group of changes from the
+ * other file that it can align with.
*/
while (go.end == go.start) {
if (group_slide_up(xdf, &g, flags))
@@ -879,14 +889,15 @@ int xdl_change_compact(xdfile_t *xdf, xdfile_t *xdfo, long flags) {
} else if (flags & XDF_INDENT_HEURISTIC) {
/*
* Indent heuristic: a group of pure add/delete lines
- * implies two splits, one between the end of the "before"
- * context and the start of the group, and another between
- * the end of the group and the beginning of the "after"
- * context. Some splits are aesthetically better and some
- * are worse. We compute a badness "score" for each split,
- * and add the scores for the two splits to define a
- * "score" for each position that the group can be shifted
- * to. Then we pick the shift with the lowest score.
+ * implies two splits, one between the end of the
+ * "before" context and the start of the group, and
+ * another between the end of the group and the
+ * beginning of the "after" context. Some splits are
+ * aesthetically better and some are worse. We compute
+ * a badness "score" for each split, and add the scores
+ * for the two splits to define a "score" for each
+ * position that the group can be shifted to. Then we
+ * pick the shift with the lowest score.
*/
long shift, best_shift = -1;
struct split_score best_score;