-rw-r--r--  .clang-format | 2
-rw-r--r--  .gitignore | 1
-rw-r--r--  Documentation/Makefile | 1
-rw-r--r--  Documentation/RelNotes/2.16.2.txt | 30
-rw-r--r--  Documentation/RelNotes/2.17.0.txt | 77
-rw-r--r--  Documentation/config.txt | 8
-rw-r--r--  Documentation/git-commit-graph.txt | 94
-rw-r--r--  Documentation/git-pack-objects.txt | 11
-rw-r--r--  Documentation/git-show.txt | 4
-rw-r--r--  Documentation/git-submodule.txt | 16
-rw-r--r--  Documentation/git.txt | 10
-rw-r--r--  Documentation/gitattributes.txt | 54
-rw-r--r--  Documentation/gitignore.txt | 11
-rw-r--r--  Documentation/gitremote-helpers.txt | 7
-rw-r--r--  Documentation/gitsubmodules.txt | 100
-rw-r--r--  Documentation/rev-list-options.txt | 11
-rw-r--r--  Documentation/technical/api-object-access.txt | 2
-rw-r--r--  Documentation/technical/commit-graph-format.txt | 97
-rw-r--r--  Documentation/technical/commit-graph.txt | 163
-rw-r--r--  Documentation/technical/long-running-process-protocol.txt | 50
-rw-r--r--  Documentation/technical/pack-protocol.txt | 8
-rw-r--r--  Documentation/technical/protocol-capabilities.txt | 8
-rw-r--r--  Documentation/technical/repository-version.txt | 12
-rw-r--r--  INSTALL | 17
-rw-r--r--  Makefile | 87
-rw-r--r--  alloc.c | 1
-rw-r--r--  apply.c | 18
-rw-r--r--  blame.c | 2
-rw-r--r--  builtin.h | 1
-rw-r--r--  builtin/am.c | 6
-rw-r--r--  builtin/cat-file.c | 2
-rw-r--r--  builtin/checkout.c | 3
-rw-r--r--  builtin/clone.c | 22
-rw-r--r--  builtin/commit-graph.c | 171
-rw-r--r--  builtin/commit-tree.c | 4
-rw-r--r--  builtin/commit.c | 296
-rw-r--r--  builtin/describe.c | 2
-rw-r--r--  builtin/fetch-pack.c | 18
-rw-r--r--  builtin/fetch.c | 83
-rw-r--r--  builtin/fsck.c | 34
-rw-r--r--  builtin/gc.c | 3
-rw-r--r--  builtin/hash-object.c | 3
-rw-r--r--  builtin/index-pack.c | 222
-rw-r--r--  builtin/merge.c | 8
-rw-r--r--  builtin/mktag.c | 6
-rw-r--r--  builtin/mktree.c | 10
-rw-r--r--  builtin/notes.c | 8
-rw-r--r--  builtin/pack-objects.c | 101
-rw-r--r--  builtin/prune.c | 7
-rw-r--r--  builtin/pull.c | 2
-rw-r--r--  builtin/rebase--helper.c | 2
-rw-r--r--  builtin/receive-pack.c | 11
-rw-r--r--  builtin/repack.c | 8
-rw-r--r--  builtin/replace.c | 4
-rw-r--r--  builtin/reset.c | 28
-rw-r--r--  builtin/rev-list.c | 73
-rw-r--r--  builtin/rev-parse.c | 4
-rw-r--r--  builtin/revert.c | 4
-rw-r--r--  builtin/submodule--helper.c | 340
-rw-r--r--  builtin/tag.c | 2
-rw-r--r--  builtin/unpack-objects.c | 29
-rw-r--r--  builtin/worktree.c | 2
-rw-r--r--  bulk-checkin.c | 28
-rw-r--r--  cache-tree.c | 23
-rw-r--r--  cache.h | 89
-rwxr-xr-x  ci/lib-travisci.sh | 7
-rwxr-xr-x  ci/run-linux32-build.sh | 51
-rwxr-xr-x  ci/run-linux32-docker.sh | 7
-rwxr-xr-x  ci/run-tests.sh | 7
-rw-r--r--  combine-diff.c | 2
-rw-r--r--  command-list.txt | 1
-rw-r--r--  commit-graph.c | 738
-rw-r--r--  commit-graph.h | 46
-rw-r--r--  commit.c | 24
-rw-r--r--  commit.h | 14
-rw-r--r--  config.c | 17
-rw-r--r--  connected.c | 2
-rw-r--r--  contrib/coccinelle/strbuf.cocci | 20
-rw-r--r--  contrib/completion/git-completion.bash | 4
-rwxr-xr-x  contrib/examples/git-difftool.perl | 2
-rw-r--r--  convert.c | 44
-rw-r--r--  convert.h | 17
-rw-r--r--  csum-file.c | 54
-rw-r--r--  csum-file.h | 43
-rw-r--r--  daemon.c | 15
-rw-r--r--  diff-lib.c | 4
-rw-r--r--  diff.c | 9
-rw-r--r--  diffcore-rename.c | 12
-rw-r--r--  dir.c | 110
-rw-r--r--  dir.h | 12
-rw-r--r--  environment.c | 5
-rw-r--r--  fast-import.c | 70
-rw-r--r--  fetch-object.c | 45
-rw-r--r--  fetch-object.h | 11
-rw-r--r--  fetch-pack.c | 65
-rw-r--r--  fetch-pack.h | 10
-rwxr-xr-x  git-send-email.perl | 2
-rwxr-xr-x  git-submodule.sh | 112
-rwxr-xr-x  git-svn.perl | 5
-rw-r--r--  git.c | 1
-rw-r--r--  hash.h | 40
-rw-r--r--  hashmap.h | 3
-rw-r--r--  http-push.c | 4
-rw-r--r--  http-walker.c | 6
-rw-r--r--  http.c | 98
-rw-r--r--  list-objects-filter-options.c | 92
-rw-r--r--  list-objects-filter-options.h | 18
-rw-r--r--  list-objects.c | 29
-rw-r--r--  log-tree.c | 5
-rw-r--r--  mailinfo.c | 10
-rw-r--r--  match-trees.c | 46
-rw-r--r--  merge-recursive.c | 26
-rw-r--r--  mru.c | 50
-rw-r--r--  mru.h | 45
-rw-r--r--  name-hash.c | 3
-rw-r--r--  notes-cache.c | 8
-rw-r--r--  notes-merge.c | 9
-rw-r--r--  notes-utils.c | 9
-rw-r--r--  notes-utils.h | 3
-rw-r--r--  notes.c | 63
-rw-r--r--  notes.h | 29
-rw-r--r--  object.c | 2
-rw-r--r--  pack-bitmap-write.c | 30
-rw-r--r--  pack-check.c | 32
-rw-r--r--  pack-revindex.c | 6
-rw-r--r--  pack-write.c | 80
-rw-r--r--  pack.h | 4
-rw-r--r--  packfile.c | 131
-rw-r--r--  packfile.h | 15
-rw-r--r--  parse-options.c | 2
-rw-r--r--  perl/.gitignore | 9
-rw-r--r--  perl/Git.pm | 2
-rw-r--r--  perl/Git/Error.pm | 46
-rw-r--r--  perl/Git/FromCPAN/Error.pm (renamed from perl/private-Error.pm) | 0
-rw-r--r--  perl/Git/I18N.pm | 2
-rw-r--r--  perl/Makefile | 90
-rw-r--r--  perl/Makefile.PL | 62
-rw-r--r--  preload-index.c | 2
-rw-r--r--  quote.c | 30
-rw-r--r--  quote.h | 10
-rw-r--r--  read-cache.c | 141
-rw-r--r--  refs/files-backend.c | 3
-rw-r--r--  refs/packed-backend.c | 106
-rw-r--r--  refs/ref-cache.c | 6
-rw-r--r--  remote-curl.c | 20
-rw-r--r--  replace_object.c | 6
-rw-r--r--  repository.c | 2
-rw-r--r--  rerere.c | 4
-rw-r--r--  revision.c | 46
-rw-r--r--  revision.h | 6
-rw-r--r--  run-command.c | 88
-rw-r--r--  sequencer.c | 538
-rw-r--r--  sequencer.h | 25
-rw-r--r--  setup.c | 7
-rw-r--r--  sha1-lookup.c | 28
-rw-r--r--  sha1-lookup.h | 22
-rw-r--r--  sha1_file.c | 237
-rw-r--r--  sha1dc_git.h | 6
-rw-r--r--  split-index.c | 2
-rw-r--r--  sub-process.h | 4
-rw-r--r--  t/helper/test-dump-untracked-cache.c | 4
-rw-r--r--  t/helper/test-run-command.c | 9
-rw-r--r--  t/helper/test-wildmatch.c | 2
-rw-r--r--  t/lib-git-daemon.sh | 41
-rwxr-xr-x  t/perf/aggregate.perl | 50
-rwxr-xr-x  t/t0061-run-command.sh | 37
-rwxr-xr-x  t/t0410-partial-clone.sh | 343
-rwxr-xr-x  t/t1700-split-index.sh | 38
-rwxr-xr-x  t/t3070-wildmatch.sh | 655
-rwxr-xr-x  t/t3404-rebase-interactive.sh | 14
-rwxr-xr-x  t/t3501-revert-cherry-pick.sh | 7
-rwxr-xr-x  t/t3512-cherry-pick-submodule.sh | 1
-rwxr-xr-x  t/t3513-revert-submodule.sh | 1
-rwxr-xr-x  t/t5318-commit-graph.sh | 224
-rwxr-xr-x  t/t5500-fetch-pack.sh | 63
-rwxr-xr-x  t/t5551-http-fetch-smart.sh | 33
-rwxr-xr-x  t/t5570-git-daemon.sh | 37
-rwxr-xr-x  t/t5601-clone.sh | 101
-rwxr-xr-x  t/t5616-partial-clone.sh | 146
-rwxr-xr-x  t/t6015-rev-list-show-all-parents.sh | 31
-rwxr-xr-x  t/t7505-prepare-commit-msg-hook.sh | 134
-rw-r--r--  t/t7505/expected-rebase-i | 17
-rw-r--r--  t/t7505/expected-rebase-p | 18
-rwxr-xr-x  t/t7607-merge-overwrite.sh | 5
-rwxr-xr-x  t/t9001-send-email.sh | 17
-rw-r--r--  t/test-lib-functions.sh | 34
-rw-r--r--  t/test-lib.sh | 6
-rw-r--r--  trace.c | 9
-rw-r--r--  transport-helper.c | 5
-rw-r--r--  transport.c | 12
-rw-r--r--  transport.h | 16
-rw-r--r--  unpack-trees.c | 22
-rw-r--r--  upload-pack.c | 31
-rwxr-xr-x  wrap-for-bin.sh | 2
194 files changed, 6588 insertions(+), 2174 deletions(-)
diff --git a/.clang-format b/.clang-format
index 611ab47..12a89f9 100644
--- a/.clang-format
+++ b/.clang-format
@@ -163,7 +163,7 @@ PenaltyBreakComment: 10
PenaltyBreakFirstLessLess: 0
PenaltyBreakString: 10
PenaltyExcessCharacter: 100
-PenaltyReturnTypeOnItsOwnLine: 5
+PenaltyReturnTypeOnItsOwnLine: 60
# Don't sort #include's
SortIncludes: false
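As an illustration (this snippet is not from the tree), the higher penalty makes clang-format prefer the first of the two layouts below, keeping the return type on the same line as the function name:

------------------------------------------------
/* preferred: return type stays on the same line */
static const char *describe_object(int flags);

/* penalized: return type broken onto its own line */
static const char *
describe_object(int flags);
------------------------------------------------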
diff --git a/.gitignore b/.gitignore
index 833ef3b..e82f901 100644
--- a/.gitignore
+++ b/.gitignore
@@ -34,6 +34,7 @@
/git-clone
/git-column
/git-commit
+/git-commit-graph
/git-commit-tree
/git-config
/git-count-objects
diff --git a/Documentation/Makefile b/Documentation/Makefile
index 4ae9ba5..6232143 100644
--- a/Documentation/Makefile
+++ b/Documentation/Makefile
@@ -72,6 +72,7 @@ TECH_DOCS += SubmittingPatches
TECH_DOCS += technical/hash-function-transition
TECH_DOCS += technical/http-protocol
TECH_DOCS += technical/index-format
+TECH_DOCS += technical/long-running-process-protocol
TECH_DOCS += technical/pack-format
TECH_DOCS += technical/pack-heuristics
TECH_DOCS += technical/pack-protocol
diff --git a/Documentation/RelNotes/2.16.2.txt b/Documentation/RelNotes/2.16.2.txt
new file mode 100644
index 0000000..a216466
--- /dev/null
+++ b/Documentation/RelNotes/2.16.2.txt
@@ -0,0 +1,30 @@
+Git v2.16.2 Release Notes
+=========================
+
+Fixes since v2.16.1
+-------------------
+
+ * An old regression in "git describe --all $annotated_tag^0" has been
+ fixed.
+
+ * "git svn dcommit" did not take into account the fact that a
+ svn+ssh:// URL with a username@ (typically used for pushing) refers
+ to the same SVN repository without the username@ and failed when
+ svn.pushmergeinfo option is set.
+
+ * "git merge -Xours/-Xtheirs" learned to use our/their version when
+   resolving conflicting updates to a symbolic link.
+
+ * "git clone $there $here" is allowed even when here directory exists
+ as long as it is an empty directory, but the command incorrectly
+ removed it upon a failure of the operation.
+
+ * "git stash -- <pathspec>" incorrectly blew away untracked files in
+ the directory that matched the pathspec, which has been corrected.
+
+ * "git add -p" was taught to ignore local changes to submodules as
+ they do not interfere with the partial addition of regular changes
+ anyway.
+
+
+Also contains various documentation updates and code clean-ups.
diff --git a/Documentation/RelNotes/2.17.0.txt b/Documentation/RelNotes/2.17.0.txt
index 759e75f..e1e509b 100644
--- a/Documentation/RelNotes/2.17.0.txt
+++ b/Documentation/RelNotes/2.17.0.txt
@@ -19,6 +19,46 @@ Performance, Internal Implementation, Development Support etc.
* "perf" test output can be sent to codespeed server.
(merge 19cf57a92e cc/codespeed later to maint).
+ * The build procedure for perl/ part has been greatly simplified by
+ weaning ourselves off of MakeMaker.
+
+ * In preparation for implementing narrow/partial clone, the machinery
+ for checking object connectivity used by gc and fsck has been
+ taught that a missing object is OK when it is referenced by a
+   packfile specially marked as coming from a trusted repository that
+   promises to make it available on demand and lazily.
+
+ * The machinery to clone & fetch, which in turn involves packing and
+ unpacking objects, has been told how to omit certain objects using
+ the filtering mechanism introduced by another topic. It now knows
+ to mark the resulting pack as a promisor pack to tolerate missing
+ objects, laying foundation for "narrow" clones.
+
+ * The first step to getting rid of mru API and using the
+ doubly-linked list API directly instead.
+
+ * Retire mru API as it does not give enough abstraction over
+ underlying list API to be worth it.
+
+ * Rewrite two more "git submodule" subcommands in C.
+
+ * The tracing machinery learned to report tweaking of environment
+ variables as well.
+ (merge 090a09272a nd/trace-with-env later to maint).
+
+ * Update Coccinelle rules to catch and optimize strbuf_addf(&buf, "%s", str)
+ (merge cd9a4b6d93 rs/strbuf-cocci-workaround later to maint).
+
+ * Prevent "clang-format" from breaking line after function return type.
+ (merge a3715d43e8 po/clang-format-functype-weight later to maint).
+
+ * The sequencer infrastructure is shared across "git cherry-pick",
+ "git rebase -i", etc., and has always spawned "git commit" when it
+ needs to create a commit. It has been taught to do so internally,
+ when able, by reusing the codepath "git commit" itself uses, which
+ gives performance boost for a few tens of percents in some sample
+ scenarios.
+
Also contains various documentation updates and code clean-ups.
@@ -77,8 +117,45 @@ Fixes since v2.16
anyway.
(merge 12434efc1d nd/add-i-ignore-submodules later to maint).
+ * Avoid showing a warning message in the middle of a line of "git
+ diff" output.
+ (merge 4e056c989f nd/diff-flush-before-warning later to maint).
+
+ * The http tracing code, often used to debug connection issues,
+ learned to redact potentially sensitive information from its output
+ so that it can be more safely sharable.
+ (merge 8ba18e6fa4 jt/http-redact-cookies later to maint).
+
+ * Crash fix for a corner case where an error codepath tried to unlock
+ what it did not acquire lock on.
+ (merge 81fcb698e0 mr/packed-ref-store-fix later to maint).
+
+ * The split-index mode had a few corner case bugs fixed.
+ (merge ae59a4e44f tg/split-index-fixes later to maint).
+
+ * Assorted fixes to "git daemon".
+ (merge ed15e58efe jk/daemon-fixes later to maint).
+
+ * Completion of "git merge -s<strategy>" (in contrib/) did not work
+ well in non-C locale.
+ (merge 7cc763aaa3 nd/list-merge-strategy later to maint).
+
+ * Workaround for segfault with more recent versions of SVN.
+ (merge 7f6f75e97a ew/svn-branch-segfault-fix later to maint).
+
* Other minor doc, test and build updates and code cleanups.
(merge e2a5a028c7 bw/oidmap-autoinit later to maint).
(merge f0a6068a9f ys/bisect-object-id-missing-conversion-fix later to maint).
(merge 30221a3389 as/read-tree-prefix-doc-fix later to maint).
(merge 9bd2ce5432 ab/doc-cat-file-e-still-shows-errors later to maint).
+ (merge ec3b4b06f8 cl/t9001-cleanup later to maint).
+ (merge e1b3f3dd38 ks/submodule-doc-updates later to maint).
+ (merge fbac558a9b rs/describe-unique-abbrev later to maint).
+ (merge 8462ff43e4 tb/crlf-conv-flags later to maint).
+ (merge 7d68bb0766 rb/hashmap-h-compilation-fix later to maint).
+ (merge 3449847168 cc/sha1-file-name later to maint).
+ (merge ad622a256f ds/use-get-be64 later to maint).
+ (merge f919ffebed sg/cocci-move-array later to maint).
+ (merge 4e801463c7 jc/mailinfo-cleanup-fix later to maint).
+ (merge ef5b3a6c5e nd/shared-index-fix later to maint).
+ (merge 9f5258cbb8 tz/doc-show-defaults-to-head later to maint).
diff --git a/Documentation/config.txt b/Documentation/config.txt
index 0e25b2c..11f0271 100644
--- a/Documentation/config.txt
+++ b/Documentation/config.txt
@@ -898,6 +898,10 @@ core.notesRef::
This setting defaults to "refs/notes/commits", and it can be overridden by
the `GIT_NOTES_REF` environment variable. See linkgit:git-notes[1].
+core.commitGraph::
+ Enable git commit graph feature. Allows reading from the
+ commit-graph file.
+
core.sparseCheckout::
Enable "sparse checkout" feature. See section "Sparse checkout" in
linkgit:git-read-tree[1] for more information.
@@ -3343,6 +3347,10 @@ uploadpack.packObjectsHook::
was run. I.e., `upload-pack` will feed input intended for
`pack-objects` to the hook, and expects a completed packfile on
stdout.
+
+uploadpack.allowFilter::
+ If this option is set, `upload-pack` will advertise partial
+ clone and partial fetch object filtering.
+
Note that this configuration variable is ignored if it is seen in the
repository-level config (this is a safety measure against fetching from
diff --git a/Documentation/git-commit-graph.txt b/Documentation/git-commit-graph.txt
new file mode 100644
index 0000000..4c97b55
--- /dev/null
+++ b/Documentation/git-commit-graph.txt
@@ -0,0 +1,94 @@
+git-commit-graph(1)
+===================
+
+NAME
+----
+git-commit-graph - Write and verify Git commit graph files
+
+
+SYNOPSIS
+--------
+[verse]
+'git commit-graph read' [--object-dir <dir>]
+'git commit-graph write' <options> [--object-dir <dir>]
+
+
+DESCRIPTION
+-----------
+
+Manage the serialized commit graph file.
+
+
+OPTIONS
+-------
+--object-dir::
+ Use given directory for the location of packfiles and commit graph
+ file. This parameter exists to specify the location of an alternate
+ that only has the objects directory, not a full .git directory. The
+ commit graph file is expected to be at <dir>/info/commit-graph and
+ the packfiles are expected to be in <dir>/pack.
+
+
+COMMANDS
+--------
+'write'::
+
+Write a commit graph file based on the commits found in packfiles.
++
+With the `--stdin-packs` option, generate the new commit graph by
+walking objects only in the specified pack-indexes. (Cannot be combined
+with --stdin-commits.)
++
+With the `--stdin-commits` option, generate the new commit graph by
+walking commits starting at the commits specified in stdin as a list
+of OIDs in hex, one OID per line. (Cannot be combined with
+--stdin-packs.)
++
+With the `--append` option, include all commits that are present in the
+existing commit-graph file.
+
+'read'::
+
+Read the commit-graph file and output basic details about it.
+Used for debugging purposes.
+
+
+EXAMPLES
+--------
+
+* Write a commit graph file for the packed commits in your local .git folder.
++
+------------------------------------------------
+$ git commit-graph write
+------------------------------------------------
+
+* Write a graph file, extending the current graph file using commits
+  in <pack-index>.
++
+------------------------------------------------
+$ echo <pack-index> | git commit-graph write --stdin-packs
+------------------------------------------------
+
+* Write a graph file containing all reachable commits.
++
+------------------------------------------------
+$ git show-ref -s | git commit-graph write --stdin-commits
+------------------------------------------------
+
+* Write a graph file containing all commits in the current
+  commit-graph file along with those reachable from HEAD.
++
+------------------------------------------------
+$ git rev-parse HEAD | git commit-graph write --stdin-commits --append
+------------------------------------------------
+
+* Read basic information from the commit-graph file.
++
+------------------------------------------------
+$ git commit-graph read
+------------------------------------------------
+
+
+GIT
+---
+Part of the linkgit:git[1] suite
diff --git a/Documentation/git-pack-objects.txt b/Documentation/git-pack-objects.txt
index aa403d0..81bc490 100644
--- a/Documentation/git-pack-objects.txt
+++ b/Documentation/git-pack-objects.txt
@@ -255,6 +255,17 @@ a missing object is encountered. This is the default action.
The form '--missing=allow-any' will allow object traversal to continue
if a missing object is encountered. Missing objects will silently be
omitted from the results.
++
+The form '--missing=allow-promisor' is like 'allow-any', but will only
+allow object traversal to continue for EXPECTED promisor missing objects.
+Unexpected missing objects will raise an error.
+
+--exclude-promisor-objects::
+ Omit objects that are known to be in the promisor remote. (This
+ option has the purpose of operating only on locally created objects,
+ so that when we repack, we still maintain a distinction between
+ locally created objects [without .promisor] and objects from the
+ promisor remote [with .promisor].) This is used with partial clone.
SEE ALSO
--------
diff --git a/Documentation/git-show.txt b/Documentation/git-show.txt
index 82a4125..e73ef54 100644
--- a/Documentation/git-show.txt
+++ b/Documentation/git-show.txt
@@ -9,7 +9,7 @@ git-show - Show various types of objects
SYNOPSIS
--------
[verse]
-'git show' [options] <object>...
+'git show' [options] [<object>...]
DESCRIPTION
-----------
@@ -35,7 +35,7 @@ This manual page describes only the most frequently used options.
OPTIONS
-------
<object>...::
- The names of objects to show.
+ The names of objects to show (defaults to 'HEAD').
For a more complete list of ways to spell object names, see
"SPECIFYING REVISIONS" section in linkgit:gitrevisions[7].
diff --git a/Documentation/git-submodule.txt b/Documentation/git-submodule.txt
index ff61200..71c5618 100644
--- a/Documentation/git-submodule.txt
+++ b/Documentation/git-submodule.txt
@@ -70,8 +70,8 @@ status [--cached] [--recursive] [--] [<path>...]::
Show the status of the submodules. This will print the SHA-1 of the
currently checked out commit for each submodule, along with the
submodule path and the output of 'git describe' for the
- SHA-1. Each SHA-1 will be prefixed with `-` if the submodule is not
- initialized, `+` if the currently checked out submodule commit
+ SHA-1. Each SHA-1 will possibly be prefixed with `-` if the submodule is
+ not initialized, `+` if the currently checked out submodule commit
does not match the SHA-1 found in the index of the containing
repository and `U` if the submodule has merge conflicts.
+
@@ -132,15 +132,15 @@ expects by cloning missing submodules and updating the working tree of
the submodules. The "updating" can be done in several ways depending
on command line options and the value of `submodule.<name>.update`
configuration variable. The command line option takes precedence over
-the configuration variable. if neither is given, a checkout is performed.
-update procedures supported both from the command line as well as setting
-`submodule.<name>.update`:
+the configuration variable. If neither is given, a 'checkout' is performed.
+The 'update' procedures supported both from the command line as well as
+through the `submodule.<name>.update` configuration are:
checkout;; the commit recorded in the superproject will be
checked out in the submodule on a detached HEAD.
+
If `--force` is specified, the submodule will be checked out (using
-`git checkout --force` if appropriate), even if the commit specified
+`git checkout --force`), even if the commit specified
in the index of the containing repository already matches the commit
checked out in the submodule.
@@ -150,8 +150,8 @@ checked out in the submodule.
merge;; the commit recorded in the superproject will be merged
into the current branch in the submodule.
-The following procedures are only available via the `submodule.<name>.update`
-configuration variable:
+The following 'update' procedures are only available via the
+`submodule.<name>.update` configuration variable:
custom command;; arbitrary shell command that takes a single
argument (the sha1 of the commit recorded in the
diff --git a/Documentation/git.txt b/Documentation/git.txt
index 3f4161a..8163b57 100644
--- a/Documentation/git.txt
+++ b/Documentation/git.txt
@@ -646,6 +646,16 @@ of clones and fetches.
variable.
See `GIT_TRACE` for available trace output options.
+`GIT_TRACE_CURL_NO_DATA`::
+ When a curl trace is enabled (see `GIT_TRACE_CURL` above), do not dump
+ data (that is, only dump info lines and headers).
+
+`GIT_REDACT_COOKIES`::
+ This can be set to a comma-separated list of strings. When a curl trace
+ is enabled (see `GIT_TRACE_CURL` above), whenever a "Cookies:" header
+ sent by the client is dumped, values of cookies whose key is in that
+ list (case-sensitive) are redacted.
+
`GIT_LITERAL_PATHSPECS`::
Setting this variable to `1` will cause Git to treat all
pathspecs literally, rather than as glob patterns. For example,
diff --git a/Documentation/gitattributes.txt b/Documentation/gitattributes.txt
index 30687de..c21f5ca 100644
--- a/Documentation/gitattributes.txt
+++ b/Documentation/gitattributes.txt
@@ -392,46 +392,14 @@ Long Running Filter Process
If the filter command (a string value) is defined via
`filter.<driver>.process` then Git can process all blobs with a
single filter invocation for the entire life of a single Git
-command. This is achieved by using a packet format (pkt-line,
-see technical/protocol-common.txt) based protocol over standard
-input and standard output as follows. All packets, except for the
-"*CONTENT" packets and the "0000" flush packet, are considered
-text and therefore are terminated by a LF.
-
-Git starts the filter when it encounters the first file
-that needs to be cleaned or smudged. After the filter started
-Git sends a welcome message ("git-filter-client"), a list of supported
-protocol version numbers, and a flush packet. Git expects to read a welcome
-response message ("git-filter-server"), exactly one protocol version number
-from the previously sent list, and a flush packet. All further
-communication will be based on the selected version. The remaining
-protocol description below documents "version=2". Please note that
-"version=42" in the example below does not exist and is only there
-to illustrate how the protocol would look like with more than one
-version.
-
-After the version negotiation Git sends a list of all capabilities that
-it supports and a flush packet. Git expects to read a list of desired
-capabilities, which must be a subset of the supported capabilities list,
-and a flush packet as response:
-------------------------
-packet: git> git-filter-client
-packet: git> version=2
-packet: git> version=42
-packet: git> 0000
-packet: git< git-filter-server
-packet: git< version=2
-packet: git< 0000
-packet: git> capability=clean
-packet: git> capability=smudge
-packet: git> capability=not-yet-invented
-packet: git> 0000
-packet: git< capability=clean
-packet: git< capability=smudge
-packet: git< 0000
-------------------------
-Supported filter capabilities in version 2 are "clean", "smudge",
-and "delay".
+command. This is achieved by using the long-running process protocol
+(described in technical/long-running-process-protocol.txt).
+
+When Git encounters the first file that needs to be cleaned or smudged,
+it starts the filter and performs the handshake. In the handshake, the
+welcome message sent by Git is "git-filter-client", only version 2 is
+supported, and the supported capabilities are "clean", "smudge", and
+"delay".
Afterwards Git sends a list of "key=value" pairs terminated with
a flush packet. The list will contain at least the filter command
@@ -517,12 +485,6 @@ the protocol then Git will stop the filter process and restart it
with the next file that needs to be processed. Depending on the
`filter.<driver>.required` flag Git will interpret that as error.
-After the filter has processed a command it is expected to wait for
-a "key=value" list containing the next command. Git will close
-the command pipe on exit. The filter is expected to detect EOF
-and exit gracefully on its own. Git will wait until the filter
-process has stopped.
-
Delay
^^^^^
diff --git a/Documentation/gitignore.txt b/Documentation/gitignore.txt
index 63260f0..ff5d7f9 100644
--- a/Documentation/gitignore.txt
+++ b/Documentation/gitignore.txt
@@ -102,12 +102,11 @@ PATTERN FORMAT
(relative to the toplevel of the work tree if not from a
`.gitignore` file).
- - Otherwise, Git treats the pattern as a shell glob suitable
- for consumption by fnmatch(3) with the FNM_PATHNAME flag:
- wildcards in the pattern will not match a / in the pathname.
- For example, "Documentation/{asterisk}.html" matches
- "Documentation/git.html" but not "Documentation/ppc/ppc.html"
- or "tools/perf/Documentation/perf.html".
+ - Otherwise, Git treats the pattern as a shell glob: "`*`" matches
+ anything except "`/`", "`?`" matches any one character except "`/`"
+ and "`[]`" matches one character in a selected range. See
+ fnmatch(3) and the FNM_PATHNAME flag for a more detailed
+ description.
- A leading slash matches the beginning of the pathname.
For example, "/{asterisk}.c" matches "cat-file.c" but not
diff --git a/Documentation/gitremote-helpers.txt b/Documentation/gitremote-helpers.txt
index 4a584f3..4b8c93e 100644
--- a/Documentation/gitremote-helpers.txt
+++ b/Documentation/gitremote-helpers.txt
@@ -466,6 +466,13 @@ set by Git if the remote helper has the 'option' capability.
Transmit <string> as a push option. As the push option
must not contain LF or NUL characters, the string is not encoded.
+'option from-promisor' {'true'|'false'}::
+ Indicate that these objects are being fetched from a promisor.
+
+'option no-dependents' {'true'|'false'}::
+ Indicate that only the objects wanted need to be fetched, not
+ their dependents.
+
SEE ALSO
--------
linkgit:git-remote[1]
diff --git a/Documentation/gitsubmodules.txt b/Documentation/gitsubmodules.txt
index 46cf120..4d6c177 100644
--- a/Documentation/gitsubmodules.txt
+++ b/Documentation/gitsubmodules.txt
@@ -36,8 +36,8 @@ The `gitlink` entry contains the object name of the commit that the
superproject expects the submodule’s working directory to be at.
The section `submodule.foo.*` in the `.gitmodules` file gives additional
-hints to Gits porcelain layer such as where to obtain the submodule via
-the `submodule.foo.url` setting.
+hints to Git's porcelain layer. For example, the `submodule.foo.url`
+setting specifies where to obtain the submodule.
Submodules can be used for at least two different use cases:
@@ -51,18 +51,21 @@ Submodules can be used for at least two different use cases:
2. Splitting a (logically single) project into multiple
repositories and tying them back together. This can be used to
- overcome current limitations of Gits implementation to have
+ overcome current limitations of Git's implementation to have
finer grained access:
- * Size of the git repository:
+ * Size of the Git repository:
In its current form Git scales up poorly for large repositories containing
content that is not compressed by delta computation between trees.
- However you can also use submodules to e.g. hold large binary assets
- and these repositories are then shallowly cloned such that you do not
+ For example, you can use submodules to hold large binary assets
+ and these repositories can be shallowly cloned such that you do not
have a large history locally.
* Transfer size:
In its current form Git requires the whole working tree present. It
does not allow partial trees to be transferred in fetch or clone.
+ If the project you work on consists of multiple repositories tied
+ together as submodules in a superproject, you can avoid fetching the
+ working trees of the repositories you are not interested in.
* Access control:
By restricting user access to submodules, this can be used to implement
read/write policies for different users.
@@ -73,9 +76,10 @@ The configuration of submodules
Submodule operations can be configured using the following mechanisms
(from highest to lowest precedence):
- * The command line for those commands that support taking submodule specs.
- Most commands have a boolean flag '--recurse-submodules' whether to
- recurse into submodules. Examples are `ls-files` or `checkout`.
+ * The command line for those commands that support taking submodules
+ as part of their pathspecs. Most commands have a boolean flag
+   `--recurse-submodules` which specifies whether to recurse into submodules.
+ Examples are `grep` and `checkout`.
Some commands take enums, such as `fetch` and `push`, where you can
specify how submodules are affected.
@@ -87,8 +91,8 @@ Submodule operations can be configured using the following mechanisms
For example an effect from the submodule's `.gitignore` file
would be observed when you run `git status --ignore-submodules=none` in
the superproject. This collects information from the submodule's working
-directory by running `status` in the submodule, which does pay attention
-to its `.gitignore` file.
+directory by running `status` in the submodule while paying attention
+to the `.gitignore` file of the submodule.
+
The submodule's `$GIT_DIR/config` file would come into play when running
`git push --recurse-submodules=check` in the superproject, as this would
@@ -97,20 +101,20 @@ remotes are configured in the submodule as usual in the `$GIT_DIR/config`
file.
* The configuration file `$GIT_DIR/config` in the superproject.
- Typical configuration at this place is controlling if a submodule
- is recursed into at all via the `active` flag for example.
+ Git only recurses into active submodules (see "ACTIVE SUBMODULES"
+ section below).
+
If the submodule is not yet initialized, then the configuration
-inside the submodule does not exist yet, so configuration where to
+inside the submodule does not exist yet, so where to
obtain the submodule from is configured here for example.
- * the `.gitmodules` file inside the superproject. Additionally to the
- required mapping between submodule's name and path, a project usually
+ * The `.gitmodules` file inside the superproject. A project usually
uses this file to suggest defaults for the upstream collection
- of repositories.
+ of repositories for the mapping that is required between a
+ submodule's name and its path.
+
-This file mainly serves as the mapping between name and path in
-the superproject, such that the submodule's git directory can be
+This file mainly serves as the mapping between the name and path of submodules
+in the superproject, such that the submodule's Git directory can be
located.
+
If the submodule has never been initialized, this is the only place
@@ -137,8 +141,8 @@ directory is automatically moved to `$GIT_DIR/modules/<name>/`
of the superproject.
* Deinitialized submodule: A `gitlink`, and a `.gitmodules` entry,
-but no submodule working directory. The submodule’s git directory
-may be there as after deinitializing the git directory is kept around.
+but no submodule working directory. The submodule’s Git directory
+may be there as after deinitializing the Git directory is kept around.
The directory which is supposed to be the working directory is empty instead.
+
A submodule can be deinitialized by running `git submodule deinit`.
@@ -160,6 +164,60 @@ from another repository.
To completely remove a submodule, manually delete
`$GIT_DIR/modules/<name>/`.
+ACTIVE SUBMODULES
+-----------------
+
+A submodule is considered active,
+
+ (a) if `submodule.<name>.active` is set to `true`
+ or
+ (b) if the submodule's path matches the pathspec in `submodule.active`
+ or
+ (c) if `submodule.<name>.url` is set.
+
+and these are evaluated in this order.
+
+For example:
+
+ [submodule "foo"]
+ active = false
+ url = https://example.org/foo
+ [submodule "bar"]
+ active = true
+ url = https://example.org/bar
+ [submodule "baz"]
+ url = https://example.org/baz
+
+In the above config, only the submodules 'bar' and 'baz' are active,
+'bar' due to (a) and 'baz' due to (c). 'foo' is inactive because
+(a) takes precedence over (c).
+
+Note that (c) is a historical artefact and will be ignored if
+(a) and (b) specify that the submodule is not active. In other words,
+if we have a `submodule.<name>.active` set to `false` or if the
+submodule's path is excluded in the pathspec in `submodule.active`, it
+does not matter whether the url is present or not. This is illustrated in
+the example that follows.
+
+ [submodule "foo"]
+ active = true
+ url = https://example.org/foo
+ [submodule "bar"]
+ url = https://example.org/bar
+ [submodule "baz"]
+ url = https://example.org/baz
+ [submodule "bob"]
+ ignore = true
+ [submodule]
+ active = b*
+ active = :(exclude) baz
+
+Here all submodules except 'baz' (foo, bar and bob) are active:
+'foo' due to its own active flag, and all the others due to the
+submodule active pathspec, which specifies that any submodule
+starting with 'b' except 'baz' is also active, regardless of the
+presence of the .url field.
+
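The precedence among (a), (b) and (c) can be summarized in a few lines of code. The following is a hypothetical sketch (not the logic from builtin/submodule--helper.c), evaluated against the first configuration example above:

------------------------------------------------
/*
 * Hypothetical sketch, not builtin/submodule--helper.c: the precedence
 * of checks (a), (b) and (c), evaluated against the first example above.
 */
#include <stdio.h>

struct submodule_cfg {
	const char *name;
	int active_set, active;		/* (a) submodule.<name>.active */
	int pathspec_set, pathspec_match;	/* (b) submodule.active */
	const char *url;		/* (c) submodule.<name>.url or NULL */
};

static int is_active(const struct submodule_cfg *s)
{
	if (s->active_set)
		return s->active;		/* (a) wins over (b) and (c) */
	if (s->pathspec_set)
		return s->pathspec_match;	/* (b) wins over (c) */
	return s->url != NULL;			/* (c) historical fallback */
}

int main(void)
{
	struct submodule_cfg subs[] = {
		{ "foo", 1, 0, 0, 0, "https://example.org/foo" },
		{ "bar", 1, 1, 0, 0, "https://example.org/bar" },
		{ "baz", 0, 0, 0, 0, "https://example.org/baz" },
	};
	int i;

	for (i = 0; i < 3; i++)
		printf("%s is %s\n", subs[i].name,
		       is_active(&subs[i]) ? "active" : "inactive");
	return 0;
}
------------------------------------------------

Running it prints "foo is inactive", "bar is active" and "baz is active", matching the explanation above.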
Workflow for a third party library
----------------------------------
diff --git a/Documentation/rev-list-options.txt b/Documentation/rev-list-options.txt
index 22f5c9b..7b27363 100644
--- a/Documentation/rev-list-options.txt
+++ b/Documentation/rev-list-options.txt
@@ -750,10 +750,21 @@ The form '--missing=allow-any' will allow object traversal to continue
if a missing object is encountered. Missing objects will silently be
omitted from the results.
+
+The form '--missing=allow-promisor' is like 'allow-any', but will only
+allow object traversal to continue for EXPECTED promisor missing objects.
+Unexpected missing objects will raise an error.
++
The form '--missing=print' is like 'allow-any', but will also print a
list of the missing objects. Object IDs are prefixed with a ``?'' character.
endif::git-rev-list[]
+--exclude-promisor-objects::
+ (For internal use only.) Prefilter object traversal at
+ promisor boundary. This is used with partial clone. This is
+ stronger than `--missing=allow-promisor` because it limits the
+ traversal, rather than just silencing errors about missing
+ objects.
+
--no-walk[=(sorted|unsorted)]::
Only show the given commits, but do not traverse their ancestors.
This has no effect if a range is specified. If the argument
diff --git a/Documentation/technical/api-object-access.txt b/Documentation/technical/api-object-access.txt
index 03bb0e9..a1162e5 100644
--- a/Documentation/technical/api-object-access.txt
+++ b/Documentation/technical/api-object-access.txt
@@ -7,7 +7,7 @@ Talk about <sha1_file.c> and <object.h> family, things like
* read_object_with_reference()
* has_sha1_file()
* write_sha1_file()
-* pretend_sha1_file()
+* pretend_object_file()
* lookup_{object,commit,tag,blob,tree}
* parse_{object,commit,tag,blob,tree}
* Use of object flags
diff --git a/Documentation/technical/commit-graph-format.txt b/Documentation/technical/commit-graph-format.txt
new file mode 100644
index 0000000..ad6af81
--- /dev/null
+++ b/Documentation/technical/commit-graph-format.txt
@@ -0,0 +1,97 @@
+Git commit graph format
+=======================
+
+The Git commit graph stores a list of commit OIDs and some associated
+metadata, including:
+
+- The generation number of the commit. Commits with no parents have
+ generation number 1; commits with parents have generation number
+  one more than the maximum generation number of their parents. We
+  reserve zero as special; it can be used to mark a generation
+  number as invalid or "not computed".
+
+- The root tree OID.
+
+- The commit date.
+
+- The parents of the commit, stored using positional references within
+ the graph file.
+
+These positional references are stored as unsigned 32-bit integers
+corresponding to the array position within the list of commit OIDs. We
+use the most-significant bit for special purposes, so we can store at most
+(1 << 31) - 1 (around 2 billion) commits.
+
+== Commit graph files have the following format:
+
+In order to allow extensions that add extra data to the graph, we organize
+the body into "chunks" and provide a binary lookup table at the beginning
+of the body. The header includes certain values, such as number of chunks
+and hash type.
+
+All 4-byte numbers are in network order.
+
+HEADER:
+
+ 4-byte signature:
+ The signature is: {'C', 'G', 'P', 'H'}
+
+ 1-byte version number:
+ Currently, the only valid version is 1.
+
+ 1-byte Hash Version (1 = SHA-1)
+ We infer the hash length (H) from this value.
+
+ 1-byte number (C) of "chunks"
+
+ 1-byte (reserved for later use)
+ Current clients should ignore this value.
+
+CHUNK LOOKUP:
+
+ (C + 1) * 12 bytes listing the table of contents for the chunks:
+ First 4 bytes describe the chunk id. Value 0 is a terminating label.
+  The other 8 bytes provide the byte offset within the current file at
+  which the chunk starts. (Chunks are ordered contiguously in the file,
+  so you can infer the length using the next chunk position if
+  necessary.) Each chunk ID appears at most once.
+
+ The remaining data in the body is described one chunk at a time, and
+ these chunks may be given in any order. Chunks are required unless
+ otherwise specified.
+
+CHUNK DATA:
+
+ OID Fanout (ID: {'O', 'I', 'D', 'F'}) (256 * 4 bytes)
+ The ith entry, F[i], stores the number of OIDs with first
+ byte at most i. Thus F[255] stores the total
+ number of commits (N).
+
+ OID Lookup (ID: {'O', 'I', 'D', 'L'}) (N * H bytes)
+ The OIDs for all commits in the graph, sorted in ascending order.
+
+ Commit Data (ID: {'C', 'G', 'E', 'T' }) (N * (H + 16) bytes)
+ * The first H bytes are for the OID of the root tree.
+ * The next 8 bytes are for the positions of the first two parents
+ of the ith commit. Stores value 0xffffffff if no parent in that
+ position. If there are more than two parents, the second value
+ has its most-significant bit on and the other bits store an array
+ position into the Large Edge List chunk.
+ * The next 8 bytes store the generation number of the commit and
+ the commit time in seconds since EPOCH. The generation number
+ uses the higher 30 bits of the first 4 bytes, while the commit
+ time uses the 32 bits of the second 4 bytes, along with the lowest
+ 2 bits of the lowest byte, storing the 33rd and 34th bit of the
+ commit time.
+
+ Large Edge List (ID: {'E', 'D', 'G', 'E'}) [Optional]
+      This list of 4-byte values stores the second through nth parents for
+ all octopus merges. The second parent value in the commit data stores
+ an array position within this list along with the most-significant bit
+ on. Starting at that array position, iterate through this list of commit
+ positions for the parents until reaching a value with the most-significant
+ bit on. The other bits correspond to the position of the last parent.
+
+TRAILER:
+
+ H-byte HASH-checksum of all of the above.
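The layout above can be checked with a short standalone reader. The following is a minimal sketch (not Git's own parser, and it assumes a well-formed file) that prints the header fields and the chunk table of contents:

------------------------------------------------
/*
 * Minimal sketch, not Git's parser: print the header and chunk table of
 * contents of a commit-graph file laid out as described above.
 */
#include <stdio.h>
#include <stdint.h>
#include <string.h>

static uint32_t be32(const unsigned char *p)
{
	return ((uint32_t)p[0] << 24) | ((uint32_t)p[1] << 16) |
	       ((uint32_t)p[2] << 8) | (uint32_t)p[3];
}

static uint64_t be64(const unsigned char *p)
{
	return ((uint64_t)be32(p) << 32) | be32(p + 4);
}

int main(int argc, char **argv)
{
	unsigned char hdr[8], toc[12];
	FILE *f;
	int i, nchunks;

	if (argc != 2 || !(f = fopen(argv[1], "rb")))
		return 1;
	if (fread(hdr, 1, sizeof(hdr), f) != sizeof(hdr) ||
	    memcmp(hdr, "CGPH", 4)) {
		fprintf(stderr, "not a commit-graph file\n");
		return 1;
	}
	printf("version %d, hash version %d\n", hdr[4], hdr[5]);
	nchunks = hdr[6];	/* hdr[7] is reserved */

	/* (C + 1) table-of-contents entries: 4-byte id, 8-byte offset */
	for (i = 0; i <= nchunks; i++) {
		if (fread(toc, 1, sizeof(toc), f) != sizeof(toc))
			return 1;
		if (!be32(toc)) {	/* terminating label */
			printf("chunks end at offset %llu\n",
			       (unsigned long long)be64(toc + 4));
			break;
		}
		printf("chunk %.4s at offset %llu\n", (const char *)toc,
		       (unsigned long long)be64(toc + 4));
	}
	fclose(f);
	return 0;
}
------------------------------------------------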
diff --git a/Documentation/technical/commit-graph.txt b/Documentation/technical/commit-graph.txt
new file mode 100644
index 0000000..0550c6d
--- /dev/null
+++ b/Documentation/technical/commit-graph.txt
@@ -0,0 +1,163 @@
+Git Commit Graph Design Notes
+=============================
+
+Git walks the commit graph for many reasons, including:
+
+1. Listing and filtering commit history.
+2. Computing merge bases.
+
+These operations can become slow as the commit count grows. The merge
+base calculation shows up in many user-facing commands, such as 'merge-base'
+or 'status' and can take minutes to compute depending on history shape.
+
+There are two main costs here:
+
+1. Decompressing and parsing commits.
+2. Walking the entire graph to satisfy topological order constraints.
+
+The commit graph file is a supplemental data structure that accelerates
+commit graph walks. If a user downgrades or disables the 'core.commitGraph'
+config setting, then the existing ODB is sufficient. The file is stored
+as "commit-graph" either in the .git/objects/info directory or in the info
+directory of an alternate.
+
+The commit graph file stores the commit graph structure along with some
+extra metadata to speed up graph walks. By listing commit OIDs in
+lexicographic order, we can identify an integer position for each commit and
+refer to the parents of a commit using those integer positions. We use
+binary search to find initial commits and then use the integer positions
+for fast lookups during the walk.
+
+A consumer may load the following info for a commit from the graph:
+
+1. The commit OID.
+2. The list of parents, along with their integer position.
+3. The commit date.
+4. The root tree OID.
+5. The generation number (see definition below).
+
+Values 1-4 satisfy the requirements of parse_commit_gently().
+
+Define the "generation number" of a commit recursively as follows:
+
+ * A commit with no parents (a root commit) has generation number one.
+
+ * A commit with at least one parent has generation number one more than
+ the largest generation number among its parents.
+
+Equivalently, the generation number of a commit A is one more than the
+length of a longest path from A to a root commit. The recursive definition
+is easier to use for computation and for observing the following property:
+
+ If A and B are commits with generation numbers N and M, respectively,
+ and N <= M, then A cannot reach B. That is, we know without searching
+ that B is not an ancestor of A because it is further from a root commit
+ than A.
+
+ Conversely, when checking if A is an ancestor of B, then we only need
+ to walk commits until all commits on the walk boundary have generation
+ number at most N. If we walk commits using a priority queue seeded by
+ generation numbers, then we always expand the boundary commit with highest
+ generation number and can easily detect the stopping condition.
+
+This property can be used to significantly reduce the time it takes to
+walk commits and determine topological relationships. Without generation
+numbers, the general heuristic is the following:
+
+ If A and B are commits with commit time X and Y, respectively, and
+ X < Y, then A _probably_ cannot reach B.
+
+This heuristic is currently used whenever the computation is allowed to
+violate topological relationships due to clock skew (such as "git log"
+with default order), but is not used when the topological order is
+required (such as merge base calculations, "git log --graph").
+
+In practice, we expect some commits to be created recently and not stored
+in the commit graph. We can treat these commits as having "infinite"
+generation number and walk until reaching commits with known generation
+number.
+
+Design Details
+--------------
+
+- The commit graph file is stored in a file named 'commit-graph' in the
+ .git/objects/info directory. This could be stored in the info directory
+ of an alternate.
+
+- The core.commitGraph config setting must be on to consume graph files.
+
+- The file format includes parameters for the object ID hash function,
+ so a future change of hash algorithm does not require a change in format.
+
+Future Work
+-----------
+
+- The commit graph feature currently does not honor commit grafts. This can
+ be remedied by duplicating or refactoring the current graft logic.
+
+- The 'commit-graph' subcommand does not have a "verify" mode that is
+ necessary for integration with fsck.
+
+- The file format includes room for precomputed generation numbers. These
+ are not currently computed, so all generation numbers will be marked as
+ 0 (or "uncomputed"). A later patch will include this calculation.
+
+- After computing and storing generation numbers, we must make graph
+ walks aware of generation numbers to gain the performance benefits they
+ enable. This will mostly be accomplished by swapping a commit-date-ordered
+ priority queue with one ordered by generation number. The following
+ operations are important candidates:
+
+ - paint_down_to_common()
+ - 'log --topo-order'
+
+- Currently, parse_commit_gently() requires filling in the root tree
+ object for a commit. This passes through lookup_tree() and consequently
+ lookup_object(). Also, it calls lookup_commit() when loading the parents.
+ These method calls check the ODB for object existence, even if the
+ consumer does not need the content. For example, we do not need the
+ tree contents when computing merge bases. Now that commit parsing is
+ removed from the computation time, these lookup operations are the
+ slowest operations keeping graph walks from being fast. Consider
+ loading these objects without verifying their existence in the ODB and
+ only loading them fully when consumers need them. Consider a method
+ such as "ensure_tree_loaded(commit)" that fully loads a tree before
+ using commit->tree.
+
+- The current design uses the 'commit-graph' subcommand to generate the graph.
+ When this feature stabilizes enough to recommend to most users, we should
+ add automatic graph writes to common operations that create many commits.
+ For example, one could compute a graph on 'clone', 'fetch', or 'repack'
+ commands.
+
+- A server could provide a commit graph file as part of the network protocol
+ to avoid extra calculations by clients. This feature is only of benefit if
+ the user is willing to trust the file, because verifying the file is correct
+ is as hard as computing it from scratch.
+
+Related Links
+-------------
+[0] https://bugs.chromium.org/p/git/issues/detail?id=8
+ Chromium work item for: Serialized Commit Graph
+
+[1] https://public-inbox.org/git/20110713070517.GC18566@sigill.intra.peff.net/
+ An abandoned patch that introduced generation numbers.
+
+[2] https://public-inbox.org/git/20170908033403.q7e6dj7benasrjes@sigill.intra.peff.net/
+ Discussion about generation numbers on commits and how they interact
+ with fsck.
+
+[3] https://public-inbox.org/git/20170908034739.4op3w4f2ma5s65ku@sigill.intra.peff.net/
+ More discussion about generation numbers and not storing them inside
+ commit objects. A valuable quote:
+
+ "I think we should be moving more in the direction of keeping
+ repo-local caches for optimizations. Reachability bitmaps have been
+ a big performance win. I think we should be doing the same with our
+ properties of commits. Not just generation numbers, but making it
+ cheap to access the graph structure without zlib-inflating whole
+ commit objects (i.e., packv4 or something like the "metapacks" I
+ proposed a few years ago)."
+
+[4] https://public-inbox.org/git/20180108154822.54829-1-git@jeffhostetler.com/T/#u
+ A patch to remove the ahead-behind calculation from 'status'.
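The generation-number property described in these notes translates into a simple pruning rule when testing ancestry. The toy program below (a sketch on a hand-built DAG, not Git code) shows the cutoff: a commit whose generation number is not larger than that of the proposed ancestor can be discarded without walking its parents.

------------------------------------------------
/*
 * Toy sketch, not Git code: ancestry test on a hand-built DAG using the
 * generation-number cutoff described above.
 */
#include <stdio.h>

#define MAX_PARENTS 2

struct commit {
	const char *name;
	int generation;
	struct commit *parents[MAX_PARENTS];
};

static int reachable(const struct commit *from, const struct commit *to)
{
	int i;

	if (from == to)
		return 1;
	/*
	 * Cutoff: a commit whose generation number is at most gen(to)
	 * cannot reach "to", so its parents need not be walked.
	 * (A real walk would also keep a visited set; omitted here.)
	 */
	if (from->generation <= to->generation)
		return 0;
	for (i = 0; i < MAX_PARENTS; i++)
		if (from->parents[i] && reachable(from->parents[i], to))
			return 1;
	return 0;
}

int main(void)
{
	/* root <- a <- b, plus an unrelated root2 <- c */
	struct commit root = { "root", 1, { NULL, NULL } };
	struct commit a = { "a", 2, { &root, NULL } };
	struct commit b = { "b", 3, { &a, NULL } };
	struct commit root2 = { "root2", 1, { NULL, NULL } };
	struct commit c = { "c", 2, { &root2, NULL } };

	printf("root is ancestor of b: %s\n", reachable(&b, &root) ? "yes" : "no");
	printf("c is ancestor of b:    %s\n", reachable(&b, &c) ? "yes" : "no");
	return 0;
}
------------------------------------------------

In a real walk the cutoff is combined with a priority queue ordered by generation number, as the notes describe; the recursion here only illustrates the pruning step.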
diff --git a/Documentation/technical/long-running-process-protocol.txt b/Documentation/technical/long-running-process-protocol.txt
new file mode 100644
index 0000000..aa0aa9a
--- /dev/null
+++ b/Documentation/technical/long-running-process-protocol.txt
@@ -0,0 +1,50 @@
+Long-running process protocol
+=============================
+
+This protocol is used when Git needs to communicate with an external
+process throughout the entire life of a single Git command. All
+communication is in pkt-line format (see technical/protocol-common.txt)
+over standard input and standard output.
+
+Handshake
+---------
+
+Git starts by sending a welcome message (for example,
+"git-filter-client"), a list of supported protocol version numbers, and
+a flush packet. Git expects to read the welcome message with "server"
+instead of "client" (for example, "git-filter-server"), exactly one
+protocol version number from the previously sent list, and a flush
+packet. All further communication will be based on the selected version.
+The remaining protocol description below documents "version=2". Please
+note that "version=42" in the example below does not exist and is only
+there to illustrate how the protocol would look with more than one
+version.
+
+After the version negotiation Git sends a list of all capabilities that
+it supports and a flush packet. Git expects to read a list of desired
+capabilities, which must be a subset of the supported capabilities list,
+and a flush packet as response:
+------------------------
+packet: git> git-filter-client
+packet: git> version=2
+packet: git> version=42
+packet: git> 0000
+packet: git< git-filter-server
+packet: git< version=2
+packet: git< 0000
+packet: git> capability=clean
+packet: git> capability=smudge
+packet: git> capability=not-yet-invented
+packet: git> 0000
+packet: git< capability=clean
+packet: git< capability=smudge
+packet: git< 0000
+------------------------
+
+Shutdown
+--------
+
+Git will close
+the command pipe on exit. The filter is expected to detect EOF
+and exit gracefully on its own. Git will wait until the filter
+process has stopped.
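The pkt-line framing used by this handshake is straightforward to produce. The sketch below (not Git code; it only emits the client side of the example session) shows how each text packet is prefixed with a four-hex-digit length that counts the length field itself and the trailing LF, with "0000" as the flush packet:

------------------------------------------------
/*
 * Sketch, not Git code: emit the client side of the example session in
 * pkt-line framing.
 */
#include <stdio.h>
#include <string.h>

static void packet_write(const char *line)
{
	/* length covers the 4 length bytes, the payload and the LF */
	printf("%04x%s\n", (unsigned)(4 + strlen(line) + 1), line);
}

static void packet_flush(void)
{
	fputs("0000", stdout);
}

int main(void)
{
	packet_write("git-filter-client");
	packet_write("version=2");
	packet_flush();
	/* ... read "git-filter-server", "version=2" and a flush ... */
	packet_write("capability=clean");
	packet_write("capability=smudge");
	packet_flush();
	return 0;
}
------------------------------------------------

Reading the server's replies uses the same framing in the opposite direction.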
diff --git a/Documentation/technical/pack-protocol.txt b/Documentation/technical/pack-protocol.txt
index cd31edc..7fee6b7 100644
--- a/Documentation/technical/pack-protocol.txt
+++ b/Documentation/technical/pack-protocol.txt
@@ -241,6 +241,7 @@ out of what the server said it could do with the first 'want' line.
upload-request = want-list
*shallow-line
*1depth-request
+ [filter-request]
flush-pkt
want-list = first-want
@@ -256,6 +257,8 @@ out of what the server said it could do with the first 'want' line.
additional-want = PKT-LINE("want" SP obj-id)
depth = 1*DIGIT
+
+ filter-request = PKT-LINE("filter" SP filter-spec)
----
Clients MUST send all the obj-ids it wants from the reference
@@ -278,6 +281,11 @@ complete those commits. Commits whose parents are not received as a
result are defined as shallow and marked as such in the server. This
information is sent back to the client in the next step.
+The client can optionally request that pack-objects omit various
+objects from the packfile using one of several filtering techniques.
+These are intended for use with partial clone and partial fetch
+operations. See `rev-list` for possible "filter-spec" values.
+
Once all the 'want's and 'shallow's (and optional 'deepen') are
transferred, clients MUST send a flush-pkt, to tell the server side
that it is done sending the list.
diff --git a/Documentation/technical/protocol-capabilities.txt b/Documentation/technical/protocol-capabilities.txt
index 26dcc6f..332d209 100644
--- a/Documentation/technical/protocol-capabilities.txt
+++ b/Documentation/technical/protocol-capabilities.txt
@@ -309,3 +309,11 @@ to accept a signed push certificate, and asks the <nonce> to be
included in the push certificate. A send-pack client MUST NOT
send a push-cert packet unless the receive-pack server advertises
this capability.
+
+filter
+------
+
+If the upload-pack server advertises the 'filter' capability,
+fetch-pack may send "filter" commands to request a partial clone
+or partial fetch and request that the server omit various objects
+from the packfile.
diff --git a/Documentation/technical/repository-version.txt b/Documentation/technical/repository-version.txt
index 00ad379..e03eacc 100644
--- a/Documentation/technical/repository-version.txt
+++ b/Documentation/technical/repository-version.txt
@@ -86,3 +86,15 @@ for testing format-1 compatibility.
When the config key `extensions.preciousObjects` is set to `true`,
objects in the repository MUST NOT be deleted (e.g., by `git-prune` or
`git repack -d`).
+
+`partialclone`
+~~~~~~~~~~~~~~
+
+When the config key `extensions.partialclone` is set, it indicates
+that the repo was created with a partial clone (or later performed
+a partial fetch) and that the remote may have omitted sending
+certain unwanted objects. Such a remote is called a "promisor remote"
+and it promises that all such omitted objects can be fetched from it
+in the future.
+
+The value of this key is the name of the promisor remote.
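For illustration only (the remote name "origin" is an assumed example, not part of this patch), such a repository's config would contain roughly the following; the extension is meant to be used together with `core.repositoryformatversion` set to 1:

------------------------------------------------
# illustration only; the remote name "origin" is an assumption
[core]
	repositoryformatversion = 1
[extensions]
	partialclone = origin
------------------------------------------------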
diff --git a/INSTALL b/INSTALL
index ffb071e..808e07b 100644
--- a/INSTALL
+++ b/INSTALL
@@ -84,9 +84,24 @@ Issues of note:
GIT_EXEC_PATH=`pwd`
PATH=`pwd`:$PATH
- GITPERLLIB=`pwd`/perl/blib/lib
+ GITPERLLIB=`pwd`/perl/build/lib
export GIT_EXEC_PATH PATH GITPERLLIB
+ - By default (unless NO_PERL is provided) Git will ship various perl
+ scripts & libraries it needs. However, for simplicity it doesn't
+ use the ExtUtils::MakeMaker toolchain to decide where to place the
+ perl libraries. Depending on the system this can result in the perl
+ libraries not being where you'd like them if they're expected to be
+ used by things other than Git itself.
+
+ Manually supplying a perllibdir prefix should fix this, if this is
+ a problem you care about, e.g.:
+
+ prefix=/usr perllibdir=/usr/$(/usr/bin/perl -MConfig -wle 'print substr $Config{installsitelib}, 1 + length $Config{siteprefixexp}')
+
+   This will result in e.g. perllibdir=/usr/share/perl/5.26.1 on Debian,
+ perllibdir=/usr/share/perl5 (which we'd use by default) on CentOS.
+
- Git is reasonably self-sufficient, but does depend on a few external
programs and libraries. Git can be used without most of them by adding
the appropriate "NO_<LIBRARY>=YesPlease" to the make command line or
diff --git a/Makefile b/Makefile
index 1a9b23b..bf91b2d 100644
--- a/Makefile
+++ b/Makefile
@@ -294,9 +294,6 @@ all::
#
# Define PERL_PATH to the path of your Perl binary (usually /usr/bin/perl).
#
-# Define NO_PERL_MAKEMAKER if you cannot use Makefiles generated by perl's
-# MakeMaker (e.g. using ActiveState under Cygwin).
-#
# Define NO_PERL if you do not want Perl scripts or libraries at all.
#
# Define PYTHON_PATH to the path of your Python binary (often /usr/bin/python
@@ -479,6 +476,7 @@ gitexecdir = libexec/git-core
mergetoolsdir = $(gitexecdir)/mergetools
sharedir = $(prefix)/share
gitwebdir = $(sharedir)/gitweb
+perllibdir = $(sharedir)/perl5
localedir = $(sharedir)/locale
template_dir = share/git-core/templates
htmldir = $(prefix)/share/doc/git-doc
@@ -492,7 +490,7 @@ mandir_relative = $(patsubst $(prefix)/%,%,$(mandir))
infodir_relative = $(patsubst $(prefix)/%,%,$(infodir))
htmldir_relative = $(patsubst $(prefix)/%,%,$(htmldir))
-export prefix bindir sharedir sysconfdir gitwebdir localedir
+export prefix bindir sharedir sysconfdir gitwebdir perllibdir localedir
CC = cc
AR = ar
@@ -773,6 +771,7 @@ LIB_OBJS += color.o
LIB_OBJS += column.o
LIB_OBJS += combine-diff.o
LIB_OBJS += commit.o
+LIB_OBJS += commit-graph.o
LIB_OBJS += compat/obstack.o
LIB_OBJS += compat/terminal.o
LIB_OBJS += config.o
@@ -804,6 +803,7 @@ LIB_OBJS += ewah/ewah_bitmap.o
LIB_OBJS += ewah/ewah_io.o
LIB_OBJS += ewah/ewah_rlw.o
LIB_OBJS += exec_cmd.o
+LIB_OBJS += fetch-object.o
LIB_OBJS += fetch-pack.o
LIB_OBJS += fsck.o
LIB_OBJS += fsmonitor.o
@@ -832,7 +832,6 @@ LIB_OBJS += merge.o
LIB_OBJS += merge-blobs.o
LIB_OBJS += merge-recursive.o
LIB_OBJS += mergesort.o
-LIB_OBJS += mru.o
LIB_OBJS += name-hash.o
LIB_OBJS += notes.o
LIB_OBJS += notes-cache.o
@@ -948,6 +947,7 @@ BUILTIN_OBJS += builtin/clone.o
BUILTIN_OBJS += builtin/column.o
BUILTIN_OBJS += builtin/commit-tree.o
BUILTIN_OBJS += builtin/commit.o
+BUILTIN_OBJS += builtin/commit-graph.o
BUILTIN_OBJS += builtin/config.o
BUILTIN_OBJS += builtin/count-objects.o
BUILTIN_OBJS += builtin/credential.o
@@ -1515,7 +1515,9 @@ else
LIB_OBJS += sha1dc_git.o
ifdef DC_SHA1_EXTERNAL
ifdef DC_SHA1_SUBMODULE
+ ifneq ($(DC_SHA1_SUBMODULE),auto)
$(error Only set DC_SHA1_EXTERNAL or DC_SHA1_SUBMODULE, not both)
+ endif
endif
BASIC_CFLAGS += -DDC_SHA1_EXTERNAL
EXTLIBS += -lsha1detectcoll
@@ -1543,9 +1545,6 @@ ifdef SHA1_MAX_BLOCK_SIZE
LIB_OBJS += compat/sha1-chunked.o
BASIC_CFLAGS += -DSHA1_MAX_BLOCK_SIZE="$(SHA1_MAX_BLOCK_SIZE)"
endif
-ifdef NO_PERL_MAKEMAKER
- export NO_PERL_MAKEMAKER
-endif
ifdef NO_HSTRERROR
COMPAT_CFLAGS += -DNO_HSTRERROR
COMPAT_OBJS += compat/hstrerror.o
@@ -1732,8 +1731,10 @@ ETC_GITATTRIBUTES_SQ = $(subst ','\'',$(ETC_GITATTRIBUTES))
DESTDIR_SQ = $(subst ','\'',$(DESTDIR))
bindir_SQ = $(subst ','\'',$(bindir))
bindir_relative_SQ = $(subst ','\'',$(bindir_relative))
+mandir_SQ = $(subst ','\'',$(mandir))
mandir_relative_SQ = $(subst ','\'',$(mandir_relative))
infodir_relative_SQ = $(subst ','\'',$(infodir_relative))
+perllibdir_SQ = $(subst ','\'',$(perllibdir))
localedir_SQ = $(subst ','\'',$(localedir))
gitexecdir_SQ = $(subst ','\'',$(gitexecdir))
template_dir_SQ = $(subst ','\'',$(template_dir))
@@ -1844,9 +1845,6 @@ ifndef NO_TCLTK
$(QUIET_SUBDIR0)git-gui $(QUIET_SUBDIR1) gitexecdir='$(gitexec_instdir_SQ)' all
$(QUIET_SUBDIR0)gitk-git $(QUIET_SUBDIR1) all
endif
-ifndef NO_PERL
- $(QUIET_SUBDIR0)perl $(QUIET_SUBDIR1) PERL_PATH='$(PERL_PATH_SQ)' prefix='$(prefix_SQ)' localedir='$(localedir_SQ)' all
-endif
$(QUIET_SUBDIR0)templates $(QUIET_SUBDIR1) SHELL_PATH='$(SHELL_PATH_SQ)' PERL_PATH='$(PERL_PATH_SQ)'
please_set_SHELL_PATH_to_a_more_modern_shell:
@@ -1928,7 +1926,8 @@ common-cmds.h: $(wildcard Documentation/git-*.txt)
SCRIPT_DEFINES = $(SHELL_PATH_SQ):$(DIFF_SQ):$(GIT_VERSION):\
$(localedir_SQ):$(NO_CURL):$(USE_GETTEXT_SCHEME):$(SANE_TOOL_PATH_SQ):\
- $(gitwebdir_SQ):$(PERL_PATH_SQ):$(SANE_TEXT_GREP):$(PAGER_ENV)
+ $(gitwebdir_SQ):$(PERL_PATH_SQ):$(SANE_TEXT_GREP):$(PAGER_ENV):\
+ $(perllibdir_SQ)
define cmd_munge_script
$(RM) $@ $@+ && \
sed -e '1s|#!.*/sh|#!$(SHELL_PATH_SQ)|' \
@@ -1972,23 +1971,12 @@ git.res: git.rc GIT-VERSION-FILE
$(SCRIPT_PERL_GEN): GIT-BUILD-OPTIONS
ifndef NO_PERL
-$(SCRIPT_PERL_GEN): perl/perl.mak
+$(SCRIPT_PERL_GEN):
-perl/perl.mak: perl/PM.stamp
-
-perl/PM.stamp: FORCE
- @$(FIND) perl -type f -name '*.pm' | sort >$@+ && \
- $(PERL_PATH) -V >>$@+ && \
- { cmp $@+ $@ >/dev/null 2>/dev/null || mv $@+ $@; } && \
- $(RM) $@+
-
-perl/perl.mak: GIT-CFLAGS GIT-PREFIX perl/Makefile perl/Makefile.PL
- $(QUIET_SUBDIR0)perl $(QUIET_SUBDIR1) PERL_PATH='$(PERL_PATH_SQ)' prefix='$(prefix_SQ)' $(@F)
-
-PERL_DEFINES = $(PERL_PATH_SQ):$(PERLLIB_EXTRA_SQ)
-$(SCRIPT_PERL_GEN): % : %.perl perl/perl.mak GIT-PERL-DEFINES GIT-VERSION-FILE
+PERL_DEFINES = $(PERL_PATH_SQ):$(PERLLIB_EXTRA_SQ):$(perllibdir_SQ)
+$(SCRIPT_PERL_GEN): % : %.perl GIT-PERL-DEFINES GIT-VERSION-FILE
$(QUIET_GEN)$(RM) $@ $@+ && \
- INSTLIBDIR=`MAKEFLAGS= $(MAKE) -C perl -s --no-print-directory instlibdir` && \
+ INSTLIBDIR='$(perllibdir_SQ)' && \
INSTLIBDIR_EXTRA='$(PERLLIB_EXTRA_SQ)' && \
INSTLIBDIR="$$INSTLIBDIR$${INSTLIBDIR_EXTRA:+:$$INSTLIBDIR_EXTRA}" && \
sed -e '1{' \
@@ -2312,6 +2300,21 @@ endif
po/build/locale/%/LC_MESSAGES/git.mo: po/%.po
$(QUIET_MSGFMT)mkdir -p $(dir $@) && $(MSGFMT) -o $@ $<
+LIB_PERL := $(wildcard perl/Git.pm perl/Git/*.pm perl/Git/*/*.pm perl/Git/*/*/*.pm)
+LIB_PERL_GEN := $(patsubst perl/%.pm,perl/build/lib/%.pm,$(LIB_PERL))
+
+ifndef NO_PERL
+all:: $(LIB_PERL_GEN)
+endif
+
+perl/build/lib/%.pm: perl/%.pm
+ $(QUIET_GEN)mkdir -p $(dir $@) && \
+ sed -e 's|@@LOCALEDIR@@|$(localedir_SQ)|g' < $< > $@
+
+perl/build/man/man3/Git.3pm: perl/Git.pm
+ $(QUIET_GEN)mkdir -p $(dir $@) && \
+ pod2man $< $@
+
FIND_SOURCE_FILES = ( \
git ls-files \
'*.[hcS]' \
@@ -2572,7 +2575,9 @@ ifndef NO_GETTEXT
(cd '$(DESTDIR_SQ)$(localedir_SQ)' && umask 022 && $(TAR) xof -)
endif
ifndef NO_PERL
- $(MAKE) -C perl prefix='$(prefix_SQ)' DESTDIR='$(DESTDIR_SQ)' install
+ $(INSTALL) -d -m 755 '$(DESTDIR_SQ)$(perllibdir_SQ)'
+ (cd perl/build/lib && $(TAR) cf - .) | \
+ (cd '$(DESTDIR_SQ)$(perllibdir_SQ)' && umask 022 && $(TAR) xof -)
$(MAKE) -C gitweb install
endif
ifndef NO_TCLTK
@@ -2622,12 +2627,17 @@ endif
install-gitweb:
$(MAKE) -C gitweb install
-install-doc:
+install-doc: install-man-perl
$(MAKE) -C Documentation install
-install-man:
+install-man: install-man-perl
$(MAKE) -C Documentation install-man
+install-man-perl: perl/build/man/man3/Git.3pm
+ $(INSTALL) -d -m 755 '$(DESTDIR_SQ)$(mandir_SQ)/man3'
+ (cd perl/build/man/man3 && $(TAR) cf - .) | \
+ (cd '$(DESTDIR_SQ)$(mandir_SQ)/man3' && umask 022 && $(TAR) xof -)
+
install-html:
$(MAKE) -C Documentation install-html
@@ -2662,6 +2672,21 @@ dist: git-archive$(X) configure
$(GIT_TARNAME)/configure \
$(GIT_TARNAME)/version \
$(GIT_TARNAME)/git-gui/version
+ifdef DC_SHA1_SUBMODULE
+ @mkdir -p $(GIT_TARNAME)/sha1collisiondetection/lib
+ @cp sha1collisiondetection/LICENSE.txt \
+ $(GIT_TARNAME)/sha1collisiondetection/
+ @cp sha1collisiondetection/lib/sha1.[ch] \
+ $(GIT_TARNAME)/sha1collisiondetection/lib/
+ @cp sha1collisiondetection/lib/ubc_check.[ch] \
+ $(GIT_TARNAME)/sha1collisiondetection/lib/
+ $(TAR) rf $(GIT_TARNAME).tar \
+ $(GIT_TARNAME)/sha1collisiondetection/LICENSE.txt \
+ $(GIT_TARNAME)/sha1collisiondetection/lib/sha1.[ch] \
+ $(GIT_TARNAME)/sha1collisiondetection/lib/ubc_check.[ch]
+endif
@$(RM) -r $(GIT_TARNAME)
gzip -f -9 $(GIT_TARNAME).tar
@@ -2719,7 +2744,7 @@ clean: profile-clean coverage-clean
$(MAKE) -C Documentation/ clean
ifndef NO_PERL
$(MAKE) -C gitweb clean
- $(MAKE) -C perl clean
+ $(RM) -r perl/build/
endif
$(MAKE) -C templates/ clean
$(MAKE) -C t/ clean
diff --git a/alloc.c b/alloc.c
index 12afadf..cf4f8b6 100644
--- a/alloc.c
+++ b/alloc.c
@@ -93,6 +93,7 @@ void *alloc_commit_node(void)
struct commit *c = alloc_node(&commit_state, sizeof(struct commit));
c->object.type = OBJ_COMMIT;
c->index = alloc_commit_index();
+ c->graph_pos = COMMIT_NOT_FROM_GRAPH;
return c;
}
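
The new graph_pos field gives every freshly allocated commit a sentinel value until it is actually backed by a commit-graph entry. A minimal consumer sketch of the intended guard (the fast-path helper name here is hypothetical, not part of this patch):

    if (commit->graph_pos == COMMIT_NOT_FROM_GRAPH)
            parse_commit(commit);       /* fall back to the object store */
    else
            use_graph_data(commit);     /* hypothetical fast path backed by the graph file */
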
diff --git a/apply.c b/apply.c
index e9f34dc..79afcf1 100644
--- a/apply.c
+++ b/apply.c
@@ -2263,8 +2263,8 @@ static void show_stats(struct apply_state *state, struct patch *patch)
static int read_old_data(struct stat *st, struct patch *patch,
const char *path, struct strbuf *buf)
{
- enum safe_crlf safe_crlf = patch->crlf_in_old ?
- SAFE_CRLF_KEEP_CRLF : SAFE_CRLF_RENORMALIZE;
+ int conv_flags = patch->crlf_in_old ?
+ CONV_EOL_KEEP_CRLF : CONV_EOL_RENORMALIZE;
switch (st->st_mode & S_IFMT) {
case S_IFLNK:
if (strbuf_readlink(buf, path, st->st_size) < 0)
@@ -2281,7 +2281,7 @@ static int read_old_data(struct stat *st, struct patch *patch,
* should never look at the index when explicit crlf option
* is given.
*/
- convert_to_git(NULL, path, buf->buf, buf->len, buf, safe_crlf);
+ convert_to_git(NULL, path, buf->buf, buf->len, buf, conv_flags);
return 0;
default:
return -1;
@@ -3154,7 +3154,7 @@ static int apply_binary(struct apply_state *state,
* See if the old one matches what the patch
* applies to.
*/
- hash_sha1_file(img->buf, img->len, blob_type, oid.hash);
+ hash_object_file(img->buf, img->len, blob_type, &oid);
if (strcmp(oid_to_hex(&oid), patch->old_sha1_prefix))
return error(_("the patch applies to '%s' (%s), "
"which does not match the "
@@ -3199,7 +3199,7 @@ static int apply_binary(struct apply_state *state,
name);
/* verify that the result matches */
- hash_sha1_file(img->buf, img->len, blob_type, oid.hash);
+ hash_object_file(img->buf, img->len, blob_type, &oid);
if (strcmp(oid_to_hex(&oid), patch->new_sha1_prefix))
return error(_("binary patch to '%s' creates incorrect result (expecting %s, got %s)"),
name, patch->new_sha1_prefix, oid_to_hex(&oid));
@@ -3554,7 +3554,7 @@ static int try_threeway(struct apply_state *state,
/* Preimage the patch was prepared for */
if (patch->is_new)
- write_sha1_file("", 0, blob_type, pre_oid.hash);
+ write_object_file("", 0, blob_type, &pre_oid);
else if (get_oid(patch->old_sha1_prefix, &pre_oid) ||
read_blob_object(&buf, &pre_oid, patch->old_mode))
return error(_("repository lacks the necessary blob to fall back on 3-way merge."));
@@ -3570,7 +3570,7 @@ static int try_threeway(struct apply_state *state,
return -1;
}
/* post_oid is theirs */
- write_sha1_file(tmp_image.buf, tmp_image.len, blob_type, post_oid.hash);
+ write_object_file(tmp_image.buf, tmp_image.len, blob_type, &post_oid);
clear_image(&tmp_image);
/* our_oid is ours */
@@ -3583,7 +3583,7 @@ static int try_threeway(struct apply_state *state,
return error(_("cannot read the current contents of '%s'"),
patch->old_name);
}
- write_sha1_file(tmp_image.buf, tmp_image.len, blob_type, our_oid.hash);
+ write_object_file(tmp_image.buf, tmp_image.len, blob_type, &our_oid);
clear_image(&tmp_image);
/* in-core three-way merge between post and our using pre as base */
@@ -4291,7 +4291,7 @@ static int add_index_file(struct apply_state *state,
}
fill_stat_cache_info(ce, &st);
}
- if (write_sha1_file(buf, size, blob_type, ce->oid.hash) < 0) {
+ if (write_object_file(buf, size, blob_type, &ce->oid) < 0) {
free(ce);
return error(_("unable to create backing store "
"for newly created file %s"), path);
diff --git a/blame.c b/blame.c
index 21c8676..200e0ad 100644
--- a/blame.c
+++ b/blame.c
@@ -232,7 +232,7 @@ static struct commit *fake_working_tree_commit(struct diff_options *opt,
convert_to_git(&the_index, path, buf.buf, buf.len, &buf, 0);
origin->file.ptr = buf.buf;
origin->file.size = buf.len;
- pretend_sha1_file(buf.buf, buf.len, OBJ_BLOB, origin->blob_oid.hash);
+ pretend_object_file(buf.buf, buf.len, OBJ_BLOB, &origin->blob_oid);
/*
* Read the current index, replace the path entry with
diff --git a/builtin.h b/builtin.h
index 42378f3..079855b 100644
--- a/builtin.h
+++ b/builtin.h
@@ -149,6 +149,7 @@ extern int cmd_clone(int argc, const char **argv, const char *prefix);
extern int cmd_clean(int argc, const char **argv, const char *prefix);
extern int cmd_column(int argc, const char **argv, const char *prefix);
extern int cmd_commit(int argc, const char **argv, const char *prefix);
+extern int cmd_commit_graph(int argc, const char **argv, const char *prefix);
extern int cmd_commit_tree(int argc, const char **argv, const char *prefix);
extern int cmd_config(int argc, const char **argv, const char *prefix);
extern int cmd_count_objects(int argc, const char **argv, const char *prefix);
diff --git a/builtin/am.c b/builtin/am.c
index acfe9d3..6661edc 100644
--- a/builtin/am.c
+++ b/builtin/am.c
@@ -1061,7 +1061,7 @@ static void am_setup(struct am_state *state, enum patch_format patch_format,
}
write_state_text(state, "scissors", str);
- sq_quote_argv(&sb, state->git_apply_opts.argv, 0);
+ sq_quote_argv(&sb, state->git_apply_opts.argv);
write_state_text(state, "apply-opt", sb.buf);
if (state->rebasing)
@@ -1641,8 +1641,8 @@ static void do_commit(const struct am_state *state)
setenv("GIT_COMMITTER_DATE",
state->ignore_date ? "" : state->author_date, 1);
- if (commit_tree(state->msg, state->msg_len, tree.hash, parents, commit.hash,
- author, state->sign_commit))
+ if (commit_tree(state->msg, state->msg_len, &tree, parents, &commit,
+ author, state->sign_commit))
die(_("failed to write commit object"));
reflog_msg = getenv("GIT_REFLOG_ACTION");
diff --git a/builtin/cat-file.c b/builtin/cat-file.c
index c6b3b1b..d90170f 100644
--- a/builtin/cat-file.c
+++ b/builtin/cat-file.c
@@ -475,6 +475,8 @@ static int batch_objects(struct batch_options *opt)
for_each_loose_object(batch_loose_object, &sa, 0);
for_each_packed_object(batch_packed_object, &sa, 0);
+ if (repository_format_partial_clone)
+ warning("This repository has extensions.partialClone set. Some objects may not be loaded.");
cb.opt = opt;
cb.expand = &data;
diff --git a/builtin/checkout.c b/builtin/checkout.c
index ed3d655..a52af2e 100644
--- a/builtin/checkout.c
+++ b/builtin/checkout.c
@@ -227,8 +227,7 @@ static int checkout_merged(int pos, const struct checkout *state)
* (it also writes the merge result to the object database even
* when it may contain conflicts).
*/
- if (write_sha1_file(result_buf.ptr, result_buf.size,
- blob_type, oid.hash))
+ if (write_object_file(result_buf.ptr, result_buf.size, blob_type, &oid))
die(_("Unable to add merge result for '%s'"), path);
free(result_buf.ptr);
ce = make_cache_entry(mode, oid.hash, path, 2, 0);
diff --git a/builtin/clone.c b/builtin/clone.c
index 2846517..101c27a 100644
--- a/builtin/clone.c
+++ b/builtin/clone.c
@@ -26,6 +26,7 @@
#include "run-command.h"
#include "connected.h"
#include "packfile.h"
+#include "list-objects-filter-options.h"
/*
* Overall FIXMEs:
@@ -60,6 +61,7 @@ static struct string_list option_optional_reference = STRING_LIST_INIT_NODUP;
static int option_dissociate;
static int max_jobs = -1;
static struct string_list option_recurse_submodules = STRING_LIST_INIT_NODUP;
+static struct list_objects_filter_options filter_options;
static int recurse_submodules_cb(const struct option *opt,
const char *arg, int unset)
@@ -135,6 +137,7 @@ static struct option builtin_clone_options[] = {
TRANSPORT_FAMILY_IPV4),
OPT_SET_INT('6', "ipv6", &family, N_("use IPv6 addresses only"),
TRANSPORT_FAMILY_IPV6),
+ OPT_PARSE_LIST_OBJECTS_FILTER(&filter_options),
OPT_END()
};
@@ -893,6 +896,8 @@ int cmd_clone(int argc, const char **argv, const char *prefix)
struct refspec *refspec;
const char *fetch_pattern;
+ fetch_if_missing = 0;
+
packet_trace_identity("clone");
argc = parse_options(argc, argv, prefix, builtin_clone_options,
builtin_clone_usage, 0);
@@ -1090,6 +1095,8 @@ int cmd_clone(int argc, const char **argv, const char *prefix)
warning(_("--shallow-since is ignored in local clones; use file:// instead."));
if (option_not.nr)
warning(_("--shallow-exclude is ignored in local clones; use file:// instead."));
+ if (filter_options.choice)
+ warning(_("--filter is ignored in local clones; use file:// instead."));
if (!access(mkpath("%s/shallow", path), F_OK)) {
if (option_local > 0)
warning(_("source repository is shallow, ignoring --local"));
@@ -1118,7 +1125,13 @@ int cmd_clone(int argc, const char **argv, const char *prefix)
transport_set_option(transport, TRANS_OPT_UPLOADPACK,
option_upload_pack);
- if (transport->smart_options && !deepen)
+ if (filter_options.choice) {
+ transport_set_option(transport, TRANS_OPT_LIST_OBJECTS_FILTER,
+ filter_options.filter_spec);
+ transport_set_option(transport, TRANS_OPT_FROM_PROMISOR, "1");
+ }
+
+ if (transport->smart_options && !deepen && !filter_options.choice)
transport->smart_options->check_self_contained_and_connected = 1;
refs = transport_get_remote_refs(transport);
@@ -1178,13 +1191,17 @@ int cmd_clone(int argc, const char **argv, const char *prefix)
write_refspec_config(src_ref_prefix, our_head_points_at,
remote_head_points_at, &branch_top);
+ if (filter_options.choice)
+ partial_clone_register("origin", &filter_options);
+
if (is_local)
clone_local(path, git_dir);
else if (refs && complete_refs_before_fetch)
transport_fetch_refs(transport, mapped_refs);
update_remote_refs(refs, mapped_refs, remote_head_points_at,
- branch_top.buf, reflog_msg.buf, transport, !is_local);
+ branch_top.buf, reflog_msg.buf, transport,
+ !is_local && !filter_options.choice);
update_head(our_head_points_at, remote_head, reflog_msg.buf);
@@ -1205,6 +1222,7 @@ int cmd_clone(int argc, const char **argv, const char *prefix)
}
junk_mode = JUNK_LEAVE_REPO;
+ fetch_if_missing = 1;
err = checkout(submodule_progress);
strbuf_release(&reflog_msg);
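
Note how the whole transfer is bracketed by the fetch_if_missing global: a partial clone must not lazily fetch individual missing objects while the initial pack is still being negotiated, and lazy fetching is only re-enabled once checkout may legitimately fault in a filtered-out blob. The shape of that bracket, reduced to the calls visible above:

    fetch_if_missing = 0;                  /* no lazy fetches during the clone itself */
    transport_fetch_refs(transport, mapped_refs);
    /* ... ref and config setup elided ... */
    fetch_if_missing = 1;                  /* checkout may now demand missing blobs */
    err = checkout(submodule_progress);
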
diff --git a/builtin/commit-graph.c b/builtin/commit-graph.c
new file mode 100644
index 0000000..37420ae
--- /dev/null
+++ b/builtin/commit-graph.c
@@ -0,0 +1,171 @@
+#include "builtin.h"
+#include "config.h"
+#include "dir.h"
+#include "lockfile.h"
+#include "parse-options.h"
+#include "commit-graph.h"
+
+static char const * const builtin_commit_graph_usage[] = {
+ N_("git commit-graph [--object-dir <objdir>]"),
+ N_("git commit-graph read [--object-dir <objdir>]"),
+ N_("git commit-graph write [--object-dir <objdir>] [--append] [--stdin-packs|--stdin-commits]"),
+ NULL
+};
+
+static const char * const builtin_commit_graph_read_usage[] = {
+ N_("git commit-graph read [--object-dir <objdir>]"),
+ NULL
+};
+
+static const char * const builtin_commit_graph_write_usage[] = {
+ N_("git commit-graph write [--object-dir <objdir>] [--append] [--stdin-packs|--stdin-commits]"),
+ NULL
+};
+
+static struct opts_commit_graph {
+ const char *obj_dir;
+ int stdin_packs;
+ int stdin_commits;
+ int append;
+} opts;
+
+static int graph_read(int argc, const char **argv)
+{
+ struct commit_graph *graph = NULL;
+ char *graph_name;
+
+ static struct option builtin_commit_graph_read_options[] = {
+ OPT_STRING(0, "object-dir", &opts.obj_dir,
+ N_("dir"),
+ N_("The object directory to store the graph")),
+ OPT_END(),
+ };
+
+ argc = parse_options(argc, argv, NULL,
+ builtin_commit_graph_read_options,
+ builtin_commit_graph_read_usage, 0);
+
+ if (!opts.obj_dir)
+ opts.obj_dir = get_object_directory();
+
+ graph_name = get_commit_graph_filename(opts.obj_dir);
+ graph = load_commit_graph_one(graph_name);
+
+ if (!graph)
+ die("graph file %s does not exist", graph_name);
+ FREE_AND_NULL(graph_name);
+
+ printf("header: %08x %d %d %d %d\n",
+ ntohl(*(uint32_t*)graph->data),
+ *(unsigned char*)(graph->data + 4),
+ *(unsigned char*)(graph->data + 5),
+ *(unsigned char*)(graph->data + 6),
+ *(unsigned char*)(graph->data + 7));
+ printf("num_commits: %u\n", graph->num_commits);
+ printf("chunks:");
+
+ if (graph->chunk_oid_fanout)
+ printf(" oid_fanout");
+ if (graph->chunk_oid_lookup)
+ printf(" oid_lookup");
+ if (graph->chunk_commit_data)
+ printf(" commit_metadata");
+ if (graph->chunk_large_edges)
+ printf(" large_edges");
+ printf("\n");
+
+ return 0;
+}
+
+static int graph_write(int argc, const char **argv)
+{
+ const char **pack_indexes = NULL;
+ int packs_nr = 0;
+ const char **commit_hex = NULL;
+ int commits_nr = 0;
+ const char **lines = NULL;
+ int lines_nr = 0;
+ int lines_alloc = 0;
+
+ static struct option builtin_commit_graph_write_options[] = {
+ OPT_STRING(0, "object-dir", &opts.obj_dir,
+ N_("dir"),
+ N_("The object directory to store the graph")),
+ OPT_BOOL(0, "stdin-packs", &opts.stdin_packs,
+ N_("scan pack-indexes listed by stdin for commits")),
+ OPT_BOOL(0, "stdin-commits", &opts.stdin_commits,
+ N_("start walk at commits listed by stdin")),
+ OPT_BOOL(0, "append", &opts.append,
+ N_("include all commits already in the commit-graph file")),
+ OPT_END(),
+ };
+
+ argc = parse_options(argc, argv, NULL,
+ builtin_commit_graph_write_options,
+ builtin_commit_graph_write_usage, 0);
+
+ if (opts.stdin_packs && opts.stdin_commits)
+ die(_("cannot use both --stdin-commits and --stdin-packs"));
+ if (!opts.obj_dir)
+ opts.obj_dir = get_object_directory();
+
+ if (opts.stdin_packs || opts.stdin_commits) {
+ struct strbuf buf = STRBUF_INIT;
+ lines_nr = 0;
+ lines_alloc = 128;
+ ALLOC_ARRAY(lines, lines_alloc);
+
+ while (strbuf_getline(&buf, stdin) != EOF) {
+ ALLOC_GROW(lines, lines_nr + 1, lines_alloc);
+ lines[lines_nr++] = strbuf_detach(&buf, NULL);
+ }
+
+ if (opts.stdin_packs) {
+ pack_indexes = lines;
+ packs_nr = lines_nr;
+ }
+ if (opts.stdin_commits) {
+ commit_hex = lines;
+ commits_nr = lines_nr;
+ }
+ }
+
+ write_commit_graph(opts.obj_dir,
+ pack_indexes,
+ packs_nr,
+ commit_hex,
+ commits_nr,
+ opts.append);
+
+ return 0;
+}
+
+int cmd_commit_graph(int argc, const char **argv, const char *prefix)
+{
+ static struct option builtin_commit_graph_options[] = {
+ OPT_STRING(0, "object-dir", &opts.obj_dir,
+ N_("dir"),
+ N_("The object directory to store the graph")),
+ OPT_END(),
+ };
+
+ if (argc == 2 && !strcmp(argv[1], "-h"))
+ usage_with_options(builtin_commit_graph_usage,
+ builtin_commit_graph_options);
+
+ git_config(git_default_config, NULL);
+ argc = parse_options(argc, argv, prefix,
+ builtin_commit_graph_options,
+ builtin_commit_graph_usage,
+ PARSE_OPT_STOP_AT_NON_OPTION);
+
+ if (argc > 0) {
+ if (!strcmp(argv[0], "read"))
+ return graph_read(argc, argv);
+ if (!strcmp(argv[0], "write"))
+ return graph_write(argc, argv);
+ }
+
+ usage_with_options(builtin_commit_graph_usage,
+ builtin_commit_graph_options);
+}
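
For orientation, the plain "git commit-graph write" case (no --stdin-packs, --stdin-commits or --append) reduces to a single library call with everything left NULL/zero, which presumably lets write_commit_graph() discover commits from the packed objects on its own:

    write_commit_graph(get_object_directory(),
                       NULL, 0,    /* no pack-index list */
                       NULL, 0,    /* no commit list */
                       0);         /* do not append to an existing graph */
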
diff --git a/builtin/commit-tree.c b/builtin/commit-tree.c
index 2177251..e5bdf57 100644
--- a/builtin/commit-tree.c
+++ b/builtin/commit-tree.c
@@ -117,8 +117,8 @@ int cmd_commit_tree(int argc, const char **argv, const char *prefix)
die_errno("git commit-tree: failed to read");
}
- if (commit_tree(buffer.buf, buffer.len, tree_oid.hash, parents,
- commit_oid.hash, NULL, sign_commit)) {
+ if (commit_tree(buffer.buf, buffer.len, &tree_oid, parents, &commit_oid,
+ NULL, sign_commit)) {
strbuf_release(&buffer);
return 1;
}
diff --git a/builtin/commit.c b/builtin/commit.c
index 4610e3d..e8e8d13 100644
--- a/builtin/commit.c
+++ b/builtin/commit.c
@@ -31,9 +31,7 @@
#include "gpg-interface.h"
#include "column.h"
#include "sequencer.h"
-#include "notes-utils.h"
#include "mailmap.h"
-#include "sigchain.h"
static const char * const builtin_commit_usage[] = {
N_("git commit [<options>] [--] <pathspec>..."),
@@ -45,31 +43,6 @@ static const char * const builtin_status_usage[] = {
NULL
};
-static const char implicit_ident_advice_noconfig[] =
-N_("Your name and email address were configured automatically based\n"
-"on your username and hostname. Please check that they are accurate.\n"
-"You can suppress this message by setting them explicitly. Run the\n"
-"following command and follow the instructions in your editor to edit\n"
-"your configuration file:\n"
-"\n"
-" git config --global --edit\n"
-"\n"
-"After doing this, you may fix the identity used for this commit with:\n"
-"\n"
-" git commit --amend --reset-author\n");
-
-static const char implicit_ident_advice_config[] =
-N_("Your name and email address were configured automatically based\n"
-"on your username and hostname. Please check that they are accurate.\n"
-"You can suppress this message by setting them explicitly:\n"
-"\n"
-" git config --global user.name \"Your Name\"\n"
-" git config --global user.email you@example.com\n"
-"\n"
-"After doing this, you may fix the identity used for this commit with:\n"
-"\n"
-" git commit --amend --reset-author\n");
-
static const char empty_amend_advice[] =
N_("You asked to amend the most recent commit, but doing so would make\n"
"it empty. You can repeat your command with --allow-empty, or you can\n"
@@ -93,8 +66,6 @@ N_("If you wish to skip this commit, use:\n"
"Then \"git cherry-pick --continue\" will resume cherry-picking\n"
"the remaining commits.\n");
-static GIT_PATH_FUNC(git_path_commit_editmsg, "COMMIT_EDITMSG")
-
static const char *use_message_buffer;
static struct lock_file index_lock; /* real index */
static struct lock_file false_lock; /* used only for partial commits */
@@ -128,12 +99,7 @@ static char *sign_commit;
* if editor is used, and only the whitespaces if the message
* is specified explicitly.
*/
-static enum {
- CLEANUP_SPACE,
- CLEANUP_NONE,
- CLEANUP_SCISSORS,
- CLEANUP_ALL
-} cleanup_mode;
+static enum commit_msg_cleanup_mode cleanup_mode;
static const char *cleanup_arg;
static enum commit_whence whence;
@@ -673,7 +639,7 @@ static int prepare_to_commit(const char *index_file, const char *prefix,
struct strbuf sb = STRBUF_INIT;
const char *hook_arg1 = NULL;
const char *hook_arg2 = NULL;
- int clean_message_contents = (cleanup_mode != CLEANUP_NONE);
+ int clean_message_contents = (cleanup_mode != COMMIT_MSG_CLEANUP_NONE);
int old_display_comment_prefix;
/* This checks and barfs if author is badly specified */
@@ -814,7 +780,7 @@ static int prepare_to_commit(const char *index_file, const char *prefix,
struct ident_split ci, ai;
if (whence != FROM_COMMIT) {
- if (cleanup_mode == CLEANUP_SCISSORS)
+ if (cleanup_mode == COMMIT_MSG_CLEANUP_SCISSORS)
wt_status_add_cut_line(s->fp);
status_printf_ln(s, GIT_COLOR_NORMAL,
whence == FROM_MERGE
@@ -834,14 +800,15 @@ static int prepare_to_commit(const char *index_file, const char *prefix,
}
fprintf(s->fp, "\n");
- if (cleanup_mode == CLEANUP_ALL)
+ if (cleanup_mode == COMMIT_MSG_CLEANUP_ALL)
status_printf(s, GIT_COLOR_NORMAL,
_("Please enter the commit message for your changes."
" Lines starting\nwith '%c' will be ignored, and an empty"
" message aborts the commit.\n"), comment_line_char);
- else if (cleanup_mode == CLEANUP_SCISSORS && whence == FROM_COMMIT)
+ else if (cleanup_mode == COMMIT_MSG_CLEANUP_SCISSORS &&
+ whence == FROM_COMMIT)
wt_status_add_cut_line(s->fp);
- else /* CLEANUP_SPACE, that is. */
+ else /* COMMIT_MSG_CLEANUP_SPACE, that is. */
status_printf(s, GIT_COLOR_NORMAL,
_("Please enter the commit message for your changes."
" Lines starting\n"
@@ -986,65 +953,6 @@ static int prepare_to_commit(const char *index_file, const char *prefix,
return 1;
}
-static int rest_is_empty(struct strbuf *sb, int start)
-{
- int i, eol;
- const char *nl;
-
- /* Check if the rest is just whitespace and Signed-off-by's. */
- for (i = start; i < sb->len; i++) {
- nl = memchr(sb->buf + i, '\n', sb->len - i);
- if (nl)
- eol = nl - sb->buf;
- else
- eol = sb->len;
-
- if (strlen(sign_off_header) <= eol - i &&
- starts_with(sb->buf + i, sign_off_header)) {
- i = eol;
- continue;
- }
- while (i < eol)
- if (!isspace(sb->buf[i++]))
- return 0;
- }
-
- return 1;
-}
-
-/*
- * Find out if the message in the strbuf contains only whitespace and
- * Signed-off-by lines.
- */
-static int message_is_empty(struct strbuf *sb)
-{
- if (cleanup_mode == CLEANUP_NONE && sb->len)
- return 0;
- return rest_is_empty(sb, 0);
-}
-
-/*
- * See if the user edited the message in the editor or left what
- * was in the template intact
- */
-static int template_untouched(struct strbuf *sb)
-{
- struct strbuf tmpl = STRBUF_INIT;
- const char *start;
-
- if (cleanup_mode == CLEANUP_NONE && sb->len)
- return 0;
-
- if (!template_file || strbuf_read_file(&tmpl, template_file, 0) <= 0)
- return 0;
-
- strbuf_stripspace(&tmpl, cleanup_mode == CLEANUP_ALL);
- if (!skip_prefix(sb->buf, tmpl.buf, &start))
- start = sb->buf;
- strbuf_release(&tmpl);
- return rest_is_empty(sb, start - sb->buf);
-}
-
static const char *find_author_by_nickname(const char *name)
{
struct rev_info revs;
@@ -1229,15 +1137,17 @@ static int parse_and_validate_options(int argc, const char *argv[],
if (argc == 0 && (also || (only && !amend && !allow_empty)))
die(_("No paths with --include/--only does not make sense."));
if (!cleanup_arg || !strcmp(cleanup_arg, "default"))
- cleanup_mode = use_editor ? CLEANUP_ALL : CLEANUP_SPACE;
+ cleanup_mode = use_editor ? COMMIT_MSG_CLEANUP_ALL :
+ COMMIT_MSG_CLEANUP_SPACE;
else if (!strcmp(cleanup_arg, "verbatim"))
- cleanup_mode = CLEANUP_NONE;
+ cleanup_mode = COMMIT_MSG_CLEANUP_NONE;
else if (!strcmp(cleanup_arg, "whitespace"))
- cleanup_mode = CLEANUP_SPACE;
+ cleanup_mode = COMMIT_MSG_CLEANUP_SPACE;
else if (!strcmp(cleanup_arg, "strip"))
- cleanup_mode = CLEANUP_ALL;
+ cleanup_mode = COMMIT_MSG_CLEANUP_ALL;
else if (!strcmp(cleanup_arg, "scissors"))
- cleanup_mode = use_editor ? CLEANUP_SCISSORS : CLEANUP_SPACE;
+ cleanup_mode = use_editor ? COMMIT_MSG_CLEANUP_SCISSORS :
+ COMMIT_MSG_CLEANUP_SPACE;
else
die(_("Invalid cleanup mode %s"), cleanup_arg);
@@ -1439,98 +1349,6 @@ int cmd_status(int argc, const char **argv, const char *prefix)
return 0;
}
-static const char *implicit_ident_advice(void)
-{
- char *user_config = expand_user_path("~/.gitconfig", 0);
- char *xdg_config = xdg_config_home("config");
- int config_exists = file_exists(user_config) || file_exists(xdg_config);
-
- free(user_config);
- free(xdg_config);
-
- if (config_exists)
- return _(implicit_ident_advice_config);
- else
- return _(implicit_ident_advice_noconfig);
-
-}
-
-static void print_summary(const char *prefix, const struct object_id *oid,
- int initial_commit)
-{
- struct rev_info rev;
- struct commit *commit;
- struct strbuf format = STRBUF_INIT;
- const char *head;
- struct pretty_print_context pctx = {0};
- struct strbuf author_ident = STRBUF_INIT;
- struct strbuf committer_ident = STRBUF_INIT;
-
- commit = lookup_commit(oid);
- if (!commit)
- die(_("couldn't look up newly created commit"));
- if (parse_commit(commit))
- die(_("could not parse newly created commit"));
-
- strbuf_addstr(&format, "format:%h] %s");
-
- format_commit_message(commit, "%an <%ae>", &author_ident, &pctx);
- format_commit_message(commit, "%cn <%ce>", &committer_ident, &pctx);
- if (strbuf_cmp(&author_ident, &committer_ident)) {
- strbuf_addstr(&format, "\n Author: ");
- strbuf_addbuf_percentquote(&format, &author_ident);
- }
- if (author_date_is_interesting()) {
- struct strbuf date = STRBUF_INIT;
- format_commit_message(commit, "%ad", &date, &pctx);
- strbuf_addstr(&format, "\n Date: ");
- strbuf_addbuf_percentquote(&format, &date);
- strbuf_release(&date);
- }
- if (!committer_ident_sufficiently_given()) {
- strbuf_addstr(&format, "\n Committer: ");
- strbuf_addbuf_percentquote(&format, &committer_ident);
- if (advice_implicit_identity) {
- strbuf_addch(&format, '\n');
- strbuf_addstr(&format, implicit_ident_advice());
- }
- }
- strbuf_release(&author_ident);
- strbuf_release(&committer_ident);
-
- init_revisions(&rev, prefix);
- setup_revisions(0, NULL, &rev, NULL);
-
- rev.diff = 1;
- rev.diffopt.output_format =
- DIFF_FORMAT_SHORTSTAT | DIFF_FORMAT_SUMMARY;
-
- rev.verbose_header = 1;
- rev.show_root_diff = 1;
- get_commit_format(format.buf, &rev);
- rev.always_show_header = 0;
- rev.diffopt.detect_rename = DIFF_DETECT_RENAME;
- rev.diffopt.break_opt = 0;
- diff_setup_done(&rev.diffopt);
-
- head = resolve_ref_unsafe("HEAD", 0, NULL, NULL);
- if (!head)
- die_errno(_("unable to resolve HEAD after creating commit"));
- if (!strcmp(head, "HEAD"))
- head = _("detached HEAD");
- else
- skip_prefix(head, "refs/heads/", &head);
- printf("[%s%s ", head, initial_commit ? _(" (root-commit)") : "");
-
- if (!log_tree_commit(&rev, commit)) {
- rev.always_show_header = 1;
- rev.use_terminator = 1;
- log_tree_commit(&rev, commit);
- }
-
- strbuf_release(&format);
-}
-
static int git_commit_config(const char *k, const char *v, void *cb)
{
struct wt_status *s = cb;
@@ -1560,37 +1378,6 @@ static int git_commit_config(const char *k, const char *v, void *cb)
return git_status_config(k, v, s);
}
-static int run_rewrite_hook(const struct object_id *oldoid,
- const struct object_id *newoid)
-{
- struct child_process proc = CHILD_PROCESS_INIT;
- const char *argv[3];
- int code;
- struct strbuf sb = STRBUF_INIT;
-
- argv[0] = find_hook("post-rewrite");
- if (!argv[0])
- return 0;
-
- argv[1] = "amend";
- argv[2] = NULL;
-
- proc.argv = argv;
- proc.in = -1;
- proc.stdout_to_stderr = 1;
-
- code = start_command(&proc);
- if (code)
- return code;
- strbuf_addf(&sb, "%s %s\n", oid_to_hex(oldoid), oid_to_hex(newoid));
- sigchain_push(SIGPIPE, SIG_IGN);
- write_in_full(proc.in, sb.buf, sb.len);
- close(proc.in);
- strbuf_release(&sb);
- sigchain_pop(SIGPIPE);
- return finish_command(&proc);
-}
-
int run_commit_hook(int editor_is_used, const char *index_file, const char *name, ...)
{
struct argv_array hook_env = ARGV_ARRAY_INIT;
@@ -1673,13 +1460,11 @@ int cmd_commit(int argc, const char **argv, const char *prefix)
struct strbuf sb = STRBUF_INIT;
struct strbuf author_ident = STRBUF_INIT;
const char *index_file, *reflog_msg;
- char *nl;
struct object_id oid;
struct commit_list *parents = NULL;
struct stat statbuf;
struct commit *current_head = NULL;
struct commit_extra_header *extra = NULL;
- struct ref_transaction *transaction;
struct strbuf err = STRBUF_INIT;
if (argc == 2 && !strcmp(argv[1], "-h"))
@@ -1770,17 +1555,17 @@ int cmd_commit(int argc, const char **argv, const char *prefix)
}
if (verbose || /* Truncate the message just before the diff, if any. */
- cleanup_mode == CLEANUP_SCISSORS)
+ cleanup_mode == COMMIT_MSG_CLEANUP_SCISSORS)
strbuf_setlen(&sb, wt_status_locate_end(sb.buf, sb.len));
- if (cleanup_mode != CLEANUP_NONE)
- strbuf_stripspace(&sb, cleanup_mode == CLEANUP_ALL);
+ if (cleanup_mode != COMMIT_MSG_CLEANUP_NONE)
+ strbuf_stripspace(&sb, cleanup_mode == COMMIT_MSG_CLEANUP_ALL);
- if (message_is_empty(&sb) && !allow_empty_message) {
+ if (message_is_empty(&sb, cleanup_mode) && !allow_empty_message) {
rollback_index_files();
fprintf(stderr, _("Aborting commit due to empty commit message.\n"));
exit(1);
}
- if (template_untouched(&sb) && !allow_empty_message) {
+ if (template_untouched(&sb, template_file, cleanup_mode) && !allow_empty_message) {
rollback_index_files();
fprintf(stderr, _("Aborting commit; you did not edit the message.\n"));
exit(1);
@@ -1794,33 +1579,20 @@ int cmd_commit(int argc, const char **argv, const char *prefix)
append_merge_tag_headers(parents, &tail);
}
- if (commit_tree_extended(sb.buf, sb.len, active_cache_tree->oid.hash,
- parents, oid.hash, author_ident.buf, sign_commit, extra)) {
+ if (commit_tree_extended(sb.buf, sb.len, &active_cache_tree->oid,
+ parents, &oid, author_ident.buf, sign_commit,
+ extra)) {
rollback_index_files();
die(_("failed to write commit object"));
}
strbuf_release(&author_ident);
free_commit_extra_headers(extra);
- nl = strchr(sb.buf, '\n');
- if (nl)
- strbuf_setlen(&sb, nl + 1 - sb.buf);
- else
- strbuf_addch(&sb, '\n');
- strbuf_insert(&sb, 0, reflog_msg, strlen(reflog_msg));
- strbuf_insert(&sb, strlen(reflog_msg), ": ", 2);
-
- transaction = ref_transaction_begin(&err);
- if (!transaction ||
- ref_transaction_update(transaction, "HEAD", &oid,
- current_head
- ? &current_head->object.oid : &null_oid,
- 0, sb.buf, &err) ||
- ref_transaction_commit(transaction, &err)) {
+ if (update_head_with_reflog(current_head, &oid, reflog_msg, &sb,
+ &err)) {
rollback_index_files();
die("%s", err.buf);
}
- ref_transaction_free(transaction);
unlink(git_path_cherry_pick_head());
unlink(git_path_revert_head());
@@ -1837,17 +1609,17 @@ int cmd_commit(int argc, const char **argv, const char *prefix)
rerere(0);
run_commit_hook(use_editor, get_index_file(), "post-commit", NULL);
if (amend && !no_post_rewrite) {
- struct notes_rewrite_cfg *cfg;
- cfg = init_copy_notes_for_rewrite("amend");
- if (cfg) {
- /* we are amending, so current_head is not NULL */
- copy_note_for_rewrite(cfg, &current_head->object.oid, &oid);
- finish_copy_notes_for_rewrite(cfg, "Notes added by 'git commit --amend'");
- }
- run_rewrite_hook(&current_head->object.oid, &oid);
+ commit_post_rewrite(current_head, &oid);
+ }
+ if (!quiet) {
+ unsigned int flags = 0;
+
+ if (!current_head)
+ flags |= SUMMARY_INITIAL_COMMIT;
+ if (author_date_is_interesting())
+ flags |= SUMMARY_SHOW_AUTHOR_DATE;
+ print_commit_summary(prefix, &oid, flags);
}
- if (!quiet)
- print_summary(prefix, &oid, !current_head);
UNLEAK(err);
UNLEAK(sb);
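
The large removals in builtin/commit.c are a libification, not a deletion: rest_is_empty(), message_is_empty(), template_untouched(), run_rewrite_hook() and the summary/ref-update code move behind shared declarations (presumably so the sequencer can reuse them), and cmd_commit() now passes its state explicitly. The clearest new interface is the summary printer, whose flags replace the old positional argument:

    unsigned int flags = 0;

    if (!current_head)
            flags |= SUMMARY_INITIAL_COMMIT;
    if (author_date_is_interesting())
            flags |= SUMMARY_SHOW_AUTHOR_DATE;
    print_commit_summary(prefix, &oid, flags);
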
diff --git a/builtin/describe.c b/builtin/describe.c
index 6fe1c51..c428984 100644
--- a/builtin/describe.c
+++ b/builtin/describe.c
@@ -383,7 +383,7 @@ static void describe_commit(struct object_id *oid, struct strbuf *dst)
if (!match_cnt) {
struct object_id *cmit_oid = &cmit->object.oid;
if (always) {
- strbuf_addstr(dst, find_unique_abbrev(cmit_oid->hash, abbrev));
+ strbuf_add_unique_abbrev(dst, cmit_oid->hash, abbrev);
if (suffix)
strbuf_addstr(dst, suffix);
return;
diff --git a/builtin/fetch-pack.c b/builtin/fetch-pack.c
index 366b9d1..a7bc136 100644
--- a/builtin/fetch-pack.c
+++ b/builtin/fetch-pack.c
@@ -53,6 +53,8 @@ int cmd_fetch_pack(int argc, const char **argv, const char *prefix)
struct oid_array shallow = OID_ARRAY_INIT;
struct string_list deepen_not = STRING_LIST_INIT_DUP;
+ fetch_if_missing = 0;
+
packet_trace_identity("fetch-pack");
memset(&args, 0, sizeof(args));
@@ -143,6 +145,22 @@ int cmd_fetch_pack(int argc, const char **argv, const char *prefix)
args.update_shallow = 1;
continue;
}
+ if (!strcmp("--from-promisor", arg)) {
+ args.from_promisor = 1;
+ continue;
+ }
+ if (!strcmp("--no-dependents", arg)) {
+ args.no_dependents = 1;
+ continue;
+ }
+ if (skip_prefix(arg, ("--" CL_ARG__FILTER "="), &arg)) {
+ parse_list_objects_filter(&args.filter_options, arg);
+ continue;
+ }
+ if (!strcmp(arg, ("--no-" CL_ARG__FILTER))) {
+ list_objects_filter_set_no_filter(&args.filter_options);
+ continue;
+ }
usage(fetch_pack_usage);
}
if (deepen_not.nr)
diff --git a/builtin/fetch.c b/builtin/fetch.c
index 7bbcd26..8ee998e 100644
--- a/builtin/fetch.c
+++ b/builtin/fetch.c
@@ -19,6 +19,7 @@
#include "argv-array.h"
#include "utf8.h"
#include "packfile.h"
+#include "list-objects-filter-options.h"
static const char * const builtin_fetch_usage[] = {
N_("git fetch [<options>] [<repository> [<refspec>...]]"),
@@ -56,6 +57,7 @@ static int recurse_submodules_default = RECURSE_SUBMODULES_ON_DEMAND;
static int shown_url = 0;
static int refmap_alloc, refmap_nr;
static const char **refmap_array;
+static struct list_objects_filter_options filter_options;
static int git_fetch_config(const char *k, const char *v, void *cb)
{
@@ -161,6 +163,7 @@ static struct option builtin_fetch_options[] = {
TRANSPORT_FAMILY_IPV4),
OPT_SET_INT('6', "ipv6", &family, N_("use IPv6 addresses only"),
TRANSPORT_FAMILY_IPV6),
+ OPT_PARSE_LIST_OBJECTS_FILTER(&filter_options),
OPT_END()
};
@@ -1045,6 +1048,11 @@ static struct transport *prepare_transport(struct remote *remote, int deepen)
set_option(transport, TRANS_OPT_DEEPEN_RELATIVE, "yes");
if (update_shallow)
set_option(transport, TRANS_OPT_UPDATE_SHALLOW, "yes");
+ if (filter_options.choice) {
+ set_option(transport, TRANS_OPT_LIST_OBJECTS_FILTER,
+ filter_options.filter_spec);
+ set_option(transport, TRANS_OPT_FROM_PROMISOR, "1");
+ }
return transport;
}
@@ -1265,6 +1273,56 @@ static int fetch_multiple(struct string_list *list)
return result;
}
+/*
+ * Fetching from the promisor remote should use the given filter-spec
+ * or inherit the default filter-spec from the config.
+ */
+static inline void fetch_one_setup_partial(struct remote *remote)
+{
+ /*
+ * Explicit --no-filter argument overrides everything, regardless
+ * of any prior partial clones and fetches.
+ */
+ if (filter_options.no_filter)
+ return;
+
+ /*
+ * If no prior partial clone/fetch and the current fetch DID NOT
+ * request a partial-fetch, do a normal fetch.
+ */
+ if (!repository_format_partial_clone && !filter_options.choice)
+ return;
+
+ /*
+ * If this is the FIRST partial-fetch request, we enable partial
+ * on this repo and remember the given filter-spec as the default
+ * for subsequent fetches to this remote.
+ */
+ if (!repository_format_partial_clone && filter_options.choice) {
+ partial_clone_register(remote->name, &filter_options);
+ return;
+ }
+
+ /*
+ * We are currently limited to only ONE promisor remote and only
+ * allow partial-fetches from the promisor remote.
+ */
+ if (strcmp(remote->name, repository_format_partial_clone)) {
+ if (filter_options.choice)
+ die(_("--filter can only be used with the remote configured in core.partialClone"));
+ return;
+ }
+
+ /*
+ * Do a partial-fetch from the promisor remote using either the
+ * explicitly given filter-spec or inherit the filter-spec from
+ * the config.
+ */
+ if (!filter_options.choice)
+ partial_clone_get_default_filter_spec(&filter_options);
+ return;
+}
+
static int fetch_one(struct remote *remote, int argc, const char **argv)
{
static const char **refs = NULL;
@@ -1320,12 +1378,14 @@ int cmd_fetch(int argc, const char **argv, const char *prefix)
{
int i;
struct string_list list = STRING_LIST_INIT_DUP;
- struct remote *remote;
+ struct remote *remote = NULL;
int result = 0;
struct argv_array argv_gc_auto = ARGV_ARRAY_INIT;
packet_trace_identity("fetch");
+ fetch_if_missing = 0;
+
/* Record the command line for the reflog */
strbuf_addstr(&default_rla, "fetch");
for (i = 1; i < argc; i++)
@@ -1359,23 +1419,23 @@ int cmd_fetch(int argc, const char **argv, const char *prefix)
if (depth || deepen_since || deepen_not.nr)
deepen = 1;
+ if (filter_options.choice && !repository_format_partial_clone)
+ die("--filter can only be used when extensions.partialClone is set");
+
if (all) {
if (argc == 1)
die(_("fetch --all does not take a repository argument"));
else if (argc > 1)
die(_("fetch --all does not make sense with refspecs"));
(void) for_each_remote(get_one_remote_for_fetch, &list);
- result = fetch_multiple(&list);
} else if (argc == 0) {
/* No arguments -- use default remote */
remote = remote_get(NULL);
- result = fetch_one(remote, argc, argv);
} else if (multiple) {
/* All arguments are assumed to be remotes or groups */
for (i = 0; i < argc; i++)
if (!add_remote_or_group(argv[i], &list))
die(_("No such remote or remote group: %s"), argv[i]);
- result = fetch_multiple(&list);
} else {
/* Single remote or group */
(void) add_remote_or_group(argv[0], &list);
@@ -1383,14 +1443,25 @@ int cmd_fetch(int argc, const char **argv, const char *prefix)
/* More than one remote */
if (argc > 1)
die(_("Fetching a group and specifying refspecs does not make sense"));
- result = fetch_multiple(&list);
} else {
/* Zero or one remotes */
remote = remote_get(argv[0]);
- result = fetch_one(remote, argc-1, argv+1);
+ argc--;
+ argv++;
}
}
+ if (remote) {
+ if (filter_options.choice || repository_format_partial_clone)
+ fetch_one_setup_partial(remote);
+ result = fetch_one(remote, argc, argv);
+ } else {
+ if (filter_options.choice)
+ die(_("--filter can only be used with the remote configured in core.partialClone"));
+ /* TODO should this also die if we have a previous partial-clone? */
+ result = fetch_multiple(&list);
+ }
+
if (!result && (recurse_submodules != RECURSE_SUBMODULES_OFF)) {
struct argv_array options = ARGV_ARRAY_INIT;
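
fetch_one_setup_partial() only ever runs for a single remote; collapsing its branches, the policy is: the first filtered fetch registers the promisor remote and its filter-spec, later unfiltered fetches from that remote inherit the stored spec, and any other remote must not use --filter at all. Compressed into one sketch, with names exactly as used above:

    if (!repository_format_partial_clone && filter_options.choice)
            partial_clone_register(remote->name, &filter_options);     /* first time */
    else if (repository_format_partial_clone &&
             !strcmp(remote->name, repository_format_partial_clone) &&
             !filter_options.choice)
            partial_clone_get_default_filter_spec(&filter_options);    /* inherit */
    else if (repository_format_partial_clone &&
             strcmp(remote->name, repository_format_partial_clone) &&
             filter_options.choice)
            die(_("--filter can only be used with the remote configured in core.partialClone"));
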
diff --git a/builtin/fsck.c b/builtin/fsck.c
index 246237e..ef78c6c 100644
--- a/builtin/fsck.c
+++ b/builtin/fsck.c
@@ -149,6 +149,15 @@ static int mark_object(struct object *obj, int type, void *data, struct fsck_opt
if (obj->flags & REACHABLE)
return 0;
obj->flags |= REACHABLE;
+
+ if (is_promisor_object(&obj->oid))
+ /*
+ * Further recursion does not need to be performed on this
+ * object since it is a promisor object (so it does not need to
+ * be added to "pending").
+ */
+ return 0;
+
if (!(obj->flags & HAS_OBJ)) {
if (parent && !has_object_file(&obj->oid)) {
printf("broken link from %7s %s\n",
@@ -171,7 +180,13 @@ static void mark_object_reachable(struct object *obj)
static int traverse_one_object(struct object *obj)
{
- return fsck_walk(obj, obj, &fsck_walk_options);
+ int result = fsck_walk(obj, obj, &fsck_walk_options);
+
+ if (obj->type == OBJ_TREE) {
+ struct tree *tree = (struct tree *)obj;
+ free_tree_buffer(tree);
+ }
+ return result;
}
static int traverse_reachable(void)
@@ -208,6 +223,8 @@ static void check_reachable_object(struct object *obj)
* do a full fsck
*/
if (!(obj->flags & HAS_OBJ)) {
+ if (is_promisor_object(&obj->oid))
+ return;
if (has_sha1_pack(obj->oid.hash))
return; /* it is in pack - forget about it */
printf("missing %s %s\n", printable_type(obj),
@@ -398,7 +415,7 @@ static void fsck_handle_reflog_oid(const char *refname, struct object_id *oid,
xstrfmt("%s@{%"PRItime"}", refname, timestamp));
obj->flags |= USED;
mark_object_reachable(obj);
- } else {
+ } else if (!is_promisor_object(oid)) {
error("%s: invalid reflog entry %s", refname, oid_to_hex(oid));
errors_found |= ERROR_REACHABLE;
}
@@ -434,6 +451,14 @@ static int fsck_handle_ref(const char *refname, const struct object_id *oid,
obj = parse_object(oid);
if (!obj) {
+ if (is_promisor_object(oid)) {
+ /*
+ * Increment default_refs anyway, because this is a
+ * valid ref.
+ */
+ default_refs++;
+ return 0;
+ }
error("%s: invalid sha1 pointer %s", refname, oid_to_hex(oid));
errors_found |= ERROR_REACHABLE;
/* We'll continue with the rest despite the error.. */
@@ -659,6 +684,9 @@ int cmd_fsck(int argc, const char **argv, const char *prefix)
int i;
struct alternate_object_database *alt;
+ /* fsck knows how to handle missing promisor objects */
+ fetch_if_missing = 0;
+
errors_found = 0;
check_replace_refs = 0;
@@ -731,6 +759,8 @@ int cmd_fsck(int argc, const char **argv, const char *prefix)
struct object *obj = lookup_object(oid.hash);
if (!obj || !(obj->flags & HAS_OBJ)) {
+ if (is_promisor_object(&oid))
+ continue;
error("%s: object missing", oid_to_hex(&oid));
errors_found |= ERROR_OBJECT;
continue;
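
Every fsck hunk above applies the same rule: an object missing from the local object store is only an error if no promisor remote has promised it. The recurring guard, reduced to the helpers used above:

    if (!has_object_file(&obj->oid)) {
            if (is_promisor_object(&obj->oid))
                    return 0;               /* promised by a promisor remote, not broken */
            errors_found |= ERROR_REACHABLE;
            return error("missing %s %s", printable_type(obj),
                         oid_to_hex(&obj->oid));
    }
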
diff --git a/builtin/gc.c b/builtin/gc.c
index 3c5eae0..77fa720 100644
--- a/builtin/gc.c
+++ b/builtin/gc.c
@@ -458,6 +458,9 @@ int cmd_gc(int argc, const char **argv, const char *prefix)
argv_array_push(&prune, prune_expire);
if (quiet)
argv_array_push(&prune, "--no-progress");
+ if (repository_format_partial_clone)
+ argv_array_push(&prune,
+ "--exclude-promisor-objects");
if (run_command_v_opt(prune.argv, RUN_GIT_CMD))
return error(FAILED_RUN, prune.argv[0]);
}
diff --git a/builtin/hash-object.c b/builtin/hash-object.c
index c532ff9..526da5c 100644
--- a/builtin/hash-object.c
+++ b/builtin/hash-object.c
@@ -24,7 +24,8 @@ static int hash_literally(struct object_id *oid, int fd, const char *type, unsig
if (strbuf_read(&buf, fd, 4096) < 0)
ret = -1;
else
- ret = hash_sha1_file_literally(buf.buf, buf.len, type, oid, flags);
+ ret = hash_object_file_literally(buf.buf, buf.len, type, oid,
+ flags);
strbuf_release(&buf);
return ret;
}
diff --git a/builtin/index-pack.c b/builtin/index-pack.c
index 8b87090..157bceb 100644
--- a/builtin/index-pack.c
+++ b/builtin/index-pack.c
@@ -91,7 +91,7 @@ static unsigned int input_offset, input_len;
static off_t consumed_bytes;
static off_t max_input_size;
static unsigned deepest_delta;
-static git_SHA_CTX input_ctx;
+static git_hash_ctx input_ctx;
static uint32_t input_crc32;
static int input_fd, output_fd;
static const char *curr_pack;
@@ -253,7 +253,7 @@ static void flush(void)
if (input_offset) {
if (output_fd >= 0)
write_or_die(output_fd, input_buffer, input_offset);
- git_SHA1_Update(&input_ctx, input_buffer, input_offset);
+ the_hash_algo->update_fn(&input_ctx, input_buffer, input_offset);
memmove(input_buffer, input_buffer + input_offset, input_len);
input_offset = 0;
}
@@ -326,7 +326,7 @@ static const char *open_pack_file(const char *pack_name)
output_fd = -1;
nothread_data.pack_fd = input_fd;
}
- git_SHA1_Init(&input_ctx);
+ the_hash_algo->init_fn(&input_ctx);
return pack_name;
}
@@ -437,22 +437,22 @@ static int is_delta_type(enum object_type type)
}
static void *unpack_entry_data(off_t offset, unsigned long size,
- enum object_type type, unsigned char *sha1)
+ enum object_type type, struct object_id *oid)
{
static char fixed_buf[8192];
int status;
git_zstream stream;
void *buf;
- git_SHA_CTX c;
+ git_hash_ctx c;
char hdr[32];
int hdrlen;
if (!is_delta_type(type)) {
hdrlen = xsnprintf(hdr, sizeof(hdr), "%s %lu", type_name(type), size) + 1;
- git_SHA1_Init(&c);
- git_SHA1_Update(&c, hdr, hdrlen);
+ the_hash_algo->init_fn(&c);
+ the_hash_algo->update_fn(&c, hdr, hdrlen);
} else
- sha1 = NULL;
+ oid = NULL;
if (type == OBJ_BLOB && size > big_file_threshold)
buf = fixed_buf;
else
@@ -469,8 +469,8 @@ static void *unpack_entry_data(off_t offset, unsigned long size,
stream.avail_in = input_len;
status = git_inflate(&stream, 0);
use(input_len - stream.avail_in);
- if (sha1)
- git_SHA1_Update(&c, last_out, stream.next_out - last_out);
+ if (oid)
+ the_hash_algo->update_fn(&c, last_out, stream.next_out - last_out);
if (buf == fixed_buf) {
stream.next_out = buf;
stream.avail_out = sizeof(fixed_buf);
@@ -479,15 +479,15 @@ static void *unpack_entry_data(off_t offset, unsigned long size,
if (stream.total_out != size || status != Z_STREAM_END)
bad_object(offset, _("inflate returned %d"), status);
git_inflate_end(&stream);
- if (sha1)
- git_SHA1_Final(sha1, &c);
+ if (oid)
+ the_hash_algo->final_fn(oid->hash, &c);
return buf == fixed_buf ? NULL : buf;
}
static void *unpack_raw_entry(struct object_entry *obj,
off_t *ofs_offset,
- unsigned char *ref_sha1,
- unsigned char *sha1)
+ struct object_id *ref_oid,
+ struct object_id *oid)
{
unsigned char *p;
unsigned long size, c;
@@ -515,8 +515,8 @@ static void *unpack_raw_entry(struct object_entry *obj,
switch (obj->type) {
case OBJ_REF_DELTA:
- hashcpy(ref_sha1, fill(20));
- use(20);
+ hashcpy(ref_oid->hash, fill(the_hash_algo->rawsz));
+ use(the_hash_algo->rawsz);
break;
case OBJ_OFS_DELTA:
p = fill(1);
@@ -546,7 +546,7 @@ static void *unpack_raw_entry(struct object_entry *obj,
}
obj->hdr_size = consumed_bytes - obj->idx.offset;
- data = unpack_entry_data(obj->idx.offset, obj->size, obj->type, sha1);
+ data = unpack_entry_data(obj->idx.offset, obj->size, obj->type, oid);
obj->idx.crc32 = input_crc32;
return data;
}
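
The rest of the index-pack conversion repeats one pattern: hard-coded SHA-1 state and literal 20-byte lengths are replaced by the_hash_algo members, so the code no longer assumes SHA-1 specifically. The shape of the abstraction, as used in the hunks above and below (buf/len stand in for whatever data is being hashed):

    git_hash_ctx c;
    unsigned char hash[GIT_MAX_RAWSZ];

    the_hash_algo->init_fn(&c);
    the_hash_algo->update_fn(&c, buf, len);
    the_hash_algo->final_fn(hash, &c);
    /* ...and every literal 20 becomes the_hash_algo->rawsz */
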
@@ -958,9 +958,8 @@ static void resolve_delta(struct object_entry *delta_obj,
free(delta_data);
if (!result->data)
bad_object(delta_obj->idx.offset, _("failed to apply delta"));
- hash_sha1_file(result->data, result->size,
- type_name(delta_obj->real_type),
- delta_obj->idx.oid.hash);
+ hash_object_file(result->data, result->size,
+ type_name(delta_obj->real_type), &delta_obj->idx.oid);
sha1_object(result->data, NULL, result->size, delta_obj->real_type,
&delta_obj->idx.oid);
counter_lock();
@@ -1119,11 +1118,11 @@ static void *threaded_second_pass(void *data)
* - calculate SHA1 of all non-delta objects;
* - remember base (SHA1 or offset) for all deltas.
*/
-static void parse_pack_objects(unsigned char *sha1)
+static void parse_pack_objects(unsigned char *hash)
{
int i, nr_delays = 0;
struct ofs_delta_entry *ofs_delta = ofs_deltas;
- unsigned char ref_delta_sha1[20];
+ struct object_id ref_delta_oid;
struct stat st;
if (verbose)
@@ -1133,8 +1132,8 @@ static void parse_pack_objects(unsigned char *sha1)
for (i = 0; i < nr_objects; i++) {
struct object_entry *obj = &objects[i];
void *data = unpack_raw_entry(obj, &ofs_delta->offset,
- ref_delta_sha1,
- obj->idx.oid.hash);
+ &ref_delta_oid,
+ &obj->idx.oid);
obj->real_type = obj->type;
if (obj->type == OBJ_OFS_DELTA) {
nr_ofs_deltas++;
@@ -1142,7 +1141,7 @@ static void parse_pack_objects(unsigned char *sha1)
ofs_delta++;
} else if (obj->type == OBJ_REF_DELTA) {
ALLOC_GROW(ref_deltas, nr_ref_deltas + 1, ref_deltas_alloc);
- hashcpy(ref_deltas[nr_ref_deltas].sha1, ref_delta_sha1);
+ hashcpy(ref_deltas[nr_ref_deltas].sha1, ref_delta_oid.hash);
ref_deltas[nr_ref_deltas].obj_no = i;
nr_ref_deltas++;
} else if (!data) {
@@ -1160,10 +1159,10 @@ static void parse_pack_objects(unsigned char *sha1)
/* Check pack integrity */
flush();
- git_SHA1_Final(sha1, &input_ctx);
- if (hashcmp(fill(20), sha1))
+ the_hash_algo->final_fn(hash, &input_ctx);
+ if (hashcmp(fill(the_hash_algo->rawsz), hash))
die(_("pack is corrupted (SHA1 mismatch)"));
- use(20);
+ use(the_hash_algo->rawsz);
/* If input_fd is a file, we should have reached its end now. */
if (fstat(input_fd, &st))
@@ -1239,21 +1238,21 @@ static void resolve_deltas(void)
/*
* Third pass:
* - append objects to convert thin pack to full pack if required
- * - write the final 20-byte SHA-1
+ * - write the final pack hash
*/
-static void fix_unresolved_deltas(struct sha1file *f);
-static void conclude_pack(int fix_thin_pack, const char *curr_pack, unsigned char *pack_sha1)
+static void fix_unresolved_deltas(struct hashfile *f);
+static void conclude_pack(int fix_thin_pack, const char *curr_pack, unsigned char *pack_hash)
{
if (nr_ref_deltas + nr_ofs_deltas == nr_resolved_deltas) {
stop_progress(&progress);
- /* Flush remaining pack final 20-byte SHA1. */
+ /* Flush remaining pack final hash. */
flush();
return;
}
if (fix_thin_pack) {
- struct sha1file *f;
- unsigned char read_sha1[20], tail_sha1[20];
+ struct hashfile *f;
+ unsigned char read_hash[GIT_MAX_RAWSZ], tail_hash[GIT_MAX_RAWSZ];
struct strbuf msg = STRBUF_INIT;
int nr_unresolved = nr_ofs_deltas + nr_ref_deltas - nr_resolved_deltas;
int nr_objects_initial = nr_objects;
@@ -1262,7 +1261,7 @@ static void conclude_pack(int fix_thin_pack, const char *curr_pack, unsigned cha
REALLOC_ARRAY(objects, nr_objects + nr_unresolved + 1);
memset(objects + nr_objects + 1, 0,
nr_unresolved * sizeof(*objects));
- f = sha1fd(output_fd, curr_pack);
+ f = hashfd(output_fd, curr_pack);
fix_unresolved_deltas(f);
strbuf_addf(&msg, Q_("completed with %d local object",
"completed with %d local objects",
@@ -1270,12 +1269,12 @@ static void conclude_pack(int fix_thin_pack, const char *curr_pack, unsigned cha
nr_objects - nr_objects_initial);
stop_progress_msg(&progress, msg.buf);
strbuf_release(&msg);
- sha1close(f, tail_sha1, 0);
- hashcpy(read_sha1, pack_sha1);
- fixup_pack_header_footer(output_fd, pack_sha1,
+ finalize_hashfile(f, tail_hash, 0);
+ hashcpy(read_hash, pack_hash);
+ fixup_pack_header_footer(output_fd, pack_hash,
curr_pack, nr_objects,
- read_sha1, consumed_bytes-20);
- if (hashcmp(read_sha1, tail_sha1) != 0)
+ read_hash, consumed_bytes-the_hash_algo->rawsz);
+ if (hashcmp(read_hash, tail_hash) != 0)
die(_("Unexpected tail checksum for %s "
"(disk corruption?)"), curr_pack);
}
@@ -1286,7 +1285,7 @@ static void conclude_pack(int fix_thin_pack, const char *curr_pack, unsigned cha
nr_ofs_deltas + nr_ref_deltas - nr_resolved_deltas);
}
-static int write_compressed(struct sha1file *f, void *in, unsigned int size)
+static int write_compressed(struct hashfile *f, void *in, unsigned int size)
{
git_zstream stream;
int status;
@@ -1300,7 +1299,7 @@ static int write_compressed(struct sha1file *f, void *in, unsigned int size)
stream.next_out = outbuf;
stream.avail_out = sizeof(outbuf);
status = git_deflate(&stream, Z_FINISH);
- sha1write(f, outbuf, sizeof(outbuf) - stream.avail_out);
+ hashwrite(f, outbuf, sizeof(outbuf) - stream.avail_out);
} while (status == Z_OK);
if (status != Z_STREAM_END)
@@ -1310,7 +1309,7 @@ static int write_compressed(struct sha1file *f, void *in, unsigned int size)
return size;
}
-static struct object_entry *append_obj_to_pack(struct sha1file *f,
+static struct object_entry *append_obj_to_pack(struct hashfile *f,
const unsigned char *sha1, void *buf,
unsigned long size, enum object_type type)
{
@@ -1327,7 +1326,7 @@ static struct object_entry *append_obj_to_pack(struct sha1file *f,
}
header[n++] = c;
crc32_begin(f);
- sha1write(f, header, n);
+ hashwrite(f, header, n);
obj[0].size = size;
obj[0].hdr_size = n;
obj[0].type = type;
@@ -1335,7 +1334,7 @@ static struct object_entry *append_obj_to_pack(struct sha1file *f,
obj[1].idx.offset = obj[0].idx.offset + n;
obj[1].idx.offset += write_compressed(f, buf, size);
obj[0].idx.crc32 = crc32_end(f);
- sha1flush(f);
+ hashflush(f);
hashcpy(obj->idx.oid.hash, sha1);
return obj;
}
@@ -1347,7 +1346,7 @@ static int delta_pos_compare(const void *_a, const void *_b)
return a->obj_no - b->obj_no;
}
-static void fix_unresolved_deltas(struct sha1file *f)
+static void fix_unresolved_deltas(struct hashfile *f)
{
struct ref_delta_entry **sorted_by_pos;
int i;
@@ -1389,15 +1388,60 @@ static void fix_unresolved_deltas(struct sha1file *f)
free(sorted_by_pos);
}
+static const char *derive_filename(const char *pack_name, const char *suffix,
+ struct strbuf *buf)
+{
+ size_t len;
+ if (!strip_suffix(pack_name, ".pack", &len))
+ die(_("packfile name '%s' does not end with '.pack'"),
+ pack_name);
+ strbuf_add(buf, pack_name, len);
+ strbuf_addch(buf, '.');
+ strbuf_addstr(buf, suffix);
+ return buf->buf;
+}
+
+static void write_special_file(const char *suffix, const char *msg,
+ const char *pack_name, const unsigned char *hash,
+ const char **report)
+{
+ struct strbuf name_buf = STRBUF_INIT;
+ const char *filename;
+ int fd;
+ int msg_len = strlen(msg);
+
+ if (pack_name)
+ filename = derive_filename(pack_name, suffix, &name_buf);
+ else
+ filename = odb_pack_name(&name_buf, hash, suffix);
+
+ fd = odb_pack_keep(filename);
+ if (fd < 0) {
+ if (errno != EEXIST)
+ die_errno(_("cannot write %s file '%s'"),
+ suffix, filename);
+ } else {
+ if (msg_len > 0) {
+ write_or_die(fd, msg, msg_len);
+ write_or_die(fd, "\n", 1);
+ }
+ if (close(fd) != 0)
+ die_errno(_("cannot close written %s file '%s'"),
+ suffix, filename);
+ if (report)
+ *report = suffix;
+ }
+ strbuf_release(&name_buf);
+}
+
static void final(const char *final_pack_name, const char *curr_pack_name,
const char *final_index_name, const char *curr_index_name,
- const char *keep_name, const char *keep_msg,
- unsigned char *sha1)
+ const char *keep_msg, const char *promisor_msg,
+ unsigned char *hash)
{
const char *report = "pack";
struct strbuf pack_name = STRBUF_INIT;
struct strbuf index_name = STRBUF_INIT;
- struct strbuf keep_name_buf = STRBUF_INIT;
int err;
if (!from_stdin) {
@@ -1409,32 +1453,16 @@ static void final(const char *final_pack_name, const char *curr_pack_name,
die_errno(_("error while closing pack file"));
}
- if (keep_msg) {
- int keep_fd, keep_msg_len = strlen(keep_msg);
-
- if (!keep_name)
- keep_name = odb_pack_name(&keep_name_buf, sha1, "keep");
-
- keep_fd = odb_pack_keep(keep_name);
- if (keep_fd < 0) {
- if (errno != EEXIST)
- die_errno(_("cannot write keep file '%s'"),
- keep_name);
- } else {
- if (keep_msg_len > 0) {
- write_or_die(keep_fd, keep_msg, keep_msg_len);
- write_or_die(keep_fd, "\n", 1);
- }
- if (close(keep_fd) != 0)
- die_errno(_("cannot close written keep file '%s'"),
- keep_name);
- report = "keep";
- }
- }
+ if (keep_msg)
+ write_special_file("keep", keep_msg, final_pack_name, hash,
+ &report);
+ if (promisor_msg)
+ write_special_file("promisor", promisor_msg, final_pack_name,
+ hash, NULL);
if (final_pack_name != curr_pack_name) {
if (!final_pack_name)
- final_pack_name = odb_pack_name(&pack_name, sha1, "pack");
+ final_pack_name = odb_pack_name(&pack_name, hash, "pack");
if (finalize_object_file(curr_pack_name, final_pack_name))
die(_("cannot store pack file"));
} else if (from_stdin)
@@ -1442,18 +1470,18 @@ static void final(const char *final_pack_name, const char *curr_pack_name,
if (final_index_name != curr_index_name) {
if (!final_index_name)
- final_index_name = odb_pack_name(&index_name, sha1, "idx");
+ final_index_name = odb_pack_name(&index_name, hash, "idx");
if (finalize_object_file(curr_index_name, final_index_name))
die(_("cannot store index file"));
} else
chmod(final_index_name, 0444);
if (!from_stdin) {
- printf("%s\n", sha1_to_hex(sha1));
+ printf("%s\n", sha1_to_hex(hash));
} else {
struct strbuf buf = STRBUF_INIT;
- strbuf_addf(&buf, "%s\t%s\n", report, sha1_to_hex(sha1));
+ strbuf_addf(&buf, "%s\t%s\n", report, sha1_to_hex(hash));
write_or_die(1, buf.buf, buf.len);
strbuf_release(&buf);
@@ -1472,7 +1500,6 @@ static void final(const char *final_pack_name, const char *curr_pack_name,
strbuf_release(&index_name);
strbuf_release(&pack_name);
- strbuf_release(&keep_name_buf);
}
static int git_index_pack_config(const char *k, const char *v, void *cb)
@@ -1615,32 +1642,26 @@ static void show_pack_info(int stat_only)
}
}
-static const char *derive_filename(const char *pack_name, const char *suffix,
- struct strbuf *buf)
-{
- size_t len;
- if (!strip_suffix(pack_name, ".pack", &len))
- die(_("packfile name '%s' does not end with '.pack'"),
- pack_name);
- strbuf_add(buf, pack_name, len);
- strbuf_addstr(buf, suffix);
- return buf->buf;
-}
-
int cmd_index_pack(int argc, const char **argv, const char *prefix)
{
int i, fix_thin_pack = 0, verify = 0, stat_only = 0;
const char *curr_index;
const char *index_name = NULL, *pack_name = NULL;
- const char *keep_name = NULL, *keep_msg = NULL;
- struct strbuf index_name_buf = STRBUF_INIT,
- keep_name_buf = STRBUF_INIT;
+ const char *keep_msg = NULL;
+ const char *promisor_msg = NULL;
+ struct strbuf index_name_buf = STRBUF_INIT;
struct pack_idx_entry **idx_objects;
struct pack_idx_option opts;
- unsigned char pack_sha1[20];
+ unsigned char pack_hash[GIT_MAX_RAWSZ];
unsigned foreign_nr = 1; /* zero is a "good" value, assume bad */
int report_end_of_input = 0;
+ /*
+ * index-pack never needs to fetch missing objects, since it only
+ * accesses the repo to do hash collision checks
+ */
+ fetch_if_missing = 0;
+
if (argc == 2 && !strcmp(argv[1], "-h"))
usage(index_pack_usage);
@@ -1678,6 +1699,8 @@ int cmd_index_pack(int argc, const char **argv, const char *prefix)
stat_only = 1;
} else if (skip_to_optional_arg(arg, "--keep", &keep_msg)) {
; /* nothing to do */
+ } else if (skip_to_optional_arg(arg, "--promisor", &promisor_msg)) {
+ ; /* already parsed */
} else if (starts_with(arg, "--threads=")) {
char *end;
nr_threads = strtoul(arg+10, &end, 0);
@@ -1740,9 +1763,7 @@ int cmd_index_pack(int argc, const char **argv, const char *prefix)
if (from_stdin && !startup_info->have_repository)
die(_("--stdin requires a git repository"));
if (!index_name && pack_name)
- index_name = derive_filename(pack_name, ".idx", &index_name_buf);
- if (keep_msg && !keep_name && pack_name)
- keep_name = derive_filename(pack_name, ".keep", &keep_name_buf);
+ index_name = derive_filename(pack_name, "idx", &index_name_buf);
if (verify) {
if (!index_name)
@@ -1768,11 +1789,11 @@ int cmd_index_pack(int argc, const char **argv, const char *prefix)
if (show_stat)
obj_stat = xcalloc(st_add(nr_objects, 1), sizeof(struct object_stat));
ofs_deltas = xcalloc(nr_objects, sizeof(struct ofs_delta_entry));
- parse_pack_objects(pack_sha1);
+ parse_pack_objects(pack_hash);
if (report_end_of_input)
write_in_full(2, "\0", 1);
resolve_deltas();
- conclude_pack(fix_thin_pack, curr_pack, pack_sha1);
+ conclude_pack(fix_thin_pack, curr_pack, pack_hash);
free(ofs_deltas);
free(ref_deltas);
if (strict)
@@ -1784,19 +1805,18 @@ int cmd_index_pack(int argc, const char **argv, const char *prefix)
ALLOC_ARRAY(idx_objects, nr_objects);
for (i = 0; i < nr_objects; i++)
idx_objects[i] = &objects[i].idx;
- curr_index = write_idx_file(index_name, idx_objects, nr_objects, &opts, pack_sha1);
+ curr_index = write_idx_file(index_name, idx_objects, nr_objects, &opts, pack_hash);
free(idx_objects);
if (!verify)
final(pack_name, curr_pack,
index_name, curr_index,
- keep_name, keep_msg,
- pack_sha1);
+ keep_msg, promisor_msg,
+ pack_hash);
else
close(input_fd);
free(objects);
strbuf_release(&index_name_buf);
- strbuf_release(&keep_name_buf);
if (pack_name == NULL)
free((void *) curr_pack);
if (index_name == NULL)
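
The index-pack hunks above fold the old .keep-only logic into write_special_file(), which also writes the new ".promisor" marker, and derive_filename() now takes its suffix without a leading dot (callers pass "idx", "keep", "promisor"). A minimal sketch of that naming convention, assuming it is built inside the Git tree so git-compat-util.h and strbuf.h are available:

/*
 * Sketch: derive "pack-1234.promisor" from "pack-1234.pack" the same
 * way derive_filename() does after this change -- strip ".pack", then
 * append "." plus a dot-less suffix supplied by the caller.
 */
#include "git-compat-util.h"
#include "strbuf.h"

static void show_derived_name(const char *pack_name, const char *suffix)
{
	struct strbuf buf = STRBUF_INIT;
	size_t len;

	if (!strip_suffix(pack_name, ".pack", &len))
		die("packfile name '%s' does not end with '.pack'", pack_name);
	strbuf_add(&buf, pack_name, len);   /* "pack-1234" */
	strbuf_addch(&buf, '.');            /* separator added once, here */
	strbuf_addstr(&buf, suffix);        /* "idx", "keep" or "promisor" */
	printf("%s\n", buf.buf);
	strbuf_release(&buf);
}

Keeping the dot out of the suffix lets final() reuse the same string both as the file suffix and as the report tag it prints in --stdin mode.
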
diff --git a/builtin/merge.c b/builtin/merge.c
index 9dec140..5e5e449 100644
--- a/builtin/merge.c
+++ b/builtin/merge.c
@@ -820,8 +820,8 @@ static int merge_trivial(struct commit *head, struct commit_list *remoteheads)
pptr = commit_list_append(head, pptr);
pptr = commit_list_append(remoteheads->item, pptr);
prepare_to_commit(remoteheads);
- if (commit_tree(merge_msg.buf, merge_msg.len, result_tree.hash, parents,
- result_commit.hash, NULL, sign_commit))
+ if (commit_tree(merge_msg.buf, merge_msg.len, &result_tree, parents,
+ &result_commit, NULL, sign_commit))
die(_("failed to write commit object"));
finish(head, remoteheads, &result_commit, "In-index merge");
drop_save();
@@ -845,8 +845,8 @@ static int finish_automerge(struct commit *head,
commit_list_insert(head, &parents);
strbuf_addch(&merge_msg, '\n');
prepare_to_commit(remoteheads);
- if (commit_tree(merge_msg.buf, merge_msg.len, result_tree->hash, parents,
- result_commit.hash, NULL, sign_commit))
+ if (commit_tree(merge_msg.buf, merge_msg.len, result_tree, parents,
+ &result_commit, NULL, sign_commit))
die(_("failed to write commit object"));
strbuf_addf(&buf, "Merge made by the '%s' strategy.", wt_strategy);
finish(head, remoteheads, &result_commit, buf.buf);
diff --git a/builtin/mktag.c b/builtin/mktag.c
index 031b750..beb5528 100644
--- a/builtin/mktag.c
+++ b/builtin/mktag.c
@@ -151,7 +151,7 @@ static int verify_tag(char *buffer, unsigned long size)
int cmd_mktag(int argc, const char **argv, const char *prefix)
{
struct strbuf buf = STRBUF_INIT;
- unsigned char result_sha1[20];
+ struct object_id result;
if (argc != 1)
usage("git mktag");
@@ -165,10 +165,10 @@ int cmd_mktag(int argc, const char **argv, const char *prefix)
if (verify_tag(buf.buf, buf.len) < 0)
die("invalid tag signature file");
- if (write_sha1_file(buf.buf, buf.len, tag_type, result_sha1) < 0)
+ if (write_object_file(buf.buf, buf.len, tag_type, &result) < 0)
die("unable to write tag file");
strbuf_release(&buf);
- printf("%s\n", sha1_to_hex(result_sha1));
+ printf("%s\n", oid_to_hex(&result));
return 0;
}
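
mktag is one of several callers in this patch converted from write_sha1_file()/sha1_to_hex() on a bare unsigned char[20] to write_object_file()/oid_to_hex() on a struct object_id. A hedged sketch of the new calling convention, assuming Git's internal cache.h and the blob_type constant it declares:

#include "cache.h"

/*
 * Sketch: hash a buffer as a blob without storing it, then write it
 * for real; both helpers now fill in a struct object_id directly.
 */
static void store_blob_example(const void *data, unsigned long len)
{
	struct object_id oid;

	/* hash only -- nothing is written to the object database */
	hash_object_file(data, len, blob_type, &oid);

	/* write the object and report its name */
	if (write_object_file(data, len, blob_type, &oid) < 0)
		die("unable to write object");
	printf("%s\n", oid_to_hex(&oid));
}
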
diff --git a/builtin/mktree.c b/builtin/mktree.c
index b7b1cb5..f5f3c0e 100644
--- a/builtin/mktree.c
+++ b/builtin/mktree.c
@@ -40,7 +40,7 @@ static int ent_compare(const void *a_, const void *b_)
b->name, b->len, b->mode);
}
-static void write_tree(unsigned char *sha1)
+static void write_tree(struct object_id *oid)
{
struct strbuf buf;
size_t size;
@@ -57,7 +57,7 @@ static void write_tree(unsigned char *sha1)
strbuf_add(&buf, ent->sha1, 20);
}
- write_sha1_file(buf.buf, buf.len, tree_type, sha1);
+ write_object_file(buf.buf, buf.len, tree_type, oid);
strbuf_release(&buf);
}
@@ -142,7 +142,7 @@ static void mktree_line(char *buf, size_t len, int nul_term_line, int allow_miss
int cmd_mktree(int ac, const char **av, const char *prefix)
{
struct strbuf sb = STRBUF_INIT;
- unsigned char sha1[20];
+ struct object_id oid;
int nul_term_line = 0;
int allow_missing = 0;
int is_batch_mode = 0;
@@ -181,8 +181,8 @@ int cmd_mktree(int ac, const char **av, const char *prefix)
*/
; /* skip creating an empty tree */
} else {
- write_tree(sha1);
- puts(sha1_to_hex(sha1));
+ write_tree(&oid);
+ puts(oid_to_hex(&oid));
fflush(stdout);
}
used=0; /* reset tree entry buffer for re-use in batch mode */
diff --git a/builtin/notes.c b/builtin/notes.c
index 7c81761..39304ba 100644
--- a/builtin/notes.c
+++ b/builtin/notes.c
@@ -198,9 +198,9 @@ static void prepare_note_data(const struct object_id *object, struct note_data *
}
}
-static void write_note_data(struct note_data *d, unsigned char *sha1)
+static void write_note_data(struct note_data *d, struct object_id *oid)
{
- if (write_sha1_file(d->buf.buf, d->buf.len, blob_type, sha1)) {
+ if (write_object_file(d->buf.buf, d->buf.len, blob_type, oid)) {
error(_("unable to write note object"));
if (d->edit_path)
error(_("the note contents have been left in %s"),
@@ -459,7 +459,7 @@ static int add(int argc, const char **argv, const char *prefix)
prepare_note_data(&object, &d, note ? note->hash : NULL);
if (d.buf.len || allow_empty) {
- write_note_data(&d, new_note.hash);
+ write_note_data(&d, &new_note);
if (add_note(t, &object, &new_note, combine_notes_overwrite))
die("BUG: combine_notes_overwrite failed");
commit_notes(t, "Notes added by 'git notes add'");
@@ -619,7 +619,7 @@ static int append_edit(int argc, const char **argv, const char *prefix)
}
if (d.buf.len || allow_empty) {
- write_note_data(&d, new_note.hash);
+ write_note_data(&d, &new_note);
if (add_note(t, &object, &new_note, combine_notes_overwrite))
die("BUG: combine_notes_overwrite failed");
logmsg = xstrfmt("Notes added by 'git notes %s'", argv[0]);
diff --git a/builtin/pack-objects.c b/builtin/pack-objects.c
index bfda860..2b15afd 100644
--- a/builtin/pack-objects.c
+++ b/builtin/pack-objects.c
@@ -26,7 +26,7 @@
#include "reachable.h"
#include "sha1-array.h"
#include "argv-array.h"
-#include "mru.h"
+#include "list.h"
#include "packfile.h"
static const char *pack_usage[] = {
@@ -75,6 +75,8 @@ static int use_bitmap_index = -1;
static int write_bitmap_index;
static uint16_t write_bitmap_options;
+static int exclude_promisor_objects;
+
static unsigned long delta_cache_size = 0;
static unsigned long max_delta_cache_size = 256 * 1024 * 1024;
static unsigned long cache_max_small_delta_size = 1000;
@@ -84,8 +86,9 @@ static unsigned long window_memory_limit = 0;
static struct list_objects_filter_options filter_options;
enum missing_action {
- MA_ERROR = 0, /* fail if any missing objects are encountered */
- MA_ALLOW_ANY, /* silently allow ALL missing objects */
+ MA_ERROR = 0, /* fail if any missing objects are encountered */
+ MA_ALLOW_ANY, /* silently allow ALL missing objects */
+ MA_ALLOW_PROMISOR, /* silently allow all missing PROMISOR objects */
};
static enum missing_action arg_missing_action;
static show_object_fn fn_show_object;
@@ -161,7 +164,7 @@ static unsigned long do_compress(void **pptr, unsigned long size)
return stream.total_out;
}
-static unsigned long write_large_blob_data(struct git_istream *st, struct sha1file *f,
+static unsigned long write_large_blob_data(struct git_istream *st, struct hashfile *f,
const struct object_id *oid)
{
git_zstream stream;
@@ -185,7 +188,7 @@ static unsigned long write_large_blob_data(struct git_istream *st, struct sha1fi
stream.next_out = obuf;
stream.avail_out = sizeof(obuf);
zret = git_deflate(&stream, readlen ? 0 : Z_FINISH);
- sha1write(f, obuf, stream.next_out - obuf);
+ hashwrite(f, obuf, stream.next_out - obuf);
olen += stream.next_out - obuf;
}
if (stream.avail_in)
@@ -230,7 +233,7 @@ static int check_pack_inflate(struct packed_git *p,
stream.total_in == len) ? 0 : -1;
}
-static void copy_pack_data(struct sha1file *f,
+static void copy_pack_data(struct hashfile *f,
struct packed_git *p,
struct pack_window **w_curs,
off_t offset,
@@ -243,14 +246,14 @@ static void copy_pack_data(struct sha1file *f,
in = use_pack(p, w_curs, offset, &avail);
if (avail > len)
avail = (unsigned long)len;
- sha1write(f, in, avail);
+ hashwrite(f, in, avail);
offset += avail;
len -= avail;
}
}
/* Return 0 if we will bust the pack-size limit */
-static unsigned long write_no_reuse_object(struct sha1file *f, struct object_entry *entry,
+static unsigned long write_no_reuse_object(struct hashfile *f, struct object_entry *entry,
unsigned long limit, int usable_delta)
{
unsigned long size, datalen;
@@ -323,8 +326,8 @@ static unsigned long write_no_reuse_object(struct sha1file *f, struct object_ent
free(buf);
return 0;
}
- sha1write(f, header, hdrlen);
- sha1write(f, dheader + pos, sizeof(dheader) - pos);
+ hashwrite(f, header, hdrlen);
+ hashwrite(f, dheader + pos, sizeof(dheader) - pos);
hdrlen += sizeof(dheader) - pos;
} else if (type == OBJ_REF_DELTA) {
/*
@@ -337,8 +340,8 @@ static unsigned long write_no_reuse_object(struct sha1file *f, struct object_ent
free(buf);
return 0;
}
- sha1write(f, header, hdrlen);
- sha1write(f, entry->delta->idx.oid.hash, 20);
+ hashwrite(f, header, hdrlen);
+ hashwrite(f, entry->delta->idx.oid.hash, 20);
hdrlen += 20;
} else {
if (limit && hdrlen + datalen + 20 >= limit) {
@@ -347,13 +350,13 @@ static unsigned long write_no_reuse_object(struct sha1file *f, struct object_ent
free(buf);
return 0;
}
- sha1write(f, header, hdrlen);
+ hashwrite(f, header, hdrlen);
}
if (st) {
datalen = write_large_blob_data(st, f, &entry->idx.oid);
close_istream(st);
} else {
- sha1write(f, buf, datalen);
+ hashwrite(f, buf, datalen);
free(buf);
}
@@ -361,7 +364,7 @@ static unsigned long write_no_reuse_object(struct sha1file *f, struct object_ent
}
/* Return 0 if we will bust the pack-size limit */
-static off_t write_reuse_object(struct sha1file *f, struct object_entry *entry,
+static off_t write_reuse_object(struct hashfile *f, struct object_entry *entry,
unsigned long limit, int usable_delta)
{
struct packed_git *p = entry->in_pack;
@@ -412,8 +415,8 @@ static off_t write_reuse_object(struct sha1file *f, struct object_entry *entry,
unuse_pack(&w_curs);
return 0;
}
- sha1write(f, header, hdrlen);
- sha1write(f, dheader + pos, sizeof(dheader) - pos);
+ hashwrite(f, header, hdrlen);
+ hashwrite(f, dheader + pos, sizeof(dheader) - pos);
hdrlen += sizeof(dheader) - pos;
reused_delta++;
} else if (type == OBJ_REF_DELTA) {
@@ -421,8 +424,8 @@ static off_t write_reuse_object(struct sha1file *f, struct object_entry *entry,
unuse_pack(&w_curs);
return 0;
}
- sha1write(f, header, hdrlen);
- sha1write(f, entry->delta->idx.oid.hash, 20);
+ hashwrite(f, header, hdrlen);
+ hashwrite(f, entry->delta->idx.oid.hash, 20);
hdrlen += 20;
reused_delta++;
} else {
@@ -430,7 +433,7 @@ static off_t write_reuse_object(struct sha1file *f, struct object_entry *entry,
unuse_pack(&w_curs);
return 0;
}
- sha1write(f, header, hdrlen);
+ hashwrite(f, header, hdrlen);
}
copy_pack_data(f, p, &w_curs, offset, datalen);
unuse_pack(&w_curs);
@@ -439,7 +442,7 @@ static off_t write_reuse_object(struct sha1file *f, struct object_entry *entry,
}
/* Return 0 if we will bust the pack-size limit */
-static off_t write_object(struct sha1file *f,
+static off_t write_object(struct hashfile *f,
struct object_entry *entry,
off_t write_offset)
{
@@ -512,7 +515,7 @@ enum write_one_status {
WRITE_ONE_RECURSIVE = 2 /* already scheduled to be written */
};
-static enum write_one_status write_one(struct sha1file *f,
+static enum write_one_status write_one(struct hashfile *f,
struct object_entry *e,
off_t *offset)
{
@@ -731,7 +734,7 @@ static struct object_entry **compute_write_order(void)
return wo;
}
-static off_t write_reused_pack(struct sha1file *f)
+static off_t write_reused_pack(struct hashfile *f)
{
unsigned char buffer[8192];
off_t to_write, total;
@@ -762,7 +765,7 @@ static off_t write_reused_pack(struct sha1file *f)
if (read_pack > to_write)
read_pack = to_write;
- sha1write(f, buffer, read_pack);
+ hashwrite(f, buffer, read_pack);
to_write -= read_pack;
/*
@@ -791,7 +794,7 @@ static const char no_split_warning[] = N_(
static void write_pack_file(void)
{
uint32_t i = 0, j;
- struct sha1file *f;
+ struct hashfile *f;
off_t offset;
uint32_t nr_remaining = nr_result;
time_t last_mtime = 0;
@@ -807,7 +810,7 @@ static void write_pack_file(void)
char *pack_tmp_name = NULL;
if (pack_to_stdout)
- f = sha1fd_throughput(1, "<stdout>", progress_state);
+ f = hashfd_throughput(1, "<stdout>", progress_state);
else
f = create_tmp_packfile(&pack_tmp_name);
@@ -834,11 +837,11 @@ static void write_pack_file(void)
* If so, rewrite it like in fast-import
*/
if (pack_to_stdout) {
- sha1close(f, oid.hash, CSUM_CLOSE);
+ finalize_hashfile(f, oid.hash, CSUM_HASH_IN_STREAM | CSUM_CLOSE);
} else if (nr_written == nr_remaining) {
- sha1close(f, oid.hash, CSUM_FSYNC);
+ finalize_hashfile(f, oid.hash, CSUM_HASH_IN_STREAM | CSUM_FSYNC | CSUM_CLOSE);
} else {
- int fd = sha1close(f, oid.hash, 0);
+ int fd = finalize_hashfile(f, oid.hash, 0);
fixup_pack_header_footer(fd, oid.hash, pack_tmp_name,
nr_written, oid.hash, offset);
close(fd);
@@ -1006,8 +1009,8 @@ static int want_object_in_pack(const struct object_id *oid,
struct packed_git **found_pack,
off_t *found_offset)
{
- struct mru_entry *entry;
int want;
+ struct list_head *pos;
if (!exclude && local && has_loose_object_nonlocal(oid->hash))
return 0;
@@ -1023,8 +1026,8 @@ static int want_object_in_pack(const struct object_id *oid,
return want;
}
- for (entry = packed_git_mru.head; entry; entry = entry->next) {
- struct packed_git *p = entry->item;
+ list_for_each(pos, &packed_git_mru) {
+ struct packed_git *p = list_entry(pos, struct packed_git, mru);
off_t offset;
if (p == *found_pack)
@@ -1041,7 +1044,7 @@ static int want_object_in_pack(const struct object_id *oid,
}
want = want_found_object(exclude, p);
if (!exclude && want > 0)
- mru_mark(&packed_git_mru, entry);
+ list_move(&p->mru, &packed_git_mru);
if (want != -1)
return want;
}
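
The hunk above replaces the bespoke mru structure with the generic list.h doubly-linked list: struct packed_git grows a list_head member named mru (see the cache.h hunk further down), iteration uses list_for_each()/list_entry(), and "marking" a pack becomes list_move() to the front. A minimal sketch of the same access pattern, assuming the declarations shown in this patch:

#include "cache.h"
#include "list.h"

/*
 * Sketch: walk packs in most-recently-used order and bump the pack
 * that satisfied the lookup to the head of the list, mirroring
 * want_object_in_pack() after this change.
 */
static struct packed_git *find_in_mru_order(int (*matches)(struct packed_git *))
{
	struct list_head *pos;

	list_for_each(pos, &packed_git_mru) {
		struct packed_git *p = list_entry(pos, struct packed_git, mru);
		if (matches(p)) {
			list_move(&p->mru, &packed_git_mru); /* mark as MRU */
			return p;
		}
	}
	return NULL;
}
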
@@ -2578,6 +2581,20 @@ static void show_object__ma_allow_any(struct object *obj, const char *name, void
show_object(obj, name, data);
}
+static void show_object__ma_allow_promisor(struct object *obj, const char *name, void *data)
+{
+ assert(arg_missing_action == MA_ALLOW_PROMISOR);
+
+ /*
+ * Quietly ignore EXPECTED missing objects. This avoids problems with
+ * staging them now and getting an odd error later.
+ */
+ if (!has_object_file(&obj->oid) && is_promisor_object(&obj->oid))
+ return;
+
+ show_object(obj, name, data);
+}
+
static int option_parse_missing_action(const struct option *opt,
const char *arg, int unset)
{
@@ -2592,10 +2609,18 @@ static int option_parse_missing_action(const struct option *opt,
if (!strcmp(arg, "allow-any")) {
arg_missing_action = MA_ALLOW_ANY;
+ fetch_if_missing = 0;
fn_show_object = show_object__ma_allow_any;
return 0;
}
+ if (!strcmp(arg, "allow-promisor")) {
+ arg_missing_action = MA_ALLOW_PROMISOR;
+ fetch_if_missing = 0;
+ fn_show_object = show_object__ma_allow_promisor;
+ return 0;
+ }
+
die(_("invalid value for --missing"));
return 0;
}
@@ -2768,7 +2793,7 @@ static void loosen_unused_packed_objects(struct rev_info *revs)
if (!packlist_find(&to_pack, oid.hash, NULL) &&
!has_sha1_pack_kept_or_nonlocal(&oid) &&
!loosened_object_can_be_discarded(&oid, p->mtime))
- if (force_object_loose(oid.hash, p->mtime))
+ if (force_object_loose(&oid, p->mtime))
die("unable to force loose object");
}
}
@@ -3009,6 +3034,8 @@ int cmd_pack_objects(int argc, const char **argv, const char *prefix)
{ OPTION_CALLBACK, 0, "missing", NULL, N_("action"),
N_("handling for missing objects"), PARSE_OPT_NONEG,
option_parse_missing_action },
+ OPT_BOOL(0, "exclude-promisor-objects", &exclude_promisor_objects,
+ N_("do not pack objects in promisor packfiles")),
OPT_END(),
};
@@ -3054,6 +3081,12 @@ int cmd_pack_objects(int argc, const char **argv, const char *prefix)
argv_array_push(&rp, "--unpacked");
}
+ if (exclude_promisor_objects) {
+ use_internal_rev_list = 1;
+ fetch_if_missing = 0;
+ argv_array_push(&rp, "--exclude-promisor-objects");
+ }
+
if (!reuse_object)
reuse_delta = 0;
if (pack_compression_level == -1)
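
Throughout pack-objects above, the sha1file writer is renamed to the hashfile API: sha1write() becomes hashwrite(), sha1fd_throughput() becomes hashfd_throughput(), and sha1close() becomes finalize_hashfile() with explicit CSUM flags. A minimal sketch of the write/finalize pattern, assuming cache.h and csum-file.h from this patch:

#include "cache.h"
#include "csum-file.h"

/*
 * Sketch: stream data through the renamed hashfile API. The caller
 * now states explicitly whether the trailing checksum is appended to
 * the stream, whether to fsync, and whether to close the descriptor.
 */
static void stream_to_stdout_example(const void *buf, unsigned int len)
{
	unsigned char trailer[GIT_MAX_RAWSZ];
	struct hashfile *f = hashfd_throughput(1, "<stdout>", NULL);

	hashwrite(f, buf, len);
	finalize_hashfile(f, trailer, CSUM_HASH_IN_STREAM | CSUM_CLOSE);
}

CSUM_HASH_IN_STREAM restores the previously implicit behaviour of appending the checksum to the output; passing 0, as the fixup_pack_header_footer path does, keeps the descriptor open and leaves the trailer to be patched in later.
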
diff --git a/builtin/prune.c b/builtin/prune.c
index c175f15..4394d01 100644
--- a/builtin/prune.c
+++ b/builtin/prune.c
@@ -101,12 +101,15 @@ int cmd_prune(int argc, const char **argv, const char *prefix)
{
struct rev_info revs;
struct progress *progress = NULL;
+ int exclude_promisor_objects = 0;
const struct option options[] = {
OPT__DRY_RUN(&show_only, N_("do not remove, show only")),
OPT__VERBOSE(&verbose, N_("report pruned objects")),
OPT_BOOL(0, "progress", &show_progress, N_("show progress")),
OPT_EXPIRY_DATE(0, "expire", &expire,
N_("expire objects older than <time>")),
+ OPT_BOOL(0, "exclude-promisor-objects", &exclude_promisor_objects,
+ N_("limit traversal to objects outside promisor packfiles")),
OPT_END()
};
char *s;
@@ -139,6 +142,10 @@ int cmd_prune(int argc, const char **argv, const char *prefix)
show_progress = isatty(2);
if (show_progress)
progress = start_delayed_progress(_("Checking connectivity"), 0);
+ if (exclude_promisor_objects) {
+ fetch_if_missing = 0;
+ revs.exclude_promisor_objects = 1;
+ }
mark_reachable_objects(&revs, 1, expire, progress);
stop_progress(&progress);
diff --git a/builtin/pull.c b/builtin/pull.c
index 511dbbe..1876271 100644
--- a/builtin/pull.c
+++ b/builtin/pull.c
@@ -574,6 +574,7 @@ static int rebase_submodules(void)
cp.no_stdin = 1;
argv_array_pushl(&cp.args, "submodule", "update",
"--recursive", "--rebase", NULL);
+ argv_push_verbosity(&cp.args);
return run_command(&cp);
}
@@ -586,6 +587,7 @@ static int update_submodules(void)
cp.no_stdin = 1;
argv_array_pushl(&cp.args, "submodule", "update",
"--recursive", "--checkout", NULL);
+ argv_push_verbosity(&cp.args);
return run_command(&cp);
}
diff --git a/builtin/rebase--helper.c b/builtin/rebase--helper.c
index 7daee54..00faf14 100644
--- a/builtin/rebase--helper.c
+++ b/builtin/rebase--helper.c
@@ -43,7 +43,7 @@ int cmd_rebase__helper(int argc, const char **argv, const char *prefix)
OPT_END()
};
- git_config(git_default_config, NULL);
+ sequencer_init_config(&opts);
git_config_get_bool("rebase.abbreviatecommands", &abbreviate_commands);
opts.action = REPLAY_INTERACTIVE_REBASE;
diff --git a/builtin/receive-pack.c b/builtin/receive-pack.c
index b7ce7c7..75e7f18 100644
--- a/builtin/receive-pack.c
+++ b/builtin/receive-pack.c
@@ -69,7 +69,7 @@ static int sent_capabilities;
static int shallow_update;
static const char *alt_shallow_file;
static struct strbuf push_cert = STRBUF_INIT;
-static unsigned char push_cert_sha1[20];
+static struct object_id push_cert_oid;
static struct signature_check sigcheck;
static const char *push_cert_nonce;
static const char *cert_nonce_seed;
@@ -633,8 +633,9 @@ static void prepare_push_cert_sha1(struct child_process *proc)
int bogs /* beginning_of_gpg_sig */;
already_done = 1;
- if (write_sha1_file(push_cert.buf, push_cert.len, "blob", push_cert_sha1))
- hashclr(push_cert_sha1);
+ if (write_object_file(push_cert.buf, push_cert.len, "blob",
+ &push_cert_oid))
+ oidclr(&push_cert_oid);
memset(&sigcheck, '\0', sizeof(sigcheck));
sigcheck.result = 'N';
@@ -655,9 +656,9 @@ static void prepare_push_cert_sha1(struct child_process *proc)
strbuf_release(&gpg_status);
nonce_status = check_nonce(push_cert.buf, bogs);
}
- if (!is_null_sha1(push_cert_sha1)) {
+ if (!is_null_oid(&push_cert_oid)) {
argv_array_pushf(&proc->env_array, "GIT_PUSH_CERT=%s",
- sha1_to_hex(push_cert_sha1));
+ oid_to_hex(&push_cert_oid));
argv_array_pushf(&proc->env_array, "GIT_PUSH_CERT_SIGNER=%s",
sigcheck.signer ? sigcheck.signer : "");
argv_array_pushf(&proc->env_array, "GIT_PUSH_CERT_KEY=%s",
diff --git a/builtin/repack.c b/builtin/repack.c
index f17a68a..7bdb401 100644
--- a/builtin/repack.c
+++ b/builtin/repack.c
@@ -83,7 +83,8 @@ static void remove_pack_on_signal(int signo)
/*
* Adds all packs hex strings to the fname list, which do not
- * have a corresponding .keep file.
+ * have a corresponding .keep or .promisor file. These packs are not to
+ * be kept if we are going to pack everything into one file.
*/
static void get_non_kept_pack_filenames(struct string_list *fname_list)
{
@@ -101,7 +102,8 @@ static void get_non_kept_pack_filenames(struct string_list *fname_list)
fname = xmemdupz(e->d_name, len);
- if (!file_exists(mkpath("%s/%s.keep", packdir, fname)))
+ if (!file_exists(mkpath("%s/%s.keep", packdir, fname)) &&
+ !file_exists(mkpath("%s/%s.promisor", packdir, fname)))
string_list_append_nodup(fname_list, fname);
else
free(fname);
@@ -232,6 +234,8 @@ int cmd_repack(int argc, const char **argv, const char *prefix)
argv_array_push(&cmd.args, "--all");
argv_array_push(&cmd.args, "--reflog");
argv_array_push(&cmd.args, "--indexed-objects");
+ if (repository_format_partial_clone)
+ argv_array_push(&cmd.args, "--exclude-promisor-objects");
if (window)
argv_array_pushf(&cmd.args, "--window=%s", window);
if (window_memory)
diff --git a/builtin/replace.c b/builtin/replace.c
index 303ca13..9f01f3f 100644
--- a/builtin/replace.c
+++ b/builtin/replace.c
@@ -355,7 +355,7 @@ static void check_one_mergetag(struct commit *commit,
struct tag *tag;
int i;
- hash_sha1_file(extra->value, extra->len, type_name(OBJ_TAG), tag_oid.hash);
+ hash_object_file(extra->value, extra->len, type_name(OBJ_TAG), &tag_oid);
tag = lookup_tag(&tag_oid);
if (!tag)
die(_("bad mergetag in commit '%s'"), ref);
@@ -410,7 +410,7 @@ static int create_graft(int argc, const char **argv, int force)
check_mergetags(commit, argc, argv);
- if (write_sha1_file(buf.buf, buf.len, commit_type, new_oid.hash))
+ if (write_object_file(buf.buf, buf.len, commit_type, &new_oid))
die(_("could not write replacement commit for: '%s'"), old_ref);
strbuf_release(&buf);
diff --git a/builtin/reset.c b/builtin/reset.c
index e15f595..5da0f75 100644
--- a/builtin/reset.c
+++ b/builtin/reset.c
@@ -106,24 +106,16 @@ out:
static void print_new_head_line(struct commit *commit)
{
- const char *hex, *body;
- const char *msg;
-
- hex = find_unique_abbrev(commit->object.oid.hash, DEFAULT_ABBREV);
- printf(_("HEAD is now at %s"), hex);
- msg = logmsg_reencode(commit, NULL, get_log_output_encoding());
- body = strstr(msg, "\n\n");
- if (body) {
- const char *eol;
- size_t len;
- body = skip_blank_lines(body + 2);
- eol = strchr(body, '\n');
- len = eol ? eol - body : strlen(body);
- printf(" %.*s\n", (int) len, body);
- }
- else
- printf("\n");
- unuse_commit_buffer(commit, msg);
+ struct strbuf buf = STRBUF_INIT;
+
+ printf(_("HEAD is now at %s"),
+ find_unique_abbrev(commit->object.oid.hash, DEFAULT_ABBREV));
+
+ pp_commit_easy(CMIT_FMT_ONELINE, commit, &buf);
+ if (buf.len > 0)
+ printf(" %s", buf.buf);
+ putchar('\n');
+ strbuf_release(&buf);
}
static void update_index_from_diff(struct diff_queue_struct *q,
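
print_new_head_line() above drops its hand-rolled subject extraction (logmsg_reencode() plus manual line scanning) in favour of the pretty-printer. A hedged sketch of the replacement idiom, assuming cache.h and commit.h from the Git tree:

#include "cache.h"
#include "commit.h"

/* Sketch: print "<abbrev-oid> <subject>" for a commit on one line. */
static void print_oneline(struct commit *commit)
{
	struct strbuf buf = STRBUF_INIT;

	pp_commit_easy(CMIT_FMT_ONELINE, commit, &buf);
	printf("%s %s\n",
	       find_unique_abbrev(commit->object.oid.hash, DEFAULT_ABBREV),
	       buf.buf);
	strbuf_release(&buf);
}
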
diff --git a/builtin/rev-list.c b/builtin/rev-list.c
index d5345b6..d320b6f 100644
--- a/builtin/rev-list.c
+++ b/builtin/rev-list.c
@@ -15,6 +15,7 @@
#include "progress.h"
#include "reflog-walk.h"
#include "oidset.h"
+#include "packfile.h"
static const char rev_list_usage[] =
"git rev-list [OPTION] <commit-id>... [ -- paths... ]\n"
@@ -67,6 +68,7 @@ enum missing_action {
MA_ERROR = 0, /* fail if any missing objects are encountered */
MA_ALLOW_ANY, /* silently allow ALL missing objects */
MA_PRINT, /* print ALL missing objects in special section */
+ MA_ALLOW_PROMISOR, /* silently allow all missing PROMISOR objects */
};
static enum missing_action arg_missing_action;
@@ -132,7 +134,7 @@ static void show_commit(struct commit *commit, void *data)
else
putchar('\n');
- if (revs->verbose_header && get_cached_commit_buffer(commit, NULL)) {
+ if (revs->verbose_header) {
struct strbuf buf = STRBUF_INIT;
struct pretty_print_context ctx = {0};
ctx.abbrev = revs->abbrev;
@@ -197,6 +199,12 @@ static void finish_commit(struct commit *commit, void *data)
static inline void finish_object__ma(struct object *obj)
{
+ /*
+ * Whether or not we try to dynamically fetch missing objects
+ * from the server, we currently DO NOT have the object. We
+ * can either print, allow (ignore), or conditionally allow
+ * (ignore) them.
+ */
switch (arg_missing_action) {
case MA_ERROR:
die("missing blob object '%s'", oid_to_hex(&obj->oid));
@@ -209,25 +217,36 @@ static inline void finish_object__ma(struct object *obj)
oidset_insert(&missing_objects, &obj->oid);
return;
+ case MA_ALLOW_PROMISOR:
+ if (is_promisor_object(&obj->oid))
+ return;
+ die("unexpected missing blob object '%s'",
+ oid_to_hex(&obj->oid));
+ return;
+
default:
BUG("unhandled missing_action");
return;
}
}
-static void finish_object(struct object *obj, const char *name, void *cb_data)
+static int finish_object(struct object *obj, const char *name, void *cb_data)
{
struct rev_list_info *info = cb_data;
- if (obj->type == OBJ_BLOB && !has_object_file(&obj->oid))
+ if (obj->type == OBJ_BLOB && !has_object_file(&obj->oid)) {
finish_object__ma(obj);
+ return 1;
+ }
if (info->revs->verify_objects && !obj->parsed && obj->type != OBJ_COMMIT)
parse_object(&obj->oid);
+ return 0;
}
static void show_object(struct object *obj, const char *name, void *cb_data)
{
struct rev_list_info *info = cb_data;
- finish_object(obj, name, cb_data);
+ if (finish_object(obj, name, cb_data))
+ return;
display_progress(progress, ++progress_counter);
if (info->flags & REV_LIST_QUIET)
return;
@@ -315,11 +334,19 @@ static inline int parse_missing_action_value(const char *value)
if (!strcmp(value, "allow-any")) {
arg_missing_action = MA_ALLOW_ANY;
+ fetch_if_missing = 0;
return 1;
}
if (!strcmp(value, "print")) {
arg_missing_action = MA_PRINT;
+ fetch_if_missing = 0;
+ return 1;
+ }
+
+ if (!strcmp(value, "allow-promisor")) {
+ arg_missing_action = MA_ALLOW_PROMISOR;
+ fetch_if_missing = 0;
return 1;
}
@@ -344,6 +371,35 @@ int cmd_rev_list(int argc, const char **argv, const char *prefix)
init_revisions(&revs, prefix);
revs.abbrev = DEFAULT_ABBREV;
revs.commit_format = CMIT_FMT_UNSPECIFIED;
+
+ /*
+ * Scan the argument list before invoking setup_revisions(), so that we
+ * know if fetch_if_missing needs to be set to 0.
+ *
+ * "--exclude-promisor-objects" acts as a pre-filter on missing objects
+ * by not crossing the boundary from realized objects to promisor
+ * objects.
+ *
+ * Let "--missing" to conditionally set fetch_if_missing.
+ */
+ for (i = 1; i < argc; i++) {
+ const char *arg = argv[i];
+ if (!strcmp(arg, "--exclude-promisor-objects")) {
+ fetch_if_missing = 0;
+ revs.exclude_promisor_objects = 1;
+ break;
+ }
+ }
+ for (i = 1; i < argc; i++) {
+ const char *arg = argv[i];
+ if (skip_prefix(arg, "--missing=", &arg)) {
+ if (revs.exclude_promisor_objects)
+ die(_("cannot combine --exclude-promisor-objects and --missing"));
+ if (parse_missing_action_value(arg))
+ break;
+ }
+ }
+
argc = setup_revisions(argc, argv, &revs, NULL);
memset(&info, 0, sizeof(info));
@@ -404,7 +460,7 @@ int cmd_rev_list(int argc, const char **argv, const char *prefix)
continue;
}
if (!strcmp(arg, ("--no-" CL_ARG__FILTER))) {
- list_objects_filter_release(&filter_options);
+ list_objects_filter_set_no_filter(&filter_options);
continue;
}
if (!strcmp(arg, "--filter-print-omitted")) {
@@ -412,9 +468,10 @@ int cmd_rev_list(int argc, const char **argv, const char *prefix)
continue;
}
- if (skip_prefix(arg, "--missing=", &arg) &&
- parse_missing_action_value(arg))
- continue;
+ if (!strcmp(arg, "--exclude-promisor-objects"))
+ continue; /* already handled above */
+ if (skip_prefix(arg, "--missing=", &arg))
+ continue; /* already handled above */
usage(rev_list_usage);
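
cmd_rev_list() now scans argv twice before setup_revisions(): once for --exclude-promisor-objects, which must disable dynamic fetching before any object lookup happens, and once for --missing=, rejecting the combination. A minimal sketch of that pre-scan pattern, assuming skip_prefix() from git-compat-util.h and the fetch_if_missing/exclude_promisor_objects fields introduced by this patch:

#include "cache.h"
#include "revision.h"

/*
 * Sketch: decide global behaviour (here, whether to fetch missing
 * objects on demand) by pre-scanning argv before the real option
 * parsing runs, mirroring the two loops added to cmd_rev_list().
 */
static void prescan_missing_options(int argc, const char **argv,
				    struct rev_info *revs)
{
	int i;
	const char *value;

	for (i = 1; i < argc; i++) {
		if (!strcmp(argv[i], "--exclude-promisor-objects")) {
			fetch_if_missing = 0;
			revs->exclude_promisor_objects = 1;
			break;
		}
	}
	for (i = 1; i < argc; i++) {
		if (skip_prefix(argv[i], "--missing=", &value)) {
			if (revs->exclude_promisor_objects)
				die("cannot combine --exclude-promisor-objects and --missing");
			/* value handled by parse_missing_action_value() */
			break;
		}
	}
}
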
diff --git a/builtin/rev-parse.c b/builtin/rev-parse.c
index 171c7a2..a1e680b 100644
--- a/builtin/rev-parse.c
+++ b/builtin/rev-parse.c
@@ -516,7 +516,7 @@ static int cmd_parseopt(int argc, const char **argv, const char *prefix)
PARSE_OPT_SHELL_EVAL);
strbuf_addstr(&parsed, " --");
- sq_quote_argv(&parsed, argv, 0);
+ sq_quote_argv(&parsed, argv);
puts(parsed.buf);
return 0;
}
@@ -526,7 +526,7 @@ static int cmd_sq_quote(int argc, const char **argv)
struct strbuf buf = STRBUF_INIT;
if (argc)
- sq_quote_argv(&buf, argv, 0);
+ sq_quote_argv(&buf, argv);
printf("%s\n", buf.buf);
strbuf_release(&buf);
diff --git a/builtin/revert.c b/builtin/revert.c
index b9d927e..76f0a35 100644
--- a/builtin/revert.c
+++ b/builtin/revert.c
@@ -208,7 +208,7 @@ int cmd_revert(int argc, const char **argv, const char *prefix)
if (isatty(0))
opts.edit = 1;
opts.action = REPLAY_REVERT;
- git_config(git_default_config, NULL);
+ sequencer_init_config(&opts);
res = run_sequencer(argc, argv, &opts);
if (res < 0)
die(_("revert failed"));
@@ -221,7 +221,7 @@ int cmd_cherry_pick(int argc, const char **argv, const char *prefix)
int res;
opts.action = REPLAY_PICK;
- git_config(git_default_config, NULL);
+ sequencer_init_config(&opts);
res = run_sequencer(argc, argv, &opts);
if (res < 0)
die(_("cherry-pick failed"));
diff --git a/builtin/submodule--helper.c b/builtin/submodule--helper.c
index a5c4a8a..b1daca9 100644
--- a/builtin/submodule--helper.c
+++ b/builtin/submodule--helper.c
@@ -20,6 +20,7 @@
#define OPT_QUIET (1 << 0)
#define OPT_CACHED (1 << 1)
#define OPT_RECURSIVE (1 << 2)
+#define OPT_FORCE (1 << 3)
typedef void (*each_submodule_fn)(const struct cache_entry *list_item,
void *cb_data);
@@ -50,6 +51,20 @@ static char *get_default_remote(void)
return ret;
}
+static int print_default_remote(int argc, const char **argv, const char *prefix)
+{
+ const char *remote;
+
+ if (argc != 1)
+ die(_("submodule--helper print-default-remote takes no arguments"));
+
+ remote = get_default_remote();
+ if (remote)
+ printf("%s\n", remote);
+
+ return 0;
+}
+
static int starts_with_dot_slash(const char *str)
{
return str[0] == '.' && is_dir_sep(str[1]);
@@ -358,6 +373,25 @@ static void module_list_active(struct module_list *list)
*list = active_modules;
}
+static char *get_up_path(const char *path)
+{
+ int i;
+ struct strbuf sb = STRBUF_INIT;
+
+ for (i = count_slashes(path); i; i--)
+ strbuf_addstr(&sb, "../");
+
+ /*
+ * Check whether 'path' ends with a slash, so that
+ * dir/sub_dir and dir/sub_dir/ produce the same output
+ */
+ if (!is_dir_sep(path[strlen(path) - 1]))
+ strbuf_addstr(&sb, "../");
+
+ return strbuf_detach(&sb, NULL);
+}
+
static int module_list(int argc, const char **argv, const char *prefix)
{
int i;
@@ -718,6 +752,309 @@ static int module_name(int argc, const char **argv, const char *prefix)
return 0;
}
+struct sync_cb {
+ const char *prefix;
+ unsigned int flags;
+};
+
+#define SYNC_CB_INIT { NULL, 0 }
+
+static void sync_submodule(const char *path, const char *prefix,
+ unsigned int flags)
+{
+ const struct submodule *sub;
+ char *remote_key = NULL;
+ char *sub_origin_url, *super_config_url, *displaypath;
+ struct strbuf sb = STRBUF_INIT;
+ struct child_process cp = CHILD_PROCESS_INIT;
+ char *sub_config_path = NULL;
+
+ if (!is_submodule_active(the_repository, path))
+ return;
+
+ sub = submodule_from_path(&null_oid, path);
+
+ if (sub && sub->url) {
+ if (starts_with_dot_dot_slash(sub->url) ||
+ starts_with_dot_slash(sub->url)) {
+ char *remote_url, *up_path;
+ char *remote = get_default_remote();
+ strbuf_addf(&sb, "remote.%s.url", remote);
+
+ if (git_config_get_string(sb.buf, &remote_url))
+ remote_url = xgetcwd();
+
+ up_path = get_up_path(path);
+ sub_origin_url = relative_url(remote_url, sub->url, up_path);
+ super_config_url = relative_url(remote_url, sub->url, NULL);
+
+ free(remote);
+ free(up_path);
+ free(remote_url);
+ } else {
+ sub_origin_url = xstrdup(sub->url);
+ super_config_url = xstrdup(sub->url);
+ }
+ } else {
+ sub_origin_url = xstrdup("");
+ super_config_url = xstrdup("");
+ }
+
+ displaypath = get_submodule_displaypath(path, prefix);
+
+ if (!(flags & OPT_QUIET))
+ printf(_("Synchronizing submodule url for '%s'\n"),
+ displaypath);
+
+ strbuf_reset(&sb);
+ strbuf_addf(&sb, "submodule.%s.url", sub->name);
+ if (git_config_set_gently(sb.buf, super_config_url))
+ die(_("failed to register url for submodule path '%s'"),
+ displaypath);
+
+ if (!is_submodule_populated_gently(path, NULL))
+ goto cleanup;
+
+ prepare_submodule_repo_env(&cp.env_array);
+ cp.git_cmd = 1;
+ cp.dir = path;
+ argv_array_pushl(&cp.args, "submodule--helper",
+ "print-default-remote", NULL);
+
+ strbuf_reset(&sb);
+ if (capture_command(&cp, &sb, 0))
+ die(_("failed to get the default remote for submodule '%s'"),
+ path);
+
+ strbuf_strip_suffix(&sb, "\n");
+ remote_key = xstrfmt("remote.%s.url", sb.buf);
+
+ strbuf_reset(&sb);
+ submodule_to_gitdir(&sb, path);
+ strbuf_addstr(&sb, "/config");
+
+ if (git_config_set_in_file_gently(sb.buf, remote_key, sub_origin_url))
+ die(_("failed to update remote for submodule '%s'"),
+ path);
+
+ if (flags & OPT_RECURSIVE) {
+ struct child_process cpr = CHILD_PROCESS_INIT;
+
+ cpr.git_cmd = 1;
+ cpr.dir = path;
+ prepare_submodule_repo_env(&cpr.env_array);
+
+ argv_array_push(&cpr.args, "--super-prefix");
+ argv_array_pushf(&cpr.args, "%s/", displaypath);
+ argv_array_pushl(&cpr.args, "submodule--helper", "sync",
+ "--recursive", NULL);
+
+ if (flags & OPT_QUIET)
+ argv_array_push(&cpr.args, "--quiet");
+
+ if (run_command(&cpr))
+ die(_("failed to recurse into submodule '%s'"),
+ path);
+ }
+
+cleanup:
+ free(super_config_url);
+ free(sub_origin_url);
+ strbuf_release(&sb);
+ free(remote_key);
+ free(displaypath);
+ free(sub_config_path);
+}
+
+static void sync_submodule_cb(const struct cache_entry *list_item, void *cb_data)
+{
+ struct sync_cb *info = cb_data;
+ sync_submodule(list_item->name, info->prefix, info->flags);
+
+}
+
+static int module_sync(int argc, const char **argv, const char *prefix)
+{
+ struct sync_cb info = SYNC_CB_INIT;
+ struct pathspec pathspec;
+ struct module_list list = MODULE_LIST_INIT;
+ int quiet = 0;
+ int recursive = 0;
+
+ struct option module_sync_options[] = {
+ OPT__QUIET(&quiet, N_("Suppress output of synchronizing submodule url")),
+ OPT_BOOL(0, "recursive", &recursive,
+ N_("Recurse into nested submodules")),
+ OPT_END()
+ };
+
+ const char *const git_submodule_helper_usage[] = {
+ N_("git submodule--helper sync [--quiet] [--recursive] [<path>]"),
+ NULL
+ };
+
+ argc = parse_options(argc, argv, prefix, module_sync_options,
+ git_submodule_helper_usage, 0);
+
+ if (module_list_compute(argc, argv, prefix, &pathspec, &list) < 0)
+ return 1;
+
+ info.prefix = prefix;
+ if (quiet)
+ info.flags |= OPT_QUIET;
+ if (recursive)
+ info.flags |= OPT_RECURSIVE;
+
+ for_each_listed_submodule(&list, sync_submodule_cb, &info);
+
+ return 0;
+}
+
+struct deinit_cb {
+ const char *prefix;
+ unsigned int flags;
+};
+#define DEINIT_CB_INIT { NULL, 0 }
+
+static void deinit_submodule(const char *path, const char *prefix,
+ unsigned int flags)
+{
+ const struct submodule *sub;
+ char *displaypath = NULL;
+ struct child_process cp_config = CHILD_PROCESS_INIT;
+ struct strbuf sb_config = STRBUF_INIT;
+ char *sub_git_dir = xstrfmt("%s/.git", path);
+
+ sub = submodule_from_path(&null_oid, path);
+
+ if (!sub || !sub->name)
+ goto cleanup;
+
+ displaypath = get_submodule_displaypath(path, prefix);
+
+ /* remove the submodule work tree (unless the user already did it) */
+ if (is_directory(path)) {
+ struct strbuf sb_rm = STRBUF_INIT;
+ const char *format;
+
+ /*
+ * protect submodules containing a .git directory
+ * NEEDSWORK: instead of dying, automatically call
+ * absorbgitdirs and (possibly) warn.
+ */
+ if (is_directory(sub_git_dir))
+ die(_("Submodule work tree '%s' contains a .git "
+ "directory (use 'rm -rf' if you really want "
+ "to remove it including all of its history)"),
+ displaypath);
+
+ if (!(flags & OPT_FORCE)) {
+ struct child_process cp_rm = CHILD_PROCESS_INIT;
+ cp_rm.git_cmd = 1;
+ argv_array_pushl(&cp_rm.args, "rm", "-qn",
+ path, NULL);
+
+ if (run_command(&cp_rm))
+ die(_("Submodule work tree '%s' contains local "
+ "modifications; use '-f' to discard them"),
+ displaypath);
+ }
+
+ strbuf_addstr(&sb_rm, path);
+
+ if (!remove_dir_recursively(&sb_rm, 0))
+ format = _("Cleared directory '%s'\n");
+ else
+ format = _("Could not remove submodule work tree '%s'\n");
+
+ if (!(flags & OPT_QUIET))
+ printf(format, displaypath);
+
+ strbuf_release(&sb_rm);
+ }
+
+ if (mkdir(path, 0777))
+ printf(_("could not create empty submodule directory %s"),
+ displaypath);
+
+ cp_config.git_cmd = 1;
+ argv_array_pushl(&cp_config.args, "config", "--get-regexp", NULL);
+ argv_array_pushf(&cp_config.args, "submodule.%s\\.", sub->name);
+
+ /* remove the .git/config entries (unless the user already did it) */
+ if (!capture_command(&cp_config, &sb_config, 0) && sb_config.len) {
+ char *sub_key = xstrfmt("submodule.%s", sub->name);
+ /*
+ * remove the whole section so we have a clean state when
+ * the user later decides to init this submodule again
+ */
+ git_config_rename_section_in_file(NULL, sub_key, NULL);
+ if (!(flags & OPT_QUIET))
+ printf(_("Submodule '%s' (%s) unregistered for path '%s'\n"),
+ sub->name, sub->url, displaypath);
+ free(sub_key);
+ }
+
+cleanup:
+ free(displaypath);
+ free(sub_git_dir);
+ strbuf_release(&sb_config);
+}
+
+static void deinit_submodule_cb(const struct cache_entry *list_item,
+ void *cb_data)
+{
+ struct deinit_cb *info = cb_data;
+ deinit_submodule(list_item->name, info->prefix, info->flags);
+}
+
+static int module_deinit(int argc, const char **argv, const char *prefix)
+{
+ struct deinit_cb info = DEINIT_CB_INIT;
+ struct pathspec pathspec;
+ struct module_list list = MODULE_LIST_INIT;
+ int quiet = 0;
+ int force = 0;
+ int all = 0;
+
+ struct option module_deinit_options[] = {
+ OPT__QUIET(&quiet, N_("Suppress submodule status output")),
+ OPT__FORCE(&force, N_("Remove submodule working trees even if they contain local changes")),
+ OPT_BOOL(0, "all", &all, N_("Unregister all submodules")),
+ OPT_END()
+ };
+
+ const char *const git_submodule_helper_usage[] = {
+ N_("git submodule deinit [--quiet] [-f | --force] [--all | [--] [<path>...]]"),
+ NULL
+ };
+
+ argc = parse_options(argc, argv, prefix, module_deinit_options,
+ git_submodule_helper_usage, 0);
+
+ if (all && argc) {
+ error("pathspec and --all are incompatible");
+ usage_with_options(git_submodule_helper_usage,
+ module_deinit_options);
+ }
+
+ if (!argc && !all)
+ die(_("Use '--all' if you really want to deinitialize all submodules"));
+
+ if (module_list_compute(argc, argv, prefix, &pathspec, &list) < 0)
+ BUG("module_list_compute should not choke on empty pathspec");
+
+ info.prefix = prefix;
+ if (quiet)
+ info.flags |= OPT_QUIET;
+ if (force)
+ info.flags |= OPT_FORCE;
+
+ for_each_listed_submodule(&list, deinit_submodule_cb, &info);
+
+ return 0;
+}
+
static int clone_submodule(const char *path, const char *gitdir, const char *url,
const char *depth, struct string_list *reference,
int quiet, int progress)
@@ -1498,6 +1835,9 @@ static struct cmd_struct commands[] = {
{"resolve-relative-url-test", resolve_relative_url_test, 0},
{"init", module_init, SUPPORT_SUPER_PREFIX},
{"status", module_status, SUPPORT_SUPER_PREFIX},
+ {"print-default-remote", print_default_remote, 0},
+ {"sync", module_sync, SUPPORT_SUPER_PREFIX},
+ {"deinit", module_deinit, 0},
{"remote-branch", resolve_remote_submodule_branch, 0},
{"push-check", push_check, 0},
{"absorb-git-dirs", absorb_git_dirs, SUPPORT_SUPER_PREFIX},
diff --git a/builtin/tag.c b/builtin/tag.c
index 7e0881f..7bc499b 100644
--- a/builtin/tag.c
+++ b/builtin/tag.c
@@ -187,7 +187,7 @@ static int build_tag_object(struct strbuf *buf, int sign, struct object_id *resu
{
if (sign && do_sign(buf) < 0)
return error(_("unable to sign the tag"));
- if (write_sha1_file(buf->buf, buf->len, tag_type, result->hash) < 0)
+ if (write_object_file(buf->buf, buf->len, tag_type, result) < 0)
return error(_("unable to write tag file"));
return 0;
}
diff --git a/builtin/unpack-objects.c b/builtin/unpack-objects.c
index e930790..9f96949 100644
--- a/builtin/unpack-objects.c
+++ b/builtin/unpack-objects.c
@@ -21,7 +21,7 @@ static unsigned char buffer[4096];
static unsigned int offset, len;
static off_t consumed_bytes;
static off_t max_input_size;
-static git_SHA_CTX ctx;
+static git_hash_ctx ctx;
static struct fsck_options fsck_options = FSCK_OPTIONS_STRICT;
/*
@@ -62,7 +62,7 @@ static void *fill(int min)
if (min > sizeof(buffer))
die("cannot fill %d bytes", min);
if (offset) {
- git_SHA1_Update(&ctx, buffer, offset);
+ the_hash_algo->update_fn(&ctx, buffer, offset);
memmove(buffer, buffer + offset, len);
offset = 0;
}
@@ -172,7 +172,8 @@ static void write_cached_object(struct object *obj, struct obj_buffer *obj_buf)
{
struct object_id oid;
- if (write_sha1_file(obj_buf->buffer, obj_buf->size, type_name(obj->type), oid.hash) < 0)
+ if (write_object_file(obj_buf->buffer, obj_buf->size,
+ type_name(obj->type), &oid) < 0)
die("failed to write object %s", oid_to_hex(&obj->oid));
obj->flags |= FLAG_WRITTEN;
}
@@ -237,14 +238,16 @@ static void write_object(unsigned nr, enum object_type type,
void *buf, unsigned long size)
{
if (!strict) {
- if (write_sha1_file(buf, size, type_name(type), obj_list[nr].oid.hash) < 0)
+ if (write_object_file(buf, size, type_name(type),
+ &obj_list[nr].oid) < 0)
die("failed to write object");
added_object(nr, type, buf, size);
free(buf);
obj_list[nr].obj = NULL;
} else if (type == OBJ_BLOB) {
struct blob *blob;
- if (write_sha1_file(buf, size, type_name(type), obj_list[nr].oid.hash) < 0)
+ if (write_object_file(buf, size, type_name(type),
+ &obj_list[nr].oid) < 0)
die("failed to write object");
added_object(nr, type, buf, size);
free(buf);
@@ -258,7 +261,7 @@ static void write_object(unsigned nr, enum object_type type,
} else {
struct object *obj;
int eaten;
- hash_sha1_file(buf, size, type_name(type), obj_list[nr].oid.hash);
+ hash_object_file(buf, size, type_name(type), &obj_list[nr].oid);
added_object(nr, type, buf, size);
obj = parse_object_buffer(&obj_list[nr].oid, type, size, buf,
&eaten);
@@ -345,8 +348,8 @@ static void unpack_delta_entry(enum object_type type, unsigned long delta_size,
struct object_id base_oid;
if (type == OBJ_REF_DELTA) {
- hashcpy(base_oid.hash, fill(GIT_SHA1_RAWSZ));
- use(GIT_SHA1_RAWSZ);
+ hashcpy(base_oid.hash, fill(the_hash_algo->rawsz));
+ use(the_hash_algo->rawsz);
delta_data = get_data(delta_size);
if (dry_run || !delta_data) {
free(delta_data);
@@ -564,15 +567,15 @@ int cmd_unpack_objects(int argc, const char **argv, const char *prefix)
/* We don't take any non-flag arguments now.. Maybe some day */
usage(unpack_usage);
}
- git_SHA1_Init(&ctx);
+ the_hash_algo->init_fn(&ctx);
unpack_all();
- git_SHA1_Update(&ctx, buffer, offset);
- git_SHA1_Final(oid.hash, &ctx);
+ the_hash_algo->update_fn(&ctx, buffer, offset);
+ the_hash_algo->final_fn(oid.hash, &ctx);
if (strict)
write_rest();
- if (hashcmp(fill(GIT_SHA1_RAWSZ), oid.hash))
+ if (hashcmp(fill(the_hash_algo->rawsz), oid.hash))
die("final sha1 did not match");
- use(GIT_SHA1_RAWSZ);
+ use(the_hash_algo->rawsz);
/* Write the last part of the buffer to stdout */
while (len) {
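
unpack-objects above is converted from the fixed git_SHA_CTX/GIT_SHA1_RAWSZ names to the pluggable the_hash_algo vtable (git_hash_ctx plus init_fn/update_fn/final_fn and rawsz), matching the block removed from cache.h further down. A minimal sketch of hashing a buffer through that abstraction:

#include "cache.h"

/*
 * Sketch: hash an arbitrary buffer with whatever algorithm the
 * repository uses, instead of hard-coding SHA-1. The output buffer
 * must hold the_hash_algo->rawsz bytes.
 */
static void hash_buffer_example(const void *buf, size_t len,
				unsigned char *out)
{
	git_hash_ctx ctx;

	the_hash_algo->init_fn(&ctx);
	the_hash_algo->update_fn(&ctx, buf, len);
	the_hash_algo->final_fn(out, &ctx);
}
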
diff --git a/builtin/worktree.c b/builtin/worktree.c
index 7cef5b1..9efdc22 100644
--- a/builtin/worktree.c
+++ b/builtin/worktree.c
@@ -14,7 +14,7 @@
#include "worktree.h"
static const char * const worktree_usage[] = {
- N_("git worktree add [<options>] <path> [<branch>]"),
+ N_("git worktree add [<options>] <path> [<commit-ish>]"),
N_("git worktree list [<options>]"),
N_("git worktree lock [<options>] <path>"),
N_("git worktree prune [<options>]"),
diff --git a/bulk-checkin.c b/bulk-checkin.c
index bb78849..70b14fd 100644
--- a/bulk-checkin.c
+++ b/bulk-checkin.c
@@ -12,7 +12,7 @@ static struct bulk_checkin_state {
unsigned plugged:1;
char *pack_tmp_name;
- struct sha1file *f;
+ struct hashfile *f;
off_t offset;
struct pack_idx_option pack_idx_opts;
@@ -35,9 +35,9 @@ static void finish_bulk_checkin(struct bulk_checkin_state *state)
unlink(state->pack_tmp_name);
goto clear_exit;
} else if (state->nr_written == 1) {
- sha1close(state->f, oid.hash, CSUM_FSYNC);
+ finalize_hashfile(state->f, oid.hash, CSUM_HASH_IN_STREAM | CSUM_FSYNC | CSUM_CLOSE);
} else {
- int fd = sha1close(state->f, oid.hash, 0);
+ int fd = finalize_hashfile(state->f, oid.hash, 0);
fixup_pack_header_footer(fd, oid.hash, state->pack_tmp_name,
state->nr_written, oid.hash,
state->offset);
@@ -93,7 +93,7 @@ static int already_written(struct bulk_checkin_state *state, unsigned char sha1[
* with a new pack.
*/
static int stream_to_pack(struct bulk_checkin_state *state,
- git_SHA_CTX *ctx, off_t *already_hashed_to,
+ git_hash_ctx *ctx, off_t *already_hashed_to,
int fd, size_t size, enum object_type type,
const char *path, unsigned flags)
{
@@ -127,7 +127,7 @@ static int stream_to_pack(struct bulk_checkin_state *state,
if (rsize < hsize)
hsize = rsize;
if (hsize)
- git_SHA1_Update(ctx, ibuf, hsize);
+ the_hash_algo->update_fn(ctx, ibuf, hsize);
*already_hashed_to = offset;
}
s.next_in = ibuf;
@@ -149,7 +149,7 @@ static int stream_to_pack(struct bulk_checkin_state *state,
return -1;
}
- sha1write(state->f, obuf, written);
+ hashwrite(state->f, obuf, written);
state->offset += written;
}
s.next_out = obuf;
@@ -192,10 +192,10 @@ static int deflate_to_pack(struct bulk_checkin_state *state,
unsigned flags)
{
off_t seekback, already_hashed_to;
- git_SHA_CTX ctx;
+ git_hash_ctx ctx;
unsigned char obuf[16384];
unsigned header_len;
- struct sha1file_checkpoint checkpoint;
+ struct hashfile_checkpoint checkpoint;
struct pack_idx_entry *idx = NULL;
seekback = lseek(fd, 0, SEEK_CUR);
@@ -204,8 +204,8 @@ static int deflate_to_pack(struct bulk_checkin_state *state,
header_len = xsnprintf((char *)obuf, sizeof(obuf), "%s %" PRIuMAX,
type_name(type), (uintmax_t)size) + 1;
- git_SHA1_Init(&ctx);
- git_SHA1_Update(&ctx, obuf, header_len);
+ the_hash_algo->init_fn(&ctx);
+ the_hash_algo->update_fn(&ctx, obuf, header_len);
/* Note: idx is non-NULL when we are writing */
if ((flags & HASH_WRITE_OBJECT) != 0)
@@ -216,7 +216,7 @@ static int deflate_to_pack(struct bulk_checkin_state *state,
while (1) {
prepare_to_stream(state, flags);
if (idx) {
- sha1file_checkpoint(state->f, &checkpoint);
+ hashfile_checkpoint(state->f, &checkpoint);
idx->offset = state->offset;
crc32_begin(state->f);
}
@@ -230,19 +230,19 @@ static int deflate_to_pack(struct bulk_checkin_state *state,
*/
if (!idx)
die("BUG: should not happen");
- sha1file_truncate(state->f, &checkpoint);
+ hashfile_truncate(state->f, &checkpoint);
state->offset = checkpoint.offset;
finish_bulk_checkin(state);
if (lseek(fd, seekback, SEEK_SET) == (off_t) -1)
return error("cannot seek back");
}
- git_SHA1_Final(result_sha1, &ctx);
+ the_hash_algo->final_fn(result_sha1, &ctx);
if (!idx)
return 0;
idx->crc32 = crc32_end(state->f);
if (already_written(state, result_sha1)) {
- sha1file_truncate(state->f, &checkpoint);
+ hashfile_truncate(state->f, &checkpoint);
state->offset = checkpoint.offset;
free(idx);
} else {
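
bulk-checkin above keeps its rewind-on-overflow logic, now expressed through the renamed checkpoint API: hashfile_checkpoint() records the current offset and hash state, and hashfile_truncate() rolls the stream back to it. A hedged sketch of the pattern, assuming csum-file.h from this patch; try_write is a stand-in callback for whatever the caller streams into the pack:

#include "cache.h"
#include "csum-file.h"

/*
 * Sketch: try to append data to a hashfile-backed pack; if the
 * attempt must be abandoned, roll the stream back to the checkpoint.
 */
static int append_or_rollback(struct hashfile *f, off_t *offset,
			      int (*try_write)(struct hashfile *))
{
	struct hashfile_checkpoint checkpoint;

	hashfile_checkpoint(f, &checkpoint);   /* remember offset + hash state */
	if (try_write(f))
		return 0;                      /* written; keep it */

	hashfile_truncate(f, &checkpoint);     /* undo the partial write */
	*offset = checkpoint.offset;
	return -1;
}
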
diff --git a/cache-tree.c b/cache-tree.c
index e03e72c..c52e430 100644
--- a/cache-tree.c
+++ b/cache-tree.c
@@ -84,9 +84,8 @@ static struct cache_tree_sub *find_subtree(struct cache_tree *it,
down->namelen = pathlen;
if (pos < it->subtree_nr)
- memmove(it->down + pos + 1,
- it->down + pos,
- sizeof(down) * (it->subtree_nr - pos - 1));
+ MOVE_ARRAY(it->down + pos + 1, it->down + pos,
+ it->subtree_nr - pos - 1);
it->down[pos] = down;
return down;
}
@@ -400,16 +399,16 @@ static int update_one(struct cache_tree *it,
}
if (repair) {
- unsigned char sha1[20];
- hash_sha1_file(buffer.buf, buffer.len, tree_type, sha1);
- if (has_sha1_file(sha1))
- hashcpy(it->oid.hash, sha1);
+ struct object_id oid;
+ hash_object_file(buffer.buf, buffer.len, tree_type, &oid);
+ if (has_sha1_file(oid.hash))
+ oidcpy(&it->oid, &oid);
else
to_invalidate = 1;
- } else if (dryrun)
- hash_sha1_file(buffer.buf, buffer.len, tree_type,
- it->oid.hash);
- else if (write_sha1_file(buffer.buf, buffer.len, tree_type, it->oid.hash)) {
+ } else if (dryrun) {
+ hash_object_file(buffer.buf, buffer.len, tree_type, &it->oid);
+ } else if (write_object_file(buffer.buf, buffer.len, tree_type,
+ &it->oid)) {
strbuf_release(&buffer);
return -1;
}
@@ -608,7 +607,7 @@ int write_index_as_tree(unsigned char *sha1, struct index_state *index_state, co
hold_lock_file_for_update(&lock_file, index_path, LOCK_DIE_ON_ERROR);
- entries = read_index_from(index_state, index_path);
+ entries = read_index_from(index_state, index_path, get_git_dir());
if (entries < 0) {
ret = WRITE_TREE_UNREADABLE_INDEX;
goto out;
diff --git a/cache.h b/cache.h
index 5b8d493..e62569f 100644
--- a/cache.h
+++ b/cache.h
@@ -4,7 +4,7 @@
#include "git-compat-util.h"
#include "strbuf.h"
#include "hashmap.h"
-#include "mru.h"
+#include "list.h"
#include "advice.h"
#include "gettext.h"
#include "convert.h"
@@ -16,31 +16,6 @@
#include "sha1-array.h"
#include "repository.h"
-#ifndef platform_SHA_CTX
-/*
- * platform's underlying implementation of SHA-1; could be OpenSSL,
- * blk_SHA, Apple CommonCrypto, etc... Note that including
- * SHA1_HEADER may have already defined platform_SHA_CTX for our
- * own implementations like block-sha1 and ppc-sha1, so we list
- * the default for OpenSSL compatible SHA-1 implementations here.
- */
-#define platform_SHA_CTX SHA_CTX
-#define platform_SHA1_Init SHA1_Init
-#define platform_SHA1_Update SHA1_Update
-#define platform_SHA1_Final SHA1_Final
-#endif
-
-#define git_SHA_CTX platform_SHA_CTX
-#define git_SHA1_Init platform_SHA1_Init
-#define git_SHA1_Update platform_SHA1_Update
-#define git_SHA1_Final platform_SHA1_Final
-
-#ifdef SHA1_MAX_BLOCK_SIZE
-#include "compat/sha1-chunked.h"
-#undef git_SHA1_Update
-#define git_SHA1_Update git_SHA1_Update_Chunked
-#endif
-
#include <zlib.h>
typedef struct git_zstream {
z_stream z;
@@ -345,7 +320,8 @@ struct index_state {
struct split_index *split_index;
struct cache_time timestamp;
unsigned name_hash_initialized : 1,
- initialized : 1;
+ initialized : 1,
+ drop_cache_tree : 1;
struct hashmap name_hash;
struct hashmap dir_hash;
unsigned char sha1[20];
@@ -371,7 +347,7 @@ extern void free_name_hash(struct index_state *istate);
#define active_cache_tree (the_index.cache_tree)
#define read_cache() read_index(&the_index)
-#define read_cache_from(path) read_index_from(&the_index, (path))
+#define read_cache_from(path) read_index_from(&the_index, (path), (get_git_dir()))
#define read_cache_preload(pathspec) read_index_preload(&the_index, (pathspec))
#define is_cache_unborn() is_index_unborn(&the_index)
#define read_cache_unmerged() read_index_unmerged(&the_index)
@@ -616,7 +592,8 @@ extern int read_index(struct index_state *);
extern int read_index_preload(struct index_state *, const struct pathspec *pathspec);
extern int do_read_index(struct index_state *istate, const char *path,
int must_exist); /* for testing only! */
-extern int read_index_from(struct index_state *, const char *path);
+extern int read_index_from(struct index_state *, const char *path,
+ const char *gitdir);
extern int is_index_unborn(struct index_state *);
extern int read_index_unmerged(struct index_state *);
@@ -824,6 +801,7 @@ extern char *git_replace_ref_base;
extern int fsync_object_files;
extern int core_preload_index;
+extern int core_commit_graph;
extern int core_apply_sparse_checkout;
extern int precomposed_unicode;
extern int protect_hfs;
@@ -914,10 +892,13 @@ extern int grafts_replace_parents;
#define GIT_REPO_VERSION 0
#define GIT_REPO_VERSION_READ 1
extern int repository_format_precious_objects;
+extern char *repository_format_partial_clone;
+extern const char *core_partial_clone_filter_default;
struct repository_format {
int version;
int precious_objects;
+ char *partial_clone; /* value of extensions.partialclone */
int is_bare;
int hash_algo;
char *work_tree;
@@ -957,12 +938,10 @@ extern void check_repository_format(void);
#define TYPE_CHANGED 0x0040
/*
- * Return the name of the file in the local object database that would
- * be used to store a loose object with the specified sha1. The
- * return value is a pointer to a statically allocated buffer that is
- * overwritten each time the function is called.
+ * Put in `buf` the name of the file in the local object database that
+ * would be used to store a loose object with the specified sha1.
*/
-extern const char *sha1_file_name(const unsigned char *sha1);
+extern void sha1_file_name(struct strbuf *buf, const unsigned char *sha1);
/*
* Return an abbreviated sha1 unique within this repository's object database.
@@ -1029,7 +1008,7 @@ static inline void hashclr(unsigned char *hash)
static inline void oidclr(struct object_id *oid)
{
- hashclr(oid->hash);
+ memset(oid->hash, 0, GIT_MAX_RAWSZ);
}
@@ -1047,8 +1026,6 @@ extern const struct object_id empty_tree_oid;
"\xe6\x9d\xe2\x9b\xb2\xd1\xd6\x43\x4b\x8b" \
"\x29\xae\x77\x5a\xd8\xc2\xe4\x8c\x53\x91"
extern const struct object_id empty_blob_oid;
-#define EMPTY_BLOB_SHA1_BIN (empty_blob_oid.hash)
-
static inline int is_empty_blob_sha1(const unsigned char *sha1)
{
@@ -1238,11 +1215,22 @@ static inline const unsigned char *lookup_replace_object(const unsigned char *sh
/* Read and unpack a sha1 file into memory, write memory to a sha1 file */
extern int sha1_object_info(const unsigned char *, unsigned long *);
-extern int hash_sha1_file(const void *buf, unsigned long len, const char *type, unsigned char *sha1);
-extern int write_sha1_file(const void *buf, unsigned long len, const char *type, unsigned char *return_sha1);
-extern int hash_sha1_file_literally(const void *buf, unsigned long len, const char *type, struct object_id *oid, unsigned flags);
-extern int pretend_sha1_file(void *, unsigned long, enum object_type, unsigned char *);
-extern int force_object_loose(const unsigned char *sha1, time_t mtime);
+
+extern int hash_object_file(const void *buf, unsigned long len,
+ const char *type, struct object_id *oid);
+
+extern int write_object_file(const void *buf, unsigned long len,
+ const char *type, struct object_id *oid);
+
+extern int hash_object_file_literally(const void *buf, unsigned long len,
+ const char *type, struct object_id *oid,
+ unsigned flags);
+
+extern int pretend_object_file(void *, unsigned long, enum object_type,
+ struct object_id *oid);
+
+extern int force_object_loose(const struct object_id *oid, time_t mtime);
+
extern int git_open_cloexec(const char *name, int flags);
#define git_open(name) git_open_cloexec(name, O_RDONLY)
extern void *map_sha1_file(const unsigned char *sha1, unsigned long *size);
@@ -1635,6 +1623,7 @@ struct pack_window {
extern struct packed_git {
struct packed_git *next;
+ struct list_head mru;
struct pack_window *windows;
off_t pack_size;
const void *index_data;
@@ -1648,7 +1637,8 @@ extern struct packed_git {
unsigned pack_local:1,
pack_keep:1,
freshened:1,
- do_not_close:1;
+ do_not_close:1,
+ pack_promisor:1;
unsigned char sha1[20];
struct revindex_entry *revindex;
/* something like ".git/objects/pack/xxxxx.pack" */
@@ -1656,10 +1646,9 @@ extern struct packed_git {
} *packed_git;
/*
- * A most-recently-used ordered version of the packed_git list, which can
- * be iterated instead of packed_git (and marked via mru_mark).
+ * A most-recently-used ordered version of the packed_git list.
*/
-extern struct mru packed_git_mru;
+extern struct list_head packed_git_mru;
struct pack_entry {
off_t offset;
@@ -1787,6 +1776,14 @@ struct object_info {
#define OBJECT_INFO_QUICK 8
extern int sha1_object_info_extended(const unsigned char *, struct object_info *, unsigned flags);
+/*
+ * Set this to 0 to prevent sha1_object_info_extended() from fetching missing
+ * blobs. This has an effect only if extensions.partialClone is set.
+ *
+ * Its default value is 1.
+ */
+extern int fetch_if_missing;
+
/* Dumb servers support */
extern int update_server_info(int);
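
The object-database hunks above switch callers from raw SHA-1 buffers to struct object_id and make sha1_file_name() append into a caller-owned strbuf instead of returning a static buffer. A minimal sketch of how a caller might use the new declarations, including the fetch_if_missing toggle for partial clones (the helper name and payload are illustrative, not from the patch):

	static void store_example_blob(void)
	{
		struct strbuf path = STRBUF_INIT;
		struct object_id oid;
		const char *payload = "example";
		unsigned long size;

		/* Hash and write in one step; the resulting name lands in oid. */
		if (write_object_file(payload, strlen(payload), "blob", &oid))
			die("unable to write example blob");

		/* The loose-object path is now appended to a caller-owned strbuf. */
		sha1_file_name(&path, oid.hash);
		fprintf(stderr, "stored %s at %s\n", oid_to_hex(&oid), path.buf);

		/* In a partial clone, suppress lazy fetching while probing. */
		fetch_if_missing = 0;
		if (sha1_object_info(oid.hash, &size) < 0)
			die("object unexpectedly missing");
		fetch_if_missing = 1;

		strbuf_release(&path);
	}
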
diff --git a/ci/lib-travisci.sh b/ci/lib-travisci.sh
index 07f27c7..1efee55 100755
--- a/ci/lib-travisci.sh
+++ b/ci/lib-travisci.sh
@@ -21,8 +21,6 @@ skip_branch_tip_with_tag () {
fi
}
-good_trees_file="$HOME/travis-cache/good-trees"
-
# Save some info about the current commit's tree, so we can skip the build
# job if we encounter the same tree again and can provide a useful info
# message.
@@ -83,7 +81,10 @@ check_unignored_build_artifacts ()
# and installing dependencies.
set -ex
-mkdir -p "$HOME/travis-cache"
+cache_dir="$HOME/travis-cache"
+good_trees_file="$cache_dir/good-trees"
+
+mkdir -p "$cache_dir"
skip_branch_tip_with_tag
skip_good_tree
diff --git a/ci/run-linux32-build.sh b/ci/run-linux32-build.sh
index c19c50c..2c60d2e 100755
--- a/ci/run-linux32-build.sh
+++ b/ci/run-linux32-build.sh
@@ -3,31 +3,58 @@
# Build and test Git in a 32-bit environment
#
# Usage:
-# run-linux32-build.sh [host-user-id]
+# run-linux32-build.sh <host-user-id>
#
-set -x
+set -ex
+
+if test $# -ne 1 || test -z "$1"
+then
+ echo >&2 "usage: run-linux32-build.sh <host-user-id>"
+ exit 1
+fi
# Update packages to the latest available versions
linux32 --32bit i386 sh -c '
apt update >/dev/null &&
apt install -y build-essential libcurl4-openssl-dev libssl-dev \
libexpat-dev gettext python >/dev/null
-' &&
+'
# If this script runs inside a docker container, then all commands are
# usually executed as root. Consequently, the host user might not be
# able to access the test output files.
-# If a host user id is given, then create a user "ci" with the host user
-# id to make everything accessible to the host user.
-HOST_UID=$1 &&
-CI_USER=$USER &&
-test -z $HOST_UID || (CI_USER="ci" && useradd -u $HOST_UID $CI_USER) &&
+# If a non-zero host user id is given, then create a user "ci" with that
+# user id to make everything accessible to the host user.
+HOST_UID=$1
+if test $HOST_UID -eq 0
+then
+ # Just in case someone does want to run the test suite as root.
+ CI_USER=root
+else
+ CI_USER=ci
+ if test "$(id -u $CI_USER 2>/dev/null)" = $HOST_UID
+ then
+ echo "user '$CI_USER' already exists with the requested ID $HOST_UID"
+ else
+ useradd -u $HOST_UID $CI_USER
+ fi
+
+ # Due to a bug the test suite was run as root in the past, so
+ # a prove state file created back then is only accessible by
+ # root. Now that bug is fixed, the test suite is run as a
+ # regular user, but the prove state file coming from Travis
+ # CI's cache might still be owned by root.
+ # Make sure that this user has rights to any cached files,
+ # including an existing prove state file.
+ test -n "$cache_dir" && chown -R $HOST_UID:$HOST_UID "$cache_dir"
+fi
# Build and test
linux32 --32bit i386 su -m -l $CI_USER -c '
- cd /usr/src/git &&
- ln -s /tmp/travis-cache/.prove t/.prove &&
- make --jobs=2 &&
- make --quiet test
+ set -ex
+ cd /usr/src/git
+ test -n "$cache_dir" && ln -s "$cache_dir/.prove" t/.prove
+ make --jobs=2
+ make --quiet test
'
diff --git a/ci/run-linux32-docker.sh b/ci/run-linux32-docker.sh
index 4f191c5..2163790 100755
--- a/ci/run-linux32-docker.sh
+++ b/ci/run-linux32-docker.sh
@@ -9,7 +9,9 @@ docker pull daald/ubuntu32:xenial
# Use the following command to debug the docker build locally:
# $ docker run -itv "${PWD}:/usr/src/git" --entrypoint /bin/bash daald/ubuntu32:xenial
-# root@container:/# /usr/src/git/ci/run-linux32-build.sh
+# root@container:/# /usr/src/git/ci/run-linux32-build.sh <host-user-id>
+
+container_cache_dir=/tmp/travis-cache
docker run \
--interactive \
@@ -18,8 +20,9 @@ docker run \
--env GIT_PROVE_OPTS \
--env GIT_TEST_OPTS \
--env GIT_TEST_CLONE_2GB \
+ --env cache_dir="$container_cache_dir" \
--volume "${PWD}:/usr/src/git" \
- --volume "${HOME}/travis-cache:/tmp/travis-cache" \
+ --volume "$cache_dir:$container_cache_dir" \
daald/ubuntu32:xenial \
/usr/src/git/ci/run-linux32-build.sh $(id -u $USER)
diff --git a/ci/run-tests.sh b/ci/run-tests.sh
index 22355f0..73e273f 100755
--- a/ci/run-tests.sh
+++ b/ci/run-tests.sh
@@ -5,8 +5,13 @@
. ${0%/*}/lib-travisci.sh
-ln -s $HOME/travis-cache/.prove t/.prove
+ln -s "$cache_dir/.prove" t/.prove
+
make --quiet test
+if test "$jobname" = "linux-gcc"
+then
+ GIT_TEST_SPLIT_INDEX=YesPlease make --quiet test
+fi
check_unignored_build_artifacts
diff --git a/combine-diff.c b/combine-diff.c
index 14db489..1ec9af1 100644
--- a/combine-diff.c
+++ b/combine-diff.c
@@ -1053,7 +1053,7 @@ static void show_patch_diff(struct combine_diff_path *elem, int num_parent,
if (is_file) {
struct strbuf buf = STRBUF_INIT;
- if (convert_to_git(&the_index, elem->path, result, len, &buf, safe_crlf)) {
+ if (convert_to_git(&the_index, elem->path, result, len, &buf, global_conv_flags_eol)) {
free(result);
result = strbuf_detach(&buf, &len);
result_size = len;
diff --git a/command-list.txt b/command-list.txt
index a1fad28..835c589 100644
--- a/command-list.txt
+++ b/command-list.txt
@@ -34,6 +34,7 @@ git-clean mainporcelain
git-clone mainporcelain init
git-column purehelpers
git-commit mainporcelain history
+git-commit-graph plumbingmanipulators
git-commit-tree plumbingmanipulators
git-config ancillarymanipulators
git-count-objects ancillaryinterrogators
diff --git a/commit-graph.c b/commit-graph.c
new file mode 100644
index 0000000..3ff8c84
--- /dev/null
+++ b/commit-graph.c
@@ -0,0 +1,738 @@
+#include "cache.h"
+#include "config.h"
+#include "git-compat-util.h"
+#include "lockfile.h"
+#include "pack.h"
+#include "packfile.h"
+#include "commit.h"
+#include "object.h"
+#include "revision.h"
+#include "sha1-lookup.h"
+#include "commit-graph.h"
+
+#define GRAPH_SIGNATURE 0x43475048 /* "CGPH" */
+#define GRAPH_CHUNKID_OIDFANOUT 0x4f494446 /* "OIDF" */
+#define GRAPH_CHUNKID_OIDLOOKUP 0x4f49444c /* "OIDL" */
+#define GRAPH_CHUNKID_DATA 0x43444154 /* "CDAT" */
+#define GRAPH_CHUNKID_LARGEEDGES 0x45444745 /* "EDGE" */
+
+#define GRAPH_DATA_WIDTH 36
+
+#define GRAPH_VERSION_1 0x1
+#define GRAPH_VERSION GRAPH_VERSION_1
+
+#define GRAPH_OID_VERSION_SHA1 1
+#define GRAPH_OID_LEN_SHA1 GIT_SHA1_RAWSZ
+#define GRAPH_OID_VERSION GRAPH_OID_VERSION_SHA1
+#define GRAPH_OID_LEN GRAPH_OID_LEN_SHA1
+
+#define GRAPH_OCTOPUS_EDGES_NEEDED 0x80000000
+#define GRAPH_PARENT_MISSING 0x7fffffff
+#define GRAPH_EDGE_LAST_MASK 0x7fffffff
+#define GRAPH_PARENT_NONE 0x70000000
+
+#define GRAPH_LAST_EDGE 0x80000000
+
+#define GRAPH_FANOUT_SIZE (4 * 256)
+#define GRAPH_CHUNKLOOKUP_WIDTH 12
+#define GRAPH_MIN_SIZE (5 * GRAPH_CHUNKLOOKUP_WIDTH + GRAPH_FANOUT_SIZE + \
+ GRAPH_OID_LEN + 8)
+
+char *get_commit_graph_filename(const char *obj_dir)
+{
+ return xstrfmt("%s/info/commit-graph", obj_dir);
+}
+
+static struct commit_graph *alloc_commit_graph(void)
+{
+ struct commit_graph *g = xcalloc(1, sizeof(*g));
+ g->graph_fd = -1;
+
+ return g;
+}
+
+struct commit_graph *load_commit_graph_one(const char *graph_file)
+{
+ void *graph_map;
+ const unsigned char *data, *chunk_lookup;
+ size_t graph_size;
+ struct stat st;
+ uint32_t i;
+ struct commit_graph *graph;
+ int fd = git_open(graph_file);
+ uint64_t last_chunk_offset;
+ uint32_t last_chunk_id;
+ uint32_t graph_signature;
+ unsigned char graph_version, hash_version;
+
+ if (fd < 0)
+ return NULL;
+ if (fstat(fd, &st)) {
+ close(fd);
+ return NULL;
+ }
+ graph_size = xsize_t(st.st_size);
+
+ if (graph_size < GRAPH_MIN_SIZE) {
+ close(fd);
+ die("graph file %s is too small", graph_file);
+ }
+ graph_map = xmmap(NULL, graph_size, PROT_READ, MAP_PRIVATE, fd, 0);
+ data = (const unsigned char *)graph_map;
+
+ graph_signature = get_be32(data);
+ if (graph_signature != GRAPH_SIGNATURE) {
+ error("graph signature %X does not match signature %X",
+ graph_signature, GRAPH_SIGNATURE);
+ goto cleanup_fail;
+ }
+
+ graph_version = *(unsigned char*)(data + 4);
+ if (graph_version != GRAPH_VERSION) {
+ error("graph version %X does not match version %X",
+ graph_version, GRAPH_VERSION);
+ goto cleanup_fail;
+ }
+
+ hash_version = *(unsigned char*)(data + 5);
+ if (hash_version != GRAPH_OID_VERSION) {
+ error("hash version %X does not match version %X",
+ hash_version, GRAPH_OID_VERSION);
+ goto cleanup_fail;
+ }
+
+ graph = alloc_commit_graph();
+
+ graph->hash_len = GRAPH_OID_LEN;
+ graph->num_chunks = *(unsigned char*)(data + 6);
+ graph->graph_fd = fd;
+ graph->data = graph_map;
+ graph->data_len = graph_size;
+
+ last_chunk_id = 0;
+ last_chunk_offset = 8;
+ chunk_lookup = data + 8;
+ for (i = 0; i < graph->num_chunks; i++) {
+ uint32_t chunk_id = get_be32(chunk_lookup + 0);
+ uint64_t chunk_offset = get_be64(chunk_lookup + 4);
+ int chunk_repeated = 0;
+
+ chunk_lookup += GRAPH_CHUNKLOOKUP_WIDTH;
+
+ if (chunk_offset > graph_size - GIT_MAX_RAWSZ) {
+ error("improper chunk offset %08x%08x", (uint32_t)(chunk_offset >> 32),
+ (uint32_t)chunk_offset);
+ goto cleanup_fail;
+ }
+
+ switch (chunk_id) {
+ case GRAPH_CHUNKID_OIDFANOUT:
+ if (graph->chunk_oid_fanout)
+ chunk_repeated = 1;
+ else
+ graph->chunk_oid_fanout = (uint32_t*)(data + chunk_offset);
+ break;
+
+ case GRAPH_CHUNKID_OIDLOOKUP:
+ if (graph->chunk_oid_lookup)
+ chunk_repeated = 1;
+ else
+ graph->chunk_oid_lookup = data + chunk_offset;
+ break;
+
+ case GRAPH_CHUNKID_DATA:
+ if (graph->chunk_commit_data)
+ chunk_repeated = 1;
+ else
+ graph->chunk_commit_data = data + chunk_offset;
+ break;
+
+ case GRAPH_CHUNKID_LARGEEDGES:
+ if (graph->chunk_large_edges)
+ chunk_repeated = 1;
+ else
+ graph->chunk_large_edges = data + chunk_offset;
+ break;
+ }
+
+ if (chunk_repeated) {
+ error("chunk id %08x appears multiple times", chunk_id);
+ goto cleanup_fail;
+ }
+
+ if (last_chunk_id == GRAPH_CHUNKID_OIDLOOKUP)
+ {
+ graph->num_commits = (chunk_offset - last_chunk_offset)
+ / graph->hash_len;
+ }
+
+ last_chunk_id = chunk_id;
+ last_chunk_offset = chunk_offset;
+ }
+
+ return graph;
+
+cleanup_fail:
+ munmap(graph_map, graph_size);
+ close(fd);
+ exit(1);
+}
+
+/* global storage */
+static struct commit_graph *commit_graph = NULL;
+
+static void prepare_commit_graph_one(const char *obj_dir)
+{
+ char *graph_name;
+
+ if (commit_graph)
+ return;
+
+ graph_name = get_commit_graph_filename(obj_dir);
+ commit_graph = load_commit_graph_one(graph_name);
+
+ FREE_AND_NULL(graph_name);
+}
+
+static int prepare_commit_graph_run_once = 0;
+static void prepare_commit_graph(void)
+{
+ struct alternate_object_database *alt;
+ char *obj_dir;
+
+ if (prepare_commit_graph_run_once)
+ return;
+ prepare_commit_graph_run_once = 1;
+
+ obj_dir = get_object_directory();
+ prepare_commit_graph_one(obj_dir);
+ prepare_alt_odb();
+ for (alt = alt_odb_list; !commit_graph && alt; alt = alt->next)
+ prepare_commit_graph_one(alt->path);
+}
+
+static void close_commit_graph(void)
+{
+ if (!commit_graph)
+ return;
+
+ if (commit_graph->graph_fd >= 0) {
+ munmap((void *)commit_graph->data, commit_graph->data_len);
+ commit_graph->data = NULL;
+ close(commit_graph->graph_fd);
+ }
+
+ FREE_AND_NULL(commit_graph);
+}
+
+static int bsearch_graph(struct commit_graph *g, struct object_id *oid, uint32_t *pos)
+{
+ return bsearch_hash(oid->hash, g->chunk_oid_fanout,
+ g->chunk_oid_lookup, g->hash_len, pos);
+}
+
+static struct commit_list **insert_parent_or_die(struct commit_graph *g,
+ uint64_t pos,
+ struct commit_list **pptr)
+{
+ struct commit *c;
+ struct object_id oid;
+ hashcpy(oid.hash, g->chunk_oid_lookup + g->hash_len * pos);
+ c = lookup_commit(&oid);
+ if (!c)
+ die("could not find commit %s", oid_to_hex(&oid));
+ c->graph_pos = pos;
+ return &commit_list_insert(c, pptr)->next;
+}
+
+static int fill_commit_in_graph(struct commit *item, struct commit_graph *g, uint32_t pos)
+{
+ struct object_id oid;
+ uint32_t edge_value;
+ uint32_t *parent_data_ptr;
+ uint64_t date_low, date_high;
+ struct commit_list **pptr;
+ const unsigned char *commit_data = g->chunk_commit_data + (g->hash_len + 16) * pos;
+
+ item->object.parsed = 1;
+ item->graph_pos = pos;
+
+ hashcpy(oid.hash, commit_data);
+ item->tree = lookup_tree(&oid);
+
+ date_high = get_be32(commit_data + g->hash_len + 8) & 0x3;
+ date_low = get_be32(commit_data + g->hash_len + 12);
+ item->date = (timestamp_t)((date_high << 32) | date_low);
+
+ pptr = &item->parents;
+
+ edge_value = get_be32(commit_data + g->hash_len);
+ if (edge_value == GRAPH_PARENT_NONE)
+ return 1;
+ pptr = insert_parent_or_die(g, edge_value, pptr);
+
+ edge_value = get_be32(commit_data + g->hash_len + 4);
+ if (edge_value == GRAPH_PARENT_NONE)
+ return 1;
+ if (!(edge_value & GRAPH_OCTOPUS_EDGES_NEEDED)) {
+ pptr = insert_parent_or_die(g, edge_value, pptr);
+ return 1;
+ }
+
+ parent_data_ptr = (uint32_t*)(g->chunk_large_edges +
+ 4 * (uint64_t)(edge_value & GRAPH_EDGE_LAST_MASK));
+ do {
+ edge_value = get_be32(parent_data_ptr);
+ pptr = insert_parent_or_die(g,
+ edge_value & GRAPH_EDGE_LAST_MASK,
+ pptr);
+ parent_data_ptr++;
+ } while (!(edge_value & GRAPH_LAST_EDGE));
+
+ return 1;
+}
+
+int parse_commit_in_graph(struct commit *item)
+{
+ if (!core_commit_graph)
+ return 0;
+ if (item->object.parsed)
+ return 1;
+
+ prepare_commit_graph();
+ if (commit_graph) {
+ uint32_t pos;
+ int found;
+ if (item->graph_pos != COMMIT_NOT_FROM_GRAPH) {
+ pos = item->graph_pos;
+ found = 1;
+ } else {
+ found = bsearch_graph(commit_graph, &(item->object.oid), &pos);
+ }
+
+ if (found)
+ return fill_commit_in_graph(item, commit_graph, pos);
+ }
+
+ return 0;
+}
+
+static void write_graph_chunk_fanout(struct hashfile *f,
+ struct commit **commits,
+ int nr_commits)
+{
+ int i, count = 0;
+ struct commit **list = commits;
+
+ /*
+ * Write the first-level table (the list is sorted,
+ * but we use a 256-entry lookup to be able to avoid
+ * having to do eight extra binary search iterations).
+ */
+ for (i = 0; i < 256; i++) {
+ while (count < nr_commits) {
+ if ((*list)->object.oid.hash[0] != i)
+ break;
+ count++;
+ list++;
+ }
+
+ hashwrite_be32(f, count);
+ }
+}
+
+static void write_graph_chunk_oids(struct hashfile *f, int hash_len,
+ struct commit **commits, int nr_commits)
+{
+ struct commit **list = commits;
+ int count;
+ for (count = 0; count < nr_commits; count++, list++)
+ hashwrite(f, (*list)->object.oid.hash, (int)hash_len);
+}
+
+static const unsigned char *commit_to_sha1(size_t index, void *table)
+{
+ struct commit **commits = table;
+ return commits[index]->object.oid.hash;
+}
+
+static void write_graph_chunk_data(struct hashfile *f, int hash_len,
+ struct commit **commits, int nr_commits)
+{
+ struct commit **list = commits;
+ struct commit **last = commits + nr_commits;
+ uint32_t num_extra_edges = 0;
+
+ while (list < last) {
+ struct commit_list *parent;
+ int edge_value;
+ uint32_t packedDate[2];
+
+ parse_commit(*list);
+ hashwrite(f, (*list)->tree->object.oid.hash, hash_len);
+
+ parent = (*list)->parents;
+
+ if (!parent)
+ edge_value = GRAPH_PARENT_NONE;
+ else {
+ edge_value = sha1_pos(parent->item->object.oid.hash,
+ commits,
+ nr_commits,
+ commit_to_sha1);
+
+ if (edge_value < 0)
+ edge_value = GRAPH_PARENT_MISSING;
+ }
+
+ hashwrite_be32(f, edge_value);
+
+ if (parent)
+ parent = parent->next;
+
+ if (!parent)
+ edge_value = GRAPH_PARENT_NONE;
+ else if (parent->next)
+ edge_value = GRAPH_OCTOPUS_EDGES_NEEDED | num_extra_edges;
+ else {
+ edge_value = sha1_pos(parent->item->object.oid.hash,
+ commits,
+ nr_commits,
+ commit_to_sha1);
+ if (edge_value < 0)
+ edge_value = GRAPH_PARENT_MISSING;
+ }
+
+ hashwrite_be32(f, edge_value);
+
+ if (edge_value & GRAPH_OCTOPUS_EDGES_NEEDED) {
+ do {
+ num_extra_edges++;
+ parent = parent->next;
+ } while (parent);
+ }
+
+ if (sizeof((*list)->date) > 4)
+ packedDate[0] = htonl(((*list)->date >> 32) & 0x3);
+ else
+ packedDate[0] = 0;
+
+ packedDate[1] = htonl((*list)->date);
+ hashwrite(f, packedDate, 8);
+
+ list++;
+ }
+}
+
+static void write_graph_chunk_large_edges(struct hashfile *f,
+ struct commit **commits,
+ int nr_commits)
+{
+ struct commit **list = commits;
+ struct commit **last = commits + nr_commits;
+ struct commit_list *parent;
+
+ while (list < last) {
+ int num_parents = 0;
+ for (parent = (*list)->parents; num_parents < 3 && parent;
+ parent = parent->next)
+ num_parents++;
+
+ if (num_parents <= 2) {
+ list++;
+ continue;
+ }
+
+ /* Since num_parents > 2, this initializer is safe. */
+ for (parent = (*list)->parents->next; parent; parent = parent->next) {
+ int edge_value = sha1_pos(parent->item->object.oid.hash,
+ commits,
+ nr_commits,
+ commit_to_sha1);
+
+ if (edge_value < 0)
+ edge_value = GRAPH_PARENT_MISSING;
+ else if (!parent->next)
+ edge_value |= GRAPH_LAST_EDGE;
+
+ hashwrite_be32(f, edge_value);
+ }
+
+ list++;
+ }
+}
+
+static int commit_compare(const void *_a, const void *_b)
+{
+ const struct object_id *a = (const struct object_id *)_a;
+ const struct object_id *b = (const struct object_id *)_b;
+ return oidcmp(a, b);
+}
+
+struct packed_commit_list {
+ struct commit **list;
+ int nr;
+ int alloc;
+};
+
+struct packed_oid_list {
+ struct object_id *list;
+ int nr;
+ int alloc;
+};
+
+static int add_packed_commits(const struct object_id *oid,
+ struct packed_git *pack,
+ uint32_t pos,
+ void *data)
+{
+ struct packed_oid_list *list = (struct packed_oid_list*)data;
+ enum object_type type;
+ off_t offset = nth_packed_object_offset(pack, pos);
+ struct object_info oi = OBJECT_INFO_INIT;
+
+ oi.typep = &type;
+ if (packed_object_info(pack, offset, &oi) < 0)
+ die("unable to get type of object %s", oid_to_hex(oid));
+
+ if (type != OBJ_COMMIT)
+ return 0;
+
+ ALLOC_GROW(list->list, list->nr + 1, list->alloc);
+ oidcpy(&(list->list[list->nr]), oid);
+ list->nr++;
+
+ return 0;
+}
+
+static void add_missing_parents(struct packed_oid_list *oids, struct commit *commit)
+{
+ struct commit_list *parent;
+ for (parent = commit->parents; parent; parent = parent->next) {
+ if (!(parent->item->object.flags & UNINTERESTING)) {
+ ALLOC_GROW(oids->list, oids->nr + 1, oids->alloc);
+ oidcpy(&oids->list[oids->nr], &(parent->item->object.oid));
+ oids->nr++;
+ parent->item->object.flags |= UNINTERESTING;
+ }
+ }
+}
+
+static void close_reachable(struct packed_oid_list *oids)
+{
+ int i;
+ struct commit *commit;
+
+ for (i = 0; i < oids->nr; i++) {
+ commit = lookup_commit(&oids->list[i]);
+ if (commit)
+ commit->object.flags |= UNINTERESTING;
+ }
+
+ /*
+ * As this loop runs, oids->nr may grow, but not more
+ * than the number of missing commits in the reachable
+ * closure.
+ */
+ for (i = 0; i < oids->nr; i++) {
+ commit = lookup_commit(&oids->list[i]);
+
+ if (commit && !parse_commit(commit))
+ add_missing_parents(oids, commit);
+ }
+
+ for (i = 0; i < oids->nr; i++) {
+ commit = lookup_commit(&oids->list[i]);
+
+ if (commit)
+ commit->object.flags &= ~UNINTERESTING;
+ }
+}
+
+void write_commit_graph(const char *obj_dir,
+ const char **pack_indexes,
+ int nr_packs,
+ const char **commit_hex,
+ int nr_commits,
+ int append)
+{
+ struct packed_oid_list oids;
+ struct packed_commit_list commits;
+ struct hashfile *f;
+ uint32_t i, count_distinct = 0;
+ char *graph_name;
+ int fd;
+ struct lock_file lk = LOCK_INIT;
+ uint32_t chunk_ids[5];
+ uint64_t chunk_offsets[5];
+ int num_chunks;
+ int num_extra_edges;
+ struct commit_list *parent;
+
+ oids.nr = 0;
+ oids.alloc = approximate_object_count() / 4;
+
+ if (append) {
+ prepare_commit_graph_one(obj_dir);
+ if (commit_graph)
+ oids.alloc += commit_graph->num_commits;
+ }
+
+ if (oids.alloc < 1024)
+ oids.alloc = 1024;
+ ALLOC_ARRAY(oids.list, oids.alloc);
+
+ if (append && commit_graph) {
+ for (i = 0; i < commit_graph->num_commits; i++) {
+ const unsigned char *hash = commit_graph->chunk_oid_lookup +
+ commit_graph->hash_len * i;
+ hashcpy(oids.list[oids.nr++].hash, hash);
+ }
+ }
+
+ if (pack_indexes) {
+ struct strbuf packname = STRBUF_INIT;
+ int dirlen;
+ strbuf_addf(&packname, "%s/pack/", obj_dir);
+ dirlen = packname.len;
+ for (i = 0; i < nr_packs; i++) {
+ struct packed_git *p;
+ strbuf_setlen(&packname, dirlen);
+ strbuf_addstr(&packname, pack_indexes[i]);
+ p = add_packed_git(packname.buf, packname.len, 1);
+ if (!p)
+ die("error adding pack %s", packname.buf);
+ if (open_pack_index(p))
+ die("error opening index for %s", packname.buf);
+ for_each_object_in_pack(p, add_packed_commits, &oids);
+ close_pack(p);
+ }
+ strbuf_release(&packname);
+ }
+
+ if (commit_hex) {
+ for (i = 0; i < nr_commits; i++) {
+ const char *end;
+ struct object_id oid;
+ struct commit *result;
+
+ if (commit_hex[i] && parse_oid_hex(commit_hex[i], &oid, &end))
+ continue;
+
+ result = lookup_commit_reference_gently(&oid, 1);
+
+ if (result) {
+ ALLOC_GROW(oids.list, oids.nr + 1, oids.alloc);
+ oidcpy(&oids.list[oids.nr], &(result->object.oid));
+ oids.nr++;
+ }
+ }
+ }
+
+ if (!pack_indexes && !commit_hex)
+ for_each_packed_object(add_packed_commits, &oids, 0);
+
+ close_reachable(&oids);
+
+ QSORT(oids.list, oids.nr, commit_compare);
+
+ count_distinct = 1;
+ for (i = 1; i < oids.nr; i++) {
+ if (oidcmp(&oids.list[i-1], &oids.list[i]))
+ count_distinct++;
+ }
+
+ if (count_distinct >= GRAPH_PARENT_MISSING)
+ die(_("the commit graph format cannot write %d commits"), count_distinct);
+
+ commits.nr = 0;
+ commits.alloc = count_distinct;
+ ALLOC_ARRAY(commits.list, commits.alloc);
+
+ num_extra_edges = 0;
+ for (i = 0; i < oids.nr; i++) {
+ int num_parents = 0;
+ if (i > 0 && !oidcmp(&oids.list[i-1], &oids.list[i]))
+ continue;
+
+ commits.list[commits.nr] = lookup_commit(&oids.list[i]);
+ parse_commit(commits.list[commits.nr]);
+
+ for (parent = commits.list[commits.nr]->parents;
+ parent; parent = parent->next)
+ num_parents++;
+
+ if (num_parents > 2)
+ num_extra_edges += num_parents - 1;
+
+ commits.nr++;
+ }
+ num_chunks = num_extra_edges ? 4 : 3;
+
+ if (commits.nr >= GRAPH_PARENT_MISSING)
+ die(_("too many commits to write graph"));
+
+ graph_name = get_commit_graph_filename(obj_dir);
+ fd = hold_lock_file_for_update(&lk, graph_name, 0);
+
+ if (fd < 0) {
+ struct strbuf folder = STRBUF_INIT;
+ strbuf_addstr(&folder, graph_name);
+ strbuf_setlen(&folder, strrchr(folder.buf, '/') - folder.buf);
+
+ if (mkdir(folder.buf, 0777) < 0)
+ die_errno(_("cannot mkdir %s"), folder.buf);
+ strbuf_release(&folder);
+
+ fd = hold_lock_file_for_update(&lk, graph_name, LOCK_DIE_ON_ERROR);
+
+ if (fd < 0)
+ die_errno("unable to create '%s'", graph_name);
+ }
+
+ f = hashfd(lk.tempfile->fd, lk.tempfile->filename.buf);
+
+ hashwrite_be32(f, GRAPH_SIGNATURE);
+
+ hashwrite_u8(f, GRAPH_VERSION);
+ hashwrite_u8(f, GRAPH_OID_VERSION);
+ hashwrite_u8(f, num_chunks);
+ hashwrite_u8(f, 0); /* unused padding byte */
+
+ chunk_ids[0] = GRAPH_CHUNKID_OIDFANOUT;
+ chunk_ids[1] = GRAPH_CHUNKID_OIDLOOKUP;
+ chunk_ids[2] = GRAPH_CHUNKID_DATA;
+ if (num_extra_edges)
+ chunk_ids[3] = GRAPH_CHUNKID_LARGEEDGES;
+ else
+ chunk_ids[3] = 0;
+ chunk_ids[4] = 0;
+
+ chunk_offsets[0] = 8 + (num_chunks + 1) * GRAPH_CHUNKLOOKUP_WIDTH;
+ chunk_offsets[1] = chunk_offsets[0] + GRAPH_FANOUT_SIZE;
+ chunk_offsets[2] = chunk_offsets[1] + GRAPH_OID_LEN * commits.nr;
+ chunk_offsets[3] = chunk_offsets[2] + (GRAPH_OID_LEN + 16) * commits.nr;
+ chunk_offsets[4] = chunk_offsets[3] + 4 * num_extra_edges;
+
+ for (i = 0; i <= num_chunks; i++) {
+ uint32_t chunk_write[3];
+
+ chunk_write[0] = htonl(chunk_ids[i]);
+ chunk_write[1] = htonl(chunk_offsets[i] >> 32);
+ chunk_write[2] = htonl(chunk_offsets[i] & 0xffffffff);
+ hashwrite(f, chunk_write, 12);
+ }
+
+ write_graph_chunk_fanout(f, commits.list, commits.nr);
+ write_graph_chunk_oids(f, GRAPH_OID_LEN, commits.list, commits.nr);
+ write_graph_chunk_data(f, GRAPH_OID_LEN, commits.list, commits.nr);
+ write_graph_chunk_large_edges(f, commits.list, commits.nr);
+
+ close_commit_graph();
+ finalize_hashfile(f, NULL, CSUM_HASH_IN_STREAM | CSUM_FSYNC);
+ commit_lock_file(&lk);
+
+ free(oids.list);
+ oids.alloc = 0;
+ oids.nr = 0;
+}
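
For orientation, the chunk_offsets[] arithmetic in write_commit_graph() pins down the file layout: an 8-byte header, a chunk lookup table with one 12-byte row per chunk plus a terminating row, the 256-entry fanout, the OID lookup and commit data chunks, and finally the trailing checksum written by finalize_hashfile(). A size sketch for a graph with no octopus merges (the helper is illustrative only, not part of the patch):

	/* Expected file size for nr commits when no EDGE chunk is needed. */
	static size_t commit_graph_size_sketch(uint32_t nr)
	{
		size_t size = 8;				/* signature, version, hash version, chunk count, padding */
		size += (3 + 1) * GRAPH_CHUNKLOOKUP_WIDTH;	/* three chunks plus the terminating row */
		size += GRAPH_FANOUT_SIZE;			/* OIDF: 256 four-byte counts */
		size += (size_t)nr * GRAPH_OID_LEN;		/* OIDL: one OID per commit */
		size += (size_t)nr * (GRAPH_OID_LEN + 16);	/* CDAT: tree OID, two parent slots, packed date */
		size += GRAPH_OID_LEN;				/* trailing hash from finalize_hashfile() */
		return size;
	}
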
diff --git a/commit-graph.h b/commit-graph.h
new file mode 100644
index 0000000..e1d8580
--- /dev/null
+++ b/commit-graph.h
@@ -0,0 +1,46 @@
+#ifndef COMMIT_GRAPH_H
+#define COMMIT_GRAPH_H
+
+#include "git-compat-util.h"
+
+char *get_commit_graph_filename(const char *obj_dir);
+
+/*
+ * Given a commit struct, try to fill in its info from the commit graph, including:
+ * 1. tree object
+ * 2. date
+ * 3. parents.
+ *
+ * Returns 1 if and only if the commit was found in the packed graph.
+ *
+ * See parse_commit_buffer() for the fallback after this call.
+ */
+int parse_commit_in_graph(struct commit *item);
+
+struct commit_graph {
+ int graph_fd;
+
+ const unsigned char *data;
+ size_t data_len;
+
+ unsigned char hash_len;
+ unsigned char num_chunks;
+ uint32_t num_commits;
+ struct object_id oid;
+
+ const uint32_t *chunk_oid_fanout;
+ const unsigned char *chunk_oid_lookup;
+ const unsigned char *chunk_commit_data;
+ const unsigned char *chunk_large_edges;
+};
+
+struct commit_graph *load_commit_graph_one(const char *graph_file);
+
+void write_commit_graph(const char *obj_dir,
+ const char **pack_indexes,
+ int nr_packs,
+ const char **commit_hex,
+ int nr_commits,
+ int append);
+
+#endif
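
commit-graph.h exposes enough of struct commit_graph to inspect a graph file directly. A tiny illustrative helper, not part of the patch, that reports what load_commit_graph_one() found:

	static void show_graph_info(const char *obj_dir)
	{
		char *path = get_commit_graph_filename(obj_dir);
		struct commit_graph *g = load_commit_graph_one(path);

		if (g)
			fprintf(stderr, "%s: %u commits in %u chunks\n",
				path, (unsigned)g->num_commits,
				(unsigned)g->num_chunks);
		else
			fprintf(stderr, "%s: no usable commit graph\n", path);
		free(path);
	}
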
diff --git a/commit.c b/commit.c
index 874b6e5..3e39c86 100644
--- a/commit.c
+++ b/commit.c
@@ -1,6 +1,7 @@
#include "cache.h"
#include "tag.h"
#include "commit.h"
+#include "commit-graph.h"
#include "pkt-line.h"
#include "utf8.h"
#include "diff.h"
@@ -126,10 +127,8 @@ int register_commit_graft(struct commit_graft *graft, int ignore_dups)
ALLOC_GROW(commit_graft, commit_graft_nr + 1, commit_graft_alloc);
commit_graft_nr++;
if (pos < commit_graft_nr)
- memmove(commit_graft + pos + 1,
- commit_graft + pos,
- (commit_graft_nr - pos - 1) *
- sizeof(*commit_graft));
+ MOVE_ARRAY(commit_graft + pos + 1, commit_graft + pos,
+ commit_graft_nr - pos - 1);
commit_graft[pos] = graft;
return 0;
}
@@ -385,6 +384,8 @@ int parse_commit_gently(struct commit *item, int quiet_on_missing)
return -1;
if (item->object.parsed)
return 0;
+ if (parse_commit_in_graph(item))
+ return 0;
buffer = read_sha1_file(item->object.oid.hash, &type, &size);
if (!buffer)
return quiet_on_missing ? -1 :
@@ -1380,9 +1381,8 @@ void free_commit_extra_headers(struct commit_extra_header *extra)
}
}
-int commit_tree(const char *msg, size_t msg_len,
- const unsigned char *tree,
- struct commit_list *parents, unsigned char *ret,
+int commit_tree(const char *msg, size_t msg_len, const struct object_id *tree,
+ struct commit_list *parents, struct object_id *ret,
const char *author, const char *sign_commit)
{
struct commit_extra_header *extra = NULL, **tail = &extra;
@@ -1511,8 +1511,8 @@ N_("Warning: commit message did not conform to UTF-8.\n"
"variable i18n.commitencoding to the encoding your project uses.\n");
int commit_tree_extended(const char *msg, size_t msg_len,
- const unsigned char *tree,
- struct commit_list *parents, unsigned char *ret,
+ const struct object_id *tree,
+ struct commit_list *parents, struct object_id *ret,
const char *author, const char *sign_commit,
struct commit_extra_header *extra)
{
@@ -1520,7 +1520,7 @@ int commit_tree_extended(const char *msg, size_t msg_len,
int encoding_is_utf8;
struct strbuf buffer;
- assert_sha1_type(tree, OBJ_TREE);
+ assert_sha1_type(tree->hash, OBJ_TREE);
if (memchr(msg, '\0', msg_len))
return error("a NUL byte in commit log message not allowed.");
@@ -1529,7 +1529,7 @@ int commit_tree_extended(const char *msg, size_t msg_len,
encoding_is_utf8 = is_encoding_utf8(git_commit_encoding);
strbuf_init(&buffer, 8192); /* should avoid reallocs for the headers */
- strbuf_addf(&buffer, "tree %s\n", sha1_to_hex(tree));
+ strbuf_addf(&buffer, "tree %s\n", oid_to_hex(tree));
/*
* NOTE! This ordering means that the same exact tree merged with a
@@ -1568,7 +1568,7 @@ int commit_tree_extended(const char *msg, size_t msg_len,
goto out;
}
- result = write_sha1_file(buffer.buf, buffer.len, commit_type, ret);
+ result = write_object_file(buffer.buf, buffer.len, commit_type, ret);
out:
strbuf_release(&buffer);
return result;
diff --git a/commit.h b/commit.h
index 425f402..e57ae4b 100644
--- a/commit.h
+++ b/commit.h
@@ -9,6 +9,8 @@
#include "string-list.h"
#include "pretty.h"
+#define COMMIT_NOT_FROM_GRAPH 0xFFFFFFFF
+
struct commit_list {
struct commit *item;
struct commit_list *next;
@@ -21,6 +23,7 @@ struct commit {
timestamp_t date;
struct commit_list *parents;
struct tree *tree;
+ uint32_t graph_pos;
};
extern int save_commit_buffer;
@@ -262,14 +265,15 @@ extern void append_merge_tag_headers(struct commit_list *parents,
struct commit_extra_header ***tail);
extern int commit_tree(const char *msg, size_t msg_len,
- const unsigned char *tree,
- struct commit_list *parents, unsigned char *ret,
+ const struct object_id *tree,
+ struct commit_list *parents, struct object_id *ret,
const char *author, const char *sign_commit);
extern int commit_tree_extended(const char *msg, size_t msg_len,
- const unsigned char *tree,
- struct commit_list *parents, unsigned char *ret,
- const char *author, const char *sign_commit,
+ const struct object_id *tree,
+ struct commit_list *parents,
+ struct object_id *ret, const char *author,
+ const char *sign_commit,
struct commit_extra_header *);
extern struct commit_extra_header *read_commit_extra_headers(struct commit *, const char **);
diff --git a/config.c b/config.c
index e617c20..25ee4a6 100644
--- a/config.c
+++ b/config.c
@@ -1149,11 +1149,14 @@ static int git_default_core_config(const char *var, const char *value)
}
if (!strcmp(var, "core.safecrlf")) {
+ int eol_rndtrp_die;
if (value && !strcasecmp(value, "warn")) {
- safe_crlf = SAFE_CRLF_WARN;
+ global_conv_flags_eol = CONV_EOL_RNDTRP_WARN;
return 0;
}
- safe_crlf = git_config_bool(var, value);
+ eol_rndtrp_die = git_config_bool(var, value);
+ global_conv_flags_eol = eol_rndtrp_die ?
+			CONV_EOL_RNDTRP_DIE : 0;
return 0;
}
@@ -1223,6 +1226,11 @@ static int git_default_core_config(const char *var, const char *value)
return 0;
}
+ if (!strcmp(var, "core.commitgraph")) {
+ core_commit_graph = git_config_bool(var, value);
+ return 0;
+ }
+
if (!strcmp(var, "core.sparsecheckout")) {
core_apply_sparse_checkout = git_config_bool(var, value);
return 0;
@@ -1251,6 +1259,11 @@ static int git_default_core_config(const char *var, const char *value)
return 0;
}
+ if (!strcmp(var, "core.partialclonefilter")) {
+ return git_config_string(&core_partial_clone_filter_default,
+ var, value);
+ }
+
/* Add other config variables here and to Documentation/config.txt. */
return 0;
}
diff --git a/connected.c b/connected.c
index 4a47f33..91feb78 100644
--- a/connected.c
+++ b/connected.c
@@ -56,6 +56,8 @@ int check_connected(oid_iterate_fn fn, void *cb_data,
argv_array_push(&rev_list.args,"rev-list");
argv_array_push(&rev_list.args, "--objects");
argv_array_push(&rev_list.args, "--stdin");
+ if (repository_format_partial_clone)
+ argv_array_push(&rev_list.args, "--exclude-promisor-objects");
argv_array_push(&rev_list.args, "--not");
argv_array_push(&rev_list.args, "--all");
argv_array_push(&rev_list.args, "--quiet");
diff --git a/contrib/coccinelle/strbuf.cocci b/contrib/coccinelle/strbuf.cocci
index 1d580e4..e34eada 100644
--- a/contrib/coccinelle/strbuf.cocci
+++ b/contrib/coccinelle/strbuf.cocci
@@ -1,21 +1,6 @@
@ strbuf_addf_with_format_only @
expression E;
-constant fmt;
-@@
- strbuf_addf(E,
-(
- fmt
-|
- _(fmt)
-)
- );
-
-@ script:python @
-fmt << strbuf_addf_with_format_only.fmt;
-@@
-cocci.include_match("%" not in fmt)
-
-@ extends strbuf_addf_with_format_only @
+constant fmt !~ "%";
@@
- strbuf_addf
+ strbuf_addstr
@@ -29,8 +14,9 @@ cocci.include_match("%" not in fmt)
@@
expression E1, E2;
+format F =~ "s";
@@
-- strbuf_addf(E1, "%s", E2);
+- strbuf_addf(E1, "%@F@", E2);
+ strbuf_addstr(E1, E2);
@@
diff --git a/contrib/completion/git-completion.bash b/contrib/completion/git-completion.bash
index 3683c77..b060b6f 100644
--- a/contrib/completion/git-completion.bash
+++ b/contrib/completion/git-completion.bash
@@ -594,7 +594,7 @@ __git_is_configured_remote ()
__git_list_merge_strategies ()
{
- git merge -s help 2>&1 |
+ LANG=C LC_ALL=C git merge -s help 2>&1 |
sed -n -e '/[Aa]vailable strategies are: /,/^$/{
s/\.$//
s/.*://
@@ -841,6 +841,7 @@ __git_list_porcelain_commands ()
check-ref-format) : plumbing;;
checkout-index) : plumbing;;
column) : internal helper;;
+ commit-graph) : plumbing;;
commit-tree) : plumbing;;
count-objects) : infrequent;;
credential) : credentials;;
@@ -2419,6 +2420,7 @@ _git_config ()
core.bigFileThreshold
core.checkStat
core.commentChar
+ core.commitGraph
core.compression
core.createObject
core.deltaBaseCacheLimit
diff --git a/contrib/examples/git-difftool.perl b/contrib/examples/git-difftool.perl
index df59bdf..fb0fd0b 100755
--- a/contrib/examples/git-difftool.perl
+++ b/contrib/examples/git-difftool.perl
@@ -13,7 +13,7 @@
use 5.008;
use strict;
use warnings;
-use Error qw(:try);
+use Git::Error qw(:try);
use File::Basename qw(dirname);
use File::Copy;
use File::Find;
diff --git a/convert.c b/convert.c
index 1a41a48..cc562f6 100644
--- a/convert.c
+++ b/convert.c
@@ -193,30 +193,30 @@ static enum eol output_eol(enum crlf_action crlf_action)
return core_eol;
}
-static void check_safe_crlf(const char *path, enum crlf_action crlf_action,
+static void check_global_conv_flags_eol(const char *path, enum crlf_action crlf_action,
struct text_stat *old_stats, struct text_stat *new_stats,
- enum safe_crlf checksafe)
+ int conv_flags)
{
if (old_stats->crlf && !new_stats->crlf ) {
/*
* CRLFs would not be restored by checkout
*/
- if (checksafe == SAFE_CRLF_WARN)
+ if (conv_flags & CONV_EOL_RNDTRP_DIE)
+ die(_("CRLF would be replaced by LF in %s."), path);
+ else if (conv_flags & CONV_EOL_RNDTRP_WARN)
warning(_("CRLF will be replaced by LF in %s.\n"
"The file will have its original line"
" endings in your working directory."), path);
- else /* i.e. SAFE_CRLF_FAIL */
- die(_("CRLF would be replaced by LF in %s."), path);
} else if (old_stats->lonelf && !new_stats->lonelf ) {
/*
* CRLFs would be added by checkout
*/
- if (checksafe == SAFE_CRLF_WARN)
+ if (conv_flags & CONV_EOL_RNDTRP_DIE)
+ die(_("LF would be replaced by CRLF in %s"), path);
+ else if (conv_flags & CONV_EOL_RNDTRP_WARN)
warning(_("LF will be replaced by CRLF in %s.\n"
"The file will have its original line"
" endings in your working directory."), path);
- else /* i.e. SAFE_CRLF_FAIL */
- die(_("LF would be replaced by CRLF in %s"), path);
}
}
@@ -268,7 +268,7 @@ static int will_convert_lf_to_crlf(size_t len, struct text_stat *stats,
static int crlf_to_git(const struct index_state *istate,
const char *path, const char *src, size_t len,
struct strbuf *buf,
- enum crlf_action crlf_action, enum safe_crlf checksafe)
+ enum crlf_action crlf_action, int conv_flags)
{
struct text_stat stats;
char *dst;
@@ -298,12 +298,12 @@ static int crlf_to_git(const struct index_state *istate,
* unless we want to renormalize in a merge or
* cherry-pick.
*/
- if ((checksafe != SAFE_CRLF_RENORMALIZE) &&
+ if ((!(conv_flags & CONV_EOL_RENORMALIZE)) &&
has_crlf_in_index(istate, path))
convert_crlf_into_lf = 0;
}
- if ((checksafe == SAFE_CRLF_WARN ||
- (checksafe == SAFE_CRLF_FAIL)) && len) {
+ if (((conv_flags & CONV_EOL_RNDTRP_WARN) ||
+ ((conv_flags & CONV_EOL_RNDTRP_DIE) && len))) {
struct text_stat new_stats;
memcpy(&new_stats, &stats, sizeof(new_stats));
/* simulate "git add" */
@@ -316,7 +316,7 @@ static int crlf_to_git(const struct index_state *istate,
new_stats.crlf += new_stats.lonelf;
new_stats.lonelf = 0;
}
- check_safe_crlf(path, crlf_action, &stats, &new_stats, checksafe);
+ check_global_conv_flags_eol(path, crlf_action, &stats, &new_stats, conv_flags);
}
if (!convert_crlf_into_lf)
return 0;
@@ -898,7 +898,7 @@ static int ident_to_git(const char *path, const char *src, size_t len,
static int ident_to_worktree(const char *path, const char *src, size_t len,
struct strbuf *buf, int ident)
{
- unsigned char sha1[20];
+ struct object_id oid;
char *to_free = NULL, *dollar, *spc;
int cnt;
@@ -912,7 +912,7 @@ static int ident_to_worktree(const char *path, const char *src, size_t len,
/* are we "faking" in place editing ? */
if (src == buf->buf)
to_free = strbuf_detach(buf, NULL);
- hash_sha1_file(src, len, "blob", sha1);
+ hash_object_file(src, len, "blob", &oid);
strbuf_grow(buf, len + cnt * 43);
for (;;) {
@@ -969,7 +969,7 @@ static int ident_to_worktree(const char *path, const char *src, size_t len,
/* step 4: substitute */
strbuf_addstr(buf, "Id: ");
- strbuf_add(buf, sha1_to_hex(sha1), 40);
+ strbuf_addstr(buf, oid_to_hex(&oid));
strbuf_addstr(buf, " $");
}
strbuf_add(buf, src, len);
@@ -1129,7 +1129,7 @@ const char *get_convert_attr_ascii(const char *path)
int convert_to_git(const struct index_state *istate,
const char *path, const char *src, size_t len,
- struct strbuf *dst, enum safe_crlf checksafe)
+ struct strbuf *dst, int conv_flags)
{
int ret = 0;
struct conv_attrs ca;
@@ -1144,8 +1144,8 @@ int convert_to_git(const struct index_state *istate,
src = dst->buf;
len = dst->len;
}
- if (checksafe != SAFE_CRLF_KEEP_CRLF) {
- ret |= crlf_to_git(istate, path, src, len, dst, ca.crlf_action, checksafe);
+ if (!(conv_flags & CONV_EOL_KEEP_CRLF)) {
+ ret |= crlf_to_git(istate, path, src, len, dst, ca.crlf_action, conv_flags);
if (ret && dst) {
src = dst->buf;
len = dst->len;
@@ -1156,7 +1156,7 @@ int convert_to_git(const struct index_state *istate,
void convert_to_git_filter_fd(const struct index_state *istate,
const char *path, int fd, struct strbuf *dst,
- enum safe_crlf checksafe)
+ int conv_flags)
{
struct conv_attrs ca;
convert_attrs(&ca, path);
@@ -1167,7 +1167,7 @@ void convert_to_git_filter_fd(const struct index_state *istate,
if (!apply_filter(path, NULL, 0, fd, dst, ca.drv, CAP_CLEAN, NULL))
die("%s: clean filter '%s' failed", path, ca.drv->name);
- crlf_to_git(istate, path, dst->buf, dst->len, dst, ca.crlf_action, checksafe);
+ crlf_to_git(istate, path, dst->buf, dst->len, dst, ca.crlf_action, conv_flags);
ident_to_git(path, dst->buf, dst->len, dst, ca.ident);
}
@@ -1226,7 +1226,7 @@ int renormalize_buffer(const struct index_state *istate, const char *path,
src = dst->buf;
len = dst->len;
}
- return ret | convert_to_git(istate, path, src, len, dst, SAFE_CRLF_RENORMALIZE);
+ return ret | convert_to_git(istate, path, src, len, dst, CONV_EOL_RENORMALIZE);
}
/*****************************************************************
diff --git a/convert.h b/convert.h
index 4f2da22..65ab3e5 100644
--- a/convert.h
+++ b/convert.h
@@ -8,15 +8,12 @@
struct index_state;
-enum safe_crlf {
- SAFE_CRLF_FALSE = 0,
- SAFE_CRLF_FAIL = 1,
- SAFE_CRLF_WARN = 2,
- SAFE_CRLF_RENORMALIZE = 3,
- SAFE_CRLF_KEEP_CRLF = 4
-};
+#define CONV_EOL_RNDTRP_DIE (1<<0) /* Die if CRLF to LF to CRLF is different */
+#define CONV_EOL_RNDTRP_WARN (1<<1) /* Warn if CRLF to LF to CRLF is different */
+#define CONV_EOL_RENORMALIZE (1<<2) /* Convert CRLF to LF */
+#define CONV_EOL_KEEP_CRLF (1<<3) /* Keep CRLF line endings as is */
-extern enum safe_crlf safe_crlf;
+extern int global_conv_flags_eol;
enum auto_crlf {
AUTO_CRLF_FALSE = 0,
@@ -66,7 +63,7 @@ extern const char *get_convert_attr_ascii(const char *path);
/* returns 1 if *dst was used */
extern int convert_to_git(const struct index_state *istate,
const char *path, const char *src, size_t len,
- struct strbuf *dst, enum safe_crlf checksafe);
+ struct strbuf *dst, int conv_flags);
extern int convert_to_working_tree(const char *path, const char *src,
size_t len, struct strbuf *dst);
extern int async_convert_to_working_tree(const char *path, const char *src,
@@ -85,7 +82,7 @@ static inline int would_convert_to_git(const struct index_state *istate,
extern void convert_to_git_filter_fd(const struct index_state *istate,
const char *path, int fd,
struct strbuf *dst,
- enum safe_crlf checksafe);
+ int conv_flags);
extern int would_convert_to_git_filter_fd(const char *path);
/*****************************************************************
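
With enum safe_crlf gone, callers pass a bitmask, so the warn/die/renormalize behaviors compose instead of excluding one another. A sketch of the new calling convention using placeholder data (the helper itself is not in the patch):

	static void add_path_sketch(const char *path, const char *data, size_t len)
	{
		struct strbuf out = STRBUF_INIT;
		int conv_flags = CONV_EOL_RNDTRP_WARN;	/* warn rather than die on unsafe round-trips */

		/* Renormalization is now just another bit, not a separate enum value. */
		conv_flags |= CONV_EOL_RENORMALIZE;

		if (convert_to_git(&the_index, path, data, len, &out, conv_flags)) {
			data = out.buf;		/* conversion produced a new buffer */
			len = out.len;
		}

		/* ... hash or stage "data" here ... */
		strbuf_release(&out);
	}
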
diff --git a/csum-file.c b/csum-file.c
index 2adae04..53ce37f 100644
--- a/csum-file.c
+++ b/csum-file.c
@@ -11,7 +11,7 @@
#include "progress.h"
#include "csum-file.h"
-static void flush(struct sha1file *f, const void *buf, unsigned int count)
+static void flush(struct hashfile *f, const void *buf, unsigned int count)
{
if (0 <= f->check_fd && count) {
unsigned char check_buffer[8192];
@@ -42,30 +42,30 @@ static void flush(struct sha1file *f, const void *buf, unsigned int count)
}
}
-void sha1flush(struct sha1file *f)
+void hashflush(struct hashfile *f)
{
unsigned offset = f->offset;
if (offset) {
- git_SHA1_Update(&f->ctx, f->buffer, offset);
+ the_hash_algo->update_fn(&f->ctx, f->buffer, offset);
flush(f, f->buffer, offset);
f->offset = 0;
}
}
-int sha1close(struct sha1file *f, unsigned char *result, unsigned int flags)
+int finalize_hashfile(struct hashfile *f, unsigned char *result, unsigned int flags)
{
int fd;
- sha1flush(f);
- git_SHA1_Final(f->buffer, &f->ctx);
+ hashflush(f);
+ the_hash_algo->final_fn(f->buffer, &f->ctx);
if (result)
hashcpy(result, f->buffer);
- if (flags & (CSUM_CLOSE | CSUM_FSYNC)) {
- /* write checksum and close fd */
- flush(f, f->buffer, 20);
- if (flags & CSUM_FSYNC)
- fsync_or_die(f->fd, f->name);
+ if (flags & CSUM_HASH_IN_STREAM)
+ flush(f, f->buffer, the_hash_algo->rawsz);
+ if (flags & CSUM_FSYNC)
+ fsync_or_die(f->fd, f->name);
+ if (flags & CSUM_CLOSE) {
if (close(f->fd))
die_errno("%s: sha1 file error on close", f->name);
fd = 0;
@@ -86,7 +86,7 @@ int sha1close(struct sha1file *f, unsigned char *result, unsigned int flags)
return fd;
}
-void sha1write(struct sha1file *f, const void *buf, unsigned int count)
+void hashwrite(struct hashfile *f, const void *buf, unsigned int count)
{
while (count) {
unsigned offset = f->offset;
@@ -110,7 +110,7 @@ void sha1write(struct sha1file *f, const void *buf, unsigned int count)
buf = (char *) buf + nr;
left -= nr;
if (!left) {
- git_SHA1_Update(&f->ctx, data, offset);
+ the_hash_algo->update_fn(&f->ctx, data, offset);
flush(f, data, offset);
offset = 0;
}
@@ -118,15 +118,15 @@ void sha1write(struct sha1file *f, const void *buf, unsigned int count)
}
}
-struct sha1file *sha1fd(int fd, const char *name)
+struct hashfile *hashfd(int fd, const char *name)
{
- return sha1fd_throughput(fd, name, NULL);
+ return hashfd_throughput(fd, name, NULL);
}
-struct sha1file *sha1fd_check(const char *name)
+struct hashfile *hashfd_check(const char *name)
{
int sink, check;
- struct sha1file *f;
+ struct hashfile *f;
sink = open("/dev/null", O_WRONLY);
if (sink < 0)
@@ -134,14 +134,14 @@ struct sha1file *sha1fd_check(const char *name)
check = open(name, O_RDONLY);
if (check < 0)
die_errno("unable to open '%s'", name);
- f = sha1fd(sink, name);
+ f = hashfd(sink, name);
f->check_fd = check;
return f;
}
-struct sha1file *sha1fd_throughput(int fd, const char *name, struct progress *tp)
+struct hashfile *hashfd_throughput(int fd, const char *name, struct progress *tp)
{
- struct sha1file *f = xmalloc(sizeof(*f));
+ struct hashfile *f = xmalloc(sizeof(*f));
f->fd = fd;
f->check_fd = -1;
f->offset = 0;
@@ -149,18 +149,18 @@ struct sha1file *sha1fd_throughput(int fd, const char *name, struct progress *tp
f->tp = tp;
f->name = name;
f->do_crc = 0;
- git_SHA1_Init(&f->ctx);
+ the_hash_algo->init_fn(&f->ctx);
return f;
}
-void sha1file_checkpoint(struct sha1file *f, struct sha1file_checkpoint *checkpoint)
+void hashfile_checkpoint(struct hashfile *f, struct hashfile_checkpoint *checkpoint)
{
- sha1flush(f);
+ hashflush(f);
checkpoint->offset = f->total;
checkpoint->ctx = f->ctx;
}
-int sha1file_truncate(struct sha1file *f, struct sha1file_checkpoint *checkpoint)
+int hashfile_truncate(struct hashfile *f, struct hashfile_checkpoint *checkpoint)
{
off_t offset = checkpoint->offset;
@@ -169,17 +169,17 @@ int sha1file_truncate(struct sha1file *f, struct sha1file_checkpoint *checkpoint
return -1;
f->total = offset;
f->ctx = checkpoint->ctx;
- f->offset = 0; /* sha1flush() was called in checkpoint */
+ f->offset = 0; /* hashflush() was called in checkpoint */
return 0;
}
-void crc32_begin(struct sha1file *f)
+void crc32_begin(struct hashfile *f)
{
f->crc32 = crc32(0, NULL, 0);
f->do_crc = 1;
}
-uint32_t crc32_end(struct sha1file *f)
+uint32_t crc32_end(struct hashfile *f)
{
f->do_crc = 0;
return f->crc32;
diff --git a/csum-file.h b/csum-file.h
index 7530927..c5a2e33 100644
--- a/csum-file.h
+++ b/csum-file.h
@@ -4,11 +4,11 @@
struct progress;
/* A SHA1-protected file */
-struct sha1file {
+struct hashfile {
int fd;
int check_fd;
unsigned int offset;
- git_SHA_CTX ctx;
+ git_hash_ctx ctx;
off_t total;
struct progress *tp;
const char *name;
@@ -18,36 +18,37 @@ struct sha1file {
};
/* Checkpoint */
-struct sha1file_checkpoint {
+struct hashfile_checkpoint {
off_t offset;
- git_SHA_CTX ctx;
+ git_hash_ctx ctx;
};
-extern void sha1file_checkpoint(struct sha1file *, struct sha1file_checkpoint *);
-extern int sha1file_truncate(struct sha1file *, struct sha1file_checkpoint *);
+extern void hashfile_checkpoint(struct hashfile *, struct hashfile_checkpoint *);
+extern int hashfile_truncate(struct hashfile *, struct hashfile_checkpoint *);
-/* sha1close flags */
-#define CSUM_CLOSE 1
-#define CSUM_FSYNC 2
+/* finalize_hashfile flags */
+#define CSUM_CLOSE 1
+#define CSUM_FSYNC 2
+#define CSUM_HASH_IN_STREAM 4
-extern struct sha1file *sha1fd(int fd, const char *name);
-extern struct sha1file *sha1fd_check(const char *name);
-extern struct sha1file *sha1fd_throughput(int fd, const char *name, struct progress *tp);
-extern int sha1close(struct sha1file *, unsigned char *, unsigned int);
-extern void sha1write(struct sha1file *, const void *, unsigned int);
-extern void sha1flush(struct sha1file *f);
-extern void crc32_begin(struct sha1file *);
-extern uint32_t crc32_end(struct sha1file *);
+extern struct hashfile *hashfd(int fd, const char *name);
+extern struct hashfile *hashfd_check(const char *name);
+extern struct hashfile *hashfd_throughput(int fd, const char *name, struct progress *tp);
+extern int finalize_hashfile(struct hashfile *, unsigned char *, unsigned int);
+extern void hashwrite(struct hashfile *, const void *, unsigned int);
+extern void hashflush(struct hashfile *f);
+extern void crc32_begin(struct hashfile *);
+extern uint32_t crc32_end(struct hashfile *);
-static inline void sha1write_u8(struct sha1file *f, uint8_t data)
+static inline void hashwrite_u8(struct hashfile *f, uint8_t data)
{
- sha1write(f, &data, sizeof(data));
+ hashwrite(f, &data, sizeof(data));
}
-static inline void sha1write_be32(struct sha1file *f, uint32_t data)
+static inline void hashwrite_be32(struct hashfile *f, uint32_t data)
{
data = htonl(data);
- sha1write(f, &data, sizeof(data));
+ hashwrite(f, &data, sizeof(data));
}
#endif
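
The csum-file rename also splits the old sha1close() behavior into separate finalize_hashfile() flags: CSUM_HASH_IN_STREAM appends the hash to the data stream, CSUM_FSYNC syncs, and CSUM_CLOSE closes the descriptor, and they can be combined freely. A sketch of a typical write path under the renamed API (the wrapper is illustrative):

	static void write_checksummed_sketch(int fd, const char *name,
					     const void *buf, unsigned int len)
	{
		struct hashfile *f = hashfd(fd, name);

		hashwrite(f, buf, len);

		/* Embed the trailing hash, fsync, and close in one call. */
		finalize_hashfile(f, NULL,
				  CSUM_HASH_IN_STREAM | CSUM_FSYNC | CSUM_CLOSE);
	}
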
diff --git a/daemon.c b/daemon.c
index e37e343..72dfeaf 100644
--- a/daemon.c
+++ b/daemon.c
@@ -597,6 +597,7 @@ static char *parse_host_arg(struct hostinfo *hi, char *extra_args, int buflen)
if (strncasecmp("host=", extra_args, 5) == 0) {
val = extra_args + 5;
vallen = strlen(val) + 1;
+ loginfo("Extended attribute \"host\": %s", val);
if (*val) {
/* Split <host>:<port> at colon. */
char *host;
@@ -647,9 +648,11 @@ static void parse_extra_args(struct hostinfo *hi, struct argv_array *env,
}
}
- if (git_protocol.len > 0)
+ if (git_protocol.len > 0) {
+ loginfo("Extended attribute \"protocol\": %s", git_protocol.buf);
argv_array_pushf(env, GIT_PROTOCOL_ENVIRONMENT "=%s",
git_protocol.buf);
+ }
strbuf_release(&git_protocol);
}
@@ -757,14 +760,8 @@ static int execute(void)
alarm(0);
len = strlen(line);
- if (pktlen != len)
- loginfo("Extended attributes (%d bytes) exist <%.*s>",
- (int) pktlen - len,
- (int) pktlen - len, line + len + 1);
- if (len && line[len-1] == '\n') {
- line[--len] = 0;
- pktlen--;
- }
+ if (len && line[len-1] == '\n')
+ line[len-1] = 0;
/* parse additional args hidden behind a NUL byte */
if (len != pktlen)
diff --git a/diff-lib.c b/diff-lib.c
index 261ce13..104f954 100644
--- a/diff-lib.c
+++ b/diff-lib.c
@@ -92,6 +92,7 @@ int run_diff_files(struct rev_info *revs, unsigned int option)
int diff_unmerged_stage = revs->max_count;
unsigned ce_option = ((option & DIFF_RACY_IS_MODIFIED)
? CE_MATCH_RACY_IS_DIRTY : 0);
+ uint64_t start = getnanotime();
diff_set_mnemonic_prefix(&revs->diffopt, "i/", "w/");
@@ -246,6 +247,7 @@ int run_diff_files(struct rev_info *revs, unsigned int option)
}
diffcore_std(&revs->diffopt);
diff_flush(&revs->diffopt);
+ trace_performance_since(start, "diff-files");
return 0;
}
@@ -512,6 +514,7 @@ static int diff_cache(struct rev_info *revs,
int run_diff_index(struct rev_info *revs, int cached)
{
struct object_array_entry *ent;
+ uint64_t start = getnanotime();
ent = revs->pending.objects;
if (diff_cache(revs, &ent->item->oid, ent->name, cached))
@@ -521,6 +524,7 @@ int run_diff_index(struct rev_info *revs, int cached)
diffcore_fix_diff_index(&revs->diffopt);
diffcore_std(&revs->diffopt);
diff_flush(&revs->diffopt);
+ trace_performance_since(start, "diff-index");
return 0;
}
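
The run_diff_files()/run_diff_index() hunks add the usual getnanotime()/trace_performance_since() pair so the work shows up under GIT_TRACE_PERFORMANCE. The same pattern, reduced to a skeleton (the function name is made up):

	static int timed_work_sketch(void)
	{
		uint64_t start = getnanotime();
		int ret = 0;

		/* ... the operation being measured goes here ... */

		trace_performance_since(start, "timed work sketch");
		return ret;
	}
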
diff --git a/diff.c b/diff.c
index 1870742..915f4d6 100644
--- a/diff.c
+++ b/diff.c
@@ -3520,13 +3520,13 @@ int diff_populate_filespec(struct diff_filespec *s, unsigned int flags)
{
int size_only = flags & CHECK_SIZE_ONLY;
int err = 0;
+ int conv_flags = global_conv_flags_eol;
/*
* demote FAIL to WARN to allow inspecting the situation
* instead of refusing.
*/
- enum safe_crlf crlf_warn = (safe_crlf == SAFE_CRLF_FAIL
- ? SAFE_CRLF_WARN
- : safe_crlf);
+ if (conv_flags & CONV_EOL_RNDTRP_DIE)
+ conv_flags = CONV_EOL_RNDTRP_WARN;
if (!DIFF_FILE_VALID(s))
die("internal error: asking to populate invalid file.");
@@ -3603,7 +3603,7 @@ int diff_populate_filespec(struct diff_filespec *s, unsigned int flags)
/*
* Convert from working tree format to canonical git format
*/
- if (convert_to_git(&the_index, s->path, s->data, s->size, &buf, crlf_warn)) {
+ if (convert_to_git(&the_index, s->path, s->data, s->size, &buf, conv_flags)) {
size_t size = 0;
munmap(s->data, s->size);
s->should_munmap = 0;
@@ -5471,6 +5471,7 @@ N_("you may want to set your %s variable to at least "
void diff_warn_rename_limit(const char *varname, int needed, int degraded_cc)
{
+ fflush(stdout);
if (degraded_cc)
warning(_(degrade_cc_to_c_warning));
else if (needed)
diff --git a/diffcore-rename.c b/diffcore-rename.c
index 245e999..0b7e498 100644
--- a/diffcore-rename.c
+++ b/diffcore-rename.c
@@ -57,8 +57,8 @@ static int add_rename_dst(struct diff_filespec *two)
ALLOC_GROW(rename_dst, rename_dst_nr + 1, rename_dst_alloc);
rename_dst_nr++;
if (first < rename_dst_nr)
- memmove(rename_dst + first + 1, rename_dst + first,
- (rename_dst_nr - first - 1) * sizeof(*rename_dst));
+ MOVE_ARRAY(rename_dst + first + 1, rename_dst + first,
+ rename_dst_nr - first - 1);
rename_dst[first].two = alloc_filespec(two->path);
fill_filespec(rename_dst[first].two, &two->oid, two->oid_valid,
two->mode);
@@ -98,8 +98,8 @@ static struct diff_rename_src *register_rename_src(struct diff_filepair *p)
ALLOC_GROW(rename_src, rename_src_nr + 1, rename_src_alloc);
rename_src_nr++;
if (first < rename_src_nr)
- memmove(rename_src + first + 1, rename_src + first,
- (rename_src_nr - first - 1) * sizeof(*rename_src));
+ MOVE_ARRAY(rename_src + first + 1, rename_src + first,
+ rename_src_nr - first - 1);
rename_src[first].p = p;
rename_src[first].score = score;
return &(rename_src[first]);
@@ -260,8 +260,8 @@ static unsigned int hash_filespec(struct diff_filespec *filespec)
if (!filespec->oid_valid) {
if (diff_populate_filespec(filespec, 0))
return 0;
- hash_sha1_file(filespec->data, filespec->size, "blob",
- filespec->oid.hash);
+ hash_object_file(filespec->data, filespec->size, "blob",
+ &filespec->oid);
}
return sha1hash(filespec->oid.hash);
}
diff --git a/dir.c b/dir.c
index 7c4b45e..536416f 100644
--- a/dir.c
+++ b/dir.c
@@ -231,12 +231,10 @@ int within_depth(const char *name, int namelen,
* 1 along with { data, size } of the (possibly augmented) buffer
* when successful.
*
- * Optionally updates the given sha1_stat with the given OID (when valid).
+ * Optionally updates the given oid_stat with the given OID (when valid).
*/
-static int do_read_blob(const struct object_id *oid,
- struct sha1_stat *sha1_stat,
- size_t *size_out,
- char **data_out)
+static int do_read_blob(const struct object_id *oid, struct oid_stat *oid_stat,
+ size_t *size_out, char **data_out)
{
enum object_type type;
unsigned long sz;
@@ -251,9 +249,9 @@ static int do_read_blob(const struct object_id *oid,
return -1;
}
- if (sha1_stat) {
- memset(&sha1_stat->stat, 0, sizeof(sha1_stat->stat));
- hashcpy(sha1_stat->sha1, oid->hash);
+ if (oid_stat) {
+ memset(&oid_stat->stat, 0, sizeof(oid_stat->stat));
+ oidcpy(&oid_stat->oid, oid);
}
if (sz == 0) {
@@ -654,9 +652,8 @@ void add_exclude(const char *string, const char *base,
static int read_skip_worktree_file_from_index(const struct index_state *istate,
const char *path,
- size_t *size_out,
- char **data_out,
- struct sha1_stat *sha1_stat)
+ size_t *size_out, char **data_out,
+ struct oid_stat *oid_stat)
{
int pos, len;
@@ -667,7 +664,7 @@ static int read_skip_worktree_file_from_index(const struct index_state *istate,
if (!ce_skip_worktree(istate->cache[pos]))
return -1;
- return do_read_blob(&istate->cache[pos]->oid, sha1_stat, size_out, data_out);
+ return do_read_blob(&istate->cache[pos]->oid, oid_stat, size_out, data_out);
}
/*
@@ -747,8 +744,8 @@ static struct untracked_cache_dir *lookup_untracked(struct untracked_cache *uc,
FLEX_ALLOC_MEM(d, name, name, len);
ALLOC_GROW(dir->dirs, dir->dirs_nr + 1, dir->dirs_alloc);
- memmove(dir->dirs + first + 1, dir->dirs + first,
- (dir->dirs_nr - first) * sizeof(*dir->dirs));
+ MOVE_ARRAY(dir->dirs + first + 1, dir->dirs + first,
+ dir->dirs_nr - first);
dir->dirs_nr++;
dir->dirs[first] = d;
return d;
@@ -795,9 +792,8 @@ static int add_excludes_from_buffer(char *buf, size_t size,
* ss_valid is non-zero, "ss" must contain good value as input.
*/
static int add_excludes(const char *fname, const char *base, int baselen,
- struct exclude_list *el,
- struct index_state *istate,
- struct sha1_stat *sha1_stat)
+ struct exclude_list *el, struct index_state *istate,
+ struct oid_stat *oid_stat)
{
struct stat st;
int r;
@@ -815,16 +811,16 @@ static int add_excludes(const char *fname, const char *base, int baselen,
return -1;
r = read_skip_worktree_file_from_index(istate, fname,
&size, &buf,
- sha1_stat);
+ oid_stat);
if (r != 1)
return r;
} else {
size = xsize_t(st.st_size);
if (size == 0) {
- if (sha1_stat) {
- fill_stat_data(&sha1_stat->stat, &st);
- hashcpy(sha1_stat->sha1, EMPTY_BLOB_SHA1_BIN);
- sha1_stat->valid = 1;
+ if (oid_stat) {
+ fill_stat_data(&oid_stat->stat, &st);
+ oidcpy(&oid_stat->oid, &empty_blob_oid);
+ oid_stat->valid = 1;
}
close(fd);
return 0;
@@ -837,22 +833,23 @@ static int add_excludes(const char *fname, const char *base, int baselen,
}
buf[size++] = '\n';
close(fd);
- if (sha1_stat) {
+ if (oid_stat) {
int pos;
- if (sha1_stat->valid &&
- !match_stat_data_racy(istate, &sha1_stat->stat, &st))
+ if (oid_stat->valid &&
+ !match_stat_data_racy(istate, &oid_stat->stat, &st))
; /* no content change, ss->sha1 still good */
else if (istate &&
(pos = index_name_pos(istate, fname, strlen(fname))) >= 0 &&
!ce_stage(istate->cache[pos]) &&
ce_uptodate(istate->cache[pos]) &&
!would_convert_to_git(istate, fname))
- hashcpy(sha1_stat->sha1,
- istate->cache[pos]->oid.hash);
+ oidcpy(&oid_stat->oid,
+ &istate->cache[pos]->oid);
else
- hash_sha1_file(buf, size, "blob", sha1_stat->sha1);
- fill_stat_data(&sha1_stat->stat, &st);
- sha1_stat->valid = 1;
+ hash_object_file(buf, size, "blob",
+ &oid_stat->oid);
+ fill_stat_data(&oid_stat->stat, &st);
+ oid_stat->valid = 1;
}
}
@@ -930,7 +927,7 @@ struct exclude_list *add_exclude_list(struct dir_struct *dir,
* Used to set up core.excludesfile and .git/info/exclude lists.
*/
static void add_excludes_from_file_1(struct dir_struct *dir, const char *fname,
- struct sha1_stat *sha1_stat)
+ struct oid_stat *oid_stat)
{
struct exclude_list *el;
/*
@@ -941,7 +938,7 @@ static void add_excludes_from_file_1(struct dir_struct *dir, const char *fname,
if (!dir->untracked)
dir->unmanaged_exclude_files++;
el = add_exclude_list(dir, EXC_FILE, fname);
- if (add_excludes(fname, "", 0, el, NULL, sha1_stat) < 0)
+ if (add_excludes(fname, "", 0, el, NULL, oid_stat) < 0)
die("cannot use %s as an exclude file", fname);
}
@@ -1180,7 +1177,7 @@ static void prep_exclude(struct dir_struct *dir,
while (current < baselen) {
const char *cp;
- struct sha1_stat sha1_stat;
+ struct oid_stat oid_stat;
stk = xcalloc(1, sizeof(*stk));
if (current < 0) {
@@ -1223,8 +1220,8 @@ static void prep_exclude(struct dir_struct *dir,
}
/* Try to read per-directory file */
- hashclr(sha1_stat.sha1);
- sha1_stat.valid = 0;
+ oidclr(&oid_stat.oid);
+ oid_stat.valid = 0;
if (dir->exclude_per_dir &&
/*
* If we know that no files have been added in
@@ -1252,7 +1249,7 @@ static void prep_exclude(struct dir_struct *dir,
strbuf_addstr(&sb, dir->exclude_per_dir);
el->src = strbuf_detach(&sb, NULL);
add_excludes(el->src, el->src, stk->baselen, el, istate,
- untracked ? &sha1_stat : NULL);
+ untracked ? &oid_stat : NULL);
}
/*
* NEEDSWORK: when untracked cache is enabled, prep_exclude()
@@ -1269,9 +1266,9 @@ static void prep_exclude(struct dir_struct *dir,
* order, though, if you do that.
*/
if (untracked &&
- hashcmp(sha1_stat.sha1, untracked->exclude_sha1)) {
+ hashcmp(oid_stat.oid.hash, untracked->exclude_sha1)) {
invalidate_gitignore(dir->untracked, untracked);
- hashcpy(untracked->exclude_sha1, sha1_stat.sha1);
+ hashcpy(untracked->exclude_sha1, oid_stat.oid.hash);
}
dir->exclude_stack = stk;
current = stk->baselen;
@@ -2228,13 +2225,13 @@ static struct untracked_cache_dir *validate_untracked_cache(struct dir_struct *d
/* Validate $GIT_DIR/info/exclude and core.excludesfile */
root = dir->untracked->root;
- if (hashcmp(dir->ss_info_exclude.sha1,
- dir->untracked->ss_info_exclude.sha1)) {
+ if (oidcmp(&dir->ss_info_exclude.oid,
+ &dir->untracked->ss_info_exclude.oid)) {
invalidate_gitignore(dir->untracked, root);
dir->untracked->ss_info_exclude = dir->ss_info_exclude;
}
- if (hashcmp(dir->ss_excludes_file.sha1,
- dir->untracked->ss_excludes_file.sha1)) {
+ if (oidcmp(&dir->ss_excludes_file.oid,
+ &dir->untracked->ss_excludes_file.oid)) {
invalidate_gitignore(dir->untracked, root);
dir->untracked->ss_excludes_file = dir->ss_excludes_file;
}
@@ -2248,6 +2245,7 @@ int read_directory(struct dir_struct *dir, struct index_state *istate,
const char *path, int len, const struct pathspec *pathspec)
{
struct untracked_cache_dir *untracked;
+ uint64_t start = getnanotime();
if (has_symlink_leading_path(path, len))
return dir->nr;
@@ -2286,6 +2284,7 @@ int read_directory(struct dir_struct *dir, struct index_state *istate,
dir->nr = i;
}
+ trace_performance_since(start, "read directory %.*s", len, path);
if (dir->untracked) {
static struct trace_key trace_untracked_stats = TRACE_KEY_INIT(UNTRACKED_STATS);
trace_printf_key(&trace_untracked_stats,
@@ -2638,8 +2637,8 @@ void write_untracked_extension(struct strbuf *out, struct untracked_cache *untra
FLEX_ALLOC_MEM(ouc, exclude_per_dir, untracked->exclude_per_dir, len);
stat_data_to_disk(&ouc->info_exclude_stat, &untracked->ss_info_exclude.stat);
stat_data_to_disk(&ouc->excludes_file_stat, &untracked->ss_excludes_file.stat);
- hashcpy(ouc->info_exclude_sha1, untracked->ss_info_exclude.sha1);
- hashcpy(ouc->excludes_file_sha1, untracked->ss_excludes_file.sha1);
+ hashcpy(ouc->info_exclude_sha1, untracked->ss_info_exclude.oid.hash);
+ hashcpy(ouc->excludes_file_sha1, untracked->ss_excludes_file.oid.hash);
ouc->dir_flags = htonl(untracked->dir_flags);
varint_len = encode_varint(untracked->ident.len, varbuf);
@@ -2816,13 +2815,12 @@ static void read_sha1(size_t pos, void *cb)
rd->data += 20;
}
-static void load_sha1_stat(struct sha1_stat *sha1_stat,
- const unsigned char *data,
- const unsigned char *sha1)
+static void load_oid_stat(struct oid_stat *oid_stat, const unsigned char *data,
+ const unsigned char *sha1)
{
- stat_data_from_disk(&sha1_stat->stat, data);
- hashcpy(sha1_stat->sha1, sha1);
- sha1_stat->valid = 1;
+ stat_data_from_disk(&oid_stat->stat, data);
+ hashcpy(oid_stat->oid.hash, sha1);
+ oid_stat->valid = 1;
}
struct untracked_cache *read_untracked_extension(const void *data, unsigned long sz)
@@ -2850,12 +2848,12 @@ struct untracked_cache *read_untracked_extension(const void *data, unsigned long
uc = xcalloc(1, sizeof(*uc));
strbuf_init(&uc->ident, ident_len);
strbuf_add(&uc->ident, ident, ident_len);
- load_sha1_stat(&uc->ss_info_exclude,
- next + ouc_offset(info_exclude_stat),
- next + ouc_offset(info_exclude_sha1));
- load_sha1_stat(&uc->ss_excludes_file,
- next + ouc_offset(excludes_file_stat),
- next + ouc_offset(excludes_file_sha1));
+ load_oid_stat(&uc->ss_info_exclude,
+ next + ouc_offset(info_exclude_stat),
+ next + ouc_offset(info_exclude_sha1));
+ load_oid_stat(&uc->ss_excludes_file,
+ next + ouc_offset(excludes_file_stat),
+ next + ouc_offset(excludes_file_sha1));
uc->dir_flags = get_be32(next + ouc_offset(dir_flags));
exclude_per_dir = (const char *)next + ouc_offset(exclude_per_dir);
uc->exclude_per_dir = xstrdup(exclude_per_dir);
diff --git a/dir.h b/dir.h
index 11a047b..e7bb786 100644
--- a/dir.h
+++ b/dir.h
@@ -74,9 +74,9 @@ struct exclude_list_group {
struct exclude_list *el;
};
-struct sha1_stat {
+struct oid_stat {
struct stat_data stat;
- unsigned char sha1[20];
+ struct object_id oid;
int valid;
};
@@ -124,8 +124,8 @@ struct untracked_cache_dir {
};
struct untracked_cache {
- struct sha1_stat ss_info_exclude;
- struct sha1_stat ss_excludes_file;
+ struct oid_stat ss_info_exclude;
+ struct oid_stat ss_excludes_file;
const char *exclude_per_dir;
struct strbuf ident;
/*
@@ -195,8 +195,8 @@ struct dir_struct {
/* Enable untracked file cache if set */
struct untracked_cache *untracked;
- struct sha1_stat ss_info_exclude;
- struct sha1_stat ss_excludes_file;
+ struct oid_stat ss_info_exclude;
+ struct oid_stat ss_excludes_file;
unsigned unmanaged_exclude_files;
};
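
The conversion above swaps the bare sha1[20] member for a struct object_id, so callers move from hashclr()/hashcpy()/hashcmp() to oidclr()/oidcpy()/oidcmp(). A minimal sketch of the new calling convention — illustrative only, not part of the commit; fill_stat_data() and the oid helpers are the same ones already used in the dir.c hunks above:

#include "cache.h"
#include "dir.h"

/* Illustrative only: populate an oid_stat the way add_excludes() now does. */
static void set_oid_stat(struct oid_stat *os, const struct object_id *oid,
			 struct stat *st)
{
	fill_stat_data(&os->stat, st);   /* remember the file's stat data */
	oidcpy(&os->oid, oid);           /* and the blob's object id */
	os->valid = 1;
}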
diff --git a/environment.c b/environment.c
index 270ba98..8853e2f 100644
--- a/environment.c
+++ b/environment.c
@@ -27,6 +27,8 @@ int warn_ambiguous_refs = 1;
int warn_on_object_refname_ambiguity = 1;
int ref_paranoia = -1;
int repository_format_precious_objects;
+char *repository_format_partial_clone;
+const char *core_partial_clone_filter_default;
const char *git_commit_encoding;
const char *git_log_output_encoding;
const char *apply_default_whitespace;
@@ -49,7 +51,7 @@ enum auto_crlf auto_crlf = AUTO_CRLF_FALSE;
int check_replace_refs = 1;
char *git_replace_ref_base;
enum eol core_eol = EOL_UNSET;
-enum safe_crlf safe_crlf = SAFE_CRLF_WARN;
+int global_conv_flags_eol = CONV_EOL_RNDTRP_WARN;
unsigned whitespace_rule_cfg = WS_DEFAULT_RULE;
enum branch_track git_branch_track = BRANCH_TRACK_REMOTE;
enum rebase_setup_type autorebase = AUTOREBASE_NEVER;
@@ -60,6 +62,7 @@ enum push_default_type push_default = PUSH_DEFAULT_UNSPECIFIED;
enum object_creation_mode object_creation_mode = OBJECT_CREATION_MODE;
char *notes_ref_name;
int grafts_replace_parents = 1;
+int core_commit_graph;
int core_apply_sparse_checkout;
int merge_log_config = -1;
int precomposed_unicode = -1; /* see probe_utf8_pathname_composition() */
diff --git a/fast-import.c b/fast-import.c
index 92b89d1..2e5d173 100644
--- a/fast-import.c
+++ b/fast-import.c
@@ -316,7 +316,7 @@ static struct atom_str **atom_table;
/* The .pack file being generated */
static struct pack_idx_option pack_idx_opts;
static unsigned int pack_id;
-static struct sha1file *pack_file;
+static struct hashfile *pack_file;
static struct packed_git *pack_data;
static struct packed_git **all_packs;
static off_t pack_size;
@@ -905,12 +905,12 @@ static void start_packfile(void)
p->pack_fd = pack_fd;
p->do_not_close = 1;
- pack_file = sha1fd(pack_fd, p->pack_name);
+ pack_file = hashfd(pack_fd, p->pack_name);
hdr.hdr_signature = htonl(PACK_SIGNATURE);
hdr.hdr_version = htonl(2);
hdr.hdr_entries = 0;
- sha1write(pack_file, &hdr, sizeof(hdr));
+ hashwrite(pack_file, &hdr, sizeof(hdr));
pack_data = p;
pack_size = sizeof(hdr);
@@ -1016,7 +1016,7 @@ static void end_packfile(void)
struct tag *t;
close_pack_windows(pack_data);
- sha1close(pack_file, cur_pack_oid.hash, 0);
+ finalize_hashfile(pack_file, cur_pack_oid.hash, 0);
fixup_pack_header_footer(pack_data->pack_fd, pack_data->sha1,
pack_data->pack_name, object_count,
cur_pack_oid.hash, pack_size);
@@ -1092,15 +1092,15 @@ static int store_object(
unsigned char hdr[96];
struct object_id oid;
unsigned long hdrlen, deltalen;
- git_SHA_CTX c;
+ git_hash_ctx c;
git_zstream s;
hdrlen = xsnprintf((char *)hdr, sizeof(hdr), "%s %lu",
type_name(type), (unsigned long)dat->len) + 1;
- git_SHA1_Init(&c);
- git_SHA1_Update(&c, hdr, hdrlen);
- git_SHA1_Update(&c, dat->buf, dat->len);
- git_SHA1_Final(oid.hash, &c);
+ the_hash_algo->init_fn(&c);
+ the_hash_algo->update_fn(&c, hdr, hdrlen);
+ the_hash_algo->update_fn(&c, dat->buf, dat->len);
+ the_hash_algo->final_fn(oid.hash, &c);
if (oidout)
oidcpy(oidout, &oid);
@@ -1118,11 +1118,13 @@ static int store_object(
return 1;
}
- if (last && last->data.buf && last->depth < max_depth && dat->len > 20) {
+ if (last && last->data.buf && last->depth < max_depth
+ && dat->len > the_hash_algo->rawsz) {
+
delta_count_attempts_by_type[type]++;
delta = diff_delta(last->data.buf, last->data.len,
dat->buf, dat->len,
- &deltalen, dat->len - 20);
+ &deltalen, dat->len - the_hash_algo->rawsz);
} else
delta = NULL;
@@ -1180,23 +1182,23 @@ static int store_object(
hdrlen = encode_in_pack_object_header(hdr, sizeof(hdr),
OBJ_OFS_DELTA, deltalen);
- sha1write(pack_file, hdr, hdrlen);
+ hashwrite(pack_file, hdr, hdrlen);
pack_size += hdrlen;
hdr[pos] = ofs & 127;
while (ofs >>= 7)
hdr[--pos] = 128 | (--ofs & 127);
- sha1write(pack_file, hdr + pos, sizeof(hdr) - pos);
+ hashwrite(pack_file, hdr + pos, sizeof(hdr) - pos);
pack_size += sizeof(hdr) - pos;
} else {
e->depth = 0;
hdrlen = encode_in_pack_object_header(hdr, sizeof(hdr),
type, dat->len);
- sha1write(pack_file, hdr, hdrlen);
+ hashwrite(pack_file, hdr, hdrlen);
pack_size += hdrlen;
}
- sha1write(pack_file, out, s.total_out);
+ hashwrite(pack_file, out, s.total_out);
pack_size += s.total_out;
e->idx.crc32 = crc32_end(pack_file);
@@ -1215,9 +1217,9 @@ static int store_object(
return 0;
}
-static void truncate_pack(struct sha1file_checkpoint *checkpoint)
+static void truncate_pack(struct hashfile_checkpoint *checkpoint)
{
- if (sha1file_truncate(pack_file, checkpoint))
+ if (hashfile_truncate(pack_file, checkpoint))
die_errno("cannot truncate pack to skip duplicate");
pack_size = checkpoint->offset;
}
@@ -1231,9 +1233,9 @@ static void stream_blob(uintmax_t len, struct object_id *oidout, uintmax_t mark)
struct object_id oid;
unsigned long hdrlen;
off_t offset;
- git_SHA_CTX c;
+ git_hash_ctx c;
git_zstream s;
- struct sha1file_checkpoint checkpoint;
+ struct hashfile_checkpoint checkpoint;
int status = Z_OK;
/* Determine if we should auto-checkpoint. */
@@ -1241,13 +1243,13 @@ static void stream_blob(uintmax_t len, struct object_id *oidout, uintmax_t mark)
|| (pack_size + 60 + len) < pack_size)
cycle_packfile();
- sha1file_checkpoint(pack_file, &checkpoint);
+ hashfile_checkpoint(pack_file, &checkpoint);
offset = checkpoint.offset;
hdrlen = xsnprintf((char *)out_buf, out_sz, "blob %" PRIuMAX, len) + 1;
- git_SHA1_Init(&c);
- git_SHA1_Update(&c, out_buf, hdrlen);
+ the_hash_algo->init_fn(&c);
+ the_hash_algo->update_fn(&c, out_buf, hdrlen);
crc32_begin(pack_file);
@@ -1265,7 +1267,7 @@ static void stream_blob(uintmax_t len, struct object_id *oidout, uintmax_t mark)
if (!n && feof(stdin))
die("EOF in data (%" PRIuMAX " bytes remaining)", len);
- git_SHA1_Update(&c, in_buf, n);
+ the_hash_algo->update_fn(&c, in_buf, n);
s.next_in = in_buf;
s.avail_in = n;
len -= n;
@@ -1275,7 +1277,7 @@ static void stream_blob(uintmax_t len, struct object_id *oidout, uintmax_t mark)
if (!s.avail_out || status == Z_STREAM_END) {
size_t n = s.next_out - out_buf;
- sha1write(pack_file, out_buf, n);
+ hashwrite(pack_file, out_buf, n);
pack_size += n;
s.next_out = out_buf;
s.avail_out = out_sz;
@@ -1291,7 +1293,7 @@ static void stream_blob(uintmax_t len, struct object_id *oidout, uintmax_t mark)
}
}
git_deflate_end(&s);
- git_SHA1_Final(oid.hash, &c);
+ the_hash_algo->final_fn(oid.hash, &c);
if (oidout)
oidcpy(oidout, &oid);
@@ -1350,25 +1352,25 @@ static void *gfi_unpack_entry(
{
enum object_type type;
struct packed_git *p = all_packs[oe->pack_id];
- if (p == pack_data && p->pack_size < (pack_size + 20)) {
+ if (p == pack_data && p->pack_size < (pack_size + the_hash_algo->rawsz)) {
/* The object is stored in the packfile we are writing to
* and we have modified it since the last time we scanned
* back to read a previously written object. If an old
- * window covered [p->pack_size, p->pack_size + 20) its
+ * window covered [p->pack_size, p->pack_size + rawsz) its
* data is stale and is not valid. Closing all windows
* and updating the packfile length ensures we can read
* the newly written data.
*/
close_pack_windows(p);
- sha1flush(pack_file);
+ hashflush(pack_file);
- /* We have to offer 20 bytes additional on the end of
+ /* We have to offer rawsz bytes additional on the end of
* the packfile as the core unpacker code assumes the
* footer is present at the file end and must promise
- * at least 20 bytes within any window it maps. But
+ * at least rawsz bytes within any window it maps. But
* we don't actually create the footer here.
*/
- p->pack_size = pack_size + 20;
+ p->pack_size = pack_size + the_hash_algo->rawsz;
}
return unpack_entry(p, oe->idx.offset, &type, sizep);
}
@@ -2204,7 +2206,7 @@ static void construct_path_with_fanout(const char *hex_sha1,
unsigned char fanout, char *path)
{
unsigned int i = 0, j = 0;
- if (fanout >= 20)
+ if (fanout >= the_hash_algo->rawsz)
die("Too large fanout (%u)", fanout);
while (fanout) {
path[i++] = hex_sha1[j++];
@@ -2212,8 +2214,8 @@ static void construct_path_with_fanout(const char *hex_sha1,
path[i++] = '/';
fanout--;
}
- memcpy(path + i, hex_sha1 + j, GIT_SHA1_HEXSZ - j);
- path[i + GIT_SHA1_HEXSZ - j] = '\0';
+ memcpy(path + i, hex_sha1 + j, the_hash_algo->hexsz - j);
+ path[i + the_hash_algo->hexsz - j] = '\0';
}
static uintmax_t do_change_note_fanout(
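
fast-import now goes through the renamed csum-file API (hashfd/hashwrite/finalize_hashfile) and the_hash_algo function pointers instead of hard-coded SHA-1 calls. A small sketch of the write side, illustrative rather than part of the commit; the CSUM_CLOSE flag and GIT_MAX_RAWSZ buffer size are assumptions about the surrounding headers (fast-import itself passes 0 to keep the descriptor open):

#include "cache.h"
#include "csum-file.h"

/* Write a buffer to fd with a trailing checksum, as the pack writer does. */
static void write_with_trailer(int fd, const char *name,
			       const void *buf, unsigned int len)
{
	struct hashfile *f = hashfd(fd, name);
	unsigned char trailer[GIT_MAX_RAWSZ];

	hashwrite(f, buf, len);                    /* data is hashed as it is written */
	finalize_hashfile(f, trailer, CSUM_CLOSE); /* append checksum, close fd */
}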
diff --git a/fetch-object.c b/fetch-object.c
new file mode 100644
index 0000000..853624f
--- /dev/null
+++ b/fetch-object.c
@@ -0,0 +1,45 @@
+#include "cache.h"
+#include "packfile.h"
+#include "pkt-line.h"
+#include "strbuf.h"
+#include "transport.h"
+#include "fetch-object.h"
+
+static void fetch_refs(const char *remote_name, struct ref *ref)
+{
+ struct remote *remote;
+ struct transport *transport;
+ int original_fetch_if_missing = fetch_if_missing;
+
+ fetch_if_missing = 0;
+ remote = remote_get(remote_name);
+ if (!remote->url[0])
+ die(_("Remote with no URL"));
+ transport = transport_get(remote, remote->url[0]);
+
+ transport_set_option(transport, TRANS_OPT_FROM_PROMISOR, "1");
+ transport_set_option(transport, TRANS_OPT_NO_DEPENDENTS, "1");
+ transport_fetch_refs(transport, ref);
+ fetch_if_missing = original_fetch_if_missing;
+}
+
+void fetch_object(const char *remote_name, const unsigned char *sha1)
+{
+ struct ref *ref = alloc_ref(sha1_to_hex(sha1));
+ hashcpy(ref->old_oid.hash, sha1);
+ fetch_refs(remote_name, ref);
+}
+
+void fetch_objects(const char *remote_name, const struct oid_array *to_fetch)
+{
+ struct ref *ref = NULL;
+ int i;
+
+ for (i = 0; i < to_fetch->nr; i++) {
+ struct ref *new_ref = alloc_ref(oid_to_hex(&to_fetch->oid[i]));
+ oidcpy(&new_ref->old_oid, &to_fetch->oid[i]);
+ new_ref->next = ref;
+ ref = new_ref;
+ }
+ fetch_refs(remote_name, ref);
+}
diff --git a/fetch-object.h b/fetch-object.h
new file mode 100644
index 0000000..4b269d0
--- /dev/null
+++ b/fetch-object.h
@@ -0,0 +1,11 @@
+#ifndef FETCH_OBJECT_H
+#define FETCH_OBJECT_H
+
+#include "sha1-array.h"
+
+extern void fetch_object(const char *remote_name, const unsigned char *sha1);
+
+extern void fetch_objects(const char *remote_name,
+ const struct oid_array *to_fetch);
+
+#endif
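
A hypothetical caller sketch for the new helpers, illustrative only: when an object is absent locally and a promisor remote is configured (repository_format_partial_clone, defined in the environment.c hunk above), fetch it on demand. Error handling is elided.

#include "cache.h"
#include "fetch-object.h"

/* Illustrative only: fetch a missing object from the promisor remote. */
static void demand_fetch(const struct object_id *oid)
{
	if (repository_format_partial_clone && !has_object_file(oid))
		fetch_object(repository_format_partial_clone, oid->hash);
}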
diff --git a/fetch-pack.c b/fetch-pack.c
index 9f6b07a..8253d74 100644
--- a/fetch-pack.c
+++ b/fetch-pack.c
@@ -29,6 +29,7 @@ static int deepen_not_ok;
static int fetch_fsck_objects = -1;
static int transfer_fsck_objects = -1;
static int agent_supported;
+static int server_supports_filtering;
static struct lock_file shallow_lock;
static const char *alternate_shallow_file;
@@ -379,6 +380,8 @@ static int find_common(struct fetch_pack_args *args,
if (deepen_not_ok) strbuf_addstr(&c, " deepen-not");
if (agent_supported) strbuf_addf(&c, " agent=%s",
git_user_agent_sanitized());
+ if (args->filter_options.choice)
+ strbuf_addstr(&c, " filter");
packet_buf_write(&req_buf, "want %s%s\n", remote_hex, c.buf);
strbuf_release(&c);
} else
@@ -407,6 +410,9 @@ static int find_common(struct fetch_pack_args *args,
packet_buf_write(&req_buf, "deepen-not %s", s->string);
}
}
+ if (server_supports_filtering && args->filter_options.choice)
+ packet_buf_write(&req_buf, "filter %s",
+ args->filter_options.filter_spec);
packet_buf_flush(&req_buf);
state_len = req_buf.len;
@@ -450,6 +456,8 @@ static int find_common(struct fetch_pack_args *args,
flushes = 0;
retval = -1;
+ if (args->no_dependents)
+ goto done;
while ((oid = get_rev())) {
packet_buf_write(&req_buf, "have %s\n", oid_to_hex(oid));
print_verbose(args, "have %s", oid_to_hex(oid));
@@ -709,6 +717,7 @@ static int everything_local(struct fetch_pack_args *args,
{
struct ref *ref;
int retval;
+ int old_save_commit_buffer = save_commit_buffer;
timestamp_t cutoff = 0;
save_commit_buffer = 0;
@@ -735,29 +744,31 @@ static int everything_local(struct fetch_pack_args *args,
}
}
- if (!args->deepen) {
- for_each_ref(mark_complete_oid, NULL);
- for_each_cached_alternate(mark_alternate_complete);
- commit_list_sort_by_date(&complete);
- if (cutoff)
- mark_recent_complete_commits(args, cutoff);
- }
+ if (!args->no_dependents) {
+ if (!args->deepen) {
+ for_each_ref(mark_complete_oid, NULL);
+ for_each_cached_alternate(mark_alternate_complete);
+ commit_list_sort_by_date(&complete);
+ if (cutoff)
+ mark_recent_complete_commits(args, cutoff);
+ }
- /*
- * Mark all complete remote refs as common refs.
- * Don't mark them common yet; the server has to be told so first.
- */
- for (ref = *refs; ref; ref = ref->next) {
- struct object *o = deref_tag(lookup_object(ref->old_oid.hash),
- NULL, 0);
+ /*
+ * Mark all complete remote refs as common refs.
+ * Don't mark them common yet; the server has to be told so first.
+ */
+ for (ref = *refs; ref; ref = ref->next) {
+ struct object *o = deref_tag(lookup_object(ref->old_oid.hash),
+ NULL, 0);
- if (!o || o->type != OBJ_COMMIT || !(o->flags & COMPLETE))
- continue;
+ if (!o || o->type != OBJ_COMMIT || !(o->flags & COMPLETE))
+ continue;
- if (!(o->flags & SEEN)) {
- rev_list_push((struct commit *)o, COMMON_REF | SEEN);
+ if (!(o->flags & SEEN)) {
+ rev_list_push((struct commit *)o, COMMON_REF | SEEN);
- mark_common((struct commit *)o, 1, 1);
+ mark_common((struct commit *)o, 1, 1);
+ }
}
}
@@ -777,6 +788,9 @@ static int everything_local(struct fetch_pack_args *args,
print_verbose(args, _("already have %s (%s)"), oid_to_hex(remote),
ref->name);
}
+
+ save_commit_buffer = old_save_commit_buffer;
+
return retval;
}
@@ -833,7 +847,7 @@ static int get_pack(struct fetch_pack_args *args,
argv_array_push(&cmd.args, alternate_shallow_file);
}
- if (do_keep) {
+ if (do_keep || args->from_promisor) {
if (pack_lockfile)
cmd.out = -1;
cmd_name = "index-pack";
@@ -843,7 +857,7 @@ static int get_pack(struct fetch_pack_args *args,
argv_array_push(&cmd.args, "-v");
if (args->use_thin_pack)
argv_array_push(&cmd.args, "--fix-thin");
- if (args->lock_pack || unpack_limit) {
+ if (do_keep && (args->lock_pack || unpack_limit)) {
char hostname[HOST_NAME_MAX + 1];
if (xgethostname(hostname, sizeof(hostname)))
xsnprintf(hostname, sizeof(hostname), "localhost");
@@ -853,6 +867,8 @@ static int get_pack(struct fetch_pack_args *args,
}
if (args->check_self_contained_and_connected)
argv_array_push(&cmd.args, "--check-self-contained-and-connected");
+ if (args->from_promisor)
+ argv_array_push(&cmd.args, "--promisor");
}
else {
cmd_name = "unpack-objects";
@@ -964,6 +980,13 @@ static struct ref *do_fetch_pack(struct fetch_pack_args *args,
else
prefer_ofs_delta = 0;
+ if (server_supports("filter")) {
+ server_supports_filtering = 1;
+ print_verbose(args, _("Server supports filter"));
+ } else if (args->filter_options.choice) {
+ warning("filtering not recognized by server, ignoring");
+ }
+
if ((agent_feature = server_feature_value("agent", &agent_len))) {
agent_supported = 1;
if (agent_len)
diff --git a/fetch-pack.h b/fetch-pack.h
index b6aeb43..3e224a1 100644
--- a/fetch-pack.h
+++ b/fetch-pack.h
@@ -3,6 +3,7 @@
#include "string-list.h"
#include "run-command.h"
+#include "list-objects-filter-options.h"
struct oid_array;
@@ -12,6 +13,7 @@ struct fetch_pack_args {
int depth;
const char *deepen_since;
const struct string_list *deepen_not;
+ struct list_objects_filter_options filter_options;
unsigned deepen_relative:1;
unsigned quiet:1;
unsigned keep_pack:1;
@@ -29,6 +31,14 @@ struct fetch_pack_args {
unsigned cloning:1;
unsigned update_shallow:1;
unsigned deepen:1;
+ unsigned from_promisor:1;
+
+ /*
+ * If 1, fetch_pack() will also not modify any object flags.
+ * This allows fetch_pack() to safely be called by any function,
+ * regardless of which object flags it uses (if any).
+ */
+ unsigned no_dependents:1;
};
/*
diff --git a/git-send-email.perl b/git-send-email.perl
index 340b5c8..bbf4dea 100755
--- a/git-send-email.perl
+++ b/git-send-email.perl
@@ -26,7 +26,7 @@ use Text::ParseWords;
use Term::ANSIColor;
use File::Temp qw/ tempdir tempfile /;
use File::Spec::Functions qw(catdir catfile);
-use Error qw(:try);
+use Git::Error qw(:try);
use Cwd qw(abs_path cwd);
use Git;
use Git::I18N;
diff --git a/git-submodule.sh b/git-submodule.sh
index 156255a..2491496 100755
--- a/git-submodule.sh
+++ b/git-submodule.sh
@@ -428,60 +428,7 @@ cmd_deinit()
shift
done
- if test -n "$deinit_all" && test "$#" -ne 0
- then
- echo >&2 "$(eval_gettext "pathspec and --all are incompatible")"
- usage
- fi
- if test $# = 0 && test -z "$deinit_all"
- then
- die "$(eval_gettext "Use '--all' if you really want to deinitialize all submodules")"
- fi
-
- {
- git submodule--helper list --prefix "$wt_prefix" "$@" ||
- echo "#unmatched" $?
- } |
- while read -r mode sha1 stage sm_path
- do
- die_if_unmatched "$mode" "$sha1"
- name=$(git submodule--helper name "$sm_path") || exit
-
- displaypath=$(git submodule--helper relative-path "$sm_path" "$wt_prefix")
-
- # Remove the submodule work tree (unless the user already did it)
- if test -d "$sm_path"
- then
- # Protect submodules containing a .git directory
- if test -d "$sm_path/.git"
- then
- die "$(eval_gettext "\
-Submodule work tree '\$displaypath' contains a .git directory
-(use 'rm -rf' if you really want to remove it including all of its history)")"
- fi
-
- if test -z "$force"
- then
- git rm -qn "$sm_path" ||
- die "$(eval_gettext "Submodule work tree '\$displaypath' contains local modifications; use '-f' to discard them")"
- fi
- rm -rf "$sm_path" &&
- say "$(eval_gettext "Cleared directory '\$displaypath'")" ||
- say "$(eval_gettext "Could not remove submodule work tree '\$displaypath'")"
- fi
-
- mkdir "$sm_path" || say "$(eval_gettext "Could not create empty submodule directory '\$displaypath'")"
-
- # Remove the .git/config entries (unless the user already did it)
- if test -n "$(git config --get-regexp submodule."$name\.")"
- then
- # Remove the whole section so we have a clean state when
- # the user later decides to init this submodule again
- url=$(git config submodule."$name".url)
- git config --remove-section submodule."$name" 2>/dev/null &&
- say "$(eval_gettext "Submodule '\$name' (\$url) unregistered for path '\$displaypath'")"
- fi
- done
+ git ${wt_prefix:+-C "$wt_prefix"} submodule--helper deinit ${GIT_QUIET:+--quiet} ${prefix:+--prefix "$prefix"} ${force:+--force} ${deinit_all:+--all} "$@"
}
is_tip_reachable () (
@@ -1036,63 +983,8 @@ cmd_sync()
;;
esac
done
- cd_to_toplevel
- {
- git submodule--helper list --prefix "$wt_prefix" "$@" ||
- echo "#unmatched" $?
- } |
- while read -r mode sha1 stage sm_path
- do
- die_if_unmatched "$mode" "$sha1"
-
- # skip inactive submodules
- if ! git submodule--helper is-active "$sm_path"
- then
- continue
- fi
-
- name=$(git submodule--helper name "$sm_path")
- url=$(git config -f .gitmodules --get submodule."$name".url)
-
- # Possibly a url relative to parent
- case "$url" in
- ./*|../*)
- # rewrite foo/bar as ../.. to find path from
- # submodule work tree to superproject work tree
- up_path="$(printf '%s\n' "$sm_path" | sed "s/[^/][^/]*/../g")" &&
- # guarantee a trailing /
- up_path=${up_path%/}/ &&
- # path from submodule work tree to submodule origin repo
- sub_origin_url=$(git submodule--helper resolve-relative-url "$url" "$up_path") &&
- # path from superproject work tree to submodule origin repo
- super_config_url=$(git submodule--helper resolve-relative-url "$url") || exit
- ;;
- *)
- sub_origin_url="$url"
- super_config_url="$url"
- ;;
- esac
-
- displaypath=$(git submodule--helper relative-path "$prefix$sm_path" "$wt_prefix")
- say "$(eval_gettext "Synchronizing submodule url for '\$displaypath'")"
- git config submodule."$name".url "$super_config_url"
-
- if test -e "$sm_path"/.git
- then
- (
- sanitize_submodule_env
- cd "$sm_path"
- remote=$(get_default_remote)
- git config remote."$remote".url "$sub_origin_url"
- if test -n "$recursive"
- then
- prefix="$prefix$sm_path/"
- eval cmd_sync
- fi
- )
- fi
- done
+ git ${wt_prefix:+-C "$wt_prefix"} ${prefix:+--super-prefix "$prefix"} submodule--helper sync ${GIT_QUIET:+--quiet} ${recursive:+--recursive} "$@"
}
cmd_absorbgitdirs()
diff --git a/git-svn.perl b/git-svn.perl
index 76a75d0..a6b6c3e 100755
--- a/git-svn.perl
+++ b/git-svn.perl
@@ -1200,6 +1200,11 @@ sub cmd_branch {
$ctx->copy($src, $rev, $dst)
unless $_dry_run;
+ # Release resources held by ctx before creating another SVN::Ra
+ # so destruction is orderly. This seems necessary with SVN 1.9.5
+ # to avoid segfaults.
+ $ctx = undef;
+
$gs->fetch_all;
}
diff --git a/git.c b/git.c
index c870b97..c7b5ada 100644
--- a/git.c
+++ b/git.c
@@ -388,6 +388,7 @@ static struct cmd_struct commands[] = {
{ "clone", cmd_clone },
{ "column", cmd_column, RUN_SETUP_GENTLY },
{ "commit", cmd_commit, RUN_SETUP | NEED_WORK_TREE },
+ { "commit-graph", cmd_commit_graph, RUN_SETUP },
{ "commit-tree", cmd_commit_tree, RUN_SETUP },
{ "config", cmd_config, RUN_SETUP_GENTLY },
{ "count-objects", cmd_count_objects, RUN_SETUP },
diff --git a/hash.h b/hash.h
index 7d7a864..7c8238b 100644
--- a/hash.h
+++ b/hash.h
@@ -15,6 +15,31 @@
#include "block-sha1/sha1.h"
#endif
+#ifndef platform_SHA_CTX
+/*
+ * platform's underlying implementation of SHA-1; could be OpenSSL,
+ * blk_SHA, Apple CommonCrypto, etc... Note that the relevant
+ * SHA-1 header may have already defined platform_SHA_CTX for our
+ * own implementations like block-sha1 and ppc-sha1, so we list
+ * the default for OpenSSL compatible SHA-1 implementations here.
+ */
+#define platform_SHA_CTX SHA_CTX
+#define platform_SHA1_Init SHA1_Init
+#define platform_SHA1_Update SHA1_Update
+#define platform_SHA1_Final SHA1_Final
+#endif
+
+#define git_SHA_CTX platform_SHA_CTX
+#define git_SHA1_Init platform_SHA1_Init
+#define git_SHA1_Update platform_SHA1_Update
+#define git_SHA1_Final platform_SHA1_Final
+
+#ifdef SHA1_MAX_BLOCK_SIZE
+#include "compat/sha1-chunked.h"
+#undef git_SHA1_Update
+#define git_SHA1_Update git_SHA1_Update_Chunked
+#endif
+
/*
* Note that these constants are suitable for indexing the hash_algos array and
* comparing against each other, but are otherwise arbitrary, so they should not
@@ -30,9 +55,15 @@
/* Number of algorithms supported (including unknown). */
#define GIT_HASH_NALGOS (GIT_HASH_SHA1 + 1)
-typedef void (*git_hash_init_fn)(void *ctx);
-typedef void (*git_hash_update_fn)(void *ctx, const void *in, size_t len);
-typedef void (*git_hash_final_fn)(unsigned char *hash, void *ctx);
+/* A suitably aligned type for stack allocations of hash contexts. */
+union git_hash_ctx {
+ git_SHA_CTX sha1;
+};
+typedef union git_hash_ctx git_hash_ctx;
+
+typedef void (*git_hash_init_fn)(git_hash_ctx *ctx);
+typedef void (*git_hash_update_fn)(git_hash_ctx *ctx, const void *in, size_t len);
+typedef void (*git_hash_final_fn)(unsigned char *hash, git_hash_ctx *ctx);
struct git_hash_algo {
/*
@@ -44,9 +75,6 @@ struct git_hash_algo {
/* A four-byte version identifier, used in pack indices. */
uint32_t format_id;
- /* The size of a hash context (e.g. git_SHA_CTX). */
- size_t ctxsz;
-
/* The length of the hash in binary. */
size_t rawsz;
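
The function-pointer typedefs above now take the git_hash_ctx union directly, which is exactly the shape used by the fast-import conversion earlier in this patch. A minimal, illustrative sketch of hashing a buffer through the abstraction:

#include "cache.h"

/* Hash an in-memory buffer with whatever algorithm the repository uses. */
static void hash_buffer(const void *buf, size_t len,
			unsigned char *out /* at least the_hash_algo->rawsz bytes */)
{
	git_hash_ctx ctx;

	the_hash_algo->init_fn(&ctx);
	the_hash_algo->update_fn(&ctx, buf, len);
	the_hash_algo->final_fn(out, &ctx);
}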
diff --git a/hashmap.h b/hashmap.h
index 7ce79f3..d375d9c 100644
--- a/hashmap.h
+++ b/hashmap.h
@@ -400,7 +400,6 @@ static inline void hashmap_disable_item_counting(struct hashmap *map)
*/
static inline void hashmap_enable_item_counting(struct hashmap *map)
{
- void *item;
unsigned int n = 0;
struct hashmap_iter iter;
@@ -408,7 +407,7 @@ static inline void hashmap_enable_item_counting(struct hashmap *map)
return;
hashmap_iter_init(map, &iter);
- while ((item = hashmap_iter_next(&iter)))
+ while (hashmap_iter_next(&iter))
n++;
map->do_count_items = 1;
diff --git a/http-push.c b/http-push.c
index 8814aa1..7dcd9da 100644
--- a/http-push.c
+++ b/http-push.c
@@ -915,6 +915,10 @@ static struct remote_lock *lock_remote(const char *path, long timeout)
lock->timeout = -1;
}
XML_ParserFree(parser);
+ } else {
+ fprintf(stderr,
+ "error: curl result=%d, HTTP code=%ld\n",
+ results.curl_result, results.http_code);
}
} else {
fprintf(stderr, "Unable to start LOCK request\n");
diff --git a/http-walker.c b/http-walker.c
index 1ae8363..07c2b1a 100644
--- a/http-walker.c
+++ b/http-walker.c
@@ -544,8 +544,10 @@ static int fetch_object(struct walker *walker, unsigned char *sha1)
} else if (hashcmp(obj_req->sha1, req->real_sha1)) {
ret = error("File %s has bad hash", hex);
} else if (req->rename < 0) {
- ret = error("unable to write sha1 filename %s",
- sha1_file_name(req->sha1));
+ struct strbuf buf = STRBUF_INIT;
+ sha1_file_name(&buf, req->sha1);
+ ret = error("unable to write sha1 filename %s", buf.buf);
+ strbuf_release(&buf);
}
release_http_object_request(req);
diff --git a/http.c b/http.c
index 41cfa41..8c11156 100644
--- a/http.c
+++ b/http.c
@@ -13,8 +13,11 @@
#include "transport.h"
#include "packfile.h"
#include "protocol.h"
+#include "string-list.h"
static struct trace_key trace_curl = TRACE_KEY_INIT(CURL);
+static int trace_curl_data = 1;
+static struct string_list cookies_to_redact = STRING_LIST_INIT_DUP;
#if LIBCURL_VERSION_NUM >= 0x070a08
long int git_curl_ipresolve = CURL_IPRESOLVE_WHATEVER;
#else
@@ -575,6 +578,54 @@ static void redact_sensitive_header(struct strbuf *header)
/* Everything else is opaque and possibly sensitive */
strbuf_setlen(header, sensitive_header - header->buf);
strbuf_addstr(header, " <redacted>");
+ } else if (cookies_to_redact.nr &&
+ skip_prefix(header->buf, "Cookie:", &sensitive_header)) {
+ struct strbuf redacted_header = STRBUF_INIT;
+ char *cookie;
+
+ while (isspace(*sensitive_header))
+ sensitive_header++;
+
+ /*
+ * The contents of header starting from sensitive_header will
+ * subsequently be overridden, so it is fine to mutate this
+ * string (hence the assignment to "char *").
+ */
+ cookie = (char *) sensitive_header;
+
+ while (cookie) {
+ char *equals;
+ char *semicolon = strstr(cookie, "; ");
+ if (semicolon)
+ *semicolon = 0;
+ equals = strchrnul(cookie, '=');
+ if (!equals) {
+ /* invalid cookie, just append and continue */
+ strbuf_addstr(&redacted_header, cookie);
+ continue;
+ }
+ *equals = 0; /* temporarily set to NUL for lookup */
+ if (string_list_lookup(&cookies_to_redact, cookie)) {
+ strbuf_addstr(&redacted_header, cookie);
+ strbuf_addstr(&redacted_header, "=<redacted>");
+ } else {
+ *equals = '=';
+ strbuf_addstr(&redacted_header, cookie);
+ }
+ if (semicolon) {
+ /*
+ * There are more cookies. (Or, for some
+ * reason, the input string ends in "; ".)
+ */
+ strbuf_addstr(&redacted_header, "; ");
+ cookie = semicolon + strlen("; ");
+ } else {
+ cookie = NULL;
+ }
+ }
+
+ strbuf_setlen(header, sensitive_header - header->buf);
+ strbuf_addbuf(header, &redacted_header);
}
}
@@ -645,24 +696,32 @@ static int curl_trace(CURL *handle, curl_infotype type, char *data, size_t size,
curl_dump_header(text, (unsigned char *)data, size, DO_FILTER);
break;
case CURLINFO_DATA_OUT:
- text = "=> Send data";
- curl_dump_data(text, (unsigned char *)data, size);
+ if (trace_curl_data) {
+ text = "=> Send data";
+ curl_dump_data(text, (unsigned char *)data, size);
+ }
break;
case CURLINFO_SSL_DATA_OUT:
- text = "=> Send SSL data";
- curl_dump_data(text, (unsigned char *)data, size);
+ if (trace_curl_data) {
+ text = "=> Send SSL data";
+ curl_dump_data(text, (unsigned char *)data, size);
+ }
break;
case CURLINFO_HEADER_IN:
text = "<= Recv header";
curl_dump_header(text, (unsigned char *)data, size, NO_FILTER);
break;
case CURLINFO_DATA_IN:
- text = "<= Recv data";
- curl_dump_data(text, (unsigned char *)data, size);
+ if (trace_curl_data) {
+ text = "<= Recv data";
+ curl_dump_data(text, (unsigned char *)data, size);
+ }
break;
case CURLINFO_SSL_DATA_IN:
- text = "<= Recv SSL data";
- curl_dump_data(text, (unsigned char *)data, size);
+ if (trace_curl_data) {
+ text = "<= Recv SSL data";
+ curl_dump_data(text, (unsigned char *)data, size);
+ }
break;
default: /* we ignore unknown types by default */
@@ -807,6 +866,13 @@ static CURL *get_curl_handle(void)
if (getenv("GIT_CURL_VERBOSE"))
curl_easy_setopt(result, CURLOPT_VERBOSE, 1L);
setup_curl_trace(result);
+ if (getenv("GIT_TRACE_CURL_NO_DATA"))
+ trace_curl_data = 0;
+ if (getenv("GIT_REDACT_COOKIES")) {
+ string_list_split(&cookies_to_redact,
+ getenv("GIT_REDACT_COOKIES"), ',', -1);
+ string_list_sort(&cookies_to_redact);
+ }
curl_easy_setopt(result, CURLOPT_USERAGENT,
user_agent ? user_agent : git_user_agent());
@@ -2168,7 +2234,7 @@ struct http_object_request *new_http_object_request(const char *base_url,
unsigned char *sha1)
{
char *hex = sha1_to_hex(sha1);
- const char *filename;
+ struct strbuf filename = STRBUF_INIT;
char prevfile[PATH_MAX];
int prevlocal;
char prev_buf[PREV_BUF_SIZE];
@@ -2180,14 +2246,15 @@ struct http_object_request *new_http_object_request(const char *base_url,
hashcpy(freq->sha1, sha1);
freq->localfile = -1;
- filename = sha1_file_name(sha1);
+ sha1_file_name(&filename, sha1);
snprintf(freq->tmpfile, sizeof(freq->tmpfile),
- "%s.temp", filename);
+ "%s.temp", filename.buf);
- snprintf(prevfile, sizeof(prevfile), "%s.prev", filename);
+ snprintf(prevfile, sizeof(prevfile), "%s.prev", filename.buf);
unlink_or_warn(prevfile);
rename(freq->tmpfile, prevfile);
unlink_or_warn(freq->tmpfile);
+ strbuf_release(&filename);
if (freq->localfile != -1)
error("fd leakage in start: %d", freq->localfile);
@@ -2302,6 +2369,7 @@ void process_http_object_request(struct http_object_request *freq)
int finish_http_object_request(struct http_object_request *freq)
{
struct stat st;
+ struct strbuf filename = STRBUF_INIT;
close(freq->localfile);
freq->localfile = -1;
@@ -2327,8 +2395,10 @@ int finish_http_object_request(struct http_object_request *freq)
unlink_or_warn(freq->tmpfile);
return -1;
}
- freq->rename =
- finalize_object_file(freq->tmpfile, sha1_file_name(freq->sha1));
+
+ sha1_file_name(&filename, freq->sha1);
+ freq->rename = finalize_object_file(freq->tmpfile, filename.buf);
+ strbuf_release(&filename);
return freq->rename;
}
diff --git a/list-objects-filter-options.c b/list-objects-filter-options.c
index 4c5b34e..6a3cc98 100644
--- a/list-objects-filter-options.c
+++ b/list-objects-filter-options.c
@@ -21,29 +21,36 @@
* subordinate commands when necessary. We also "intern" the arg for
* the convenience of the current command.
*/
-int parse_list_objects_filter(struct list_objects_filter_options *filter_options,
- const char *arg)
+static int gently_parse_list_objects_filter(
+ struct list_objects_filter_options *filter_options,
+ const char *arg,
+ struct strbuf *errbuf)
{
const char *v0;
- if (filter_options->choice)
- die(_("multiple object filter types cannot be combined"));
+ if (filter_options->choice) {
+ if (errbuf) {
+ strbuf_init(errbuf, 0);
+ strbuf_addstr(
+ errbuf,
+ _("multiple filter-specs cannot be combined"));
+ }
+ return 1;
+ }
filter_options->filter_spec = strdup(arg);
if (!strcmp(arg, "blob:none")) {
filter_options->choice = LOFC_BLOB_NONE;
return 0;
- }
- if (skip_prefix(arg, "blob:limit=", &v0)) {
- if (!git_parse_ulong(v0, &filter_options->blob_limit_value))
- die(_("invalid filter-spec expression '%s'"), arg);
- filter_options->choice = LOFC_BLOB_LIMIT;
- return 0;
- }
+ } else if (skip_prefix(arg, "blob:limit=", &v0)) {
+ if (git_parse_ulong(v0, &filter_options->blob_limit_value)) {
+ filter_options->choice = LOFC_BLOB_LIMIT;
+ return 0;
+ }
- if (skip_prefix(arg, "sparse:oid=", &v0)) {
+ } else if (skip_prefix(arg, "sparse:oid=", &v0)) {
struct object_context oc;
struct object_id sparse_oid;
@@ -57,15 +64,27 @@ int parse_list_objects_filter(struct list_objects_filter_options *filter_options
filter_options->sparse_oid_value = oiddup(&sparse_oid);
filter_options->choice = LOFC_SPARSE_OID;
return 0;
- }
- if (skip_prefix(arg, "sparse:path=", &v0)) {
+ } else if (skip_prefix(arg, "sparse:path=", &v0)) {
filter_options->choice = LOFC_SPARSE_PATH;
filter_options->sparse_path_value = strdup(v0);
return 0;
}
- die(_("invalid filter-spec expression '%s'"), arg);
+ if (errbuf) {
+ strbuf_init(errbuf, 0);
+ strbuf_addf(errbuf, "invalid filter-spec '%s'", arg);
+ }
+ memset(filter_options, 0, sizeof(*filter_options));
+ return 1;
+}
+
+int parse_list_objects_filter(struct list_objects_filter_options *filter_options,
+ const char *arg)
+{
+ struct strbuf buf = STRBUF_INIT;
+ if (gently_parse_list_objects_filter(filter_options, arg, &buf))
+ die("%s", buf.buf);
return 0;
}
@@ -75,7 +94,7 @@ int opt_parse_list_objects_filter(const struct option *opt,
struct list_objects_filter_options *filter_options = opt->value;
if (unset || !arg) {
- list_objects_filter_release(filter_options);
+ list_objects_filter_set_no_filter(filter_options);
return 0;
}
@@ -90,3 +109,44 @@ void list_objects_filter_release(
free(filter_options->sparse_path_value);
memset(filter_options, 0, sizeof(*filter_options));
}
+
+void partial_clone_register(
+ const char *remote,
+ const struct list_objects_filter_options *filter_options)
+{
+ /*
+ * Record the name of the partial clone remote in the
+ * config and in the global variable -- the latter is
+ * used throughout to indicate that partial clone is
+ * enabled and to expect missing objects.
+ */
+ if (repository_format_partial_clone &&
+ *repository_format_partial_clone &&
+ strcmp(remote, repository_format_partial_clone))
+ die(_("cannot change partial clone promisor remote"));
+
+ git_config_set("core.repositoryformatversion", "1");
+ git_config_set("extensions.partialclone", remote);
+
+ repository_format_partial_clone = xstrdup(remote);
+
+ /*
+ * Record the initial filter-spec in the config as
+ * the default for subsequent fetches from this remote.
+ */
+ core_partial_clone_filter_default =
+ xstrdup(filter_options->filter_spec);
+ git_config_set("core.partialclonefilter",
+ core_partial_clone_filter_default);
+}
+
+void partial_clone_get_default_filter_spec(
+ struct list_objects_filter_options *filter_options)
+{
+ /*
+ * Parse default value, but silently ignore it if it is invalid.
+ */
+ gently_parse_list_objects_filter(filter_options,
+ core_partial_clone_filter_default,
+ NULL);
+}
diff --git a/list-objects-filter-options.h b/list-objects-filter-options.h
index eea44a1..0000a61 100644
--- a/list-objects-filter-options.h
+++ b/list-objects-filter-options.h
@@ -31,6 +31,11 @@ struct list_objects_filter_options {
enum list_objects_filter_choice choice;
/*
+ * Choice is LOFC_DISABLED because "--no-filter" was requested.
+ */
+ unsigned int no_filter : 1;
+
+ /*
* Parsed values (fields) from within the filter-spec. These are
* choice-specific; not all values will be defined for any given
* choice.
@@ -58,4 +63,17 @@ int opt_parse_list_objects_filter(const struct option *opt,
void list_objects_filter_release(
struct list_objects_filter_options *filter_options);
+static inline void list_objects_filter_set_no_filter(
+ struct list_objects_filter_options *filter_options)
+{
+ list_objects_filter_release(filter_options);
+ filter_options->no_filter = 1;
+}
+
+void partial_clone_register(
+ const char *remote,
+ const struct list_objects_filter_options *filter_options);
+void partial_clone_get_default_filter_spec(
+ struct list_objects_filter_options *filter_options);
+
#endif /* LIST_OBJECTS_FILTER_OPTIONS_H */
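
An illustrative sketch of how these declarations fit together (the real wiring lives in the clone/fetch changes of this same series); "origin" and "blob:none" are example values and error handling is omitted:

#include "cache.h"
#include "list-objects-filter-options.h"

static void example_partial_clone_setup(void)
{
	struct list_objects_filter_options filter_options;

	memset(&filter_options, 0, sizeof(filter_options));

	/* parse the user-supplied spec; dies on an invalid one */
	parse_list_objects_filter(&filter_options, "blob:none");

	/* record the promisor remote and default filter in the config */
	partial_clone_register("origin", &filter_options);

	list_objects_filter_release(&filter_options);
}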
diff --git a/list-objects.c b/list-objects.c
index 0966cdc..168bef6 100644
--- a/list-objects.c
+++ b/list-objects.c
@@ -9,6 +9,7 @@
#include "list-objects.h"
#include "list-objects-filter.h"
#include "list-objects-filter-options.h"
+#include "packfile.h"
static void process_blob(struct rev_info *revs,
struct blob *blob,
@@ -30,6 +31,20 @@ static void process_blob(struct rev_info *revs,
if (obj->flags & (UNINTERESTING | SEEN))
return;
+ /*
+ * Pre-filter known-missing objects when explicitly requested.
+ * Otherwise, a missing object error message may be reported
+ * later (depending on other filtering criteria).
+ *
+ * Note that this "--exclude-promisor-objects" pre-filtering
+ * may cause the actual filter to report an incomplete list
+ * of missing objects.
+ */
+ if (revs->exclude_promisor_objects &&
+ !has_object_file(&obj->oid) &&
+ is_promisor_object(&obj->oid))
+ return;
+
pathlen = path->len;
strbuf_addstr(path, name);
if (filter_fn)
@@ -91,6 +106,8 @@ static void process_tree(struct rev_info *revs,
all_entries_interesting: entry_not_interesting;
int baselen = base->len;
enum list_objects_filter_result r = LOFR_MARK_SEEN | LOFR_DO_SHOW;
+ int gently = revs->ignore_missing_links ||
+ revs->exclude_promisor_objects;
if (!revs->tree_objects)
return;
@@ -98,9 +115,19 @@ static void process_tree(struct rev_info *revs,
die("bad tree object");
if (obj->flags & (UNINTERESTING | SEEN))
return;
- if (parse_tree_gently(tree, revs->ignore_missing_links) < 0) {
+ if (parse_tree_gently(tree, gently) < 0) {
if (revs->ignore_missing_links)
return;
+
+ /*
+ * Pre-filter known-missing tree objects when explicitly
+ * requested. This may cause the actual filter to report
+ * an incomplete list of missing objects.
+ */
+ if (revs->exclude_promisor_objects &&
+ is_promisor_object(&obj->oid))
+ return;
+
die("bad tree object %s", oid_to_hex(&obj->oid));
}
diff --git a/log-tree.c b/log-tree.c
index 2eeddbe..bdf23c5 100644
--- a/log-tree.c
+++ b/log-tree.c
@@ -499,7 +499,7 @@ static void show_one_mergetag(struct commit *commit,
int status, nth;
size_t payload_size, gpg_message_offset;
- hash_sha1_file(extra->value, extra->len, type_name(OBJ_TAG), oid.hash);
+ hash_object_file(extra->value, extra->len, type_name(OBJ_TAG), &oid);
tag = lookup_tag(&oid);
if (!tag)
return; /* error message already given */
@@ -659,9 +659,6 @@ void show_log(struct rev_info *opt)
show_mergetag(opt, commit);
}
- if (!get_cached_commit_buffer(commit, NULL))
- return;
-
if (opt->show_notes) {
int raw;
struct strbuf notebuf = STRBUF_INIT;
diff --git a/mailinfo.c b/mailinfo.c
index a89db22..d04142c 100644
--- a/mailinfo.c
+++ b/mailinfo.c
@@ -1167,11 +1167,13 @@ void clear_mailinfo(struct mailinfo *mi)
strbuf_release(&mi->inbody_header_accum);
free(mi->message_id);
- for (i = 0; mi->p_hdr_data[i]; i++)
- strbuf_release(mi->p_hdr_data[i]);
+ if (mi->p_hdr_data)
+ for (i = 0; mi->p_hdr_data[i]; i++)
+ strbuf_release(mi->p_hdr_data[i]);
free(mi->p_hdr_data);
- for (i = 0; mi->s_hdr_data[i]; i++)
- strbuf_release(mi->s_hdr_data[i]);
+ if (mi->s_hdr_data)
+ for (i = 0; mi->s_hdr_data[i]; i++)
+ strbuf_release(mi->s_hdr_data[i]);
free(mi->s_hdr_data);
while (mi->content < mi->content_top) {
diff --git a/match-trees.c b/match-trees.c
index 396b733..0ca99d5 100644
--- a/match-trees.c
+++ b/match-trees.c
@@ -158,22 +158,20 @@ static void match_trees(const struct object_id *hash1,
}
/*
- * A tree "hash1" has a subdirectory at "prefix". Come up with a
- * tree object by replacing it with another tree "hash2".
+ * A tree "oid1" has a subdirectory at "prefix". Come up with a tree object by
+ * replacing it with another tree "oid2".
*/
-static int splice_tree(const unsigned char *hash1,
- const char *prefix,
- const unsigned char *hash2,
- unsigned char *result)
+static int splice_tree(const struct object_id *oid1, const char *prefix,
+ const struct object_id *oid2, struct object_id *result)
{
char *subpath;
int toplen;
char *buf;
unsigned long sz;
struct tree_desc desc;
- unsigned char *rewrite_here;
- const unsigned char *rewrite_with;
- unsigned char subtree[20];
+ struct object_id *rewrite_here;
+ const struct object_id *rewrite_with;
+ struct object_id subtree;
enum object_type type;
int status;
@@ -182,9 +180,9 @@ static int splice_tree(const unsigned char *hash1,
if (*subpath)
subpath++;
- buf = read_sha1_file(hash1, &type, &sz);
+ buf = read_sha1_file(oid1->hash, &type, &sz);
if (!buf)
- die("cannot read tree %s", sha1_to_hex(hash1));
+ die("cannot read tree %s", oid_to_hex(oid1));
init_tree_desc(&desc, buf, sz);
rewrite_here = NULL;
@@ -197,26 +195,26 @@ static int splice_tree(const unsigned char *hash1,
if (strlen(name) == toplen &&
!memcmp(name, prefix, toplen)) {
if (!S_ISDIR(mode))
- die("entry %s in tree %s is not a tree",
- name, sha1_to_hex(hash1));
- rewrite_here = (unsigned char *) oid->hash;
+ die("entry %s in tree %s is not a tree", name,
+ oid_to_hex(oid1));
+ rewrite_here = (struct object_id *)oid;
break;
}
update_tree_entry(&desc);
}
if (!rewrite_here)
- die("entry %.*s not found in tree %s",
- toplen, prefix, sha1_to_hex(hash1));
+ die("entry %.*s not found in tree %s", toplen, prefix,
+ oid_to_hex(oid1));
if (*subpath) {
- status = splice_tree(rewrite_here, subpath, hash2, subtree);
+ status = splice_tree(rewrite_here, subpath, oid2, &subtree);
if (status)
return status;
- rewrite_with = subtree;
+ rewrite_with = &subtree;
+ } else {
+ rewrite_with = oid2;
}
- else
- rewrite_with = hash2;
- hashcpy(rewrite_here, rewrite_with);
- status = write_sha1_file(buf, sz, tree_type, result);
+ oidcpy(rewrite_here, rewrite_with);
+ status = write_object_file(buf, sz, tree_type, result);
free(buf);
return status;
}
@@ -280,7 +278,7 @@ void shift_tree(const struct object_id *hash1,
if (!*add_prefix)
return;
- splice_tree(hash1->hash, add_prefix, hash2->hash, shifted->hash);
+ splice_tree(hash1, add_prefix, hash2, shifted);
}
/*
@@ -334,7 +332,7 @@ void shift_tree_by(const struct object_id *hash1,
* shift tree2 down by adding shift_prefix above it
* to match tree1.
*/
- splice_tree(hash1->hash, shift_prefix, hash2->hash, shifted->hash);
+ splice_tree(hash1, shift_prefix, hash2, shifted);
else
/*
* shift tree2 up by removing shift_prefix from it
diff --git a/merge-recursive.c b/merge-recursive.c
index cc5fa0a..6ff971f 100644
--- a/merge-recursive.c
+++ b/merge-recursive.c
@@ -513,6 +513,25 @@ static void record_df_conflict_files(struct merge_options *o,
struct rename {
struct diff_filepair *pair;
+ /*
+ * Purpose of src_entry and dst_entry:
+ *
+ * If 'before' is renamed to 'after' then src_entry will contain
+ * the versions of 'before' from the merge_base, HEAD, and MERGE in
+ * stages 1, 2, and 3; dst_entry will contain the respective
+ * versions of 'after' in corresponding locations. Thus, we have a
+ * total of six modes and oids, though some will be null. (Stage 0
+ * is ignored; we're interested in handling conflicts.)
+ *
+ * Since we don't turn on break-rewrites by default, neither
+ * src_entry nor dst_entry can have all three of their stages have
+ * non-null oids, meaning at most four of the six will be non-null.
+ * Also, since this is a rename, both src_entry and dst_entry will
+ * have at least one non-null oid, meaning at least two will be
+ * non-null. Of the six oids, a typical rename will have three be
+ * non-null. Only two implies a rename/delete, and four implies a
+ * rename/add.
+ */
struct stage_data *src_entry;
struct stage_data *dst_entry;
unsigned processed:1;
@@ -1009,8 +1028,9 @@ static int merge_file_1(struct merge_options *o,
if ((merge_status < 0) || !result_buf.ptr)
ret = err(o, _("Failed to execute internal merge"));
- if (!ret && write_sha1_file(result_buf.ptr, result_buf.size,
- blob_type, result->oid.hash))
+ if (!ret &&
+ write_object_file(result_buf.ptr, result_buf.size,
+ blob_type, &result->oid))
ret = err(o, _("Unable to add %s to database"),
a->path);
@@ -1998,10 +2018,10 @@ int merge_trees(struct merge_options *o,
get_files_dirs(o, merge);
entries = get_unmerged();
- record_df_conflict_files(o, entries);
re_head = get_renames(o, head, common, head, merge, entries);
re_merge = get_renames(o, merge, common, head, merge, entries);
clean = process_renames(o, re_head, re_merge);
+ record_df_conflict_files(o, entries);
if (clean < 0)
goto cleanup;
for (i = entries->nr-1; 0 <= i; i--) {
diff --git a/mru.c b/mru.c
deleted file mode 100644
index 9dedae0..0000000
--- a/mru.c
+++ /dev/null
@@ -1,50 +0,0 @@
-#include "cache.h"
-#include "mru.h"
-
-void mru_append(struct mru *mru, void *item)
-{
- struct mru_entry *cur = xmalloc(sizeof(*cur));
- cur->item = item;
- cur->prev = mru->tail;
- cur->next = NULL;
-
- if (mru->tail)
- mru->tail->next = cur;
- else
- mru->head = cur;
- mru->tail = cur;
-}
-
-void mru_mark(struct mru *mru, struct mru_entry *entry)
-{
- /* If we're already at the front of the list, nothing to do */
- if (mru->head == entry)
- return;
-
- /* Otherwise, remove us from our current slot... */
- if (entry->prev)
- entry->prev->next = entry->next;
- if (entry->next)
- entry->next->prev = entry->prev;
- else
- mru->tail = entry->prev;
-
- /* And insert us at the beginning. */
- entry->prev = NULL;
- entry->next = mru->head;
- if (mru->head)
- mru->head->prev = entry;
- mru->head = entry;
-}
-
-void mru_clear(struct mru *mru)
-{
- struct mru_entry *p = mru->head;
-
- while (p) {
- struct mru_entry *to_free = p;
- p = p->next;
- free(to_free);
- }
- mru->head = mru->tail = NULL;
-}
diff --git a/mru.h b/mru.h
deleted file mode 100644
index 42e4aea..0000000
--- a/mru.h
+++ /dev/null
@@ -1,45 +0,0 @@
-#ifndef MRU_H
-#define MRU_H
-
-/**
- * A simple most-recently-used cache, backed by a doubly-linked list.
- *
- * Usage is roughly:
- *
- * // Create a list. Zero-initialization is required.
- * static struct mru cache;
- * mru_append(&cache, item);
- * ...
- *
- * // Iterate in MRU order.
- * struct mru_entry *p;
- * for (p = cache.head; p; p = p->next) {
- * if (matches(p->item))
- * break;
- * }
- *
- * // Mark an item as used, moving it to the front of the list.
- * mru_mark(&cache, p);
- *
- * // Reset the list to empty, cleaning up all resources.
- * mru_clear(&cache);
- *
- * Note that you SHOULD NOT call mru_mark() and then continue traversing the
- * list; it reorders the marked item to the front of the list, and therefore
- * you will begin traversing the whole list again.
- */
-
-struct mru_entry {
- void *item;
- struct mru_entry *prev, *next;
-};
-
-struct mru {
- struct mru_entry *head, *tail;
-};
-
-void mru_append(struct mru *mru, void *item);
-void mru_mark(struct mru *mru, struct mru_entry *entry);
-void mru_clear(struct mru *mru);
-
-#endif /* MRU_H */
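
The dedicated MRU helper goes away; the pack lookup cache that used it is presumably carried by the generic doubly-linked list from list.h instead. A sketch of the equivalent move-to-front idiom under that assumption — the struct and function names here are hypothetical:

#include "cache.h"
#include "list.h"

/* Hypothetical cache entry; only the embedded list_head matters here. */
struct cached_thing {
	struct list_head lru;
	void *payload;
};

/* Same effect as the removed mru_mark(): unlink and re-insert at the head. */
static void mark_used(struct list_head *head, struct cached_thing *thing)
{
	list_del(&thing->lru);
	list_add(&thing->lru, head);
}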
diff --git a/name-hash.c b/name-hash.c
index 45c98db..ada66f0 100644
--- a/name-hash.c
+++ b/name-hash.c
@@ -578,6 +578,8 @@ static void threaded_lazy_init_name_hash(
static void lazy_init_name_hash(struct index_state *istate)
{
+ uint64_t start = getnanotime();
+
if (istate->name_hash_initialized)
return;
hashmap_init(&istate->name_hash, cache_entry_cmp, NULL, istate->cache_nr);
@@ -600,6 +602,7 @@ static void lazy_init_name_hash(struct index_state *istate)
}
istate->name_hash_initialized = 1;
+ trace_performance_since(start, "initialize name hash");
}
/*
diff --git a/notes-cache.c b/notes-cache.c
index 17ee860..398e61d 100644
--- a/notes-cache.c
+++ b/notes-cache.c
@@ -54,10 +54,10 @@ int notes_cache_write(struct notes_cache *c)
if (!c->tree.dirty)
return 0;
- if (write_notes_tree(&c->tree, tree_oid.hash))
+ if (write_notes_tree(&c->tree, &tree_oid))
return -1;
- if (commit_tree(c->validity, strlen(c->validity), tree_oid.hash, NULL,
- commit_oid.hash, NULL, NULL) < 0)
+ if (commit_tree(c->validity, strlen(c->validity), &tree_oid, NULL,
+ &commit_oid, NULL, NULL) < 0)
return -1;
if (update_ref("update notes cache", c->tree.update_ref, &commit_oid,
NULL, 0, UPDATE_REFS_QUIET_ON_ERR) < 0)
@@ -88,7 +88,7 @@ int notes_cache_put(struct notes_cache *c, struct object_id *key_oid,
{
struct object_id value_oid;
- if (write_sha1_file(data, size, "blob", value_oid.hash) < 0)
+ if (write_object_file(data, size, "blob", &value_oid) < 0)
return -1;
return add_note(&c->tree, key_oid, &value_oid, NULL);
}
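
The notes code now follows the object_id-based writer API: write_object_file() and commit_tree() return their results through struct object_id pointers rather than bare sha1 buffers. A minimal, illustrative sketch of the new shape:

#include "cache.h"

/* Write a buffer as a blob; the resulting object id lands in *result. */
static int write_blob(const void *data, unsigned long len,
		      struct object_id *result)
{
	return write_object_file(data, len, "blob", result);
}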
diff --git a/notes-merge.c b/notes-merge.c
index 0f6573c..c09c5e0 100644
--- a/notes-merge.c
+++ b/notes-merge.c
@@ -642,9 +642,8 @@ int notes_merge(struct notes_merge_options *o,
struct commit_list *parents = NULL;
commit_list_insert(remote, &parents); /* LIFO order */
commit_list_insert(local, &parents);
- create_notes_commit(local_tree, parents,
- o->commit_msg.buf, o->commit_msg.len,
- result_oid->hash);
+ create_notes_commit(local_tree, parents, o->commit_msg.buf,
+ o->commit_msg.len, result_oid);
}
found_result:
@@ -718,8 +717,8 @@ int notes_merge_commit(struct notes_merge_options *o,
strbuf_setlen(&path, baselen);
}
- create_notes_commit(partial_tree, partial_commit->parents,
- msg, strlen(msg), result_oid->hash);
+ create_notes_commit(partial_tree, partial_commit->parents, msg,
+ strlen(msg), result_oid);
unuse_commit_buffer(partial_commit, buffer);
if (o->verbosity >= 4)
printf("Finalized notes merge commit: %s\n",
diff --git a/notes-utils.c b/notes-utils.c
index 5c8e70c..02407fe 100644
--- a/notes-utils.c
+++ b/notes-utils.c
@@ -6,13 +6,13 @@
void create_notes_commit(struct notes_tree *t, struct commit_list *parents,
const char *msg, size_t msg_len,
- unsigned char *result_sha1)
+ struct object_id *result_oid)
{
struct object_id tree_oid;
assert(t->initialized);
- if (write_notes_tree(t, tree_oid.hash))
+ if (write_notes_tree(t, &tree_oid))
die("Failed to write notes tree to database");
if (!parents) {
@@ -27,7 +27,8 @@ void create_notes_commit(struct notes_tree *t, struct commit_list *parents,
/* else: t->ref points to nothing, assume root/orphan commit */
}
- if (commit_tree(msg, msg_len, tree_oid.hash, parents, result_sha1, NULL, NULL))
+ if (commit_tree(msg, msg_len, &tree_oid, parents, result_oid, NULL,
+ NULL))
die("Failed to commit notes tree to database");
}
@@ -47,7 +48,7 @@ void commit_notes(struct notes_tree *t, const char *msg)
strbuf_addstr(&buf, msg);
strbuf_complete_line(&buf);
- create_notes_commit(t, NULL, buf.buf, buf.len, commit_oid.hash);
+ create_notes_commit(t, NULL, buf.buf, buf.len, &commit_oid);
strbuf_insert(&buf, 0, "notes: ", 7); /* commit message starts at index 7 */
update_ref(buf.buf, t->update_ref, &commit_oid, NULL, 0,
UPDATE_REFS_DIE_ON_ERR);
diff --git a/notes-utils.h b/notes-utils.h
index 1190578..5d79cbe 100644
--- a/notes-utils.h
+++ b/notes-utils.h
@@ -15,7 +15,8 @@
* The resulting commit SHA1 is stored in result_sha1.
*/
void create_notes_commit(struct notes_tree *t, struct commit_list *parents,
- const char *msg, size_t msg_len, unsigned char *result_sha1);
+ const char *msg, size_t msg_len,
+ struct object_id *result_oid);
void commit_notes(struct notes_tree *t, const char *msg);
diff --git a/notes.c b/notes.c
index c7f21fa..ce9a8f5 100644
--- a/notes.c
+++ b/notes.c
@@ -270,8 +270,8 @@ static int note_tree_insert(struct notes_tree *t, struct int_node *tree,
if (!oidcmp(&l->val_oid, &entry->val_oid))
return 0;
- ret = combine_notes(l->val_oid.hash,
- entry->val_oid.hash);
+ ret = combine_notes(&l->val_oid,
+ &entry->val_oid);
if (!ret && is_null_oid(&l->val_oid))
note_tree_remove(t, tree, n, entry);
free(entry);
@@ -667,7 +667,7 @@ static int tree_write_stack_finish_subtree(struct tree_write_stack *tws)
ret = tree_write_stack_finish_subtree(n);
if (ret)
return ret;
- ret = write_sha1_file(n->buf.buf, n->buf.len, tree_type, s.hash);
+ ret = write_object_file(n->buf.buf, n->buf.len, tree_type, &s);
if (ret)
return ret;
strbuf_release(&n->buf);
@@ -786,8 +786,8 @@ static int prune_notes_helper(const struct object_id *object_oid,
return 0;
}
-int combine_notes_concatenate(unsigned char *cur_sha1,
- const unsigned char *new_sha1)
+int combine_notes_concatenate(struct object_id *cur_oid,
+ const struct object_id *new_oid)
{
char *cur_msg = NULL, *new_msg = NULL, *buf;
unsigned long cur_len, new_len, buf_len;
@@ -795,18 +795,18 @@ int combine_notes_concatenate(unsigned char *cur_sha1,
int ret;
/* read in both note blob objects */
- if (!is_null_sha1(new_sha1))
- new_msg = read_sha1_file(new_sha1, &new_type, &new_len);
+ if (!is_null_oid(new_oid))
+ new_msg = read_sha1_file(new_oid->hash, &new_type, &new_len);
if (!new_msg || !new_len || new_type != OBJ_BLOB) {
free(new_msg);
return 0;
}
- if (!is_null_sha1(cur_sha1))
- cur_msg = read_sha1_file(cur_sha1, &cur_type, &cur_len);
+ if (!is_null_oid(cur_oid))
+ cur_msg = read_sha1_file(cur_oid->hash, &cur_type, &cur_len);
if (!cur_msg || !cur_len || cur_type != OBJ_BLOB) {
free(cur_msg);
free(new_msg);
- hashcpy(cur_sha1, new_sha1);
+ oidcpy(cur_oid, new_oid);
return 0;
}
@@ -825,20 +825,20 @@ int combine_notes_concatenate(unsigned char *cur_sha1,
free(new_msg);
/* create a new blob object from buf */
- ret = write_sha1_file(buf, buf_len, blob_type, cur_sha1);
+ ret = write_object_file(buf, buf_len, blob_type, cur_oid);
free(buf);
return ret;
}
-int combine_notes_overwrite(unsigned char *cur_sha1,
- const unsigned char *new_sha1)
+int combine_notes_overwrite(struct object_id *cur_oid,
+ const struct object_id *new_oid)
{
- hashcpy(cur_sha1, new_sha1);
+ oidcpy(cur_oid, new_oid);
return 0;
}
-int combine_notes_ignore(unsigned char *cur_sha1,
- const unsigned char *new_sha1)
+int combine_notes_ignore(struct object_id *cur_oid,
+ const struct object_id *new_oid)
{
return 0;
}
@@ -848,17 +848,17 @@ int combine_notes_ignore(unsigned char *cur_sha1,
* newlines removed.
*/
static int string_list_add_note_lines(struct string_list *list,
- const unsigned char *sha1)
+ const struct object_id *oid)
{
char *data;
unsigned long len;
enum object_type t;
- if (is_null_sha1(sha1))
+ if (is_null_oid(oid))
return 0;
/* read_sha1_file NUL-terminates */
- data = read_sha1_file(sha1, &t, &len);
+ data = read_sha1_file(oid->hash, &t, &len);
if (t != OBJ_BLOB || !data || !len) {
free(data);
return t != OBJ_BLOB || !data;
@@ -884,17 +884,17 @@ static int string_list_join_lines_helper(struct string_list_item *item,
return 0;
}
-int combine_notes_cat_sort_uniq(unsigned char *cur_sha1,
- const unsigned char *new_sha1)
+int combine_notes_cat_sort_uniq(struct object_id *cur_oid,
+ const struct object_id *new_oid)
{
struct string_list sort_uniq_list = STRING_LIST_INIT_DUP;
struct strbuf buf = STRBUF_INIT;
int ret = 1;
/* read both note blob objects into unique_lines */
- if (string_list_add_note_lines(&sort_uniq_list, cur_sha1))
+ if (string_list_add_note_lines(&sort_uniq_list, cur_oid))
goto out;
- if (string_list_add_note_lines(&sort_uniq_list, new_sha1))
+ if (string_list_add_note_lines(&sort_uniq_list, new_oid))
goto out;
string_list_remove_empty_items(&sort_uniq_list, 0);
string_list_sort(&sort_uniq_list);
@@ -905,7 +905,7 @@ int combine_notes_cat_sort_uniq(unsigned char *cur_sha1,
string_list_join_lines_helper, &buf))
goto out;
- ret = write_sha1_file(buf.buf, buf.len, blob_type, cur_sha1);
+ ret = write_object_file(buf.buf, buf.len, blob_type, cur_oid);
out:
strbuf_release(&buf);
@@ -1123,11 +1123,12 @@ int for_each_note(struct notes_tree *t, int flags, each_note_fn fn,
return for_each_note_helper(t, t->root, 0, 0, flags, fn, cb_data);
}
-int write_notes_tree(struct notes_tree *t, unsigned char *result)
+int write_notes_tree(struct notes_tree *t, struct object_id *result)
{
struct tree_write_stack root;
struct write_each_note_data cb_data;
int ret;
+ int flags;
if (!t)
t = &default_notes_tree;
@@ -1141,12 +1142,12 @@ int write_notes_tree(struct notes_tree *t, unsigned char *result)
cb_data.next_non_note = t->first_non_note;
/* Write tree objects representing current notes tree */
- ret = for_each_note(t, FOR_EACH_NOTE_DONT_UNPACK_SUBTREES |
- FOR_EACH_NOTE_YIELD_SUBTREES,
- write_each_note, &cb_data) ||
- write_each_non_note_until(NULL, &cb_data) ||
- tree_write_stack_finish_subtree(&root) ||
- write_sha1_file(root.buf.buf, root.buf.len, tree_type, result);
+ flags = FOR_EACH_NOTE_DONT_UNPACK_SUBTREES |
+ FOR_EACH_NOTE_YIELD_SUBTREES;
+ ret = for_each_note(t, flags, write_each_note, &cb_data) ||
+ write_each_non_note_until(NULL, &cb_data) ||
+ tree_write_stack_finish_subtree(&root) ||
+ write_object_file(root.buf.buf, root.buf.len, tree_type, result);
strbuf_release(&root.buf);
return ret;
}
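The notes.c hunks above move blob and tree writes from write_sha1_file() to write_object_file(), which fills in a struct object_id, while reads still go through read_sha1_file() with the raw hash member. A minimal sketch of that mixed usage, built only from the signatures visible in the hunks (the payload string is illustrative; the usual cache.h declarations are assumed):

    struct object_id oid;
    enum object_type type;
    unsigned long size;
    const char *payload = "example note body\n";

    /* writers now hand back a struct object_id ... */
    if (write_object_file(payload, strlen(payload), blob_type, &oid))
        die("unable to write note blob");

    /* ... while readers still take the raw hash, as in the hunks above */
    char *data = read_sha1_file(oid.hash, &type, &size);
    if (!data || type != OBJ_BLOB)
        die("could not read back blob %s", oid_to_hex(&oid));
    free(data);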
diff --git a/notes.h b/notes.h
index 3848c2f..0433f45 100644
--- a/notes.h
+++ b/notes.h
@@ -9,27 +9,32 @@
* When adding a new note annotating the same object as an existing note, it is
* up to the caller to decide how to combine the two notes. The decision is
* made by passing in a function of the following form. The function accepts
- * two SHA1s -- of the existing note and the new note, respectively. The
+ * two object_ids -- of the existing note and the new note, respectively. The
* function then combines the notes in whatever way it sees fit, and writes the
- * resulting SHA1 into the first SHA1 argument (cur_sha1). A non-zero return
+ * resulting oid into the first argument (cur_oid). A non-zero return
* value indicates failure.
*
- * The two given SHA1s shall both be non-NULL and different from each other.
- * Either of them (but not both) may be == null_sha1, which indicates an
- * empty/non-existent note. If the resulting SHA1 (cur_sha1) is == null_sha1,
+ * The two given object_ids shall both be non-NULL and different from each
+ * other. Either of them (but not both) may be == null_oid, which indicates an
+ * empty/non-existent note. If the resulting oid (cur_oid) is == null_oid,
* the note will be removed from the notes tree.
*
* The default combine_notes function (you get this when passing NULL) is
* combine_notes_concatenate(), which appends the contents of the new note to
* the contents of the existing note.
*/
-typedef int (*combine_notes_fn)(unsigned char *cur_sha1, const unsigned char *new_sha1);
+typedef int (*combine_notes_fn)(struct object_id *cur_oid,
+ const struct object_id *new_oid);
/* Common notes combinators */
-int combine_notes_concatenate(unsigned char *cur_sha1, const unsigned char *new_sha1);
-int combine_notes_overwrite(unsigned char *cur_sha1, const unsigned char *new_sha1);
-int combine_notes_ignore(unsigned char *cur_sha1, const unsigned char *new_sha1);
-int combine_notes_cat_sort_uniq(unsigned char *cur_sha1, const unsigned char *new_sha1);
+int combine_notes_concatenate(struct object_id *cur_oid,
+ const struct object_id *new_oid);
+int combine_notes_overwrite(struct object_id *cur_oid,
+ const struct object_id *new_oid);
+int combine_notes_ignore(struct object_id *cur_oid,
+ const struct object_id *new_oid);
+int combine_notes_cat_sort_uniq(struct object_id *cur_oid,
+ const struct object_id *new_oid);
/*
* Notes tree object
@@ -212,7 +217,7 @@ int for_each_note(struct notes_tree *t, int flags, each_note_fn fn,
* Write the given notes_tree structure to the object database
*
* Creates a new tree object encapsulating the current state of the given
- * notes_tree, and stores its SHA1 into the 'result' argument.
+ * notes_tree, and stores its object id into the 'result' argument.
*
* Returns zero on success, non-zero on failure.
*
@@ -220,7 +225,7 @@ int for_each_note(struct notes_tree *t, int flags, each_note_fn fn,
* this function has returned zero. Please also remember to create a
* corresponding commit object, and update the appropriate notes ref.
*/
-int write_notes_tree(struct notes_tree *t, unsigned char *result);
+int write_notes_tree(struct notes_tree *t, struct object_id *result);
/* Flags controlling the operation of prune */
#define NOTES_PRUNE_VERBOSE 1
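The comment block above spells out the combine_notes_fn contract under the new object_id-based signature. As a hedged illustration, here is a trivial custom combiner that keeps the existing note and only adopts the new one when no note exists yet (the function name is made up; the built-in combinators declared above cover the common policies):

    static int combine_notes_keep_existing(struct object_id *cur_oid,
                                           const struct object_id *new_oid)
    {
        /* an empty/non-existent current note is signalled by null_oid */
        if (is_null_oid(cur_oid))
            oidcpy(cur_oid, new_oid);
        return 0;               /* non-zero would report failure */
    }

Such a function would be passed wherever a combine_notes_fn is accepted, for example when initializing a notes tree with init_notes().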
diff --git a/object.c b/object.c
index 0db33b2..e6ad3f6 100644
--- a/object.c
+++ b/object.c
@@ -252,7 +252,7 @@ struct object *parse_object(const struct object_id *oid)
if (obj && obj->parsed)
return obj;
- if ((obj && obj->type == OBJ_BLOB) ||
+ if ((obj && obj->type == OBJ_BLOB && has_object_file(oid)) ||
(!obj && has_object_file(oid) &&
sha1_object_info(oid->hash, NULL) == OBJ_BLOB)) {
if (check_sha1_signature(repl, NULL, 0, NULL) < 0) {
diff --git a/pack-bitmap-write.c b/pack-bitmap-write.c
index a8df5ce..db4c832 100644
--- a/pack-bitmap-write.c
+++ b/pack-bitmap-write.c
@@ -440,19 +440,19 @@ void bitmap_writer_select_commits(struct commit **indexed_commits,
}
-static int sha1write_ewah_helper(void *f, const void *buf, size_t len)
+static int hashwrite_ewah_helper(void *f, const void *buf, size_t len)
{
- /* sha1write will die on error */
- sha1write(f, buf, len);
+ /* hashwrite will die on error */
+ hashwrite(f, buf, len);
return len;
}
/**
* Write the bitmap index to disk
*/
-static inline void dump_bitmap(struct sha1file *f, struct ewah_bitmap *bitmap)
+static inline void dump_bitmap(struct hashfile *f, struct ewah_bitmap *bitmap)
{
- if (ewah_serialize_to(bitmap, sha1write_ewah_helper, f) < 0)
+ if (ewah_serialize_to(bitmap, hashwrite_ewah_helper, f) < 0)
die("Failed to write bitmap index");
}
@@ -462,7 +462,7 @@ static const unsigned char *sha1_access(size_t pos, void *table)
return index[pos]->oid.hash;
}
-static void write_selected_commits_v1(struct sha1file *f,
+static void write_selected_commits_v1(struct hashfile *f,
struct pack_idx_entry **index,
uint32_t index_nr)
{
@@ -477,15 +477,15 @@ static void write_selected_commits_v1(struct sha1file *f,
if (commit_pos < 0)
die("BUG: trying to write commit not in index");
- sha1write_be32(f, commit_pos);
- sha1write_u8(f, stored->xor_offset);
- sha1write_u8(f, stored->flags);
+ hashwrite_be32(f, commit_pos);
+ hashwrite_u8(f, stored->xor_offset);
+ hashwrite_u8(f, stored->flags);
dump_bitmap(f, stored->write_as);
}
}
-static void write_hash_cache(struct sha1file *f,
+static void write_hash_cache(struct hashfile *f,
struct pack_idx_entry **index,
uint32_t index_nr)
{
@@ -494,7 +494,7 @@ static void write_hash_cache(struct sha1file *f,
for (i = 0; i < index_nr; ++i) {
struct object_entry *entry = (struct object_entry *)index[i];
uint32_t hash_value = htonl(entry->hash);
- sha1write(f, &hash_value, sizeof(hash_value));
+ hashwrite(f, &hash_value, sizeof(hash_value));
}
}
@@ -511,13 +511,13 @@ void bitmap_writer_finish(struct pack_idx_entry **index,
static uint16_t default_version = 1;
static uint16_t flags = BITMAP_OPT_FULL_DAG;
struct strbuf tmp_file = STRBUF_INIT;
- struct sha1file *f;
+ struct hashfile *f;
struct bitmap_disk_header header;
int fd = odb_mkstemp(&tmp_file, "pack/tmp_bitmap_XXXXXX");
- f = sha1fd(fd, tmp_file.buf);
+ f = hashfd(fd, tmp_file.buf);
memcpy(header.magic, BITMAP_IDX_SIGNATURE, sizeof(BITMAP_IDX_SIGNATURE));
header.version = htons(default_version);
@@ -525,7 +525,7 @@ void bitmap_writer_finish(struct pack_idx_entry **index,
header.entry_count = htonl(writer.selected_nr);
hashcpy(header.checksum, writer.pack_checksum);
- sha1write(f, &header, sizeof(header));
+ hashwrite(f, &header, sizeof(header));
dump_bitmap(f, writer.commits);
dump_bitmap(f, writer.trees);
dump_bitmap(f, writer.blobs);
@@ -535,7 +535,7 @@ void bitmap_writer_finish(struct pack_idx_entry **index,
if (options & BITMAP_OPT_HASH_CACHE)
write_hash_cache(f, index, index_nr);
- sha1close(f, NULL, CSUM_FSYNC);
+ finalize_hashfile(f, NULL, CSUM_HASH_IN_STREAM | CSUM_FSYNC | CSUM_CLOSE);
if (adjust_shared_perm(tmp_file.buf))
die_errno("unable to make temporary bitmap file readable");
diff --git a/pack-check.c b/pack-check.c
index bd34859..8fc7dd1 100644
--- a/pack-check.c
+++ b/pack-check.c
@@ -41,7 +41,7 @@ int check_pack_crc(struct packed_git *p, struct pack_window **w_curs,
} while (len);
index_crc = p->index_data;
- index_crc += 2 + 256 + p->num_objects * (20/4) + nr;
+ index_crc += 2 + 256 + p->num_objects * (the_hash_algo->rawsz/4) + nr;
return data_crc != ntohl(*index_crc);
}
@@ -54,7 +54,7 @@ static int verify_packfile(struct packed_git *p,
{
off_t index_size = p->index_size;
const unsigned char *index_base = p->index_data;
- git_SHA_CTX ctx;
+ git_hash_ctx ctx;
unsigned char hash[GIT_MAX_RAWSZ], *pack_sig;
off_t offset = 0, pack_sig_ofs = 0;
uint32_t nr_objects, i;
@@ -64,24 +64,24 @@ static int verify_packfile(struct packed_git *p,
if (!is_pack_valid(p))
return error("packfile %s cannot be accessed", p->pack_name);
- git_SHA1_Init(&ctx);
+ the_hash_algo->init_fn(&ctx);
do {
unsigned long remaining;
unsigned char *in = use_pack(p, w_curs, offset, &remaining);
offset += remaining;
if (!pack_sig_ofs)
- pack_sig_ofs = p->pack_size - 20;
+ pack_sig_ofs = p->pack_size - the_hash_algo->rawsz;
if (offset > pack_sig_ofs)
remaining -= (unsigned int)(offset - pack_sig_ofs);
- git_SHA1_Update(&ctx, in, remaining);
+ the_hash_algo->update_fn(&ctx, in, remaining);
} while (offset < pack_sig_ofs);
- git_SHA1_Final(hash, &ctx);
+ the_hash_algo->final_fn(hash, &ctx);
pack_sig = use_pack(p, w_curs, pack_sig_ofs, NULL);
if (hashcmp(hash, pack_sig))
- err = error("%s SHA1 checksum mismatch",
+ err = error("%s pack checksum mismatch",
p->pack_name);
- if (hashcmp(index_base + index_size - 40, pack_sig))
- err = error("%s SHA1 does not match its index",
+ if (hashcmp(index_base + index_size - the_hash_algo->hexsz, pack_sig))
+ err = error("%s pack checksum does not match its index",
p->pack_name);
unuse_pack(w_curs);
@@ -165,8 +165,8 @@ int verify_pack_index(struct packed_git *p)
{
off_t index_size;
const unsigned char *index_base;
- git_SHA_CTX ctx;
- unsigned char sha1[20];
+ git_hash_ctx ctx;
+ unsigned char hash[GIT_MAX_RAWSZ];
int err = 0;
if (open_pack_index(p))
@@ -175,11 +175,11 @@ int verify_pack_index(struct packed_git *p)
index_base = p->index_data;
/* Verify SHA1 sum of the index file */
- git_SHA1_Init(&ctx);
- git_SHA1_Update(&ctx, index_base, (unsigned int)(index_size - 20));
- git_SHA1_Final(sha1, &ctx);
- if (hashcmp(sha1, index_base + index_size - 20))
- err = error("Packfile index for %s SHA1 mismatch",
+ the_hash_algo->init_fn(&ctx);
+ the_hash_algo->update_fn(&ctx, index_base, (unsigned int)(index_size - the_hash_algo->rawsz));
+ the_hash_algo->final_fn(hash, &ctx);
+ if (hashcmp(hash, index_base + index_size - the_hash_algo->rawsz))
+ err = error("Packfile index for %s hash mismatch",
p->pack_name);
return err;
}
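pack-check.c now drives its checksums through the_hash_algo instead of the hard-coded SHA-1 helpers. A condensed sketch of the streaming pattern from the hunks above (data, data_len and expected stand in for the pack contents being verified):

    git_hash_ctx ctx;
    unsigned char hash[GIT_MAX_RAWSZ];

    the_hash_algo->init_fn(&ctx);
    the_hash_algo->update_fn(&ctx, data, data_len);   /* repeated per chunk */
    the_hash_algo->final_fn(hash, &ctx);

    /* lengths come from the algorithm too: rawsz binary bytes, hexsz hex digits */
    if (hashcmp(hash, expected))
        error("pack checksum mismatch");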
diff --git a/pack-revindex.c b/pack-revindex.c
index 1b7ebd8..ff5f62c 100644
--- a/pack-revindex.c
+++ b/pack-revindex.c
@@ -134,10 +134,8 @@ static void create_pack_revindex(struct packed_git *p)
if (!(off & 0x80000000)) {
p->revindex[i].offset = off;
} else {
- p->revindex[i].offset =
- ((uint64_t)ntohl(*off_64++)) << 32;
- p->revindex[i].offset |=
- ntohl(*off_64++);
+ p->revindex[i].offset = get_be64(off_64);
+ off_64 += 2;
}
p->revindex[i].nr = i;
}
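The revindex hunk replaces the two-step ntohl() reconstruction of a large offset with a single get_be64() call. A tiny equivalence sketch with throwaway data:

    uint32_t raw[2] = { htonl(1), htonl(2) };            /* big-endian on disk */
    const unsigned char *entry = (const unsigned char *)raw;

    /* before: two 32-bit loads stitched together */
    uint64_t off_old = ((uint64_t)ntohl(raw[0])) << 32 | ntohl(raw[1]);

    /* after: one helper, no manual shifting or pointer bookkeeping */
    uint64_t off_new = get_be64(entry);                  /* both are 0x100000002 */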
diff --git a/pack-write.c b/pack-write.c
index fea6284..a9d46bc 100644
--- a/pack-write.c
+++ b/pack-write.c
@@ -46,7 +46,7 @@ const char *write_idx_file(const char *index_name, struct pack_idx_entry **objec
int nr_objects, const struct pack_idx_option *opts,
const unsigned char *sha1)
{
- struct sha1file *f;
+ struct hashfile *f;
struct pack_idx_entry **sorted_by_sha, **list, **last;
off_t last_obj_offset = 0;
uint32_t array[256];
@@ -68,7 +68,7 @@ const char *write_idx_file(const char *index_name, struct pack_idx_entry **objec
if (opts->flags & WRITE_IDX_VERIFY) {
assert(index_name);
- f = sha1fd_check(index_name);
+ f = hashfd_check(index_name);
} else {
if (!index_name) {
struct strbuf tmp_file = STRBUF_INIT;
@@ -80,7 +80,7 @@ const char *write_idx_file(const char *index_name, struct pack_idx_entry **objec
if (fd < 0)
die_errno("unable to create '%s'", index_name);
}
- f = sha1fd(fd, index_name);
+ f = hashfd(fd, index_name);
}
/* if last object's offset is >= 2^31 we should use index V2 */
@@ -91,7 +91,7 @@ const char *write_idx_file(const char *index_name, struct pack_idx_entry **objec
struct pack_idx_header hdr;
hdr.idx_signature = htonl(PACK_IDX_SIGNATURE);
hdr.idx_version = htonl(index_version);
- sha1write(f, &hdr, sizeof(hdr));
+ hashwrite(f, &hdr, sizeof(hdr));
}
/*
@@ -110,7 +110,7 @@ const char *write_idx_file(const char *index_name, struct pack_idx_entry **objec
array[i] = htonl(next - sorted_by_sha);
list = next;
}
- sha1write(f, array, 256 * 4);
+ hashwrite(f, array, 256 * 4);
/*
* Write the actual SHA1 entries..
@@ -120,9 +120,9 @@ const char *write_idx_file(const char *index_name, struct pack_idx_entry **objec
struct pack_idx_entry *obj = *list++;
if (index_version < 2) {
uint32_t offset = htonl(obj->offset);
- sha1write(f, &offset, 4);
+ hashwrite(f, &offset, 4);
}
- sha1write(f, obj->oid.hash, 20);
+ hashwrite(f, obj->oid.hash, the_hash_algo->rawsz);
if ((opts->flags & WRITE_IDX_STRICT) &&
(i && !oidcmp(&list[-2]->oid, &obj->oid)))
die("The same object %s appears twice in the pack",
@@ -137,7 +137,7 @@ const char *write_idx_file(const char *index_name, struct pack_idx_entry **objec
for (i = 0; i < nr_objects; i++) {
struct pack_idx_entry *obj = *list++;
uint32_t crc32_val = htonl(obj->crc32);
- sha1write(f, &crc32_val, 4);
+ hashwrite(f, &crc32_val, 4);
}
/* write the 32-bit offset table */
@@ -150,7 +150,7 @@ const char *write_idx_file(const char *index_name, struct pack_idx_entry **objec
? (0x80000000 | nr_large_offset++)
: obj->offset);
offset = htonl(offset);
- sha1write(f, &offset, 4);
+ hashwrite(f, &offset, 4);
}
/* write the large offset table */
@@ -164,25 +164,26 @@ const char *write_idx_file(const char *index_name, struct pack_idx_entry **objec
continue;
split[0] = htonl(offset >> 32);
split[1] = htonl(offset & 0xffffffff);
- sha1write(f, split, 8);
+ hashwrite(f, split, 8);
nr_large_offset--;
}
}
- sha1write(f, sha1, 20);
- sha1close(f, NULL, ((opts->flags & WRITE_IDX_VERIFY)
- ? CSUM_CLOSE : CSUM_FSYNC));
+ hashwrite(f, sha1, the_hash_algo->rawsz);
+ finalize_hashfile(f, NULL, CSUM_HASH_IN_STREAM | CSUM_CLOSE |
+ ((opts->flags & WRITE_IDX_VERIFY)
+ ? 0 : CSUM_FSYNC));
return index_name;
}
-off_t write_pack_header(struct sha1file *f, uint32_t nr_entries)
+off_t write_pack_header(struct hashfile *f, uint32_t nr_entries)
{
struct pack_header hdr;
hdr.hdr_signature = htonl(PACK_SIGNATURE);
hdr.hdr_version = htonl(PACK_VERSION);
hdr.hdr_entries = htonl(nr_entries);
- sha1write(f, &hdr, sizeof(hdr));
+ hashwrite(f, &hdr, sizeof(hdr));
return sizeof(hdr);
}
@@ -203,20 +204,20 @@ off_t write_pack_header(struct sha1file *f, uint32_t nr_entries)
* interested in the resulting SHA1 of pack data above partial_pack_offset.
*/
void fixup_pack_header_footer(int pack_fd,
- unsigned char *new_pack_sha1,
+ unsigned char *new_pack_hash,
const char *pack_name,
uint32_t object_count,
- unsigned char *partial_pack_sha1,
+ unsigned char *partial_pack_hash,
off_t partial_pack_offset)
{
int aligned_sz, buf_sz = 8 * 1024;
- git_SHA_CTX old_sha1_ctx, new_sha1_ctx;
+ git_hash_ctx old_hash_ctx, new_hash_ctx;
struct pack_header hdr;
char *buf;
ssize_t read_result;
- git_SHA1_Init(&old_sha1_ctx);
- git_SHA1_Init(&new_sha1_ctx);
+ the_hash_algo->init_fn(&old_hash_ctx);
+ the_hash_algo->init_fn(&new_hash_ctx);
if (lseek(pack_fd, 0, SEEK_SET) != 0)
die_errno("Failed seeking to start of '%s'", pack_name);
@@ -228,9 +229,9 @@ void fixup_pack_header_footer(int pack_fd,
pack_name);
if (lseek(pack_fd, 0, SEEK_SET) != 0)
die_errno("Failed seeking to start of '%s'", pack_name);
- git_SHA1_Update(&old_sha1_ctx, &hdr, sizeof(hdr));
+ the_hash_algo->update_fn(&old_hash_ctx, &hdr, sizeof(hdr));
hdr.hdr_entries = htonl(object_count);
- git_SHA1_Update(&new_sha1_ctx, &hdr, sizeof(hdr));
+ the_hash_algo->update_fn(&new_hash_ctx, &hdr, sizeof(hdr));
write_or_die(pack_fd, &hdr, sizeof(hdr));
partial_pack_offset -= sizeof(hdr);
@@ -238,28 +239,28 @@ void fixup_pack_header_footer(int pack_fd,
aligned_sz = buf_sz - sizeof(hdr);
for (;;) {
ssize_t m, n;
- m = (partial_pack_sha1 && partial_pack_offset < aligned_sz) ?
+ m = (partial_pack_hash && partial_pack_offset < aligned_sz) ?
partial_pack_offset : aligned_sz;
n = xread(pack_fd, buf, m);
if (!n)
break;
if (n < 0)
die_errno("Failed to checksum '%s'", pack_name);
- git_SHA1_Update(&new_sha1_ctx, buf, n);
+ the_hash_algo->update_fn(&new_hash_ctx, buf, n);
aligned_sz -= n;
if (!aligned_sz)
aligned_sz = buf_sz;
- if (!partial_pack_sha1)
+ if (!partial_pack_hash)
continue;
- git_SHA1_Update(&old_sha1_ctx, buf, n);
+ the_hash_algo->update_fn(&old_hash_ctx, buf, n);
partial_pack_offset -= n;
if (partial_pack_offset == 0) {
- unsigned char sha1[20];
- git_SHA1_Final(sha1, &old_sha1_ctx);
- if (hashcmp(sha1, partial_pack_sha1) != 0)
+ unsigned char hash[GIT_MAX_RAWSZ];
+ the_hash_algo->final_fn(hash, &old_hash_ctx);
+ if (hashcmp(hash, partial_pack_hash) != 0)
die("Unexpected checksum for %s "
"(disk corruption?)", pack_name);
/*
@@ -267,23 +268,24 @@ void fixup_pack_header_footer(int pack_fd,
* pack, which also means making partial_pack_offset
* big enough not to matter anymore.
*/
- git_SHA1_Init(&old_sha1_ctx);
+ the_hash_algo->init_fn(&old_hash_ctx);
partial_pack_offset = ~partial_pack_offset;
partial_pack_offset -= MSB(partial_pack_offset, 1);
}
}
free(buf);
- if (partial_pack_sha1)
- git_SHA1_Final(partial_pack_sha1, &old_sha1_ctx);
- git_SHA1_Final(new_pack_sha1, &new_sha1_ctx);
- write_or_die(pack_fd, new_pack_sha1, 20);
+ if (partial_pack_hash)
+ the_hash_algo->final_fn(partial_pack_hash, &old_hash_ctx);
+ the_hash_algo->final_fn(new_pack_hash, &new_hash_ctx);
+ write_or_die(pack_fd, new_pack_hash, the_hash_algo->rawsz);
fsync_or_die(pack_fd, pack_name);
}
char *index_pack_lockfile(int ip_out)
{
- char packname[46];
+ char packname[GIT_MAX_HEXSZ + 6];
+ const int len = the_hash_algo->hexsz + 6;
/*
* The first thing we expect from index-pack's output
@@ -292,9 +294,9 @@ char *index_pack_lockfile(int ip_out)
* case, we need it to remove the corresponding .keep file
* later on. If we don't get that then tough luck with it.
*/
- if (read_in_full(ip_out, packname, 46) == 46 && packname[45] == '\n') {
+ if (read_in_full(ip_out, packname, len) == len && packname[len-1] == '\n') {
const char *name;
- packname[45] = 0;
+ packname[len-1] = 0;
if (skip_prefix(packname, "keep\t", &name))
return xstrfmt("%s/pack/pack-%s.keep",
get_object_directory(), name);
@@ -332,14 +334,14 @@ int encode_in_pack_object_header(unsigned char *hdr, int hdr_len,
return n;
}
-struct sha1file *create_tmp_packfile(char **pack_tmp_name)
+struct hashfile *create_tmp_packfile(char **pack_tmp_name)
{
struct strbuf tmpname = STRBUF_INIT;
int fd;
fd = odb_mkstemp(&tmpname, "pack/tmp_pack_XXXXXX");
*pack_tmp_name = strbuf_detach(&tmpname, NULL);
- return sha1fd(fd, *pack_tmp_name);
+ return hashfd(fd, *pack_tmp_name);
}
void finish_tmp_packfile(struct strbuf *name_buffer,
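A pattern worth noting in pack-write.c above: fixed-size buffers are declared with the compile-time maxima GIT_MAX_RAWSZ / GIT_MAX_HEXSZ so they fit any supported hash, while the lengths actually read or written come from the_hash_algo at run time. A small hedged sketch of that split, mirroring index_pack_lockfile():

    unsigned char hash[GIT_MAX_RAWSZ];       /* sized for the largest supported hash */
    char packname[GIT_MAX_HEXSZ + 6];        /* "keep\t" + hex hash + "\n", as above */

    /* ...but only the active algorithm's lengths are ever used */
    const size_t raw_len = the_hash_algo->rawsz;         /* 20 for SHA-1 */
    const size_t line_len = the_hash_algo->hexsz + 6;    /* 46 for SHA-1 */

    memset(hash, 0, raw_len);
    memset(packname, 0, line_len);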
diff --git a/pack.h b/pack.h
index 8294341..34a9d45 100644
--- a/pack.h
+++ b/pack.h
@@ -81,7 +81,7 @@ extern const char *write_idx_file(const char *index_name, struct pack_idx_entry
extern int check_pack_crc(struct packed_git *p, struct pack_window **w_curs, off_t offset, off_t len, unsigned int nr);
extern int verify_pack_index(struct packed_git *);
extern int verify_pack(struct packed_git *, verify_fn fn, struct progress *, uint32_t);
-extern off_t write_pack_header(struct sha1file *f, uint32_t);
+extern off_t write_pack_header(struct hashfile *f, uint32_t);
extern void fixup_pack_header_footer(int, unsigned char *, const char *, uint32_t, unsigned char *, off_t);
extern char *index_pack_lockfile(int fd);
@@ -98,7 +98,7 @@ extern int encode_in_pack_object_header(unsigned char *hdr, int hdr_len,
#define PH_ERROR_PROTOCOL (-3)
extern int read_pack_header(int fd, struct pack_header *);
-extern struct sha1file *create_tmp_packfile(char **pack_tmp_name);
+extern struct hashfile *create_tmp_packfile(char **pack_tmp_name);
extern void finish_tmp_packfile(struct strbuf *name_buffer, const char *pack_tmp_name, struct pack_idx_entry **written_list, uint32_t nr_written, struct pack_idx_option *pack_idx_opts, unsigned char sha1[]);
#endif
diff --git a/packfile.c b/packfile.c
index 0532a37..b1d33b6 100644
--- a/packfile.c
+++ b/packfile.c
@@ -1,5 +1,5 @@
#include "cache.h"
-#include "mru.h"
+#include "list.h"
#include "pack.h"
#include "dir.h"
#include "mergesort.h"
@@ -8,6 +8,11 @@
#include "list.h"
#include "streaming.h"
#include "sha1-lookup.h"
+#include "commit.h"
+#include "object.h"
+#include "tag.h"
+#include "tree-walk.h"
+#include "tree.h"
char *odb_pack_name(struct strbuf *buf,
const unsigned char *sha1,
@@ -40,7 +45,7 @@ static unsigned int pack_max_fds;
static size_t peak_pack_mapped;
static size_t pack_mapped;
struct packed_git *packed_git;
-struct mru packed_git_mru;
+LIST_HEAD(packed_git_mru);
#define SZ_FMT PRIuMAX
static inline uintmax_t sz_fmt(size_t s) { return s; }
@@ -299,7 +304,7 @@ void close_pack_index(struct packed_git *p)
}
}
-static void close_pack(struct packed_git *p)
+void close_pack(struct packed_git *p)
{
close_pack_windows(p);
close_pack_fd(p);
@@ -643,10 +648,10 @@ struct packed_git *add_packed_git(const char *path, size_t path_len, int local)
return NULL;
/*
- * ".pack" is long enough to hold any suffix we're adding (and
+ * ".promisor" is long enough to hold any suffix we're adding (and
* the use xsnprintf double-checks that)
*/
- alloc = st_add3(path_len, strlen(".pack"), 1);
+ alloc = st_add3(path_len, strlen(".promisor"), 1);
p = alloc_packed_git(alloc);
memcpy(p->pack_name, path, path_len);
@@ -654,6 +659,10 @@ struct packed_git *add_packed_git(const char *path, size_t path_len, int local)
if (!access(p->pack_name, F_OK))
p->pack_keep = 1;
+ xsnprintf(p->pack_name + path_len, alloc - path_len, ".promisor");
+ if (!access(p->pack_name, F_OK))
+ p->pack_promisor = 1;
+
xsnprintf(p->pack_name + path_len, alloc - path_len, ".pack");
if (stat(p->pack_name, &st) || !S_ISREG(st.st_mode)) {
free(p);
@@ -781,7 +790,8 @@ static void prepare_packed_git_one(char *objdir, int local)
if (ends_with(de->d_name, ".idx") ||
ends_with(de->d_name, ".pack") ||
ends_with(de->d_name, ".bitmap") ||
- ends_with(de->d_name, ".keep"))
+ ends_with(de->d_name, ".keep") ||
+ ends_with(de->d_name, ".promisor"))
string_list_append(&garbage, path.buf);
else
report_garbage(PACKDIR_FILE_GARBAGE, path.buf);
@@ -866,9 +876,10 @@ static void prepare_packed_git_mru(void)
{
struct packed_git *p;
- mru_clear(&packed_git_mru);
+ INIT_LIST_HEAD(&packed_git_mru);
+
for (p = packed_git; p; p = p->next)
- mru_append(&packed_git_mru, p);
+ list_add_tail(&p->mru, &packed_git_mru);
}
static int prepare_packed_git_run_once = 0;
@@ -1702,8 +1713,7 @@ off_t nth_packed_object_offset(const struct packed_git *p, uint32_t n)
return off;
index += p->num_objects * 4 + (off & 0x7fffffff) * 8;
check_pack_index_ptr(p, index);
- return (((uint64_t)ntohl(*((uint32_t *)(index + 0)))) << 32) |
- ntohl(*((uint32_t *)(index + 4)));
+ return get_be64(index);
}
}
@@ -1712,11 +1722,8 @@ off_t find_pack_entry_one(const unsigned char *sha1,
{
const uint32_t *level1_ofs = p->index_data;
const unsigned char *index = p->index_data;
- unsigned hi, lo, stride;
- static int debug_lookup = -1;
-
- if (debug_lookup < 0)
- debug_lookup = !!getenv("GIT_DEBUG_LOOKUP");
+ unsigned stride;
+ uint32_t result;
if (!index) {
if (open_pack_index(p))
@@ -1729,8 +1736,6 @@ off_t find_pack_entry_one(const unsigned char *sha1,
index += 8;
}
index += 4 * 256;
- hi = ntohl(level1_ofs[*sha1]);
- lo = ((*sha1 == 0x0) ? 0 : ntohl(level1_ofs[*sha1 - 1]));
if (p->index_version > 1) {
stride = 20;
} else {
@@ -1738,24 +1743,8 @@ off_t find_pack_entry_one(const unsigned char *sha1,
index += 4;
}
- if (debug_lookup)
- printf("%02x%02x%02x... lo %u hi %u nr %"PRIu32"\n",
- sha1[0], sha1[1], sha1[2], lo, hi, p->num_objects);
-
- while (lo < hi) {
- unsigned mi = lo + (hi - lo) / 2;
- int cmp = hashcmp(index + mi * stride, sha1);
-
- if (debug_lookup)
- printf("lo %u hi %u rg %u mi %u\n",
- lo, hi, hi - lo, mi);
- if (!cmp)
- return nth_packed_object_offset(p, mi);
- if (cmp > 0)
- hi = mi;
- else
- lo = mi+1;
- }
+ if (bsearch_hash(sha1, level1_ofs, index, stride, &result))
+ return nth_packed_object_offset(p, result);
return 0;
}
@@ -1831,15 +1820,16 @@ static int fill_pack_entry(const unsigned char *sha1,
*/
int find_pack_entry(const unsigned char *sha1, struct pack_entry *e)
{
- struct mru_entry *p;
+ struct list_head *pos;
prepare_packed_git();
if (!packed_git)
return 0;
- for (p = packed_git_mru.head; p; p = p->next) {
- if (fill_pack_entry(sha1, e, p->item)) {
- mru_mark(&packed_git_mru, p);
+ list_for_each(pos, &packed_git_mru) {
+ struct packed_git *p = list_entry(pos, struct packed_git, mru);
+ if (fill_pack_entry(sha1, e, p)) {
+ list_move(&p->mru, &packed_git_mru);
return 1;
}
}
@@ -1860,7 +1850,7 @@ int has_pack_index(const unsigned char *sha1)
return 1;
}
-static int for_each_object_in_pack(struct packed_git *p, each_packed_object_fn cb, void *data)
+int for_each_object_in_pack(struct packed_git *p, each_packed_object_fn cb, void *data)
{
uint32_t i;
int r = 0;
@@ -1889,6 +1879,9 @@ int for_each_packed_object(each_packed_object_fn cb, void *data, unsigned flags)
for (p = packed_git; p; p = p->next) {
if ((flags & FOR_EACH_OBJECT_LOCAL_ONLY) && !p->pack_local)
continue;
+ if ((flags & FOR_EACH_OBJECT_PROMISOR_ONLY) &&
+ !p->pack_promisor)
+ continue;
if (open_pack_index(p)) {
pack_errors = 1;
continue;
@@ -1899,3 +1892,61 @@ int for_each_packed_object(each_packed_object_fn cb, void *data, unsigned flags)
}
return r ? r : pack_errors;
}
+
+static int add_promisor_object(const struct object_id *oid,
+ struct packed_git *pack,
+ uint32_t pos,
+ void *set_)
+{
+ struct oidset *set = set_;
+ struct object *obj = parse_object(oid);
+ if (!obj)
+ return 1;
+
+ oidset_insert(set, oid);
+
+ /*
+ * If this is a tree, commit, or tag, the objects it refers
+ * to are also promisor objects. (Blobs refer to no objects.)
+ */
+ if (obj->type == OBJ_TREE) {
+ struct tree *tree = (struct tree *)obj;
+ struct tree_desc desc;
+ struct name_entry entry;
+ if (init_tree_desc_gently(&desc, tree->buffer, tree->size))
+ /*
+ * Error messages are given when packs are
+ * verified, so do not print any here.
+ */
+ return 0;
+ while (tree_entry_gently(&desc, &entry))
+ oidset_insert(set, entry.oid);
+ } else if (obj->type == OBJ_COMMIT) {
+ struct commit *commit = (struct commit *) obj;
+ struct commit_list *parents = commit->parents;
+
+ oidset_insert(set, &commit->tree->object.oid);
+ for (; parents; parents = parents->next)
+ oidset_insert(set, &parents->item->object.oid);
+ } else if (obj->type == OBJ_TAG) {
+ struct tag *tag = (struct tag *) obj;
+ oidset_insert(set, &tag->tagged->oid);
+ }
+ return 0;
+}
+
+int is_promisor_object(const struct object_id *oid)
+{
+ static struct oidset promisor_objects;
+ static int promisor_objects_prepared;
+
+ if (!promisor_objects_prepared) {
+ if (repository_format_partial_clone) {
+ for_each_packed_object(add_promisor_object,
+ &promisor_objects,
+ FOR_EACH_OBJECT_PROMISOR_ONLY);
+ }
+ promisor_objects_prepared = 1;
+ }
+ return oidset_contains(&promisor_objects, oid);
+}
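packfile.c drops the bespoke mru structure in favour of the generic list.h doubly-linked list; packed_git_mru becomes a list head, and each struct packed_git is expected to carry a struct list_head mru member (added elsewhere in this series). Consolidating the scattered hunks above, the MRU walk in find_pack_entry() looks like this (sha1 and e are that function's lookup arguments):

    /* packs are registered once with list_add_tail(&p->mru, &packed_git_mru) */
    struct list_head *pos;

    list_for_each(pos, &packed_git_mru) {
        struct packed_git *p = list_entry(pos, struct packed_git, mru);
        if (fill_pack_entry(sha1, e, p)) {
            /* promote the hit so frequently used packs are tried first */
            list_move(&p->mru, &packed_git_mru);
            return 1;
        }
    }
    return 0;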
diff --git a/packfile.h b/packfile.h
index 0cdeb54..b341f2b 100644
--- a/packfile.h
+++ b/packfile.h
@@ -1,6 +1,8 @@
#ifndef PACKFILE_H
#define PACKFILE_H
+#include "oidset.h"
+
/*
* Generate the filename to be used for a pack file with checksum "sha1" and
* extension "ext". The result is written into the strbuf "buf", overwriting
@@ -61,6 +63,7 @@ extern void close_pack_index(struct packed_git *);
extern unsigned char *use_pack(struct packed_git *, struct pack_window **, off_t, unsigned long *);
extern void close_pack_windows(struct packed_git *);
+extern void close_pack(struct packed_git *);
extern void close_all_packs(void);
extern void unuse_pack(struct pack_window **);
extern void clear_delta_base_cache(void);
@@ -125,6 +128,11 @@ extern int has_sha1_pack(const unsigned char *sha1);
extern int has_pack_index(const unsigned char *sha1);
/*
+ * Only iterate over packs obtained from the promisor remote.
+ */
+#define FOR_EACH_OBJECT_PROMISOR_ONLY 2
+
+/*
* Iterate over packed objects in both the local
* repository and any alternates repositories (unless the
* FOR_EACH_OBJECT_LOCAL_ONLY flag, defined in cache.h, is set).
@@ -133,6 +141,13 @@ typedef int each_packed_object_fn(const struct object_id *oid,
struct packed_git *pack,
uint32_t pos,
void *data);
+extern int for_each_object_in_pack(struct packed_git *p, each_packed_object_fn, void *data);
extern int for_each_packed_object(each_packed_object_fn, void *, unsigned flags);
+/*
+ * Return 1 if an object in a promisor packfile is or refers to the given
+ * object, 0 otherwise.
+ */
+extern int is_promisor_object(const struct object_id *oid);
+
#endif
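packfile.h declares the new promisor helpers: FOR_EACH_OBJECT_PROMISOR_ONLY restricts iteration to packs that came with a .promisor file, and is_promisor_object() answers membership queries against the lazily built oidset shown in packfile.c above. A hedged sketch of a caller, with an illustrative callback name:

    static int count_one(const struct object_id *oid, struct packed_git *pack,
                         uint32_t pos, void *data)
    {
        unsigned int *n = data;
        (*n)++;
        return 0;                       /* non-zero stops the iteration */
    }

    static unsigned int count_promisor_pack_objects(void)
    {
        unsigned int n = 0;
        for_each_packed_object(count_one, &n, FOR_EACH_OBJECT_PROMISOR_ONLY);
        return n;
    }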
diff --git a/parse-options.c b/parse-options.c
index fca7159..d02eb8b 100644
--- a/parse-options.c
+++ b/parse-options.c
@@ -525,7 +525,7 @@ unknown:
int parse_options_end(struct parse_opt_ctx_t *ctx)
{
- memmove(ctx->out + ctx->cpidx, ctx->argv, ctx->argc * sizeof(*ctx->out));
+ MOVE_ARRAY(ctx->out + ctx->cpidx, ctx->argv, ctx->argc);
ctx->out[ctx->cpidx + ctx->argc] = NULL;
return ctx->cpidx + ctx->argc;
}
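The parse-options hunk swaps a hand-written memmove() for MOVE_ARRAY, which takes an element count and checks that source and destination element sizes agree (read-cache.c below gets the same treatment). A tiny sketch with throwaway data:

    int buf[8] = { 1, 2, 3, 4, 5, 6, 7, 8 };

    /* shift elements 2..7 one slot left; the count is in elements, not bytes */
    MOVE_ARRAY(buf + 1, buf + 2, 6);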
diff --git a/perl/.gitignore b/perl/.gitignore
index 0f1fc27..84c048a 100644
--- a/perl/.gitignore
+++ b/perl/.gitignore
@@ -1,8 +1 @@
-perl.mak
-perl.mak.old
-MYMETA.json
-MYMETA.yml
-blib
-blibdirs
-pm_to_blib
-PM.stamp
+/build/
diff --git a/perl/Git.pm b/perl/Git.pm
index 65e6b32..9d60d79 100644
--- a/perl/Git.pm
+++ b/perl/Git.pm
@@ -101,7 +101,7 @@ increase notwithstanding).
use Carp qw(carp croak); # but croak is bad - throw instead
-use Error qw(:try);
+use Git::Error qw(:try);
use Cwd qw(abs_path cwd);
use IPC::Open2 qw(open2);
use Fcntl qw(SEEK_SET SEEK_CUR);
diff --git a/perl/Git/Error.pm b/perl/Git/Error.pm
new file mode 100644
index 0000000..09bbc97
--- /dev/null
+++ b/perl/Git/Error.pm
@@ -0,0 +1,46 @@
+package Git::Error;
+use 5.008;
+use strict;
+use warnings;
+
+=head1 NAME
+
+Git::Error - Wrapper for the L<Error> module, in case it's not installed
+
+=head1 DESCRIPTION
+
+Wraps the import function for the L<Error> module.
+
+This module is only intended to be used for code shipping in the
+C<git.git> repository. Use it for anything else at your peril!
+
+=cut
+
+sub import {
+ shift;
+ my $caller = caller;
+
+ eval {
+ require Error;
+ 1;
+ } or do {
+ my $error = $@ || "Zombie Error";
+
+ my $Git_Error_pm_path = $INC{"Git/Error.pm"} || die "BUG: Should have our own path from %INC!";
+
+ require File::Basename;
+ my $Git_Error_pm_root = File::Basename::dirname($Git_Error_pm_path) || die "BUG: Can't figure out lib/Git dirname from '$Git_Error_pm_path'!";
+
+ require File::Spec;
+ my $Git_pm_FromCPAN_root = File::Spec->catdir($Git_Error_pm_root, 'FromCPAN');
+ die "BUG: '$Git_pm_FromCPAN_root' should be a directory!" unless -d $Git_pm_FromCPAN_root;
+
+ local @INC = ($Git_pm_FromCPAN_root, @INC);
+ require Error;
+ };
+
+ unshift @_, $caller;
+ goto &Error::import;
+}
+
+1;
diff --git a/perl/private-Error.pm b/perl/Git/FromCPAN/Error.pm
index 6098135..6098135 100644
--- a/perl/private-Error.pm
+++ b/perl/Git/FromCPAN/Error.pm
diff --git a/perl/Git/I18N.pm b/perl/Git/I18N.pm
index 836a5c2..dba96ff 100644
--- a/perl/Git/I18N.pm
+++ b/perl/Git/I18N.pm
@@ -18,7 +18,7 @@ our @EXPORT_OK = @EXPORT;
sub __bootstrap_locale_messages {
our $TEXTDOMAIN = 'git';
- our $TEXTDOMAINDIR = $ENV{GIT_TEXTDOMAINDIR} || '++LOCALEDIR++';
+ our $TEXTDOMAINDIR = $ENV{GIT_TEXTDOMAINDIR} || '@@LOCALEDIR@@';
require POSIX;
POSIX->import(qw(setlocale));
diff --git a/perl/Makefile b/perl/Makefile
deleted file mode 100644
index f657de2..0000000
--- a/perl/Makefile
+++ /dev/null
@@ -1,90 +0,0 @@
-#
-# Makefile for perl support modules and routine
-#
-makfile:=perl.mak
-modules =
-
-PERL_PATH_SQ = $(subst ','\'',$(PERL_PATH))
-prefix_SQ = $(subst ','\'',$(prefix))
-localedir_SQ = $(subst ','\'',$(localedir))
-
-ifndef V
- QUIET = @
-endif
-
-all install instlibdir: $(makfile)
- $(QUIET)$(MAKE) -f $(makfile) $@
-
-clean:
- $(QUIET)test -f $(makfile) && $(MAKE) -f $(makfile) $@ || exit 0
- $(RM) ppport.h
- $(RM) $(makfile)
- $(RM) $(makfile).old
- $(RM) PM.stamp
-
-$(makfile): PM.stamp
-
-ifdef NO_PERL_MAKEMAKER
-instdir_SQ = $(subst ','\'',$(prefix)/lib)
-
-modules += Git
-modules += Git/I18N
-modules += Git/IndexInfo
-modules += Git/Packet
-modules += Git/SVN
-modules += Git/SVN/Memoize/YAML
-modules += Git/SVN/Fetcher
-modules += Git/SVN/Editor
-modules += Git/SVN/GlobSpec
-modules += Git/SVN/Log
-modules += Git/SVN/Migration
-modules += Git/SVN/Prompt
-modules += Git/SVN/Ra
-modules += Git/SVN/Utils
-
-$(makfile): ../GIT-CFLAGS Makefile
- echo all: private-Error.pm Git.pm Git/I18N.pm > $@
- set -e; \
- for i in $(modules); \
- do \
- if test $$i = $${i%/*}; \
- then \
- subdir=; \
- else \
- subdir=/$${i%/*}; \
- fi; \
- echo ' $(RM) blib/lib/'$$i'.pm' >> $@; \
- echo ' mkdir -p blib/lib'$$subdir >> $@; \
- echo ' cp '$$i'.pm blib/lib/'$$i'.pm' >> $@; \
- done
- echo ' $(RM) blib/lib/Error.pm' >> $@
- '$(PERL_PATH_SQ)' -MError -e 'exit($$Error::VERSION < 0.15009)' || \
- echo ' cp private-Error.pm blib/lib/Error.pm' >> $@
- echo install: >> $@
- set -e; \
- for i in $(modules); \
- do \
- if test $$i = $${i%/*}; \
- then \
- subdir=; \
- else \
- subdir=/$${i%/*}; \
- fi; \
- echo ' $(RM) "$$(DESTDIR)$(instdir_SQ)/'$$i'.pm"' >> $@; \
- echo ' mkdir -p "$$(DESTDIR)$(instdir_SQ)'$$subdir'"' >> $@; \
- echo ' cp '$$i'.pm "$$(DESTDIR)$(instdir_SQ)/'$$i'.pm"' >> $@; \
- done
- echo ' $(RM) "$$(DESTDIR)$(instdir_SQ)/Error.pm"' >> $@
- '$(PERL_PATH_SQ)' -MError -e 'exit($$Error::VERSION < 0.15009)' || \
- echo ' cp private-Error.pm "$$(DESTDIR)$(instdir_SQ)/Error.pm"' >> $@
- echo instlibdir: >> $@
- echo ' echo $(instdir_SQ)' >> $@
-else
-$(makfile): Makefile.PL ../GIT-CFLAGS
- $(PERL_PATH) $< PREFIX='$(prefix_SQ)' INSTALL_BASE='' --localedir='$(localedir_SQ)'
-endif
-
-# this is just added comfort for calling make directly in perl dir
-# (even though GIT-CFLAGS aren't used yet. If ever)
-../GIT-CFLAGS:
- $(MAKE) -C .. GIT-CFLAGS
diff --git a/perl/Makefile.PL b/perl/Makefile.PL
deleted file mode 100644
index 3f29ba9..0000000
--- a/perl/Makefile.PL
+++ /dev/null
@@ -1,62 +0,0 @@
-use strict;
-use warnings;
-use ExtUtils::MakeMaker;
-use Getopt::Long;
-use File::Find;
-
-# Don't forget to update the perl/Makefile, too.
-# Don't forget to test with NO_PERL_MAKEMAKER=YesPlease
-
-# Sanity: die at first unknown option
-Getopt::Long::Configure qw/ pass_through /;
-
-my $localedir = '';
-GetOptions("localedir=s" => \$localedir);
-
-sub MY::postamble {
- return <<'MAKE_FRAG';
-instlibdir:
- @echo '$(INSTALLSITELIB)'
-
-ifneq (,$(DESTDIR))
-ifeq (0,$(shell expr '$(MM_VERSION)' '>' 6.10))
-$(error ExtUtils::MakeMaker version "$(MM_VERSION)" is older than 6.11 and so \
- is likely incompatible with the DESTDIR mechanism. Try setting \
- NO_PERL_MAKEMAKER=1 instead)
-endif
-endif
-
-MAKE_FRAG
-}
-
-# Find all the .pm files in "Git/" and Git.pm
-my %pm;
-find sub {
- return unless /\.pm$/;
-
- # sometimes File::Find prepends a ./ Strip it.
- my $pm_path = $File::Find::name;
- $pm_path =~ s{^\./}{};
-
- $pm{$pm_path} = '$(INST_LIBDIR)/'.$pm_path;
-}, "Git", "Git.pm";
-
-
-# We come with our own bundled Error.pm. It's not in the set of default
-# Perl modules so install it if it's not available on the system yet.
-if ( !eval { require Error } || $Error::VERSION < 0.15009) {
- $pm{'private-Error.pm'} = '$(INST_LIBDIR)/Error.pm';
-}
-
-# redirect stdout, otherwise the message "Writing perl.mak for Git"
-# disrupts the output for the target 'instlibdir'
-open STDOUT, ">&STDERR";
-
-WriteMakefile(
- NAME => 'Git',
- VERSION_FROM => 'Git.pm',
- PM => \%pm,
- PM_FILTER => qq[\$(PERL) -pe "s<\\Q++LOCALEDIR++\\E><$localedir>"],
- MAKEFILE => 'perl.mak',
- INSTALLSITEMAN3DIR => '$(SITEPREFIX)/share/man/man3'
-);
diff --git a/preload-index.c b/preload-index.c
index 2a83255..4d08d44 100644
--- a/preload-index.c
+++ b/preload-index.c
@@ -78,6 +78,7 @@ static void preload_index(struct index_state *index,
{
int threads, i, work, offset;
struct thread_data data[MAX_PARALLEL];
+ uint64_t start = getnanotime();
if (!core_preload_index)
return;
@@ -108,6 +109,7 @@ static void preload_index(struct index_state *index,
if (pthread_join(p->pthread, NULL))
die("unable to join threaded lstat");
}
+ trace_performance_since(start, "preload index");
}
#endif
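preload-index.c (and read-cache.c below) gain simple timing probes. The pattern is two lines around the work being measured; the message is reported when performance tracing is enabled via GIT_TRACE_PERFORMANCE:

    uint64_t start = getnanotime();

    /* ... the work being measured ... */

    trace_performance_since(start, "preload index");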
diff --git a/quote.c b/quote.c
index de2922d..37d2686 100644
--- a/quote.c
+++ b/quote.c
@@ -43,6 +43,22 @@ void sq_quote_buf(struct strbuf *dst, const char *src)
free(to_free);
}
+void sq_quote_buf_pretty(struct strbuf *dst, const char *src)
+{
+ static const char ok_punct[] = "+,-./:=@_^";
+ const char *p;
+
+ for (p = src; *p; p++) {
+ if (!isalpha(*p) && !isdigit(*p) && !strchr(ok_punct, *p)) {
+ sq_quote_buf(dst, src);
+ return;
+ }
+ }
+
+ /* if we get here, we did not need quoting */
+ strbuf_addstr(dst, src);
+}
+
void sq_quotef(struct strbuf *dst, const char *fmt, ...)
{
struct strbuf src = STRBUF_INIT;
@@ -56,7 +72,7 @@ void sq_quotef(struct strbuf *dst, const char *fmt, ...)
strbuf_release(&src);
}
-void sq_quote_argv(struct strbuf *dst, const char** argv, size_t maxlen)
+void sq_quote_argv(struct strbuf *dst, const char **argv)
{
int i;
@@ -65,8 +81,16 @@ void sq_quote_argv(struct strbuf *dst, const char** argv, size_t maxlen)
for (i = 0; argv[i]; ++i) {
strbuf_addch(dst, ' ');
sq_quote_buf(dst, argv[i]);
- if (maxlen && dst->len > maxlen)
- die("Too many or long arguments");
+ }
+}
+
+void sq_quote_argv_pretty(struct strbuf *dst, const char **argv)
+{
+ int i;
+
+ for (i = 0; argv[i]; i++) {
+ strbuf_addch(dst, ' ');
+ sq_quote_buf_pretty(dst, argv[i]);
}
}
diff --git a/quote.h b/quote.h
index 66f5644..ea992dc 100644
--- a/quote.h
+++ b/quote.h
@@ -30,9 +30,17 @@ struct strbuf;
*/
extern void sq_quote_buf(struct strbuf *, const char *src);
-extern void sq_quote_argv(struct strbuf *, const char **argv, size_t maxlen);
+extern void sq_quote_argv(struct strbuf *, const char **argv);
extern void sq_quotef(struct strbuf *, const char *fmt, ...);
+/*
+ * These match their non-pretty variants, except that they avoid
+ * quoting when there are no exotic characters. These should only be used for
+ * human-readable output, as sq_dequote() is not smart enough to dequote it.
+ */
+void sq_quote_buf_pretty(struct strbuf *, const char *src);
+void sq_quote_argv_pretty(struct strbuf *, const char **argv);
+
/* This unwraps what sq_quote() produces in place, but returns
* NULL if the input does not look like what sq_quote would have
* produced.
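The comment above introduces the _pretty variants, which leave a word unquoted when it contains only "safe" characters. A hedged before/after sketch (the expected shapes in the comments follow from the ok_punct rule in quote.c above; they are not captured output):

    struct strbuf plain = STRBUF_INIT, pretty = STRBUF_INIT;
    const char *argv[] = { "git", "log", "--oneline", "my file.txt", NULL };

    sq_quote_argv(&plain, argv);          /* every word ends up quoted */
    sq_quote_argv_pretty(&pretty, argv);  /* only "my file.txt" needs quotes */

    /* pretty output is for display only; sq_dequote() cannot round-trip it */
    strbuf_release(&plain);
    strbuf_release(&pretty);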
diff --git a/read-cache.c b/read-cache.c
index b22668b..977921d 100644
--- a/read-cache.c
+++ b/read-cache.c
@@ -631,10 +631,10 @@ static struct cache_entry *create_alias_ce(struct index_state *istate,
void set_object_name_for_intent_to_add_entry(struct cache_entry *ce)
{
- unsigned char sha1[20];
- if (write_sha1_file("", 0, blob_type, sha1))
+ struct object_id oid;
+ if (write_object_file("", 0, blob_type, &oid))
die("cannot create an empty blob in the object database");
- hashcpy(ce->oid.hash, sha1);
+ oidcpy(&ce->oid, &oid);
}
int add_to_index(struct index_state *istate, const char *path, struct stat *st, int flags)
@@ -1217,9 +1217,8 @@ int add_index_entry(struct index_state *istate, struct cache_entry *ce, int opti
/* Add it in.. */
istate->cache_nr++;
if (istate->cache_nr > pos + 1)
- memmove(istate->cache + pos + 1,
- istate->cache + pos,
- (istate->cache_nr - pos - 1) * sizeof(ce));
+ MOVE_ARRAY(istate->cache + pos + 1, istate->cache + pos,
+ istate->cache_nr - pos - 1);
set_index_entry(istate, pos, ce);
istate->cache_changed |= CE_ENTRY_ADDED;
return 0;
@@ -1372,6 +1371,7 @@ int refresh_index(struct index_state *istate, unsigned int flags,
const char *typechange_fmt;
const char *added_fmt;
const char *unmerged_fmt;
+ uint64_t start = getnanotime();
modified_fmt = (in_porcelain ? "M\t%s\n" : "%s: needs update\n");
deleted_fmt = (in_porcelain ? "D\t%s\n" : "%s: needs update\n");
@@ -1442,6 +1442,7 @@ int refresh_index(struct index_state *istate, unsigned int flags,
replace_index_entry(istate, i, new_entry);
}
+ trace_performance_since(start, "refresh index");
return has_errors;
}
@@ -1545,8 +1546,8 @@ int verify_ce_order;
static int verify_hdr(struct cache_header *hdr, unsigned long size)
{
- git_SHA_CTX c;
- unsigned char sha1[20];
+ git_hash_ctx c;
+ unsigned char hash[GIT_MAX_RAWSZ];
int hdr_version;
if (hdr->hdr_signature != htonl(CACHE_SIGNATURE))
@@ -1558,10 +1559,10 @@ static int verify_hdr(struct cache_header *hdr, unsigned long size)
if (!verify_index_checksum)
return 0;
- git_SHA1_Init(&c);
- git_SHA1_Update(&c, hdr, size - 20);
- git_SHA1_Final(sha1, &c);
- if (hashcmp(sha1, (unsigned char *)hdr + size - 20))
+ the_hash_algo->init_fn(&c);
+ the_hash_algo->update_fn(&c, hdr, size - the_hash_algo->rawsz);
+ the_hash_algo->final_fn(hash, &c);
+ if (hashcmp(hash, (unsigned char *)hdr + size - the_hash_algo->rawsz))
return error("bad index file sha1 signature");
return 0;
}
@@ -1603,7 +1604,7 @@ int hold_locked_index(struct lock_file *lk, int lock_flags)
int read_index(struct index_state *istate)
{
- return read_index_from(istate, get_index_file());
+ return read_index_from(istate, get_index_file(), get_git_dir());
}
static struct cache_entry *cache_entry_from_ondisk(struct ondisk_cache_entry *ondisk,
@@ -1791,7 +1792,7 @@ int do_read_index(struct index_state *istate, const char *path, int must_exist)
die_errno("cannot stat the open index");
mmap_size = xsize_t(st.st_size);
- if (mmap_size < sizeof(struct cache_header) + 20)
+ if (mmap_size < sizeof(struct cache_header) + the_hash_algo->rawsz)
die("index file smaller than expected");
mmap = xmmap(NULL, mmap_size, PROT_READ, MAP_PRIVATE, fd, 0);
@@ -1803,7 +1804,7 @@ int do_read_index(struct index_state *istate, const char *path, int must_exist)
if (verify_hdr(hdr, mmap_size) < 0)
goto unmap;
- hashcpy(istate->sha1, (const unsigned char *)hdr + mmap_size - 20);
+ hashcpy(istate->sha1, (const unsigned char *)hdr + mmap_size - the_hash_algo->rawsz);
istate->version = ntohl(hdr->hdr_version);
istate->cache_nr = ntohl(hdr->hdr_entries);
istate->cache_alloc = alloc_nr(istate->cache_nr);
@@ -1831,7 +1832,7 @@ int do_read_index(struct index_state *istate, const char *path, int must_exist)
istate->timestamp.sec = st.st_mtime;
istate->timestamp.nsec = ST_MTIME_NSEC(st);
- while (src_offset <= mmap_size - 20 - 8) {
+ while (src_offset <= mmap_size - the_hash_algo->rawsz - 8) {
/* After an array of active_nr index entries,
* there can be arbitrary number of extended
* sections, each of which is prefixed with
@@ -1863,26 +1864,27 @@ unmap:
* This way, shared index can be removed if they have not been used
* for some time.
*/
-static void freshen_shared_index(char *base_sha1_hex, int warn)
+static void freshen_shared_index(const char *shared_index, int warn)
{
- char *shared_index = git_pathdup("sharedindex.%s", base_sha1_hex);
if (!check_and_freshen_file(shared_index, 1) && warn)
warning("could not freshen shared index '%s'", shared_index);
- free(shared_index);
}
-int read_index_from(struct index_state *istate, const char *path)
+int read_index_from(struct index_state *istate, const char *path,
+ const char *gitdir)
{
+ uint64_t start = getnanotime();
struct split_index *split_index;
int ret;
char *base_sha1_hex;
- const char *base_path;
+ char *base_path;
/* istate->initialized covers both .git/index and .git/sharedindex.xxx */
if (istate->initialized)
return istate->cache_nr;
ret = do_read_index(istate, path, 0);
+ trace_performance_since(start, "read cache %s", path);
split_index = istate->split_index;
if (!split_index || is_null_sha1(split_index->base_sha1)) {
@@ -1896,16 +1898,18 @@ int read_index_from(struct index_state *istate, const char *path)
split_index->base = xcalloc(1, sizeof(*split_index->base));
base_sha1_hex = sha1_to_hex(split_index->base_sha1);
- base_path = git_path("sharedindex.%s", base_sha1_hex);
+ base_path = xstrfmt("%s/sharedindex.%s", gitdir, base_sha1_hex);
ret = do_read_index(split_index->base, base_path, 1);
if (hashcmp(split_index->base_sha1, split_index->base->sha1))
die("broken index, expect %s in %s, got %s",
base_sha1_hex, base_path,
sha1_to_hex(split_index->base->sha1));
- freshen_shared_index(base_sha1_hex, 0);
+ freshen_shared_index(base_path, 0);
merge_base_index(istate);
post_read_index_from(istate);
+ trace_performance_since(start, "read cache %s", base_path);
+ free(base_path);
return ret;
}
@@ -1957,11 +1961,11 @@ int unmerged_index(const struct index_state *istate)
static unsigned char write_buffer[WRITE_BUFFER_SIZE];
static unsigned long write_buffer_len;
-static int ce_write_flush(git_SHA_CTX *context, int fd)
+static int ce_write_flush(git_hash_ctx *context, int fd)
{
unsigned int buffered = write_buffer_len;
if (buffered) {
- git_SHA1_Update(context, write_buffer, buffered);
+ the_hash_algo->update_fn(context, write_buffer, buffered);
if (write_in_full(fd, write_buffer, buffered) < 0)
return -1;
write_buffer_len = 0;
@@ -1969,7 +1973,7 @@ static int ce_write_flush(git_SHA_CTX *context, int fd)
return 0;
}
-static int ce_write(git_SHA_CTX *context, int fd, void *data, unsigned int len)
+static int ce_write(git_hash_ctx *context, int fd, void *data, unsigned int len)
{
while (len) {
unsigned int buffered = write_buffer_len;
@@ -1991,7 +1995,7 @@ static int ce_write(git_SHA_CTX *context, int fd, void *data, unsigned int len)
return 0;
}
-static int write_index_ext_header(git_SHA_CTX *context, int fd,
+static int write_index_ext_header(git_hash_ctx *context, int fd,
unsigned int ext, unsigned int sz)
{
ext = htonl(ext);
@@ -2000,26 +2004,26 @@ static int write_index_ext_header(git_SHA_CTX *context, int fd,
(ce_write(context, fd, &sz, 4) < 0)) ? -1 : 0;
}
-static int ce_flush(git_SHA_CTX *context, int fd, unsigned char *sha1)
+static int ce_flush(git_hash_ctx *context, int fd, unsigned char *hash)
{
unsigned int left = write_buffer_len;
if (left) {
write_buffer_len = 0;
- git_SHA1_Update(context, write_buffer, left);
+ the_hash_algo->update_fn(context, write_buffer, left);
}
- /* Flush first if not enough space for SHA1 signature */
- if (left + 20 > WRITE_BUFFER_SIZE) {
+ /* Flush first if not enough space for hash signature */
+ if (left + the_hash_algo->rawsz > WRITE_BUFFER_SIZE) {
if (write_in_full(fd, write_buffer, left) < 0)
return -1;
left = 0;
}
- /* Append the SHA1 signature at the end */
- git_SHA1_Final(write_buffer + left, context);
- hashcpy(sha1, write_buffer + left);
- left += 20;
+ /* Append the hash signature at the end */
+ the_hash_algo->final_fn(write_buffer + left, context);
+ hashcpy(hash, write_buffer + left);
+ left += the_hash_algo->rawsz;
return (write_in_full(fd, write_buffer, left) < 0) ? -1 : 0;
}
@@ -2100,7 +2104,7 @@ static void copy_cache_entry_to_ondisk(struct ondisk_cache_entry *ondisk,
}
}
-static int ce_write_entry(git_SHA_CTX *c, int fd, struct cache_entry *ce,
+static int ce_write_entry(git_hash_ctx *c, int fd, struct cache_entry *ce,
struct strbuf *previous_name, struct ondisk_cache_entry *ondisk)
{
int size;
@@ -2167,7 +2171,7 @@ static int verify_index_from(const struct index_state *istate, const char *path)
int fd;
ssize_t n;
struct stat st;
- unsigned char sha1[20];
+ unsigned char hash[GIT_MAX_RAWSZ];
if (!istate->initialized)
return 0;
@@ -2179,14 +2183,14 @@ static int verify_index_from(const struct index_state *istate, const char *path)
if (fstat(fd, &st))
goto out;
- if (st.st_size < sizeof(struct cache_header) + 20)
+ if (st.st_size < sizeof(struct cache_header) + the_hash_algo->rawsz)
goto out;
- n = pread_in_full(fd, sha1, 20, st.st_size - 20);
- if (n != 20)
+ n = pread_in_full(fd, hash, the_hash_algo->rawsz, st.st_size - the_hash_algo->rawsz);
+ if (n != the_hash_algo->rawsz)
goto out;
- if (hashcmp(istate->sha1, sha1))
+ if (hashcmp(istate->sha1, hash))
goto out;
close(fd);
@@ -2234,8 +2238,9 @@ void update_index_if_able(struct index_state *istate, struct lock_file *lockfile
static int do_write_index(struct index_state *istate, struct tempfile *tempfile,
int strip_extensions)
{
+ uint64_t start = getnanotime();
int newfd = tempfile->fd;
- git_SHA_CTX c;
+ git_hash_ctx c;
struct cache_header hdr;
int i, err = 0, removed, extended, hdr_version;
struct cache_entry **cache = istate->cache;
@@ -2243,7 +2248,7 @@ static int do_write_index(struct index_state *istate, struct tempfile *tempfile,
struct stat st;
struct ondisk_cache_entry_extended ondisk;
struct strbuf previous_name_buf = STRBUF_INIT, *previous_name;
- int drop_cache_tree = 0;
+ int drop_cache_tree = istate->drop_cache_tree;
for (i = removed = extended = 0; i < entries; i++) {
if (cache[i]->ce_flags & CE_REMOVE)
@@ -2273,7 +2278,7 @@ static int do_write_index(struct index_state *istate, struct tempfile *tempfile,
hdr.hdr_version = htonl(hdr_version);
hdr.hdr_entries = htonl(entries - removed);
- git_SHA1_Init(&c);
+ the_hash_algo->init_fn(&c);
if (ce_write(&c, newfd, &hdr, sizeof(hdr)) < 0)
return -1;
@@ -2374,6 +2379,7 @@ static int do_write_index(struct index_state *istate, struct tempfile *tempfile,
return -1;
istate->timestamp.sec = (unsigned int)st.st_mtime;
istate->timestamp.nsec = ST_MTIME_NSEC(st);
+ trace_performance_since(start, "write index, changed mask = %x", istate->cache_changed);
return 0;
}
@@ -2472,32 +2478,21 @@ static int clean_shared_index_files(const char *current_hex)
}
static int write_shared_index(struct index_state *istate,
- struct lock_file *lock, unsigned flags)
+ struct tempfile **temp)
{
- struct tempfile *temp;
struct split_index *si = istate->split_index;
int ret;
- temp = mks_tempfile(git_path("sharedindex_XXXXXX"));
- if (!temp) {
- hashclr(si->base_sha1);
- return do_write_locked_index(istate, lock, flags);
- }
move_cache_to_base_index(istate);
- ret = do_write_index(si->base, temp, 1);
- if (ret) {
- delete_tempfile(&temp);
+ ret = do_write_index(si->base, *temp, 1);
+ if (ret)
return ret;
- }
- ret = adjust_shared_perm(get_tempfile_path(temp));
+ ret = adjust_shared_perm(get_tempfile_path(*temp));
if (ret) {
- int save_errno = errno;
- error("cannot fix permission bits on %s", get_tempfile_path(temp));
- delete_tempfile(&temp);
- errno = save_errno;
+ error("cannot fix permission bits on %s", get_tempfile_path(*temp));
return ret;
}
- ret = rename_tempfile(&temp,
+ ret = rename_tempfile(temp,
git_path("sharedindex.%s", sha1_to_hex(si->base->sha1)));
if (!ret) {
hashcpy(si->base_sha1, si->base->sha1);
@@ -2565,7 +2560,22 @@ int write_locked_index(struct index_state *istate, struct lock_file *lock,
new_shared_index = istate->cache_changed & SPLIT_INDEX_ORDERED;
if (new_shared_index) {
- ret = write_shared_index(istate, lock, flags);
+ struct tempfile *temp;
+ int saved_errno;
+
+ temp = mks_tempfile(git_path("sharedindex_XXXXXX"));
+ if (!temp) {
+ hashclr(si->base_sha1);
+ ret = do_write_locked_index(istate, lock, flags);
+ goto out;
+ }
+ ret = write_shared_index(istate, &temp);
+
+ saved_errno = errno;
+ if (is_tempfile_active(temp))
+ delete_tempfile(&temp);
+ errno = saved_errno;
+
if (ret)
goto out;
}
@@ -2573,8 +2583,11 @@ int write_locked_index(struct index_state *istate, struct lock_file *lock,
ret = write_split_index(istate, lock, flags);
/* Freshen the shared index only if the split-index was written */
- if (!ret && !new_shared_index)
- freshen_shared_index(sha1_to_hex(si->base_sha1), 1);
+ if (!ret && !new_shared_index) {
+ const char *shared_index = git_path("sharedindex.%s",
+ sha1_to_hex(si->base_sha1));
+ freshen_shared_index(shared_index, 1);
+ }
out:
if (flags & COMMIT_LOCK)
diff --git a/refs/files-backend.c b/refs/files-backend.c
index f75d960..bec8e30 100644
--- a/refs/files-backend.c
+++ b/refs/files-backend.c
@@ -2931,13 +2931,12 @@ static int files_initial_transaction_commit(struct ref_store *ref_store,
if (initial_ref_transaction_commit(packed_transaction, err)) {
ret = TRANSACTION_GENERIC_ERROR;
- goto cleanup;
}
+ packed_refs_unlock(refs->packed_ref_store);
cleanup:
if (packed_transaction)
ref_transaction_free(packed_transaction);
- packed_refs_unlock(refs->packed_ref_store);
transaction->state = REF_TRANSACTION_CLOSED;
string_list_clear(&affected_refnames, 0);
return ret;
diff --git a/refs/packed-backend.c b/refs/packed-backend.c
index 023243f..65288c6 100644
--- a/refs/packed-backend.c
+++ b/refs/packed-backend.c
@@ -68,17 +68,21 @@ struct snapshot {
int mmapped;
/*
- * The contents of the `packed-refs` file. If the file was
- * already sorted, this points at the mmapped contents of the
- * file. If not, this points at heap-allocated memory
- * containing the contents, sorted. If there were no contents
- * (e.g., because the file didn't exist), `buf` and `eof` are
- * both NULL.
+ * The contents of the `packed-refs` file:
+ *
+ * - buf -- a pointer to the start of the memory
+ * - start -- a pointer to the first byte of actual references
+ * (i.e., after the header line, if one is present)
+ * - eof -- a pointer just past the end of the reference
+ * contents
+ *
+ * If the `packed-refs` file was already sorted, `buf` points
+ * at the mmapped contents of the file. If not, it points at
+ * heap-allocated memory containing the contents, sorted. If
+ * there were no contents (e.g., because the file didn't
+ * exist), `buf`, `start`, and `eof` are all NULL.
*/
- char *buf, *eof;
-
- /* The size of the header line, if any; otherwise, 0: */
- size_t header_len;
+ char *buf, *start, *eof;
/*
* What is the peeled state of the `packed-refs` file that
@@ -169,8 +173,7 @@ static void clear_snapshot_buffer(struct snapshot *snapshot)
} else {
free(snapshot->buf);
}
- snapshot->buf = snapshot->eof = NULL;
- snapshot->header_len = 0;
+ snapshot->buf = snapshot->start = snapshot->eof = NULL;
}
/*
@@ -319,13 +322,14 @@ static void sort_snapshot(struct snapshot *snapshot)
size_t len, i;
char *new_buffer, *dst;
- pos = snapshot->buf + snapshot->header_len;
+ pos = snapshot->start;
eof = snapshot->eof;
- len = eof - pos;
- if (!len)
+ if (pos == eof)
return;
+ len = eof - pos;
+
/*
* Initialize records based on a crude estimate of the number
* of references in the file (we'll grow it below if needed):
@@ -391,9 +395,8 @@ static void sort_snapshot(struct snapshot *snapshot)
* place:
*/
clear_snapshot_buffer(snapshot);
- snapshot->buf = new_buffer;
+ snapshot->buf = snapshot->start = new_buffer;
snapshot->eof = new_buffer + len;
- snapshot->header_len = 0;
cleanup:
free(records);
@@ -442,23 +445,26 @@ static const char *find_end_of_record(const char *p, const char *end)
*/
static void verify_buffer_safe(struct snapshot *snapshot)
{
- const char *buf = snapshot->buf + snapshot->header_len;
+ const char *start = snapshot->start;
const char *eof = snapshot->eof;
const char *last_line;
- if (buf == eof)
+ if (start == eof)
return;
- last_line = find_start_of_record(buf, eof - 1);
+ last_line = find_start_of_record(start, eof - 1);
if (*(eof - 1) != '\n' || eof - last_line < GIT_SHA1_HEXSZ + 2)
die_invalid_line(snapshot->refs->path,
last_line, eof - last_line);
}
+#define SMALL_FILE_SIZE (32*1024)
+
/*
* Depending on `mmap_strategy`, either mmap or read the contents of
* the `packed-refs` file into the snapshot. Return 1 if the file
- * existed and was read, or 0 if the file was absent. Die on errors.
+ * existed and was read, or 0 if the file was absent or empty. Die on
+ * errors.
*/
static int load_contents(struct snapshot *snapshot)
{
@@ -489,24 +495,23 @@ static int load_contents(struct snapshot *snapshot)
die_errno("couldn't stat %s", snapshot->refs->path);
size = xsize_t(st.st_size);
- switch (mmap_strategy) {
- case MMAP_NONE:
+ if (!size) {
+ return 0;
+ } else if (mmap_strategy == MMAP_NONE || size <= SMALL_FILE_SIZE) {
snapshot->buf = xmalloc(size);
bytes_read = read_in_full(fd, snapshot->buf, size);
if (bytes_read < 0 || bytes_read != size)
die_errno("couldn't read %s", snapshot->refs->path);
- snapshot->eof = snapshot->buf + size;
snapshot->mmapped = 0;
- break;
- case MMAP_TEMPORARY:
- case MMAP_OK:
+ } else {
snapshot->buf = xmmap(NULL, size, PROT_READ, MAP_PRIVATE, fd, 0);
- snapshot->eof = snapshot->buf + size;
snapshot->mmapped = 1;
- break;
}
close(fd);
+ snapshot->start = snapshot->buf;
+ snapshot->eof = snapshot->buf + size;
+
return 1;
}
@@ -515,9 +520,11 @@ static int load_contents(struct snapshot *snapshot)
* `refname` starts. If `mustexist` is true and the reference doesn't
* exist, then return NULL. If `mustexist` is false and the reference
* doesn't exist, then return the point where that reference would be
- * inserted. In the latter mode, `refname` doesn't have to be a proper
- * reference name; for example, one could search for "refs/replace/"
- * to find the start of any replace references.
+ * inserted, or `snapshot->eof` (which might be NULL) if it would be
+ * inserted at the end of the file. In the latter mode, `refname`
+ * doesn't have to be a proper reference name; for example, one could
+ * search for "refs/replace/" to find the start of any replace
+ * references.
*
* The record is sought using a binary search, so `snapshot->buf` must
* be sorted.
@@ -539,7 +546,7 @@ static const char *find_reference_location(struct snapshot *snapshot,
* preceding records all have reference names that come
* *before* `refname`.
*/
- const char *lo = snapshot->buf + snapshot->header_len;
+ const char *lo = snapshot->start;
/*
* A pointer to the first character of a record whose
@@ -547,7 +554,7 @@ static const char *find_reference_location(struct snapshot *snapshot,
*/
const char *hi = snapshot->eof;
- while (lo < hi) {
+ while (lo != hi) {
const char *mid, *rec;
int cmp;
@@ -616,9 +623,7 @@ static struct snapshot *create_snapshot(struct packed_ref_store *refs)
/* If the file has a header line, process it: */
if (snapshot->buf < snapshot->eof && *snapshot->buf == '#') {
- struct strbuf tmp = STRBUF_INIT;
- char *p;
- const char *eol;
+ char *tmp, *p, *eol;
struct string_list traits = STRING_LIST_INIT_NODUP;
eol = memchr(snapshot->buf, '\n',
@@ -628,9 +633,9 @@ static struct snapshot *create_snapshot(struct packed_ref_store *refs)
snapshot->buf,
snapshot->eof - snapshot->buf);
- strbuf_add(&tmp, snapshot->buf, eol - snapshot->buf);
+ tmp = xmemdupz(snapshot->buf, eol - snapshot->buf);
- if (!skip_prefix(tmp.buf, "# pack-refs with:", (const char **)&p))
+ if (!skip_prefix(tmp, "# pack-refs with:", (const char **)&p))
die_invalid_line(refs->path,
snapshot->buf,
snapshot->eof - snapshot->buf);
@@ -647,10 +652,10 @@ static struct snapshot *create_snapshot(struct packed_ref_store *refs)
/* perhaps other traits later as well */
/* The "+ 1" is for the LF character. */
- snapshot->header_len = eol + 1 - snapshot->buf;
+ snapshot->start = eol + 1;
string_list_clear(&traits, 0);
- strbuf_release(&tmp);
+ free(tmp);
}
verify_buffer_safe(snapshot);
@@ -671,13 +676,12 @@ static struct snapshot *create_snapshot(struct packed_ref_store *refs)
* We don't want to leave the file mmapped, so we are
* forced to make a copy now:
*/
- size_t size = snapshot->eof -
- (snapshot->buf + snapshot->header_len);
+ size_t size = snapshot->eof - snapshot->start;
char *buf_copy = xmalloc(size);
- memcpy(buf_copy, snapshot->buf + snapshot->header_len, size);
+ memcpy(buf_copy, snapshot->start, size);
clear_snapshot_buffer(snapshot);
- snapshot->buf = buf_copy;
+ snapshot->buf = snapshot->start = buf_copy;
snapshot->eof = buf_copy + size;
}
@@ -924,7 +928,12 @@ static struct ref_iterator *packed_ref_iterator_begin(
*/
snapshot = get_snapshot(refs);
- if (!snapshot->buf)
+ if (prefix && *prefix)
+ start = find_reference_location(snapshot, prefix, 0);
+ else
+ start = snapshot->start;
+
+ if (start == snapshot->eof)
return empty_ref_iterator_begin();
iter = xcalloc(1, sizeof(*iter));
@@ -934,11 +943,6 @@ static struct ref_iterator *packed_ref_iterator_begin(
iter->snapshot = snapshot;
acquire_snapshot(snapshot);
- if (prefix && *prefix)
- start = find_reference_location(snapshot, prefix, 0);
- else
- start = snapshot->buf + snapshot->header_len;
-
iter->pos = start;
iter->eof = snapshot->eof;
strbuf_init(&iter->refname_buf, 0);
diff --git a/refs/ref-cache.c b/refs/ref-cache.c
index 82c1cf9..e90bd3e 100644
--- a/refs/ref-cache.c
+++ b/refs/ref-cache.c
@@ -238,10 +238,8 @@ int remove_entry_from_dir(struct ref_dir *dir, const char *refname)
return -1;
entry = dir->entries[entry_index];
- memmove(&dir->entries[entry_index],
- &dir->entries[entry_index + 1],
- (dir->nr - entry_index - 1) * sizeof(*dir->entries)
- );
+ MOVE_ARRAY(&dir->entries[entry_index],
+ &dir->entries[entry_index + 1], dir->nr - entry_index - 1);
dir->nr--;
if (dir->sorted > entry_index)
dir->sorted--;
diff --git a/remote-curl.c b/remote-curl.c
index 0053b09..6ec5352 100644
--- a/remote-curl.c
+++ b/remote-curl.c
@@ -24,6 +24,7 @@ struct options {
char *deepen_since;
struct string_list deepen_not;
struct string_list push_options;
+ char *filter;
unsigned progress : 1,
check_self_contained_and_connected : 1,
cloning : 1,
@@ -33,7 +34,9 @@ struct options {
thin : 1,
/* One of the SEND_PACK_PUSH_CERT_* constants. */
push_cert : 2,
- deepen_relative : 1;
+ deepen_relative : 1,
+ from_promisor : 1,
+ no_dependents : 1;
};
static struct options options;
static struct string_list cas_options = STRING_LIST_INIT_DUP;
@@ -157,6 +160,15 @@ static int set_option(const char *name, const char *value)
return -1;
return 0;
#endif /* LIBCURL_VERSION_NUM >= 0x070a08 */
+ } else if (!strcmp(name, "from-promisor")) {
+ options.from_promisor = 1;
+ return 0;
+ } else if (!strcmp(name, "no-dependents")) {
+ options.no_dependents = 1;
+ return 0;
+ } else if (!strcmp(name, "filter")) {
+ options.filter = xstrdup(value);
+ return 0;
} else {
return 1 /* unsupported */;
}
@@ -822,6 +834,12 @@ static int fetch_git(struct discovery *heads,
options.deepen_not.items[i].string);
if (options.deepen_relative && options.depth)
argv_array_push(&args, "--deepen-relative");
+ if (options.from_promisor)
+ argv_array_push(&args, "--from-promisor");
+ if (options.no_dependents)
+ argv_array_push(&args, "--no-dependents");
+ if (options.filter)
+ argv_array_pushf(&args, "--filter=%s", options.filter);
argv_array_push(&args, url.buf);
for (i = 0; i < nr_heads; i++) {
diff --git a/replace_object.c b/replace_object.c
index f0b39f0..3e49965 100644
--- a/replace_object.c
+++ b/replace_object.c
@@ -44,10 +44,8 @@ static int register_replace_object(struct replace_object *replace,
ALLOC_GROW(replace_object, replace_object_nr + 1, replace_object_alloc);
replace_object_nr++;
if (pos < replace_object_nr)
- memmove(replace_object + pos + 1,
- replace_object + pos,
- (replace_object_nr - pos - 1) *
- sizeof(*replace_object));
+ MOVE_ARRAY(replace_object + pos + 1, replace_object + pos,
+ replace_object_nr - pos - 1);
replace_object[pos] = replace;
return 0;
}
diff --git a/repository.c b/repository.c
index f66fcb1..4ffbe9b 100644
--- a/repository.c
+++ b/repository.c
@@ -236,5 +236,5 @@ int repo_read_index(struct repository *repo)
if (!repo->index)
repo->index = xcalloc(1, sizeof(*repo->index));
- return read_index_from(repo->index, repo->index_file);
+ return read_index_from(repo->index, repo->index_file, repo->gitdir);
}
diff --git a/rerere.c b/rerere.c
index 1ce440f..79203c6 100644
--- a/rerere.c
+++ b/rerere.c
@@ -159,8 +159,8 @@ static struct rerere_dir *find_rerere_dir(const char *hex)
ALLOC_GROW(rerere_dir, rerere_dir_nr + 1, rerere_dir_alloc);
/* ... and add it in. */
rerere_dir_nr++;
- memmove(rerere_dir + pos + 1, rerere_dir + pos,
- (rerere_dir_nr - pos - 1) * sizeof(*rerere_dir));
+ MOVE_ARRAY(rerere_dir + pos + 1, rerere_dir + pos,
+ rerere_dir_nr - pos - 1);
rerere_dir[pos] = rr_dir;
scan_rerere_dir(rr_dir);
}
diff --git a/revision.c b/revision.c
index a60628f..5c1cb72 100644
--- a/revision.c
+++ b/revision.c
@@ -198,6 +198,8 @@ static struct object *get_reference(struct rev_info *revs, const char *name,
if (!object) {
if (revs->ignore_missing)
return object;
+ if (revs->exclude_promisor_objects && is_promisor_object(oid))
+ return NULL;
die("bad object %s", name);
}
object->flags |= flags;
@@ -799,9 +801,17 @@ static int add_parents_to_list(struct rev_info *revs, struct commit *commit,
for (parent = commit->parents; parent; parent = parent->next) {
struct commit *p = parent->item;
-
- if (parse_commit_gently(p, revs->ignore_missing_links) < 0)
+ int gently = revs->ignore_missing_links ||
+ revs->exclude_promisor_objects;
+ if (parse_commit_gently(p, gently) < 0) {
+ if (revs->exclude_promisor_objects &&
+ is_promisor_object(&p->object.oid)) {
+ if (revs->first_parent_only)
+ break;
+ continue;
+ }
return -1;
+ }
if (revs->show_source && !p->util)
p->util = commit->util;
p->object.flags |= left_flag;
@@ -1055,14 +1065,9 @@ static int limit_list(struct rev_info *revs)
return -1;
if (obj->flags & UNINTERESTING) {
mark_parents_uninteresting(commit);
- if (revs->show_all)
- p = &commit_list_insert(commit, p)->next;
slop = still_interesting(list, date, slop, &interesting_cache);
if (slop)
continue;
- /* If showing all, add the whole pending list to the end */
- if (revs->show_all)
- *p = list;
break;
}
if (revs->min_age != -1 && (commit->date > revs->min_age))
@@ -1352,7 +1357,8 @@ void add_index_objects_to_pending(struct rev_info *revs, unsigned int flags)
continue; /* current index already taken care of */
if (read_index_from(&istate,
- worktree_git_path(wt, "index")) > 0)
+ worktree_git_path(wt, "index"),
+ get_worktree_git_dir(wt)) > 0)
do_add_index_objects_to_pending(revs, &istate);
discard_index(&istate);
}
@@ -1853,8 +1859,6 @@ static int handle_revision_opt(struct rev_info *revs, int argc, const char **arg
revs->dense = 1;
} else if (!strcmp(arg, "--sparse")) {
revs->dense = 0;
- } else if (!strcmp(arg, "--show-all")) {
- revs->show_all = 1;
} else if (!strcmp(arg, "--in-commit-order")) {
revs->tree_blobs_in_commit_order = 1;
} else if (!strcmp(arg, "--remove-empty")) {
@@ -2100,6 +2104,10 @@ static int handle_revision_opt(struct rev_info *revs, int argc, const char **arg
revs->limited = 1;
} else if (!strcmp(arg, "--ignore-missing")) {
revs->ignore_missing = 1;
+ } else if (!strcmp(arg, "--exclude-promisor-objects")) {
+ if (fetch_if_missing)
+ die("BUG: exclude_promisor_objects can only be used when fetch_if_missing is 0");
+ revs->exclude_promisor_objects = 1;
} else {
int opts = diff_opt_parse(&revs->diffopt, argv, argc, revs->prefix);
if (!opts)
@@ -2845,6 +2853,16 @@ void reset_revision_walk(void)
clear_object_flags(SEEN | ADDED | SHOWN);
}
+static int mark_uninteresting(const struct object_id *oid,
+ struct packed_git *pack,
+ uint32_t pos,
+ void *unused)
+{
+ struct object *o = parse_object(oid);
+ o->flags |= UNINTERESTING | SEEN;
+ return 0;
+}
+
int prepare_revision_walk(struct rev_info *revs)
{
int i;
@@ -2872,6 +2890,11 @@ int prepare_revision_walk(struct rev_info *revs)
(revs->limited && limiting_can_increase_treesame(revs)))
revs->treesame.name = "treesame";
+ if (revs->exclude_promisor_objects) {
+ for_each_packed_object(mark_uninteresting, NULL,
+ FOR_EACH_OBJECT_PROMISOR_ONLY);
+ }
+
if (revs->no_walk != REVISION_WALK_NO_WALK_UNSORTED)
commit_list_sort_by_date(&revs->commits);
if (revs->no_walk)
@@ -3064,8 +3087,6 @@ enum commit_action get_commit_action(struct rev_info *revs, struct commit *commi
return commit_ignore;
if (revs->unpacked && has_sha1_pack(commit->object.oid.hash))
return commit_ignore;
- if (revs->show_all)
- return commit_show;
if (commit->object.flags & UNINTERESTING)
return commit_ignore;
if (revs->min_age != -1 &&
@@ -3164,7 +3185,6 @@ enum commit_action simplify_commit(struct rev_info *revs, struct commit *commit)
enum commit_action action = get_commit_action(revs, commit);
if (action == commit_show &&
- !revs->show_all &&
revs->prune && revs->dense && want_ancestry(revs)) {
/*
* --full-diff on simplified parents is no good: it
diff --git a/revision.h b/revision.h
index d7a35c8..b8c47b9 100644
--- a/revision.h
+++ b/revision.h
@@ -90,7 +90,6 @@ struct rev_info {
unsigned int dense:1,
prune:1,
no_walk:2,
- show_all:1,
remove_empty_trees:1,
simplify_history:1,
topo_order:1,
@@ -122,7 +121,10 @@ struct rev_info {
ancestry_path:1,
first_parent_only:1,
line_level_traverse:1,
- tree_blobs_in_commit_order:1;
+ tree_blobs_in_commit_order:1,
+
+ /* for internal use only */
+ exclude_promisor_objects:1;
/* Diff flags */
unsigned int diff:1,
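To see how the new exclude_promisor_objects bit interacts with the fetch_if_missing global defined later in this patch (sha1_file.c), here is a minimal sketch of a hypothetical rev-list-style caller. It is illustration only, not part of the patch, and it assumes fetch_if_missing is declared in a shared header such as cache.h.

#include "cache.h"
#include "revision.h"

/* Hypothetical caller; not part of this patch. */
static void walk_without_promisor_objects(int argc, const char **argv)
{
	struct rev_info revs;

	/*
	 * Clear this before option parsing; otherwise passing
	 * --exclude-promisor-objects trips the BUG check in
	 * handle_revision_opt().
	 */
	fetch_if_missing = 0;

	init_revisions(&revs, NULL);
	setup_revisions(argc, argv, &revs, NULL);

	/*
	 * prepare_revision_walk() now marks every object in promisor
	 * packs UNINTERESTING, so the walk stops at the promisor
	 * boundary instead of dying on missing objects.
	 */
	if (prepare_revision_walk(&revs))
		die("revision walk setup failed");
}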
diff --git a/run-command.c b/run-command.c
index 31fc5ea..a483d59 100644
--- a/run-command.c
+++ b/run-command.c
@@ -6,6 +6,7 @@
#include "thread-utils.h"
#include "strbuf.h"
#include "string-list.h"
+#include "quote.h"
void child_process_init(struct child_process *child)
{
@@ -556,6 +557,90 @@ static int wait_or_whine(pid_t pid, const char *argv0, int in_signal)
return code;
}
+static void trace_add_env(struct strbuf *dst, const char *const *deltaenv)
+{
+ struct string_list envs = STRING_LIST_INIT_DUP;
+ const char *const *e;
+ int i;
+ int printed_unset = 0;
+
+ /* Last one wins, see run-command.c:prep_childenv() for context */
+ for (e = deltaenv; e && *e; e++) {
+ struct strbuf key = STRBUF_INIT;
+ char *equals = strchr(*e, '=');
+
+ if (equals) {
+ strbuf_add(&key, *e, equals - *e);
+ string_list_insert(&envs, key.buf)->util = equals + 1;
+ } else {
+ string_list_insert(&envs, *e)->util = NULL;
+ }
+ strbuf_release(&key);
+ }
+
+ /* "unset X Y...;" */
+ for (i = 0; i < envs.nr; i++) {
+ const char *var = envs.items[i].string;
+ const char *val = envs.items[i].util;
+
+ if (val || !getenv(var))
+ continue;
+
+ if (!printed_unset) {
+ strbuf_addstr(dst, " unset");
+ printed_unset = 1;
+ }
+ strbuf_addf(dst, " %s", var);
+ }
+ if (printed_unset)
+ strbuf_addch(dst, ';');
+
+ /* ... followed by "A=B C=D ..." */
+ for (i = 0; i < envs.nr; i++) {
+ const char *var = envs.items[i].string;
+ const char *val = envs.items[i].util;
+ const char *oldval;
+
+ if (!val)
+ continue;
+
+ oldval = getenv(var);
+ if (oldval && !strcmp(val, oldval))
+ continue;
+
+ strbuf_addf(dst, " %s=", var);
+ sq_quote_buf_pretty(dst, val);
+ }
+ string_list_clear(&envs, 0);
+}
+
+static void trace_run_command(const struct child_process *cp)
+{
+ struct strbuf buf = STRBUF_INIT;
+
+ if (!trace_want(&trace_default_key))
+ return;
+
+ strbuf_addf(&buf, "trace: run_command:");
+ if (cp->dir) {
+ strbuf_addstr(&buf, " cd ");
+ sq_quote_buf_pretty(&buf, cp->dir);
+ strbuf_addch(&buf, ';');
+ }
+ /*
+ * The caller is responsible for initializing cp->env from
+ * cp->env_array if needed. We only check one place.
+ */
+ if (cp->env)
+ trace_add_env(&buf, cp->env);
+ if (cp->git_cmd)
+ strbuf_addstr(&buf, " git");
+ sq_quote_argv_pretty(&buf, cp->argv);
+
+ trace_printf("%s", buf.buf);
+ strbuf_release(&buf);
+}
+
int start_command(struct child_process *cmd)
{
int need_in, need_out, need_err;
@@ -624,7 +709,8 @@ fail_pipe:
cmd->err = fderr[0];
}
- trace_argv_printf(cmd->argv, "trace: run_command:");
+ trace_run_command(cmd);
+
fflush(NULL);
#ifndef GIT_WINDOWS_NATIVE
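For reference, the shape of the line that trace_run_command() and trace_add_env() emit, derived from the code above with made-up values (sq_quote_*_pretty() only quotes arguments that actually need quoting):

/*
 * Illustration only: with cp->dir = "sub", cp->env containing "FOO=bar"
 * and "BAZ" (BAZ being set in the parent environment and FOO not already
 * equal to "bar"), and a git_cmd child running "status", GIT_TRACE=1
 * would show:
 *
 *	trace: run_command: cd sub; unset BAZ; FOO=bar git status
 */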
diff --git a/sequencer.c b/sequencer.c
index f6b77e6..6ca4499 100644
--- a/sequencer.c
+++ b/sequencer.c
@@ -1,10 +1,10 @@
#include "cache.h"
#include "config.h"
#include "lockfile.h"
-#include "sequencer.h"
#include "dir.h"
#include "object.h"
#include "commit.h"
+#include "sequencer.h"
#include "tag.h"
#include "run-command.h"
#include "exec_cmd.h"
@@ -21,12 +21,16 @@
#include "log-tree.h"
#include "wt-status.h"
#include "hashmap.h"
+#include "notes-utils.h"
+#include "sigchain.h"
#define GIT_REFLOG_ACTION "GIT_REFLOG_ACTION"
const char sign_off_header[] = "Signed-off-by: ";
static const char cherry_picked_prefix[] = "(cherry picked from commit ";
+GIT_PATH_FUNC(git_path_commit_editmsg, "COMMIT_EDITMSG")
+
GIT_PATH_FUNC(git_path_seq_dir, "sequencer")
static GIT_PATH_FUNC(git_path_todo_file, "sequencer/todo")
@@ -130,6 +134,51 @@ static GIT_PATH_FUNC(rebase_path_strategy, "rebase-merge/strategy")
static GIT_PATH_FUNC(rebase_path_strategy_opts, "rebase-merge/strategy_opts")
static GIT_PATH_FUNC(rebase_path_allow_rerere_autoupdate, "rebase-merge/allow_rerere_autoupdate")
+static int git_sequencer_config(const char *k, const char *v, void *cb)
+{
+ struct replay_opts *opts = cb;
+ int status;
+
+ if (!strcmp(k, "commit.cleanup")) {
+ const char *s;
+
+ status = git_config_string(&s, k, v);
+ if (status)
+ return status;
+
+ if (!strcmp(s, "verbatim"))
+ opts->default_msg_cleanup = COMMIT_MSG_CLEANUP_NONE;
+ else if (!strcmp(s, "whitespace"))
+ opts->default_msg_cleanup = COMMIT_MSG_CLEANUP_SPACE;
+ else if (!strcmp(s, "strip"))
+ opts->default_msg_cleanup = COMMIT_MSG_CLEANUP_ALL;
+ else if (!strcmp(s, "scissors"))
+ opts->default_msg_cleanup = COMMIT_MSG_CLEANUP_SPACE;
+ else
+ warning(_("invalid commit message cleanup mode '%s'"),
+ s);
+
+ return status;
+ }
+
+ if (!strcmp(k, "commit.gpgsign")) {
+ opts->gpg_sign = git_config_bool(k, v) ? xstrdup("") : NULL;
+ return 0;
+ }
+
+ status = git_gpg_config(k, v, NULL);
+ if (status)
+ return status;
+
+ return git_diff_basic_config(k, v, NULL);
+}
+
+void sequencer_init_config(struct replay_opts *opts)
+{
+ opts->default_msg_cleanup = COMMIT_MSG_CLEANUP_NONE;
+ git_config(git_sequencer_config, opts);
+}
+
static inline int is_rebase_i(const struct replay_opts *opts)
{
return opts->action == REPLAY_INTERACTIVE_REBASE;
@@ -478,9 +527,6 @@ static int do_recursive_merge(struct commit *base, struct commit *next,
_(action_name(opts)));
rollback_lock_file(&index_lock);
- if (opts->signoff)
- append_signoff(msgbuf, 0, 0);
-
if (!clean)
append_conflicts_hint(msgbuf);
@@ -596,6 +642,18 @@ static int read_env_script(struct argv_array *env)
return 0;
}
+static char *get_author(const char *message)
+{
+ size_t len;
+ const char *a;
+
+ a = find_commit_header(message, "author", &len);
+ if (a)
+ return xmemdupz(a, len);
+
+ return NULL;
+}
+
static const char staged_changes_advice[] =
N_("you have staged changes in your working tree\n"
"If these changes are meant to be squashed into the previous commit, run:\n"
@@ -658,8 +716,6 @@ static int run_git_commit(const char *defmsg, struct replay_opts *opts,
argv_array_push(&cmd.args, "--amend");
if (opts->gpg_sign)
argv_array_pushf(&cmd.args, "-S%s", opts->gpg_sign);
- if (opts->signoff)
- argv_array_push(&cmd.args, "-s");
if (defmsg)
argv_array_pushl(&cmd.args, "-F", defmsg, NULL);
if ((flags & CLEANUP_MSG))
@@ -694,6 +750,461 @@ static int run_git_commit(const char *defmsg, struct replay_opts *opts,
return run_command(&cmd);
}
+static int rest_is_empty(const struct strbuf *sb, int start)
+{
+ int i, eol;
+ const char *nl;
+
+ /* Check if the rest is just whitespace and Signed-off-by's. */
+ for (i = start; i < sb->len; i++) {
+ nl = memchr(sb->buf + i, '\n', sb->len - i);
+ if (nl)
+ eol = nl - sb->buf;
+ else
+ eol = sb->len;
+
+ if (strlen(sign_off_header) <= eol - i &&
+ starts_with(sb->buf + i, sign_off_header)) {
+ i = eol;
+ continue;
+ }
+ while (i < eol)
+ if (!isspace(sb->buf[i++]))
+ return 0;
+ }
+
+ return 1;
+}
+
+/*
+ * Find out if the message in the strbuf contains only whitespace and
+ * Signed-off-by lines.
+ */
+int message_is_empty(const struct strbuf *sb,
+ enum commit_msg_cleanup_mode cleanup_mode)
+{
+ if (cleanup_mode == COMMIT_MSG_CLEANUP_NONE && sb->len)
+ return 0;
+ return rest_is_empty(sb, 0);
+}
+
+/*
+ * See if the user edited the message in the editor or left what
+ * was in the template intact
+ */
+int template_untouched(const struct strbuf *sb, const char *template_file,
+ enum commit_msg_cleanup_mode cleanup_mode)
+{
+ struct strbuf tmpl = STRBUF_INIT;
+ const char *start;
+
+ if (cleanup_mode == COMMIT_MSG_CLEANUP_NONE && sb->len)
+ return 0;
+
+ if (!template_file || strbuf_read_file(&tmpl, template_file, 0) <= 0)
+ return 0;
+
+ strbuf_stripspace(&tmpl, cleanup_mode == COMMIT_MSG_CLEANUP_ALL);
+ if (!skip_prefix(sb->buf, tmpl.buf, &start))
+ start = sb->buf;
+ strbuf_release(&tmpl);
+ return rest_is_empty(sb, start - sb->buf);
+}
+
+int update_head_with_reflog(const struct commit *old_head,
+ const struct object_id *new_head,
+ const char *action, const struct strbuf *msg,
+ struct strbuf *err)
+{
+ struct ref_transaction *transaction;
+ struct strbuf sb = STRBUF_INIT;
+ const char *nl;
+ int ret = 0;
+
+ if (action) {
+ strbuf_addstr(&sb, action);
+ strbuf_addstr(&sb, ": ");
+ }
+
+ nl = strchr(msg->buf, '\n');
+ if (nl) {
+ strbuf_add(&sb, msg->buf, nl + 1 - msg->buf);
+ } else {
+ strbuf_addbuf(&sb, msg);
+ strbuf_addch(&sb, '\n');
+ }
+
+ transaction = ref_transaction_begin(err);
+ if (!transaction ||
+ ref_transaction_update(transaction, "HEAD", new_head,
+ old_head ? &old_head->object.oid : &null_oid,
+ 0, sb.buf, err) ||
+ ref_transaction_commit(transaction, err)) {
+ ret = -1;
+ }
+ ref_transaction_free(transaction);
+ strbuf_release(&sb);
+
+ return ret;
+}
+
+static int run_rewrite_hook(const struct object_id *oldoid,
+ const struct object_id *newoid)
+{
+ struct child_process proc = CHILD_PROCESS_INIT;
+ const char *argv[3];
+ int code;
+ struct strbuf sb = STRBUF_INIT;
+
+ argv[0] = find_hook("post-rewrite");
+ if (!argv[0])
+ return 0;
+
+ argv[1] = "amend";
+ argv[2] = NULL;
+
+ proc.argv = argv;
+ proc.in = -1;
+ proc.stdout_to_stderr = 1;
+
+ code = start_command(&proc);
+ if (code)
+ return code;
+ strbuf_addf(&sb, "%s %s\n", oid_to_hex(oldoid), oid_to_hex(newoid));
+ sigchain_push(SIGPIPE, SIG_IGN);
+ write_in_full(proc.in, sb.buf, sb.len);
+ close(proc.in);
+ strbuf_release(&sb);
+ sigchain_pop(SIGPIPE);
+ return finish_command(&proc);
+}
+
+void commit_post_rewrite(const struct commit *old_head,
+ const struct object_id *new_head)
+{
+ struct notes_rewrite_cfg *cfg;
+
+ cfg = init_copy_notes_for_rewrite("amend");
+ if (cfg) {
+ /* we are amending, so old_head is not NULL */
+ copy_note_for_rewrite(cfg, &old_head->object.oid, new_head);
+ finish_copy_notes_for_rewrite(cfg, "Notes added by 'git commit --amend'");
+ }
+ run_rewrite_hook(&old_head->object.oid, new_head);
+}
+
+static int run_prepare_commit_msg_hook(struct strbuf *msg, const char *commit)
+{
+ struct argv_array hook_env = ARGV_ARRAY_INIT;
+ int ret;
+ const char *name;
+
+ name = git_path_commit_editmsg();
+ if (write_message(msg->buf, msg->len, name, 0))
+ return -1;
+
+ argv_array_pushf(&hook_env, "GIT_INDEX_FILE=%s", get_index_file());
+ argv_array_push(&hook_env, "GIT_EDITOR=:");
+ if (commit)
+ ret = run_hook_le(hook_env.argv, "prepare-commit-msg", name,
+ "commit", commit, NULL);
+ else
+ ret = run_hook_le(hook_env.argv, "prepare-commit-msg", name,
+ "message", NULL);
+ if (ret)
+ ret = error(_("'prepare-commit-msg' hook failed"));
+ argv_array_clear(&hook_env);
+
+ return ret;
+}
+
+static const char implicit_ident_advice_noconfig[] =
+N_("Your name and email address were configured automatically based\n"
+"on your username and hostname. Please check that they are accurate.\n"
+"You can suppress this message by setting them explicitly. Run the\n"
+"following command and follow the instructions in your editor to edit\n"
+"your configuration file:\n"
+"\n"
+" git config --global --edit\n"
+"\n"
+"After doing this, you may fix the identity used for this commit with:\n"
+"\n"
+" git commit --amend --reset-author\n");
+
+static const char implicit_ident_advice_config[] =
+N_("Your name and email address were configured automatically based\n"
+"on your username and hostname. Please check that they are accurate.\n"
+"You can suppress this message by setting them explicitly:\n"
+"\n"
+" git config --global user.name \"Your Name\"\n"
+" git config --global user.email you@example.com\n"
+"\n"
+"After doing this, you may fix the identity used for this commit with:\n"
+"\n"
+" git commit --amend --reset-author\n");
+
+static const char *implicit_ident_advice(void)
+{
+ char *user_config = expand_user_path("~/.gitconfig", 0);
+ char *xdg_config = xdg_config_home("config");
+ int config_exists = file_exists(user_config) || file_exists(xdg_config);
+
+ free(user_config);
+ free(xdg_config);
+
+ if (config_exists)
+ return _(implicit_ident_advice_config);
+ else
+ return _(implicit_ident_advice_noconfig);
+
+}
+
+void print_commit_summary(const char *prefix, const struct object_id *oid,
+ unsigned int flags)
+{
+ struct rev_info rev;
+ struct commit *commit;
+ struct strbuf format = STRBUF_INIT;
+ const char *head;
+ struct pretty_print_context pctx = {0};
+ struct strbuf author_ident = STRBUF_INIT;
+ struct strbuf committer_ident = STRBUF_INIT;
+
+ commit = lookup_commit(oid);
+ if (!commit)
+ die(_("couldn't look up newly created commit"));
+ if (parse_commit(commit))
+ die(_("could not parse newly created commit"));
+
+ strbuf_addstr(&format, "format:%h] %s");
+
+ format_commit_message(commit, "%an <%ae>", &author_ident, &pctx);
+ format_commit_message(commit, "%cn <%ce>", &committer_ident, &pctx);
+ if (strbuf_cmp(&author_ident, &committer_ident)) {
+ strbuf_addstr(&format, "\n Author: ");
+ strbuf_addbuf_percentquote(&format, &author_ident);
+ }
+ if (flags & SUMMARY_SHOW_AUTHOR_DATE) {
+ struct strbuf date = STRBUF_INIT;
+
+ format_commit_message(commit, "%ad", &date, &pctx);
+ strbuf_addstr(&format, "\n Date: ");
+ strbuf_addbuf_percentquote(&format, &date);
+ strbuf_release(&date);
+ }
+ if (!committer_ident_sufficiently_given()) {
+ strbuf_addstr(&format, "\n Committer: ");
+ strbuf_addbuf_percentquote(&format, &committer_ident);
+ if (advice_implicit_identity) {
+ strbuf_addch(&format, '\n');
+ strbuf_addstr(&format, implicit_ident_advice());
+ }
+ }
+ strbuf_release(&author_ident);
+ strbuf_release(&committer_ident);
+
+ init_revisions(&rev, prefix);
+ setup_revisions(0, NULL, &rev, NULL);
+
+ rev.diff = 1;
+ rev.diffopt.output_format =
+ DIFF_FORMAT_SHORTSTAT | DIFF_FORMAT_SUMMARY;
+
+ rev.verbose_header = 1;
+ rev.show_root_diff = 1;
+ get_commit_format(format.buf, &rev);
+ rev.always_show_header = 0;
+ rev.diffopt.detect_rename = DIFF_DETECT_RENAME;
+ rev.diffopt.break_opt = 0;
+ diff_setup_done(&rev.diffopt);
+
+ head = resolve_ref_unsafe("HEAD", 0, NULL, NULL);
+ if (!head)
+ die_errno(_("unable to resolve HEAD after creating commit"));
+ if (!strcmp(head, "HEAD"))
+ head = _("detached HEAD");
+ else
+ skip_prefix(head, "refs/heads/", &head);
+ printf("[%s%s ", head, (flags & SUMMARY_INITIAL_COMMIT) ?
+ _(" (root-commit)") : "");
+
+ if (!log_tree_commit(&rev, commit)) {
+ rev.always_show_header = 1;
+ rev.use_terminator = 1;
+ log_tree_commit(&rev, commit);
+ }
+
+ strbuf_release(&format);
+}
+
+static int parse_head(struct commit **head)
+{
+ struct commit *current_head;
+ struct object_id oid;
+
+ if (get_oid("HEAD", &oid)) {
+ current_head = NULL;
+ } else {
+ current_head = lookup_commit_reference(&oid);
+ if (!current_head)
+ return error(_("could not parse HEAD"));
+ if (oidcmp(&oid, &current_head->object.oid)) {
+ warning(_("HEAD %s is not a commit!"),
+ oid_to_hex(&oid));
+ }
+ if (parse_commit(current_head))
+ return error(_("could not parse HEAD commit"));
+ }
+ *head = current_head;
+
+ return 0;
+}
+
+/*
+ * Try to commit without forking 'git commit'. In some cases we need
+ * to run 'git commit' to display an error message.
+ *
+ * Returns:
+ * -1 - error unable to commit
+ * 0 - success
+ * 1 - run 'git commit'
+ */
+static int try_to_commit(struct strbuf *msg, const char *author,
+ struct replay_opts *opts, unsigned int flags,
+ struct object_id *oid)
+{
+ struct object_id tree;
+ struct commit *current_head;
+ struct commit_list *parents = NULL;
+ struct commit_extra_header *extra = NULL;
+ struct strbuf err = STRBUF_INIT;
+ struct strbuf commit_msg = STRBUF_INIT;
+ char *amend_author = NULL;
+ const char *hook_commit = NULL;
+ enum commit_msg_cleanup_mode cleanup;
+ int res = 0;
+
+ if (parse_head(&current_head))
+ return -1;
+
+ if (flags & AMEND_MSG) {
+ const char *exclude_gpgsig[] = { "gpgsig", NULL };
+ const char *out_enc = get_commit_output_encoding();
+ const char *message = logmsg_reencode(current_head, NULL,
+ out_enc);
+
+ if (!msg) {
+ const char *orig_message = NULL;
+
+ find_commit_subject(message, &orig_message);
+ msg = &commit_msg;
+ strbuf_addstr(msg, orig_message);
+ hook_commit = "HEAD";
+ }
+ author = amend_author = get_author(message);
+ unuse_commit_buffer(current_head, message);
+ if (!author) {
+ res = error(_("unable to parse commit author"));
+ goto out;
+ }
+ parents = copy_commit_list(current_head->parents);
+ extra = read_commit_extra_headers(current_head, exclude_gpgsig);
+ } else if (current_head) {
+ commit_list_insert(current_head, &parents);
+ }
+
+ if (write_cache_as_tree(tree.hash, 0, NULL)) {
+ res = error(_("git write-tree failed to write a tree"));
+ goto out;
+ }
+
+ if (!(flags & ALLOW_EMPTY) && !oidcmp(current_head ?
+ &current_head->tree->object.oid :
+ &empty_tree_oid, &tree)) {
+ res = 1; /* run 'git commit' to display error message */
+ goto out;
+ }
+
+ if (find_hook("prepare-commit-msg")) {
+ res = run_prepare_commit_msg_hook(msg, hook_commit);
+ if (res)
+ goto out;
+ if (strbuf_read_file(&commit_msg, git_path_commit_editmsg(),
+ 2048) < 0) {
+ res = error_errno(_("unable to read commit message "
+ "from '%s'"),
+ git_path_commit_editmsg());
+ goto out;
+ }
+ msg = &commit_msg;
+ }
+
+ cleanup = (flags & CLEANUP_MSG) ? COMMIT_MSG_CLEANUP_ALL :
+ opts->default_msg_cleanup;
+
+ if (cleanup != COMMIT_MSG_CLEANUP_NONE)
+ strbuf_stripspace(msg, cleanup == COMMIT_MSG_CLEANUP_ALL);
+ if (!opts->allow_empty_message && message_is_empty(msg, cleanup)) {
+ res = 1; /* run 'git commit' to display error message */
+ goto out;
+ }
+
+ if (commit_tree_extended(msg->buf, msg->len, &tree, parents,
+ oid, author, opts->gpg_sign, extra)) {
+ res = error(_("failed to write commit object"));
+ goto out;
+ }
+
+ if (update_head_with_reflog(current_head, oid,
+ getenv("GIT_REFLOG_ACTION"), msg, &err)) {
+ res = error("%s", err.buf);
+ goto out;
+ }
+
+ if (flags & AMEND_MSG)
+ commit_post_rewrite(current_head, oid);
+
+out:
+ free_commit_extra_headers(extra);
+ strbuf_release(&err);
+ strbuf_release(&commit_msg);
+ free(amend_author);
+
+ return res;
+}
+
+static int do_commit(const char *msg_file, const char *author,
+ struct replay_opts *opts, unsigned int flags)
+{
+ int res = 1;
+
+ if (!(flags & EDIT_MSG) && !(flags & VERIFY_MSG)) {
+ struct object_id oid;
+ struct strbuf sb = STRBUF_INIT;
+
+ if (msg_file && strbuf_read_file(&sb, msg_file, 2048) < 0)
+ return error_errno(_("unable to read commit message "
+ "from '%s'"),
+ msg_file);
+
+ res = try_to_commit(msg_file ? &sb : NULL, author, opts, flags,
+ &oid);
+ strbuf_release(&sb);
+ if (!res) {
+ unlink(git_path_cherry_pick_head());
+ unlink(git_path_merge_msg());
+ if (!is_rebase_i(opts))
+ print_commit_summary(NULL, &oid,
+ SUMMARY_SHOW_AUTHOR_DATE);
+ return res;
+ }
+ }
+ if (res == 1)
+ return run_git_commit(msg_file, opts, flags);
+
+ return res;
+}
+
static int is_original_commit_empty(struct commit *commit)
{
const struct object_id *ptree_oid;
@@ -952,6 +1463,7 @@ static int do_pick_commit(enum todo_command command, struct commit *commit,
struct object_id head;
struct commit *base, *next, *parent;
const char *base_label, *next_label;
+ char *author = NULL;
struct commit_message msg = { NULL, NULL, NULL, NULL };
struct strbuf msgbuf = STRBUF_INIT;
int res, unborn = 0, allow;
@@ -1066,6 +1578,8 @@ static int do_pick_commit(enum todo_command command, struct commit *commit,
strbuf_addstr(&msgbuf, oid_to_hex(&commit->object.oid));
strbuf_addstr(&msgbuf, ")\n");
}
+ if (!is_fixup(command))
+ author = get_author(msg.message);
}
if (command == TODO_REWORD)
@@ -1091,6 +1605,9 @@ static int do_pick_commit(enum todo_command command, struct commit *commit,
}
}
+ if (opts->signoff)
+ append_signoff(&msgbuf, 0, 0);
+
if (is_rebase_i(opts) && write_author_script(msg.message) < 0)
res = -1;
else if (!opts->strategy || !strcmp(opts->strategy, "recursive") || command == TODO_REVERT) {
@@ -1148,9 +1665,13 @@ static int do_pick_commit(enum todo_command command, struct commit *commit,
goto leave;
} else if (allow)
flags |= ALLOW_EMPTY;
- if (!opts->no_commit)
+ if (!opts->no_commit) {
fast_forward_edit:
- res = run_git_commit(msg_file, opts, flags);
+ if (author || command == TODO_REVERT || (flags & AMEND_MSG))
+ res = do_commit(msg_file, author, opts, flags);
+ else
+ res = error(_("unable to parse commit author"));
+ }
if (!res && final_fixup) {
unlink(rebase_path_fixup_msg());
@@ -1159,6 +1680,7 @@ fast_forward_edit:
leave:
free_message(commit, &msg);
+ free(author);
update_abort_safety_file();
return res;
diff --git a/sequencer.h b/sequencer.h
index 81f6d7d..e45b178 100644
--- a/sequencer.h
+++ b/sequencer.h
@@ -1,6 +1,7 @@
#ifndef SEQUENCER_H
#define SEQUENCER_H
+const char *git_path_commit_editmsg(void);
const char *git_path_seq_dir(void);
#define APPEND_SIGNOFF_DEDUP (1u << 0)
@@ -11,6 +12,13 @@ enum replay_action {
REPLAY_INTERACTIVE_REBASE
};
+enum commit_msg_cleanup_mode {
+ COMMIT_MSG_CLEANUP_SPACE,
+ COMMIT_MSG_CLEANUP_NONE,
+ COMMIT_MSG_CLEANUP_SCISSORS,
+ COMMIT_MSG_CLEANUP_ALL
+};
+
struct replay_opts {
enum replay_action action;
@@ -29,6 +37,7 @@ struct replay_opts {
int mainline;
char *gpg_sign;
+ enum commit_msg_cleanup_mode default_msg_cleanup;
/* Merge strategy */
char *strategy;
@@ -40,6 +49,8 @@ struct replay_opts {
};
#define REPLAY_OPTS_INIT { -1 }
+/* Call this to set up defaults before parsing command line options */
+void sequencer_init_config(struct replay_opts *opts);
int sequencer_pick_revisions(struct replay_opts *opts);
int sequencer_continue(struct replay_opts *opts);
int sequencer_rollback(struct replay_opts *opts);
@@ -61,5 +72,19 @@ extern const char sign_off_header[];
void append_signoff(struct strbuf *msgbuf, int ignore_footer, unsigned flag);
void append_conflicts_hint(struct strbuf *msgbuf);
+int message_is_empty(const struct strbuf *sb,
+ enum commit_msg_cleanup_mode cleanup_mode);
+int template_untouched(const struct strbuf *sb, const char *template_file,
+ enum commit_msg_cleanup_mode cleanup_mode);
+int update_head_with_reflog(const struct commit *old_head,
+ const struct object_id *new_head,
+ const char *action, const struct strbuf *msg,
+ struct strbuf *err);
+void commit_post_rewrite(const struct commit *current_head,
+ const struct object_id *new_head);
+#define SUMMARY_INITIAL_COMMIT (1 << 0)
+#define SUMMARY_SHOW_AUTHOR_DATE (1 << 1)
+void print_commit_summary(const char *prefix, const struct object_id *oid,
+ unsigned int flags);
#endif
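A minimal sketch of how the pieces exported here fit together, mirroring the cleanup logic of try_to_commit() in sequencer.c above. The wrapper function is hypothetical; only the declarations it uses come from this header.

#include "cache.h"
#include "sequencer.h"

/* Hypothetical helper mirroring try_to_commit()'s message handling. */
static int commit_message_usable(struct strbuf *msg, struct replay_opts *opts)
{
	enum commit_msg_cleanup_mode cleanup = opts->default_msg_cleanup;

	if (cleanup != COMMIT_MSG_CLEANUP_NONE)
		strbuf_stripspace(msg, cleanup == COMMIT_MSG_CLEANUP_ALL);
	return !message_is_empty(msg, cleanup);
}

/*
 * opts->default_msg_cleanup is populated from commit.cleanup by calling
 * sequencer_init_config() before parsing command-line options.
 */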
diff --git a/setup.c b/setup.c
index 8cc3418..c5d55dc 100644
--- a/setup.c
+++ b/setup.c
@@ -422,7 +422,11 @@ static int check_repo_format(const char *var, const char *value, void *vdata)
;
else if (!strcmp(ext, "preciousobjects"))
data->precious_objects = git_config_bool(var, value);
- else
+ else if (!strcmp(ext, "partialclone")) {
+ if (!value)
+ return config_error_nonbool(var);
+ data->partial_clone = xstrdup(value);
+ } else
string_list_append(&data->unknown_extensions, ext);
} else if (strcmp(var, "core.bare") == 0) {
data->is_bare = git_config_bool(var, value);
@@ -464,6 +468,7 @@ static int check_repository_format_gently(const char *gitdir, struct repository_
}
repository_format_precious_objects = candidate->precious_objects;
+ repository_format_partial_clone = candidate->partial_clone;
string_list_clear(&candidate->unknown_extensions, 0);
if (!has_common) {
if (candidate->is_bare != -1) {
diff --git a/sha1-lookup.c b/sha1-lookup.c
index 4cf3ebd..8d0b1db 100644
--- a/sha1-lookup.c
+++ b/sha1-lookup.c
@@ -99,3 +99,31 @@ int sha1_pos(const unsigned char *sha1, void *table, size_t nr,
} while (lo < hi);
return -lo-1;
}
+
+int bsearch_hash(const unsigned char *sha1, const uint32_t *fanout_nbo,
+ const unsigned char *table, size_t stride, uint32_t *result)
+{
+ uint32_t hi, lo;
+
+ hi = ntohl(fanout_nbo[*sha1]);
+ lo = ((*sha1 == 0x0) ? 0 : ntohl(fanout_nbo[*sha1 - 1]));
+
+ while (lo < hi) {
+ unsigned mi = lo + (hi - lo) / 2;
+ int cmp = hashcmp(table + mi * stride, sha1);
+
+ if (!cmp) {
+ if (result)
+ *result = mi;
+ return 1;
+ }
+ if (cmp > 0)
+ hi = mi;
+ else
+ lo = mi + 1;
+ }
+
+ if (result)
+ *result = lo;
+ return 0;
+}
diff --git a/sha1-lookup.h b/sha1-lookup.h
index cf5314f..7678b23 100644
--- a/sha1-lookup.h
+++ b/sha1-lookup.h
@@ -7,4 +7,26 @@ extern int sha1_pos(const unsigned char *sha1,
void *table,
size_t nr,
sha1_access_fn fn);
+
+/*
+ * Searches for sha1 in table, using the given fanout table to determine the
+ * interval to search, then using binary search. Returns 1 if found, 0 if not.
+ *
+ * Takes the following parameters:
+ *
+ * - sha1: the hash to search for
+ * - fanout_nbo: a 256-element array of NETWORK-order 32-bit integers; the
+ * integer at position i represents the number of elements in table whose
+ * first byte is less than or equal to i
+ * - table: a sorted list of hashes with optional extra information in between
+ * - stride: distance between two consecutive elements in table (should be
+ * GIT_MAX_RAWSZ or greater)
+ * - result: if not NULL, this function stores the element index of the
+ * position found (if the search is successful) or the index of the least
+ * element that is greater than sha1 (if the search is not successful)
+ *
+ * This function does not verify the validity of the fanout table.
+ */
+int bsearch_hash(const unsigned char *sha1, const uint32_t *fanout_nbo,
+ const unsigned char *table, size_t stride, uint32_t *result);
#endif
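As a usage illustration of bsearch_hash(): only the prototype above comes from the patch; the fanout/table layout in this sketch is a hypothetical example, not an existing Git on-disk format.

#include "cache.h"
#include "sha1-lookup.h"

/*
 * Hypothetical layout: "fanout" is the 256-entry network-order count
 * table described above and "table" is a sorted array of hashes, one
 * every GIT_MAX_RAWSZ bytes.
 */
static int lookup_sha1(const unsigned char *sha1, const uint32_t *fanout,
		       const unsigned char *table)
{
	uint32_t pos;

	if (bsearch_hash(sha1, fanout, table, GIT_MAX_RAWSZ, &pos))
		return pos;	/* found: index of the matching entry */
	return -1;		/* absent: pos is the insertion point */
}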
diff --git a/sha1_file.c b/sha1_file.c
index 2288bb7..1b94f39 100644
--- a/sha1_file.c
+++ b/sha1_file.c
@@ -24,11 +24,11 @@
#include "bulk-checkin.h"
#include "streaming.h"
#include "dir.h"
-#include "mru.h"
#include "list.h"
#include "mergesort.h"
#include "quote.h"
#include "packfile.h"
+#include "fetch-object.h"
const unsigned char null_sha1[GIT_MAX_RAWSZ];
const struct object_id null_oid;
@@ -39,32 +39,32 @@ const struct object_id empty_blob_oid = {
EMPTY_BLOB_SHA1_BIN_LITERAL
};
-static void git_hash_sha1_init(void *ctx)
+static void git_hash_sha1_init(git_hash_ctx *ctx)
{
- git_SHA1_Init((git_SHA_CTX *)ctx);
+ git_SHA1_Init(&ctx->sha1);
}
-static void git_hash_sha1_update(void *ctx, const void *data, size_t len)
+static void git_hash_sha1_update(git_hash_ctx *ctx, const void *data, size_t len)
{
- git_SHA1_Update((git_SHA_CTX *)ctx, data, len);
+ git_SHA1_Update(&ctx->sha1, data, len);
}
-static void git_hash_sha1_final(unsigned char *hash, void *ctx)
+static void git_hash_sha1_final(unsigned char *hash, git_hash_ctx *ctx)
{
- git_SHA1_Final(hash, (git_SHA_CTX *)ctx);
+ git_SHA1_Final(hash, &ctx->sha1);
}
-static void git_hash_unknown_init(void *ctx)
+static void git_hash_unknown_init(git_hash_ctx *ctx)
{
die("trying to init unknown hash");
}
-static void git_hash_unknown_update(void *ctx, const void *data, size_t len)
+static void git_hash_unknown_update(git_hash_ctx *ctx, const void *data, size_t len)
{
die("trying to update unknown hash");
}
-static void git_hash_unknown_final(unsigned char *hash, void *ctx)
+static void git_hash_unknown_final(unsigned char *hash, git_hash_ctx *ctx)
{
die("trying to finalize unknown hash");
}
@@ -75,7 +75,6 @@ const struct git_hash_algo hash_algos[GIT_HASH_NALGOS] = {
0x00000000,
0,
0,
- 0,
git_hash_unknown_init,
git_hash_unknown_update,
git_hash_unknown_final,
@@ -86,7 +85,6 @@ const struct git_hash_algo hash_algos[GIT_HASH_NALGOS] = {
"sha-1",
/* "sha1", big-endian */
0x73686131,
- sizeof(git_SHA_CTX),
GIT_SHA1_RAWSZ,
GIT_SHA1_HEXSZ,
git_hash_sha1_init,
@@ -133,14 +131,14 @@ static struct cached_object *find_cached_object(const unsigned char *sha1)
}
-static enum safe_crlf get_safe_crlf(unsigned flags)
+static int get_conv_flags(unsigned flags)
{
if (flags & HASH_RENORMALIZE)
- return SAFE_CRLF_RENORMALIZE;
+ return CONV_EOL_RENORMALIZE;
else if (flags & HASH_WRITE_OBJECT)
- return safe_crlf;
+ return global_conv_flags_eol;
else
- return SAFE_CRLF_FALSE;
+ return 0;
}
@@ -321,15 +319,11 @@ static void fill_sha1_path(struct strbuf *buf, const unsigned char *sha1)
}
}
-const char *sha1_file_name(const unsigned char *sha1)
+void sha1_file_name(struct strbuf *buf, const unsigned char *sha1)
{
- static struct strbuf buf = STRBUF_INIT;
-
- strbuf_reset(&buf);
- strbuf_addf(&buf, "%s/", get_object_directory());
-
- fill_sha1_path(&buf, sha1);
- return buf.buf;
+ strbuf_addstr(buf, get_object_directory());
+ strbuf_addch(buf, '/');
+ fill_sha1_path(buf, sha1);
}
struct strbuf *alt_scratch_buf(struct alternate_object_database *alt)
@@ -710,7 +704,12 @@ int check_and_freshen_file(const char *fn, int freshen)
static int check_and_freshen_local(const unsigned char *sha1, int freshen)
{
- return check_and_freshen_file(sha1_file_name(sha1), freshen);
+ static struct strbuf buf = STRBUF_INIT;
+
+ strbuf_reset(&buf);
+ sha1_file_name(&buf, sha1);
+
+ return check_and_freshen_file(buf.buf, freshen);
}
static int check_and_freshen_nonlocal(const unsigned char *sha1, int freshen)
@@ -788,16 +787,16 @@ void *xmmap(void *start, size_t length,
int check_sha1_signature(const unsigned char *sha1, void *map,
unsigned long size, const char *type)
{
- unsigned char real_sha1[20];
+ struct object_id real_oid;
enum object_type obj_type;
struct git_istream *st;
- git_SHA_CTX c;
+ git_hash_ctx c;
char hdr[32];
int hdrlen;
if (map) {
- hash_sha1_file(map, size, type, real_sha1);
- return hashcmp(sha1, real_sha1) ? -1 : 0;
+ hash_object_file(map, size, type, &real_oid);
+ return hashcmp(sha1, real_oid.hash) ? -1 : 0;
}
st = open_istream(sha1, &obj_type, &size, NULL);
@@ -808,8 +807,8 @@ int check_sha1_signature(const unsigned char *sha1, void *map,
hdrlen = xsnprintf(hdr, sizeof(hdr), "%s %lu", type_name(obj_type), size) + 1;
/* Sha1.. */
- git_SHA1_Init(&c);
- git_SHA1_Update(&c, hdr, hdrlen);
+ the_hash_algo->init_fn(&c);
+ the_hash_algo->update_fn(&c, hdr, hdrlen);
for (;;) {
char buf[1024 * 16];
ssize_t readlen = read_istream(st, buf, sizeof(buf));
@@ -820,11 +819,11 @@ int check_sha1_signature(const unsigned char *sha1, void *map,
}
if (!readlen)
break;
- git_SHA1_Update(&c, buf, readlen);
+ the_hash_algo->update_fn(&c, buf, readlen);
}
- git_SHA1_Final(real_sha1, &c);
+ the_hash_algo->final_fn(real_oid.hash, &c);
close_istream(st);
- return hashcmp(sha1, real_sha1) ? -1 : 0;
+ return hashcmp(sha1, real_oid.hash) ? -1 : 0;
}
int git_open_cloexec(const char *name, int flags)
@@ -866,8 +865,12 @@ static int stat_sha1_file(const unsigned char *sha1, struct stat *st,
const char **path)
{
struct alternate_object_database *alt;
+ static struct strbuf buf = STRBUF_INIT;
+
+ strbuf_reset(&buf);
+ sha1_file_name(&buf, sha1);
+ *path = buf.buf;
- *path = sha1_file_name(sha1);
if (!lstat(*path, st))
return 0;
@@ -891,8 +894,12 @@ static int open_sha1_file(const unsigned char *sha1, const char **path)
int fd;
struct alternate_object_database *alt;
int most_interesting_errno;
+ static struct strbuf buf = STRBUF_INIT;
+
+ strbuf_reset(&buf);
+ sha1_file_name(&buf, sha1);
+ *path = buf.buf;
- *path = sha1_file_name(sha1);
fd = git_open(*path);
if (fd >= 0)
return fd;
@@ -1213,6 +1220,8 @@ static int sha1_loose_object_info(const unsigned char *sha1,
return (status < 0) ? status : 0;
}
+int fetch_if_missing = 1;
+
int sha1_object_info_extended(const unsigned char *sha1, struct object_info *oi, unsigned flags)
{
static struct object_info blank_oi = OBJECT_INFO_INIT;
@@ -1221,6 +1230,7 @@ int sha1_object_info_extended(const unsigned char *sha1, struct object_info *oi,
const unsigned char *real = (flags & OBJECT_INFO_LOOKUP_REPLACE) ?
lookup_replace_object(sha1) :
sha1;
+ int already_retried = 0;
if (is_null_sha1(real))
return -1;
@@ -1248,19 +1258,32 @@ int sha1_object_info_extended(const unsigned char *sha1, struct object_info *oi,
}
}
- if (!find_pack_entry(real, &e)) {
+ while (1) {
+ if (find_pack_entry(real, &e))
+ break;
+
/* Most likely it's a loose object. */
if (!sha1_loose_object_info(real, oi, flags))
return 0;
/* Not a loose object; someone else may have just packed it. */
- if (flags & OBJECT_INFO_QUICK) {
- return -1;
- } else {
- reprepare_packed_git();
- if (!find_pack_entry(real, &e))
- return -1;
+ reprepare_packed_git();
+ if (find_pack_entry(real, &e))
+ break;
+
+ /* Check if it is a missing object */
+ if (fetch_if_missing && repository_format_partial_clone &&
+ !already_retried) {
+ /*
+ * TODO Investigate having fetch_object() return
+ * TODO error/success and stopping the music here.
+ */
+ fetch_object(repository_format_partial_clone, real);
+ already_retried = 1;
+ continue;
}
+
+ return -1;
}
if (oi == &blank_oi)
@@ -1269,7 +1292,6 @@ int sha1_object_info_extended(const unsigned char *sha1, struct object_info *oi,
* information below, so return early.
*/
return 0;
-
rtype = packed_object_info(e.p, e.offset, oi);
if (rtype < 0) {
mark_bad_packed_object(e.p, real);
@@ -1312,13 +1334,13 @@ static void *read_object(const unsigned char *sha1, enum object_type *type,
return content;
}
-int pretend_sha1_file(void *buf, unsigned long len, enum object_type type,
- unsigned char *sha1)
+int pretend_object_file(void *buf, unsigned long len, enum object_type type,
+ struct object_id *oid)
{
struct cached_object *co;
- hash_sha1_file(buf, len, type_name(type), sha1);
- if (has_sha1_file(sha1) || find_cached_object(sha1))
+ hash_object_file(buf, len, type_name(type), oid);
+ if (has_sha1_file(oid->hash) || find_cached_object(oid->hash))
return 0;
ALLOC_GROW(cached_objects, cached_object_nr + 1, cached_object_alloc);
co = &cached_objects[cached_object_nr++];
@@ -1326,7 +1348,7 @@ int pretend_sha1_file(void *buf, unsigned long len, enum object_type type,
co->type = type;
co->buf = xmalloc(len);
memcpy(co->buf, buf, len);
- hashcpy(co->sha1, sha1);
+ hashcpy(co->sha1, oid->hash);
return 0;
}
@@ -1419,20 +1441,20 @@ void *read_object_with_reference(const unsigned char *sha1,
}
}
-static void write_sha1_file_prepare(const void *buf, unsigned long len,
- const char *type, unsigned char *sha1,
- char *hdr, int *hdrlen)
+static void write_object_file_prepare(const void *buf, unsigned long len,
+ const char *type, struct object_id *oid,
+ char *hdr, int *hdrlen)
{
- git_SHA_CTX c;
+ git_hash_ctx c;
/* Generate the header */
*hdrlen = xsnprintf(hdr, *hdrlen, "%s %lu", type, len)+1;
/* Sha1.. */
- git_SHA1_Init(&c);
- git_SHA1_Update(&c, hdr, *hdrlen);
- git_SHA1_Update(&c, buf, len);
- git_SHA1_Final(sha1, &c);
+ the_hash_algo->init_fn(&c);
+ the_hash_algo->update_fn(&c, hdr, *hdrlen);
+ the_hash_algo->update_fn(&c, buf, len);
+ the_hash_algo->final_fn(oid->hash, &c);
}
/*
@@ -1485,12 +1507,12 @@ static int write_buffer(int fd, const void *buf, size_t len)
return 0;
}
-int hash_sha1_file(const void *buf, unsigned long len, const char *type,
- unsigned char *sha1)
+int hash_object_file(const void *buf, unsigned long len, const char *type,
+ struct object_id *oid)
{
char hdr[32];
int hdrlen = sizeof(hdr);
- write_sha1_file_prepare(buf, len, type, sha1, hdr, &hdrlen);
+ write_object_file_prepare(buf, len, type, oid, hdr, &hdrlen);
return 0;
}
@@ -1548,18 +1570,22 @@ static int create_tmpfile(struct strbuf *tmp, const char *filename)
return fd;
}
-static int write_loose_object(const unsigned char *sha1, char *hdr, int hdrlen,
- const void *buf, unsigned long len, time_t mtime)
+static int write_loose_object(const struct object_id *oid, char *hdr,
+ int hdrlen, const void *buf, unsigned long len,
+ time_t mtime)
{
int fd, ret;
unsigned char compressed[4096];
git_zstream stream;
- git_SHA_CTX c;
- unsigned char parano_sha1[20];
+ git_hash_ctx c;
+ struct object_id parano_oid;
static struct strbuf tmp_file = STRBUF_INIT;
- const char *filename = sha1_file_name(sha1);
+ static struct strbuf filename = STRBUF_INIT;
+
+ strbuf_reset(&filename);
+ sha1_file_name(&filename, oid->hash);
- fd = create_tmpfile(&tmp_file, filename);
+ fd = create_tmpfile(&tmp_file, filename.buf);
if (fd < 0) {
if (errno == EACCES)
return error("insufficient permission for adding an object to repository database %s", get_object_directory());
@@ -1571,14 +1597,14 @@ static int write_loose_object(const unsigned char *sha1, char *hdr, int hdrlen,
git_deflate_init(&stream, zlib_compression_level);
stream.next_out = compressed;
stream.avail_out = sizeof(compressed);
- git_SHA1_Init(&c);
+ the_hash_algo->init_fn(&c);
/* First header.. */
stream.next_in = (unsigned char *)hdr;
stream.avail_in = hdrlen;
while (git_deflate(&stream, 0) == Z_OK)
; /* nothing */
- git_SHA1_Update(&c, hdr, hdrlen);
+ the_hash_algo->update_fn(&c, hdr, hdrlen);
/* Then the data itself.. */
stream.next_in = (void *)buf;
@@ -1586,7 +1612,7 @@ static int write_loose_object(const unsigned char *sha1, char *hdr, int hdrlen,
do {
unsigned char *in0 = stream.next_in;
ret = git_deflate(&stream, Z_FINISH);
- git_SHA1_Update(&c, in0, stream.next_in - in0);
+ the_hash_algo->update_fn(&c, in0, stream.next_in - in0);
if (write_buffer(fd, compressed, stream.next_out - compressed) < 0)
die("unable to write sha1 file");
stream.next_out = compressed;
@@ -1594,13 +1620,16 @@ static int write_loose_object(const unsigned char *sha1, char *hdr, int hdrlen,
} while (ret == Z_OK);
if (ret != Z_STREAM_END)
- die("unable to deflate new object %s (%d)", sha1_to_hex(sha1), ret);
+ die("unable to deflate new object %s (%d)", oid_to_hex(oid),
+ ret);
ret = git_deflate_end_gently(&stream);
if (ret != Z_OK)
- die("deflateEnd on object %s failed (%d)", sha1_to_hex(sha1), ret);
- git_SHA1_Final(parano_sha1, &c);
- if (hashcmp(sha1, parano_sha1) != 0)
- die("confused by unstable object source data for %s", sha1_to_hex(sha1));
+ die("deflateEnd on object %s failed (%d)", oid_to_hex(oid),
+ ret);
+ the_hash_algo->final_fn(parano_oid.hash, &c);
+ if (oidcmp(oid, &parano_oid) != 0)
+ die("confused by unstable object source data for %s",
+ oid_to_hex(oid));
close_sha1_file(fd);
@@ -1612,7 +1641,7 @@ static int write_loose_object(const unsigned char *sha1, char *hdr, int hdrlen,
warning_errno("failed utime() on %s", tmp_file.buf);
}
- return finalize_object_file(tmp_file.buf, filename);
+ return finalize_object_file(tmp_file.buf, filename.buf);
}
static int freshen_loose_object(const unsigned char *sha1)
@@ -1633,7 +1662,8 @@ static int freshen_packed_object(const unsigned char *sha1)
return 1;
}
-int write_sha1_file(const void *buf, unsigned long len, const char *type, unsigned char *sha1)
+int write_object_file(const void *buf, unsigned long len, const char *type,
+ struct object_id *oid)
{
char hdr[32];
int hdrlen = sizeof(hdr);
@@ -1641,14 +1671,15 @@ int write_sha1_file(const void *buf, unsigned long len, const char *type, unsign
/* Normally if we have it in the pack then we do not bother writing
* it out into .git/objects/??/?{38} file.
*/
- write_sha1_file_prepare(buf, len, type, sha1, hdr, &hdrlen);
- if (freshen_packed_object(sha1) || freshen_loose_object(sha1))
+ write_object_file_prepare(buf, len, type, oid, hdr, &hdrlen);
+ if (freshen_packed_object(oid->hash) || freshen_loose_object(oid->hash))
return 0;
- return write_loose_object(sha1, hdr, hdrlen, buf, len, 0);
+ return write_loose_object(oid, hdr, hdrlen, buf, len, 0);
}
-int hash_sha1_file_literally(const void *buf, unsigned long len, const char *type,
- struct object_id *oid, unsigned flags)
+int hash_object_file_literally(const void *buf, unsigned long len,
+ const char *type, struct object_id *oid,
+ unsigned flags)
{
char *header;
int hdrlen, status = 0;
@@ -1656,20 +1687,20 @@ int hash_sha1_file_literally(const void *buf, unsigned long len, const char *typ
/* type string, SP, %lu of the length plus NUL must fit this */
hdrlen = strlen(type) + 32;
header = xmalloc(hdrlen);
- write_sha1_file_prepare(buf, len, type, oid->hash, header, &hdrlen);
+ write_object_file_prepare(buf, len, type, oid, header, &hdrlen);
if (!(flags & HASH_WRITE_OBJECT))
goto cleanup;
if (freshen_packed_object(oid->hash) || freshen_loose_object(oid->hash))
goto cleanup;
- status = write_loose_object(oid->hash, header, hdrlen, buf, len, 0);
+ status = write_loose_object(oid, header, hdrlen, buf, len, 0);
cleanup:
free(header);
return status;
}
-int force_object_loose(const unsigned char *sha1, time_t mtime)
+int force_object_loose(const struct object_id *oid, time_t mtime)
{
void *buf;
unsigned long len;
@@ -1678,13 +1709,13 @@ int force_object_loose(const unsigned char *sha1, time_t mtime)
int hdrlen;
int ret;
- if (has_loose_object(sha1))
+ if (has_loose_object(oid->hash))
return 0;
- buf = read_object(sha1, &type, &len);
+ buf = read_object(oid->hash, &type, &len);
if (!buf)
- return error("cannot read sha1_file for %s", sha1_to_hex(sha1));
+ return error("cannot read sha1_file for %s", oid_to_hex(oid));
hdrlen = xsnprintf(hdr, sizeof(hdr), "%s %lu", type_name(type), len) + 1;
- ret = write_loose_object(sha1, hdr, hdrlen, buf, len, mtime);
+ ret = write_loose_object(oid, hdr, hdrlen, buf, len, mtime);
free(buf);
return ret;
@@ -1752,7 +1783,7 @@ static int index_mem(struct object_id *oid, void *buf, size_t size,
if ((type == OBJ_BLOB) && path) {
struct strbuf nbuf = STRBUF_INIT;
if (convert_to_git(&the_index, path, buf, size, &nbuf,
- get_safe_crlf(flags))) {
+ get_conv_flags(flags))) {
buf = strbuf_detach(&nbuf, &size);
re_allocated = 1;
}
@@ -1767,9 +1798,9 @@ static int index_mem(struct object_id *oid, void *buf, size_t size,
}
if (write_object)
- ret = write_sha1_file(buf, size, type_name(type), oid->hash);
+ ret = write_object_file(buf, size, type_name(type), oid);
else
- ret = hash_sha1_file(buf, size, type_name(type), oid->hash);
+ ret = hash_object_file(buf, size, type_name(type), oid);
if (re_allocated)
free(buf);
return ret;
@@ -1786,14 +1817,14 @@ static int index_stream_convert_blob(struct object_id *oid, int fd,
assert(would_convert_to_git_filter_fd(path));
convert_to_git_filter_fd(&the_index, path, fd, &sbuf,
- get_safe_crlf(flags));
+ get_conv_flags(flags));
if (write_object)
- ret = write_sha1_file(sbuf.buf, sbuf.len, type_name(OBJ_BLOB),
- oid->hash);
+ ret = write_object_file(sbuf.buf, sbuf.len, type_name(OBJ_BLOB),
+ oid);
else
- ret = hash_sha1_file(sbuf.buf, sbuf.len, type_name(OBJ_BLOB),
- oid->hash);
+ ret = hash_object_file(sbuf.buf, sbuf.len, type_name(OBJ_BLOB),
+ oid);
strbuf_release(&sbuf);
return ret;
}
@@ -1907,8 +1938,8 @@ int index_path(struct object_id *oid, const char *path, struct stat *st, unsigne
if (strbuf_readlink(&sb, path, st->st_size))
return error_errno("readlink(\"%s\")", path);
if (!(flags & HASH_WRITE_OBJECT))
- hash_sha1_file(sb.buf, sb.len, blob_type, oid->hash);
- else if (write_sha1_file(sb.buf, sb.len, blob_type, oid->hash))
+ hash_object_file(sb.buf, sb.len, blob_type, oid);
+ else if (write_object_file(sb.buf, sb.len, blob_type, oid))
rc = error("%s: failed to insert into database", path);
strbuf_release(&sb);
break;
@@ -2093,14 +2124,14 @@ static int check_stream_sha1(git_zstream *stream,
const char *path,
const unsigned char *expected_sha1)
{
- git_SHA_CTX c;
+ git_hash_ctx c;
unsigned char real_sha1[GIT_MAX_RAWSZ];
unsigned char buf[4096];
unsigned long total_read;
int status = Z_OK;
- git_SHA1_Init(&c);
- git_SHA1_Update(&c, hdr, stream->total_out);
+ the_hash_algo->init_fn(&c);
+ the_hash_algo->update_fn(&c, hdr, stream->total_out);
/*
* We already read some bytes into hdr, but the ones up to the NUL
@@ -2119,7 +2150,7 @@ static int check_stream_sha1(git_zstream *stream,
if (size - total_read < stream->avail_out)
stream->avail_out = size - total_read;
status = git_inflate(stream, Z_FINISH);
- git_SHA1_Update(&c, buf, stream->next_out - buf);
+ the_hash_algo->update_fn(&c, buf, stream->next_out - buf);
total_read += stream->next_out - buf;
}
git_inflate_end(stream);
@@ -2134,7 +2165,7 @@ static int check_stream_sha1(git_zstream *stream,
return -1;
}
- git_SHA1_Final(real_sha1, &c);
+ the_hash_algo->final_fn(real_sha1, &c);
if (hashcmp(expected_sha1, real_sha1)) {
error("sha1 mismatch for %s (expected %s)", path,
sha1_to_hex(expected_sha1));
diff --git a/sha1dc_git.h b/sha1dc_git.h
index a8c2729..41e1c3f 100644
--- a/sha1dc_git.h
+++ b/sha1dc_git.h
@@ -1,9 +1,9 @@
/* Plumbing with collision-detecting SHA1 code */
-#ifdef DC_SHA1_SUBMODULE
-#include "sha1collisiondetection/lib/sha1.h"
-#elif defined(DC_SHA1_EXTERNAL)
+#ifdef DC_SHA1_EXTERNAL
#include <sha1dc/sha1.h>
+#elif defined(DC_SHA1_SUBMODULE)
+#include "sha1collisiondetection/lib/sha1.h"
#else
#include "sha1dc/sha1.h"
#endif
diff --git a/split-index.c b/split-index.c
index 7be5f74..3eb8ff1 100644
--- a/split-index.c
+++ b/split-index.c
@@ -238,6 +238,8 @@ void prepare_to_write_split_index(struct index_state *istate)
ALLOC_GROW(entries, nr_entries+1, nr_alloc);
entries[nr_entries++] = ce;
}
+ if (is_null_oid(&ce->oid))
+ istate->drop_cache_tree = 1;
}
}
diff --git a/sub-process.h b/sub-process.h
index 4970199..71b18ad 100644
--- a/sub-process.h
+++ b/sub-process.h
@@ -73,8 +73,8 @@ static inline struct child_process *subprocess_get_child_process(
}
/*
- * Perform the version and capability negotiation as described in the "Long
- * Running Filter Process" section of the gitattributes documentation using the
+ * Perform the version and capability negotiation as described in the
+ * "Handshake" section of long-running-process-protocol.txt using the
* given requested versions and capabilities. The "versions" and "capabilities"
* parameters are arrays terminated by a 0 or blank struct.
*
diff --git a/t/helper/test-dump-untracked-cache.c b/t/helper/test-dump-untracked-cache.c
index f752532..d7c55c2 100644
--- a/t/helper/test-dump-untracked-cache.c
+++ b/t/helper/test-dump-untracked-cache.c
@@ -54,8 +54,8 @@ int cmd_main(int ac, const char **av)
printf("no untracked cache\n");
return 0;
}
- printf("info/exclude %s\n", sha1_to_hex(uc->ss_info_exclude.sha1));
- printf("core.excludesfile %s\n", sha1_to_hex(uc->ss_excludes_file.sha1));
+ printf("info/exclude %s\n", oid_to_hex(&uc->ss_info_exclude.oid));
+ printf("core.excludesfile %s\n", oid_to_hex(&uc->ss_excludes_file.oid));
printf("exclude_per_dir %s\n", uc->exclude_per_dir);
printf("flags %08x\n", uc->dir_flags);
if (uc->root)
diff --git a/t/helper/test-run-command.c b/t/helper/test-run-command.c
index d24d157..153342e 100644
--- a/t/helper/test-run-command.c
+++ b/t/helper/test-run-command.c
@@ -56,6 +56,15 @@ int cmd_main(int argc, const char **argv)
if (argc < 3)
return 1;
+ while (!strcmp(argv[1], "env")) {
+ if (!argv[2])
+ die("env specifier without a value");
+ argv_array_push(&proc.env_array, argv[2]);
+ argv += 2;
+ argc -= 2;
+ }
+ if (argc < 3)
+ return 1;
proc.argv = (const char **)argv + 2;
if (!strcmp(argv[1], "start-command-ENOENT")) {
diff --git a/t/helper/test-wildmatch.c b/t/helper/test-wildmatch.c
index 921d7b3..66d33df 100644
--- a/t/helper/test-wildmatch.c
+++ b/t/helper/test-wildmatch.c
@@ -16,6 +16,8 @@ int cmd_main(int argc, const char **argv)
return !!wildmatch(argv[3], argv[2], WM_PATHNAME | WM_CASEFOLD);
else if (!strcmp(argv[1], "pathmatch"))
return !!wildmatch(argv[3], argv[2], 0);
+ else if (!strcmp(argv[1], "ipathmatch"))
+ return !!wildmatch(argv[3], argv[2], WM_CASEFOLD);
else
return 1;
}
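
The new "ipathmatch" mode is simply "pathmatch" with WM_CASEFOLD added, mirroring how "iwildmatch" relates to "wildmatch". A minimal usage sketch of the helper (the text and pattern below are illustrative, not taken from this patch):

	test-wildmatch pathmatch  'FOO/bar' 'foo/*'   # exits non-zero: case-sensitive, no match
	test-wildmatch ipathmatch 'FOO/bar' 'foo/*'   # exits 0: case is folded, match
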
diff --git a/t/lib-git-daemon.sh b/t/lib-git-daemon.sh
index 987d406..edbea2d 100644
--- a/t/lib-git-daemon.sh
+++ b/t/lib-git-daemon.sh
@@ -32,7 +32,8 @@ LIB_GIT_DAEMON_PORT=${LIB_GIT_DAEMON_PORT-${this_test#t}}
GIT_DAEMON_PID=
GIT_DAEMON_DOCUMENT_ROOT_PATH="$PWD"/repo
-GIT_DAEMON_URL=git://127.0.0.1:$LIB_GIT_DAEMON_PORT
+GIT_DAEMON_HOST_PORT=127.0.0.1:$LIB_GIT_DAEMON_PORT
+GIT_DAEMON_URL=git://$GIT_DAEMON_HOST_PORT
start_git_daemon() {
if test -n "$GIT_DAEMON_PID"
@@ -53,11 +54,19 @@ start_git_daemon() {
"$@" "$GIT_DAEMON_DOCUMENT_ROOT_PATH" \
>&3 2>git_daemon_output &
GIT_DAEMON_PID=$!
+ >daemon.log
{
- read line <&7
- echo >&4 "$line"
- cat <&7 >&4 &
- } 7<git_daemon_output &&
+ read -r line <&7
+ printf "%s\n" "$line"
+ printf >&4 "%s\n" "$line"
+ (
+ while read -r line <&7
+ do
+ printf "%s\n" "$line"
+ printf >&4 "%s\n" "$line"
+ done
+ ) &
+ } 7<git_daemon_output >>"$TRASH_DIRECTORY/daemon.log" &&
# Check expected output
if test x"$(expr "$line" : "\[[0-9]*\] \(.*\)")" != x"Ready to rumble"
@@ -90,3 +99,25 @@ stop_git_daemon() {
GIT_DAEMON_PID=
rm -f git_daemon_output
}
+
+# A stripped-down version of a netcat client, that connects to a "host:port"
+# given in $1, sends its stdin followed by EOF, then dumps the response (until
+# EOF) to stdout.
+fake_nc() {
+ if ! test_declared_prereq FAKENC
+ then
+ echo >&4 "fake_nc: need to declare FAKENC prerequisite"
+ return 127
+ fi
+ perl -Mstrict -MIO::Socket::INET -e '
+ my $s = IO::Socket::INET->new(shift)
+ or die "unable to open socket: $!";
+ print $s <STDIN>;
+ $s->shutdown(1);
+ print <$s>;
+ ' "$@"
+}
+
+test_lazy_prereq FAKENC '
+ perl -MIO::Socket::INET -e "exit 0"
+'
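
A rough usage sketch of the new fake_nc helper, assuming the calling test has already declared the FAKENC prerequisite and prepared a raw request in "input" (the variable comes from the daemon setup above):

	# send the request, keep the daemon's raw reply for later inspection
	fake_nc "$GIT_DAEMON_HOST_PORT" <input >output

The t5570 change further down in this diff drives it exactly this way, packetizing a git-upload-pack request by hand and depacketizing the reply.
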
diff --git a/t/perf/aggregate.perl b/t/perf/aggregate.perl
index 5c439f6..821cf14 100755
--- a/t/perf/aggregate.perl
+++ b/t/perf/aggregate.perl
@@ -1,6 +1,6 @@
#!/usr/bin/perl
-use lib '../../perl/blib/lib';
+use lib '../../perl/build/lib';
use strict;
use warnings;
use JSON;
@@ -36,7 +36,8 @@ sub format_times {
return $out;
}
-my (@dirs, %dirnames, %dirabbrevs, %prefixes, @tests, $codespeed);
+my (@dirs, %dirnames, %dirabbrevs, %prefixes, @tests,
+ $codespeed, $subsection, $reponame);
while (scalar @ARGV) {
my $arg = $ARGV[0];
my $dir;
@@ -45,6 +46,24 @@ while (scalar @ARGV) {
shift @ARGV;
next;
}
+ if ($arg eq "--subsection") {
+ shift @ARGV;
+ $subsection = $ARGV[0];
+ shift @ARGV;
+ if (! $subsection) {
+ die "empty subsection";
+ }
+ next;
+ }
+ if ($arg eq "--reponame") {
+ shift @ARGV;
+ $reponame = $ARGV[0];
+ shift @ARGV;
+ if (! $reponame) {
+ die "empty reponame";
+ }
+ next;
+ }
last if -f $arg or $arg eq "--";
if (! -d $arg) {
my $rev = Git::command_oneline(qw(rev-parse --verify), $arg);
@@ -76,10 +95,15 @@ if (not @tests) {
}
my $resultsdir = "test-results";
-my $results_section = "";
-if (exists $ENV{GIT_PERF_SUBSECTION} and $ENV{GIT_PERF_SUBSECTION} ne "") {
- $resultsdir .= "/" . $ENV{GIT_PERF_SUBSECTION};
- $results_section = $ENV{GIT_PERF_SUBSECTION};
+
+if (! $subsection and
+ exists $ENV{GIT_PERF_SUBSECTION} and
+ $ENV{GIT_PERF_SUBSECTION} ne "") {
+ $subsection = $ENV{GIT_PERF_SUBSECTION};
+}
+
+if ($subsection) {
+ $resultsdir .= "/" . $subsection;
}
my @subtests;
@@ -183,19 +207,21 @@ sub print_default_results {
}
sub print_codespeed_results {
- my ($results_section) = @_;
+ my ($subsection) = @_;
my $project = "Git";
my $executable = `uname -s -m`;
chomp $executable;
- if ($results_section ne "") {
- $executable .= ", " . $results_section;
+ if ($subsection) {
+ $executable .= ", " . $subsection;
}
my $environment;
- if (exists $ENV{GIT_PERF_REPO_NAME} and $ENV{GIT_PERF_REPO_NAME} ne "") {
+ if ($reponame) {
+ $environment = $reponame;
+ } elsif (exists $ENV{GIT_PERF_REPO_NAME} and $ENV{GIT_PERF_REPO_NAME} ne "") {
$environment = $ENV{GIT_PERF_REPO_NAME};
} elsif (exists $ENV{GIT_TEST_INSTALLED} and $ENV{GIT_TEST_INSTALLED} ne "") {
$environment = $ENV{GIT_TEST_INSTALLED};
@@ -227,13 +253,13 @@ sub print_codespeed_results {
}
}
- print to_json(\@data, {utf8 => 1, pretty => 1}), "\n";
+ print to_json(\@data, {utf8 => 1, pretty => 1, canonical => 1}), "\n";
}
binmode STDOUT, ":utf8" or die "PANIC on binmode: $!";
if ($codespeed) {
- print_codespeed_results($results_section);
+ print_codespeed_results($subsection);
} else {
print_default_results();
}
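
A hedged invocation sketch showing the new switches, run from t/perf; the build directories, subsection and repo names, and test script are placeholders, and --codespeed was already supported before this change:

	./aggregate.perl --codespeed --subsection with-commit-graph \
		--reponame linux.git build/old build/new p0001-rev-list.sh

--subsection and --reponame take precedence over the GIT_PERF_SUBSECTION and GIT_PERF_REPO_NAME environment variables, which remain honored as fallbacks.
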
diff --git a/t/t0061-run-command.sh b/t/t0061-run-command.sh
index e473917..24c92b6 100755
--- a/t/t0061-run-command.sh
+++ b/t/t0061-run-command.sh
@@ -141,4 +141,41 @@ test_expect_success 'run_command outputs ' '
test_cmp expect actual
'
+test_trace () {
+ expect="$1"
+ shift
+ GIT_TRACE=1 test-run-command "$@" run-command true 2>&1 >/dev/null | \
+ sed 's/.* run_command: //' >actual &&
+ echo "$expect true" >expect &&
+ test_cmp expect actual
+}
+
+test_expect_success 'GIT_TRACE with environment variables' '
+ test_trace "abc=1 def=2" env abc=1 env def=2 &&
+ test_trace "abc=2" env abc env abc=1 env abc=2 &&
+ test_trace "abc=2" env abc env abc=2 &&
+ (
+ abc=1 && export abc &&
+ test_trace "def=1" env abc=1 env def=1
+ ) &&
+ (
+ abc=1 && export abc &&
+ test_trace "def=1" env abc env abc=1 env def=1
+ ) &&
+ test_trace "def=1" env non-exist env def=1 &&
+ test_trace "abc=2" env abc=1 env abc env abc=2 &&
+ (
+ abc=1 def=2 && export abc def &&
+ test_trace "unset abc def;" env abc env def
+ ) &&
+ (
+ abc=1 def=2 && export abc def &&
+ test_trace "unset def; abc=3" env abc env def env abc=3
+ ) &&
+ (
+ abc=1 && export abc &&
+ test_trace "unset abc;" env abc=2 env abc
+ )
+'
+
test_done
diff --git a/t/t0410-partial-clone.sh b/t/t0410-partial-clone.sh
new file mode 100755
index 0000000..cc18b75
--- /dev/null
+++ b/t/t0410-partial-clone.sh
@@ -0,0 +1,343 @@
+#!/bin/sh
+
+test_description='partial clone'
+
+. ./test-lib.sh
+
+delete_object () {
+ rm $1/.git/objects/$(echo $2 | sed -e 's|^..|&/|')
+}
+
+pack_as_from_promisor () {
+ HASH=$(git -C repo pack-objects .git/objects/pack/pack) &&
+ >repo/.git/objects/pack/pack-$HASH.promisor &&
+ echo $HASH
+}
+
+promise_and_delete () {
+ HASH=$(git -C repo rev-parse "$1") &&
+ git -C repo tag -a -m message my_annotated_tag "$HASH" &&
+ git -C repo rev-parse my_annotated_tag | pack_as_from_promisor &&
+ # tag -d prints a message to stdout, so redirect it
+ git -C repo tag -d my_annotated_tag >/dev/null &&
+ delete_object repo "$HASH"
+}
+
+test_expect_success 'missing reflog object, but promised by a commit, passes fsck' '
+ test_create_repo repo &&
+ test_commit -C repo my_commit &&
+
+ A=$(git -C repo commit-tree -m a HEAD^{tree}) &&
+ C=$(git -C repo commit-tree -m c -p $A HEAD^{tree}) &&
+
+ # Reference $A only from reflog, and delete it
+ git -C repo branch my_branch "$A" &&
+ git -C repo branch -f my_branch my_commit &&
+ delete_object repo "$A" &&
+
+ # State that we got $C, which refers to $A, from promisor
+ printf "$C\n" | pack_as_from_promisor &&
+
+ # Normally, it fails
+ test_must_fail git -C repo fsck &&
+
+ # But with the extension, it succeeds
+ git -C repo config core.repositoryformatversion 1 &&
+ git -C repo config extensions.partialclone "arbitrary string" &&
+ git -C repo fsck
+'
+
+test_expect_success 'missing reflog object, but promised by a tag, passes fsck' '
+ rm -rf repo &&
+ test_create_repo repo &&
+ test_commit -C repo my_commit &&
+
+ A=$(git -C repo commit-tree -m a HEAD^{tree}) &&
+ git -C repo tag -a -m d my_tag_name $A &&
+ T=$(git -C repo rev-parse my_tag_name) &&
+ git -C repo tag -d my_tag_name &&
+
+ # Reference $A only from reflog, and delete it
+ git -C repo branch my_branch "$A" &&
+ git -C repo branch -f my_branch my_commit &&
+ delete_object repo "$A" &&
+
+ # State that we got $T, which refers to $A, from promisor
+ printf "$T\n" | pack_as_from_promisor &&
+
+ git -C repo config core.repositoryformatversion 1 &&
+ git -C repo config extensions.partialclone "arbitrary string" &&
+ git -C repo fsck
+'
+
+test_expect_success 'missing reflog object alone fails fsck, even with extension set' '
+ rm -rf repo &&
+ test_create_repo repo &&
+ test_commit -C repo my_commit &&
+
+ A=$(git -C repo commit-tree -m a HEAD^{tree}) &&
+ B=$(git -C repo commit-tree -m b HEAD^{tree}) &&
+
+ # Reference $A only from reflog, and delete it
+ git -C repo branch my_branch "$A" &&
+ git -C repo branch -f my_branch my_commit &&
+ delete_object repo "$A" &&
+
+ git -C repo config core.repositoryformatversion 1 &&
+ git -C repo config extensions.partialclone "arbitrary string" &&
+ test_must_fail git -C repo fsck
+'
+
+test_expect_success 'missing ref object, but promised, passes fsck' '
+ rm -rf repo &&
+ test_create_repo repo &&
+ test_commit -C repo my_commit &&
+
+ A=$(git -C repo commit-tree -m a HEAD^{tree}) &&
+
+ # Reference $A only from ref
+ git -C repo branch my_branch "$A" &&
+ promise_and_delete "$A" &&
+
+ git -C repo config core.repositoryformatversion 1 &&
+ git -C repo config extensions.partialclone "arbitrary string" &&
+ git -C repo fsck
+'
+
+test_expect_success 'missing object, but promised, passes fsck' '
+ rm -rf repo &&
+ test_create_repo repo &&
+ test_commit -C repo 1 &&
+ test_commit -C repo 2 &&
+ test_commit -C repo 3 &&
+ git -C repo tag -a annotated_tag -m "annotated tag" &&
+
+ C=$(git -C repo rev-parse 1) &&
+ T=$(git -C repo rev-parse 2^{tree}) &&
+ B=$(git hash-object repo/3.t) &&
+ AT=$(git -C repo rev-parse annotated_tag) &&
+
+ promise_and_delete "$C" &&
+ promise_and_delete "$T" &&
+ promise_and_delete "$B" &&
+ promise_and_delete "$AT" &&
+
+ git -C repo config core.repositoryformatversion 1 &&
+ git -C repo config extensions.partialclone "arbitrary string" &&
+ git -C repo fsck
+'
+
+test_expect_success 'missing CLI object, but promised, passes fsck' '
+ rm -rf repo &&
+ test_create_repo repo &&
+ test_commit -C repo my_commit &&
+
+ A=$(git -C repo commit-tree -m a HEAD^{tree}) &&
+ promise_and_delete "$A" &&
+
+ git -C repo config core.repositoryformatversion 1 &&
+ git -C repo config extensions.partialclone "arbitrary string" &&
+ git -C repo fsck "$A"
+'
+
+test_expect_success 'fetching of missing objects' '
+ rm -rf repo &&
+ test_create_repo server &&
+ test_commit -C server foo &&
+ git -C server repack -a -d --write-bitmap-index &&
+
+ git clone "file://$(pwd)/server" repo &&
+ HASH=$(git -C repo rev-parse foo) &&
+ rm -rf repo/.git/objects/* &&
+
+ git -C repo config core.repositoryformatversion 1 &&
+ git -C repo config extensions.partialclone "origin" &&
+ git -C repo cat-file -p "$HASH" &&
+
+ # Ensure that the .promisor file is written, and check that its
+ # associated packfile contains the object
+ ls repo/.git/objects/pack/pack-*.promisor >promisorlist &&
+ test_line_count = 1 promisorlist &&
+ IDX=$(cat promisorlist | sed "s/promisor$/idx/") &&
+ git verify-pack --verbose "$IDX" | grep "$HASH"
+'
+
+test_expect_success 'rev-list stops traversal at missing and promised commit' '
+ rm -rf repo &&
+ test_create_repo repo &&
+ test_commit -C repo foo &&
+ test_commit -C repo bar &&
+
+ FOO=$(git -C repo rev-parse foo) &&
+ promise_and_delete "$FOO" &&
+
+ git -C repo config core.repositoryformatversion 1 &&
+ git -C repo config extensions.partialclone "arbitrary string" &&
+ git -C repo rev-list --exclude-promisor-objects --objects bar >out &&
+ grep $(git -C repo rev-parse bar) out &&
+ ! grep $FOO out
+'
+
+test_expect_success 'rev-list stops traversal at missing and promised tree' '
+ rm -rf repo &&
+ test_create_repo repo &&
+ test_commit -C repo foo &&
+ mkdir repo/a_dir &&
+ echo something >repo/a_dir/something &&
+ git -C repo add a_dir/something &&
+ git -C repo commit -m bar &&
+
+ # foo^{tree} (tree referenced from commit)
+ TREE=$(git -C repo rev-parse foo^{tree}) &&
+
+ # a tree referenced by HEAD^{tree} (tree referenced from tree)
+ TREE2=$(git -C repo ls-tree HEAD^{tree} | grep " tree " | head -1 | cut -b13-52) &&
+
+ promise_and_delete "$TREE" &&
+ promise_and_delete "$TREE2" &&
+
+ git -C repo config core.repositoryformatversion 1 &&
+ git -C repo config extensions.partialclone "arbitrary string" &&
+ git -C repo rev-list --exclude-promisor-objects --objects HEAD >out &&
+ grep $(git -C repo rev-parse foo) out &&
+ ! grep $TREE out &&
+ grep $(git -C repo rev-parse HEAD) out &&
+ ! grep $TREE2 out
+'
+
+test_expect_success 'rev-list stops traversal at missing and promised blob' '
+ rm -rf repo &&
+ test_create_repo repo &&
+ echo something >repo/something &&
+ git -C repo add something &&
+ git -C repo commit -m foo &&
+
+ BLOB=$(git -C repo hash-object -w something) &&
+ promise_and_delete "$BLOB" &&
+
+ git -C repo config core.repositoryformatversion 1 &&
+ git -C repo config extensions.partialclone "arbitrary string" &&
+ git -C repo rev-list --exclude-promisor-objects --objects HEAD >out &&
+ grep $(git -C repo rev-parse HEAD) out &&
+ ! grep $BLOB out
+'
+
+test_expect_success 'rev-list stops traversal at promisor commit, tree, and blob' '
+ rm -rf repo &&
+ test_create_repo repo &&
+ test_commit -C repo foo &&
+ test_commit -C repo bar &&
+ test_commit -C repo baz &&
+
+ COMMIT=$(git -C repo rev-parse foo) &&
+ TREE=$(git -C repo rev-parse bar^{tree}) &&
+ BLOB=$(git hash-object repo/baz.t) &&
+ printf "%s\n%s\n%s\n" $COMMIT $TREE $BLOB | pack_as_from_promisor &&
+
+ git -C repo config core.repositoryformatversion 1 &&
+ git -C repo config extensions.partialclone "arbitrary string" &&
+ git -C repo rev-list --exclude-promisor-objects --objects HEAD >out &&
+ ! grep $COMMIT out &&
+ ! grep $TREE out &&
+ ! grep $BLOB out &&
+ grep $(git -C repo rev-parse bar) out # sanity check that some walking was done
+'
+
+test_expect_success 'rev-list accepts missing and promised objects on command line' '
+ rm -rf repo &&
+ test_create_repo repo &&
+ test_commit -C repo foo &&
+ test_commit -C repo bar &&
+ test_commit -C repo baz &&
+
+ COMMIT=$(git -C repo rev-parse foo) &&
+ TREE=$(git -C repo rev-parse bar^{tree}) &&
+ BLOB=$(git hash-object repo/baz.t) &&
+
+ promise_and_delete $COMMIT &&
+ promise_and_delete $TREE &&
+ promise_and_delete $BLOB &&
+
+ git -C repo config core.repositoryformatversion 1 &&
+ git -C repo config extensions.partialclone "arbitrary string" &&
+ git -C repo rev-list --exclude-promisor-objects --objects "$COMMIT" "$TREE" "$BLOB"
+'
+
+test_expect_success 'gc does not repack promisor objects' '
+ rm -rf repo &&
+ test_create_repo repo &&
+ test_commit -C repo my_commit &&
+
+ TREE_HASH=$(git -C repo rev-parse HEAD^{tree}) &&
+ HASH=$(printf "$TREE_HASH\n" | pack_as_from_promisor) &&
+
+ git -C repo config core.repositoryformatversion 1 &&
+ git -C repo config extensions.partialclone "arbitrary string" &&
+ git -C repo gc &&
+
+ # Ensure that the promisor packfile still exists, and remove it
+ test -e repo/.git/objects/pack/pack-$HASH.pack &&
+ rm repo/.git/objects/pack/pack-$HASH.* &&
+
+ # Ensure that the single other pack contains the commit, but not the tree
+ ls repo/.git/objects/pack/pack-*.pack >packlist &&
+ test_line_count = 1 packlist &&
+ git verify-pack repo/.git/objects/pack/pack-*.pack -v >out &&
+ grep "$(git -C repo rev-parse HEAD)" out &&
+ ! grep "$TREE_HASH" out
+'
+
+test_expect_success 'gc stops traversal when a missing but promised object is reached' '
+ rm -rf repo &&
+ test_create_repo repo &&
+ test_commit -C repo my_commit &&
+
+ TREE_HASH=$(git -C repo rev-parse HEAD^{tree}) &&
+ HASH=$(promise_and_delete $TREE_HASH) &&
+
+ git -C repo config core.repositoryformatversion 1 &&
+ git -C repo config extensions.partialclone "arbitrary string" &&
+ git -C repo gc &&
+
+ # Ensure that the promisor packfile still exists, and remove it
+ test -e repo/.git/objects/pack/pack-$HASH.pack &&
+ rm repo/.git/objects/pack/pack-$HASH.* &&
+
+ # Ensure that the single other pack contains the commit, but not the tree
+ ls repo/.git/objects/pack/pack-*.pack >packlist &&
+ test_line_count = 1 packlist &&
+ git verify-pack repo/.git/objects/pack/pack-*.pack -v >out &&
+ grep "$(git -C repo rev-parse HEAD)" out &&
+ ! grep "$TREE_HASH" out
+'
+
+LIB_HTTPD_PORT=12345 # default port, 410, cannot be used as non-root
+. "$TEST_DIRECTORY"/lib-httpd.sh
+start_httpd
+
+test_expect_success 'fetching of missing objects from an HTTP server' '
+ rm -rf repo &&
+ SERVER="$HTTPD_DOCUMENT_ROOT_PATH/server" &&
+ test_create_repo "$SERVER" &&
+ test_commit -C "$SERVER" foo &&
+ git -C "$SERVER" repack -a -d --write-bitmap-index &&
+
+ git clone $HTTPD_URL/smart/server repo &&
+ HASH=$(git -C repo rev-parse foo) &&
+ rm -rf repo/.git/objects/* &&
+
+ git -C repo config core.repositoryformatversion 1 &&
+ git -C repo config extensions.partialclone "origin" &&
+ git -C repo cat-file -p "$HASH" &&
+
+ # Ensure that the .promisor file is written, and check that its
+ # associated packfile contains the object
+ ls repo/.git/objects/pack/pack-*.promisor >promisorlist &&
+ test_line_count = 1 promisorlist &&
+ IDX=$(cat promisorlist | sed "s/promisor$/idx/") &&
+ git verify-pack --verbose "$IDX" | grep "$HASH"
+'
+
+stop_httpd
+
+test_done
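
The tests above lean on two conventions that are easy to miss in the noise; a hedged recap (the repository path and pack hash are placeholders):

	# opt the repository into partial-clone semantics
	git -C repo config core.repositoryformatversion 1 &&
	git -C repo config extensions.partialclone origin &&

	# mark a pack as having come from the promisor remote: an empty
	# .promisor file next to the .pack/.idx pair is enough
	>repo/.git/objects/pack/pack-$HASH.promisor

For local-only tests the extensions.partialclone value can be any marker string, as the fsck and rev-list tests do with "arbitrary string"; only the fetch tests need it to name a real remote.
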
diff --git a/t/t1700-split-index.sh b/t/t1700-split-index.sh
index af9b847..a66936f 100755
--- a/t/t1700-split-index.sh
+++ b/t/t1700-split-index.sh
@@ -401,4 +401,42 @@ done <<\EOF
0642 -rw-r---w-
EOF
+test_expect_success POSIXPERM,SANITY 'graceful handling when splitting index is not allowed' '
+ test_create_repo ro &&
+ (
+ cd ro &&
+ test_commit initial &&
+ git update-index --split-index &&
+ test -f .git/sharedindex.*
+ ) &&
+ cp ro/.git/index new-index &&
+ test_when_finished "chmod u+w ro/.git" &&
+ chmod u-w ro/.git &&
+ GIT_INDEX_FILE="$(pwd)/new-index" git -C ro update-index --split-index &&
+ chmod u+w ro/.git &&
+ rm ro/.git/sharedindex.* &&
+ GIT_INDEX_FILE=new-index git ls-files >actual &&
+ echo initial.t >expected &&
+ test_cmp expected actual
+'
+
+test_expect_success 'writing split index with null sha1 does not write cache tree' '
+ git config core.splitIndex true &&
+ git config splitIndex.maxPercentChange 0 &&
+ git commit -m "commit" &&
+ {
+ git ls-tree HEAD &&
+ printf "160000 commit $_z40\\tbroken\\n"
+ } >broken-tree &&
+ echo "add broken entry" >msg &&
+
+ tree=$(git mktree <broken-tree) &&
+ test_tick &&
+ commit=$(git commit-tree $tree -p HEAD <msg) &&
+ git update-ref HEAD "$commit" &&
+ GIT_ALLOW_NULL_SHA1=1 git reset --hard &&
+ (test-dump-cache-tree >cache-tree.out || true) &&
+ test_line_count = 0 cache-tree.out
+'
+
test_done
diff --git a/t/t3070-wildmatch.sh b/t/t3070-wildmatch.sh
index 163a14a..c1fc6ca 100755
--- a/t/t3070-wildmatch.sh
+++ b/t/t3070-wildmatch.sh
@@ -4,266 +4,431 @@ test_description='wildmatch tests'
. ./test-lib.sh
-match() {
- if [ $1 = 1 ]; then
- test_expect_success "wildmatch: match '$3' '$4'" "
- test-wildmatch wildmatch '$3' '$4'
- "
- else
- test_expect_success "wildmatch: no match '$3' '$4'" "
- ! test-wildmatch wildmatch '$3' '$4'
- "
- fi
+should_create_test_file() {
+ file=$1
+
+ case $file in
+ # `touch .` will succeed but obviously not do what we intend
+ # here.
+ ".")
+ return 1
+ ;;
+ # We cannot create a file with an empty filename.
+ "")
+ return 1
+ ;;
+ # The tests that are testing that e.g. foo//bar is matched by
+ # foo/*/bar can't be tested on filesystems since there's no
+ # way we're getting a double slash.
+ *//*)
+ return 1
+ ;;
+ # When testing the difference between foo/bar and foo/bar/ we
+ # can't test the latter.
+ */)
+ return 1
+ ;;
+ # On Windows, \ in paths is silently converted to /, which
+ # would result in the "touch" below working, but the test
+ # itself failing. See 6fd1106aa4 ("t3700: Skip a test with
+ # backslashes in pathspec", 2009-03-13) for prior art and
+ # details.
+ *\\*)
+ if ! test_have_prereq BSLASHPSPEC
+ then
+ return 1
+ fi
+ # NOTE: The ;;& bash extension is not portable, so
+ # this test needs to be at the end of the pattern
+ # list.
+ #
+ # If we want to add more conditional returns we either
+ # need a new case statement, or turn this whole thing
+ # into a series of "if" tests.
+ ;;
+ esac
+
+
+ # On Windows proper (i.e. not Cygwin) many file names which
+ # under Cygwin would be emulated don't work.
+ if test_have_prereq MINGW
+ then
+ case $file in
+ " ")
+ # Files called " " are forbidden on Windows
+ return 1
+ ;;
+ *\<*|*\>*|*:*|*\"*|*\|*|*\?*|*\**)
+ # Files with various special characters aren't
+ # allowed on Windows. Sourced from
+ # https://stackoverflow.com/a/31976060
+ return 1
+ ;;
+ esac
+ fi
+
+ return 0
}
-imatch() {
- if [ $1 = 1 ]; then
- test_expect_success "iwildmatch: match '$2' '$3'" "
- test-wildmatch iwildmatch '$2' '$3'
- "
- else
- test_expect_success "iwildmatch: no match '$2' '$3'" "
- ! test-wildmatch iwildmatch '$2' '$3'
- "
- fi
+match_with_function() {
+ text=$1
+ pattern=$2
+ match_expect=$3
+ match_function=$4
+
+ if test "$match_expect" = 1
+ then
+ test_expect_success "$match_function: match '$text' '$pattern'" "
+ test-wildmatch $match_function '$text' '$pattern'
+ "
+ elif test "$match_expect" = 0
+ then
+ test_expect_success "$match_function: no match '$text' '$pattern'" "
+ test_must_fail test-wildmatch $match_function '$text' '$pattern'
+ "
+ else
+ test_expect_success "PANIC: Test framework error. Unknown matches value $match_expect" 'false'
+ fi
+
+}
+
+match_with_ls_files() {
+ text=$1
+ pattern=$2
+ match_expect=$3
+ match_function=$4
+ ls_files_args=$5
+
+ match_stdout_stderr_cmp="
+ tr -d '\0' <actual.raw >actual &&
+ >expect.err &&
+ test_cmp expect.err actual.err &&
+ test_cmp expect actual"
+
+ if test "$match_expect" = 'E'
+ then
+ if test -e .git/created_test_file
+ then
+ test_expect_success EXPENSIVE_ON_WINDOWS "$match_function (via ls-files): match dies on '$pattern' '$text'" "
+ printf '%s' '$text' >expect &&
+ test_must_fail git$ls_files_args ls-files -z -- '$pattern'
+ "
+ else
+ test_expect_failure EXPENSIVE_ON_WINDOWS "$match_function (via ls-files): match skip '$pattern' '$text'" 'false'
+ fi
+ elif test "$match_expect" = 1
+ then
+ if test -e .git/created_test_file
+ then
+ test_expect_success EXPENSIVE_ON_WINDOWS "$match_function (via ls-files): match '$pattern' '$text'" "
+ printf '%s' '$text' >expect &&
+ git$ls_files_args ls-files -z -- '$pattern' >actual.raw 2>actual.err &&
+ $match_stdout_stderr_cmp
+ "
+ else
+ test_expect_failure EXPENSIVE_ON_WINDOWS "$match_function (via ls-files): match skip '$pattern' '$text'" 'false'
+ fi
+ elif test "$match_expect" = 0
+ then
+ if test -e .git/created_test_file
+ then
+ test_expect_success EXPENSIVE_ON_WINDOWS "$match_function (via ls-files): no match '$pattern' '$text'" "
+ >expect &&
+ git$ls_files_args ls-files -z -- '$pattern' >actual.raw 2>actual.err &&
+ $match_stdout_stderr_cmp
+ "
+ else
+ test_expect_failure EXPENSIVE_ON_WINDOWS "$match_function (via ls-files): no match skip '$pattern' '$text'" 'false'
+ fi
+ else
+ test_expect_success "PANIC: Test framework error. Unknown matches value $match_expect" 'false'
+ fi
}
-pathmatch() {
- if [ $1 = 1 ]; then
- test_expect_success "pathmatch: match '$2' '$3'" "
- test-wildmatch pathmatch '$2' '$3'
- "
- else
- test_expect_success "pathmatch: no match '$2' '$3'" "
- ! test-wildmatch pathmatch '$2' '$3'
- "
- fi
+match() {
+ if test "$#" = 6
+ then
+ # When test-wildmatch and git ls-files produce the same
+ # result.
+ match_glob=$1
+ match_file_glob=$match_glob
+ match_iglob=$2
+ match_file_iglob=$match_iglob
+ match_pathmatch=$3
+ match_file_pathmatch=$match_pathmatch
+ match_pathmatchi=$4
+ match_file_pathmatchi=$match_pathmatchi
+ text=$5
+ pattern=$6
+ elif test "$#" = 10
+ then
+ match_glob=$1
+ match_iglob=$2
+ match_pathmatch=$3
+ match_pathmatchi=$4
+ match_file_glob=$5
+ match_file_iglob=$6
+ match_file_pathmatch=$7
+ match_file_pathmatchi=$8
+ text=$9
+ pattern=${10}
+ fi
+
+ test_expect_success EXPENSIVE_ON_WINDOWS 'cleanup after previous file test' '
+ if test -e .git/created_test_file
+ then
+ git reset &&
+ git clean -df
+ fi
+ '
+
+ printf '%s' "$text" >.git/expected_test_file
+
+ test_expect_success EXPENSIVE_ON_WINDOWS "setup match file test for $text" '
+ file=$(cat .git/expected_test_file) &&
+ if should_create_test_file "$file"
+ then
+ dirs=${file%/*}
+ if test "$file" != "$dirs"
+ then
+ mkdir -p -- "$dirs" &&
+ touch -- "./$text"
+ else
+ touch -- "./$file"
+ fi &&
+ git add -A &&
+ printf "%s" "$file" >.git/created_test_file
+ elif test -e .git/created_test_file
+ then
+ rm .git/created_test_file
+ fi
+ '
+
+ # $1: Case sensitive glob match: test-wildmatch & ls-files
+ match_with_function "$text" "$pattern" $match_glob "wildmatch"
+ match_with_ls_files "$text" "$pattern" $match_file_glob "wildmatch" " --glob-pathspecs"
+
+ # $2: Case insensitive glob match: test-wildmatch & ls-files
+ match_with_function "$text" "$pattern" $match_iglob "iwildmatch"
+ match_with_ls_files "$text" "$pattern" $match_file_iglob "iwildmatch" " --glob-pathspecs --icase-pathspecs"
+
+ # $3: Case sensitive path match: test-wildmatch & ls-files
+ match_with_function "$text" "$pattern" $match_pathmatch "pathmatch"
+ match_with_ls_files "$text" "$pattern" $match_file_pathmatch "pathmatch" ""
+
+ # $4: Case insensitive path match: test-wildmatch & ls-files
+ match_with_function "$text" "$pattern" $match_pathmatchi "ipathmatch"
+ match_with_ls_files "$text" "$pattern" $match_file_pathmatchi "ipathmatch" " --icase-pathspecs"
}
-# Basic wildmat features
-match 1 1 foo foo
-match 0 0 foo bar
-match 1 1 '' ""
-match 1 1 foo '???'
-match 0 0 foo '??'
-match 1 1 foo '*'
-match 1 1 foo 'f*'
-match 0 0 foo '*f'
-match 1 1 foo '*foo*'
-match 1 1 foobar '*ob*a*r*'
-match 1 1 aaaaaaabababab '*ab'
-match 1 1 'foo*' 'foo\*'
-match 0 0 foobar 'foo\*bar'
-match 1 1 'f\oo' 'f\\oo'
-match 1 1 ball '*[al]?'
-match 0 0 ten '[ten]'
-match 0 1 ten '**[!te]'
-match 0 0 ten '**[!ten]'
-match 1 1 ten 't[a-g]n'
-match 0 0 ten 't[!a-g]n'
-match 1 1 ton 't[!a-g]n'
-match 1 1 ton 't[^a-g]n'
-match 1 x 'a]b' 'a[]]b'
-match 1 x a-b 'a[]-]b'
-match 1 x 'a]b' 'a[]-]b'
-match 0 x aab 'a[]-]b'
-match 1 x aab 'a[]a-]b'
-match 1 1 ']' ']'
+# Basic wildmatch features
+match 1 1 1 1 foo foo
+match 0 0 0 0 foo bar
+match 1 1 1 1 '' ""
+match 1 1 1 1 foo '???'
+match 0 0 0 0 foo '??'
+match 1 1 1 1 foo '*'
+match 1 1 1 1 foo 'f*'
+match 0 0 0 0 foo '*f'
+match 1 1 1 1 foo '*foo*'
+match 1 1 1 1 foobar '*ob*a*r*'
+match 1 1 1 1 aaaaaaabababab '*ab'
+match 1 1 1 1 'foo*' 'foo\*'
+match 0 0 0 0 foobar 'foo\*bar'
+match 1 1 1 1 'f\oo' 'f\\oo'
+match 1 1 1 1 ball '*[al]?'
+match 0 0 0 0 ten '[ten]'
+match 0 0 1 1 ten '**[!te]'
+match 0 0 0 0 ten '**[!ten]'
+match 1 1 1 1 ten 't[a-g]n'
+match 0 0 0 0 ten 't[!a-g]n'
+match 1 1 1 1 ton 't[!a-g]n'
+match 1 1 1 1 ton 't[^a-g]n'
+match 1 1 1 1 'a]b' 'a[]]b'
+match 1 1 1 1 a-b 'a[]-]b'
+match 1 1 1 1 'a]b' 'a[]-]b'
+match 0 0 0 0 aab 'a[]-]b'
+match 1 1 1 1 aab 'a[]a-]b'
+match 1 1 1 1 ']' ']'
# Extended slash-matching features
-match 0 0 'foo/baz/bar' 'foo*bar'
-match 0 0 'foo/baz/bar' 'foo**bar'
-match 0 1 'foobazbar' 'foo**bar'
-match 1 1 'foo/baz/bar' 'foo/**/bar'
-match 1 0 'foo/baz/bar' 'foo/**/**/bar'
-match 1 0 'foo/b/a/z/bar' 'foo/**/bar'
-match 1 0 'foo/b/a/z/bar' 'foo/**/**/bar'
-match 1 0 'foo/bar' 'foo/**/bar'
-match 1 0 'foo/bar' 'foo/**/**/bar'
-match 0 0 'foo/bar' 'foo?bar'
-match 0 0 'foo/bar' 'foo[/]bar'
-match 0 0 'foo/bar' 'foo[^a-z]bar'
-match 0 0 'foo/bar' 'f[^eiu][^eiu][^eiu][^eiu][^eiu]r'
-match 1 1 'foo-bar' 'f[^eiu][^eiu][^eiu][^eiu][^eiu]r'
-match 1 0 'foo' '**/foo'
-match 1 x 'XXX/foo' '**/foo'
-match 1 0 'bar/baz/foo' '**/foo'
-match 0 0 'bar/baz/foo' '*/foo'
-match 0 0 'foo/bar/baz' '**/bar*'
-match 1 0 'deep/foo/bar/baz' '**/bar/*'
-match 0 0 'deep/foo/bar/baz/' '**/bar/*'
-match 1 0 'deep/foo/bar/baz/' '**/bar/**'
-match 0 0 'deep/foo/bar' '**/bar/*'
-match 1 0 'deep/foo/bar/' '**/bar/**'
-match 0 0 'foo/bar/baz' '**/bar**'
-match 1 0 'foo/bar/baz/x' '*/bar/**'
-match 0 0 'deep/foo/bar/baz/x' '*/bar/**'
-match 1 0 'deep/foo/bar/baz/x' '**/bar/*/*'
+match 0 0 1 1 'foo/baz/bar' 'foo*bar'
+match 0 0 1 1 'foo/baz/bar' 'foo**bar'
+match 0 0 1 1 'foobazbar' 'foo**bar'
+match 1 1 1 1 'foo/baz/bar' 'foo/**/bar'
+match 1 1 0 0 'foo/baz/bar' 'foo/**/**/bar'
+match 1 1 1 1 'foo/b/a/z/bar' 'foo/**/bar'
+match 1 1 1 1 'foo/b/a/z/bar' 'foo/**/**/bar'
+match 1 1 0 0 'foo/bar' 'foo/**/bar'
+match 1 1 0 0 'foo/bar' 'foo/**/**/bar'
+match 0 0 1 1 'foo/bar' 'foo?bar'
+match 0 0 1 1 'foo/bar' 'foo[/]bar'
+match 0 0 1 1 'foo/bar' 'foo[^a-z]bar'
+match 0 0 1 1 'foo/bar' 'f[^eiu][^eiu][^eiu][^eiu][^eiu]r'
+match 1 1 1 1 'foo-bar' 'f[^eiu][^eiu][^eiu][^eiu][^eiu]r'
+match 1 1 0 0 'foo' '**/foo'
+match 1 1 1 1 'XXX/foo' '**/foo'
+match 1 1 1 1 'bar/baz/foo' '**/foo'
+match 0 0 1 1 'bar/baz/foo' '*/foo'
+match 0 0 1 1 'foo/bar/baz' '**/bar*'
+match 1 1 1 1 'deep/foo/bar/baz' '**/bar/*'
+match 0 0 1 1 'deep/foo/bar/baz/' '**/bar/*'
+match 1 1 1 1 'deep/foo/bar/baz/' '**/bar/**'
+match 0 0 0 0 'deep/foo/bar' '**/bar/*'
+match 1 1 1 1 'deep/foo/bar/' '**/bar/**'
+match 0 0 1 1 'foo/bar/baz' '**/bar**'
+match 1 1 1 1 'foo/bar/baz/x' '*/bar/**'
+match 0 0 1 1 'deep/foo/bar/baz/x' '*/bar/**'
+match 1 1 1 1 'deep/foo/bar/baz/x' '**/bar/*/*'
# Various additional tests
-match 0 0 'acrt' 'a[c-c]st'
-match 1 1 'acrt' 'a[c-c]rt'
-match 0 0 ']' '[!]-]'
-match 1 x 'a' '[!]-]'
-match 0 0 '' '\'
-match 0 x '\' '\'
-match 0 x 'XXX/\' '*/\'
-match 1 x 'XXX/\' '*/\\'
-match 1 1 'foo' 'foo'
-match 1 1 '@foo' '@foo'
-match 0 0 'foo' '@foo'
-match 1 1 '[ab]' '\[ab]'
-match 1 1 '[ab]' '[[]ab]'
-match 1 x '[ab]' '[[:]ab]'
-match 0 x '[ab]' '[[::]ab]'
-match 1 x '[ab]' '[[:digit]ab]'
-match 1 x '[ab]' '[\[:]ab]'
-match 1 1 '?a?b' '\??\?b'
-match 1 1 'abc' '\a\b\c'
-match 0 0 'foo' ''
-match 1 0 'foo/bar/baz/to' '**/t[o]'
+match 0 0 0 0 'acrt' 'a[c-c]st'
+match 1 1 1 1 'acrt' 'a[c-c]rt'
+match 0 0 0 0 ']' '[!]-]'
+match 1 1 1 1 'a' '[!]-]'
+match 0 0 0 0 '' '\'
+match 0 0 0 0 \
+ 1 1 1 1 '\' '\'
+match 0 0 0 0 'XXX/\' '*/\'
+match 1 1 1 1 'XXX/\' '*/\\'
+match 1 1 1 1 'foo' 'foo'
+match 1 1 1 1 '@foo' '@foo'
+match 0 0 0 0 'foo' '@foo'
+match 1 1 1 1 '[ab]' '\[ab]'
+match 1 1 1 1 '[ab]' '[[]ab]'
+match 1 1 1 1 '[ab]' '[[:]ab]'
+match 0 0 0 0 '[ab]' '[[::]ab]'
+match 1 1 1 1 '[ab]' '[[:digit]ab]'
+match 1 1 1 1 '[ab]' '[\[:]ab]'
+match 1 1 1 1 '?a?b' '\??\?b'
+match 1 1 1 1 'abc' '\a\b\c'
+match 0 0 0 0 \
+ E E E E 'foo' ''
+match 1 1 1 1 'foo/bar/baz/to' '**/t[o]'
# Character class tests
-match 1 x 'a1B' '[[:alpha:]][[:digit:]][[:upper:]]'
-match 0 x 'a' '[[:digit:][:upper:][:space:]]'
-match 1 x 'A' '[[:digit:][:upper:][:space:]]'
-match 1 x '1' '[[:digit:][:upper:][:space:]]'
-match 0 x '1' '[[:digit:][:upper:][:spaci:]]'
-match 1 x ' ' '[[:digit:][:upper:][:space:]]'
-match 0 x '.' '[[:digit:][:upper:][:space:]]'
-match 1 x '.' '[[:digit:][:punct:][:space:]]'
-match 1 x '5' '[[:xdigit:]]'
-match 1 x 'f' '[[:xdigit:]]'
-match 1 x 'D' '[[:xdigit:]]'
-match 1 x '_' '[[:alnum:][:alpha:][:blank:][:cntrl:][:digit:][:graph:][:lower:][:print:][:punct:][:space:][:upper:][:xdigit:]]'
-match 1 x '.' '[^[:alnum:][:alpha:][:blank:][:cntrl:][:digit:][:lower:][:space:][:upper:][:xdigit:]]'
-match 1 x '5' '[a-c[:digit:]x-z]'
-match 1 x 'b' '[a-c[:digit:]x-z]'
-match 1 x 'y' '[a-c[:digit:]x-z]'
-match 0 x 'q' '[a-c[:digit:]x-z]'
-
-# Additional tests, including some malformed wildmats
-match 1 x ']' '[\\-^]'
-match 0 0 '[' '[\\-^]'
-match 1 x '-' '[\-_]'
-match 1 x ']' '[\]]'
-match 0 0 '\]' '[\]]'
-match 0 0 '\' '[\]]'
-match 0 0 'ab' 'a[]b'
-match 0 x 'a[]b' 'a[]b'
-match 0 x 'ab[' 'ab['
-match 0 0 'ab' '[!'
-match 0 0 'ab' '[-'
-match 1 1 '-' '[-]'
-match 0 0 '-' '[a-'
-match 0 0 '-' '[!a-'
-match 1 x '-' '[--A]'
-match 1 x '5' '[--A]'
-match 1 1 ' ' '[ --]'
-match 1 1 '$' '[ --]'
-match 1 1 '-' '[ --]'
-match 0 0 '0' '[ --]'
-match 1 x '-' '[---]'
-match 1 x '-' '[------]'
-match 0 0 'j' '[a-e-n]'
-match 1 x '-' '[a-e-n]'
-match 1 x 'a' '[!------]'
-match 0 0 '[' '[]-a]'
-match 1 x '^' '[]-a]'
-match 0 0 '^' '[!]-a]'
-match 1 x '[' '[!]-a]'
-match 1 1 '^' '[a^bc]'
-match 1 x '-b]' '[a-]b]'
-match 0 0 '\' '[\]'
-match 1 1 '\' '[\\]'
-match 0 0 '\' '[!\\]'
-match 1 1 'G' '[A-\\]'
-match 0 0 'aaabbb' 'b*a'
-match 0 0 'aabcaa' '*ba*'
-match 1 1 ',' '[,]'
-match 1 1 ',' '[\\,]'
-match 1 1 '\' '[\\,]'
-match 1 1 '-' '[,-.]'
-match 0 0 '+' '[,-.]'
-match 0 0 '-.]' '[,-.]'
-match 1 1 '2' '[\1-\3]'
-match 1 1 '3' '[\1-\3]'
-match 0 0 '4' '[\1-\3]'
-match 1 1 '\' '[[-\]]'
-match 1 1 '[' '[[-\]]'
-match 1 1 ']' '[[-\]]'
-match 0 0 '-' '[[-\]]'
+match 1 1 1 1 'a1B' '[[:alpha:]][[:digit:]][[:upper:]]'
+match 0 1 0 1 'a' '[[:digit:][:upper:][:space:]]'
+match 1 1 1 1 'A' '[[:digit:][:upper:][:space:]]'
+match 1 1 1 1 '1' '[[:digit:][:upper:][:space:]]'
+match 0 0 0 0 '1' '[[:digit:][:upper:][:spaci:]]'
+match 1 1 1 1 ' ' '[[:digit:][:upper:][:space:]]'
+match 0 0 0 0 '.' '[[:digit:][:upper:][:space:]]'
+match 1 1 1 1 '.' '[[:digit:][:punct:][:space:]]'
+match 1 1 1 1 '5' '[[:xdigit:]]'
+match 1 1 1 1 'f' '[[:xdigit:]]'
+match 1 1 1 1 'D' '[[:xdigit:]]'
+match 1 1 1 1 '_' '[[:alnum:][:alpha:][:blank:][:cntrl:][:digit:][:graph:][:lower:][:print:][:punct:][:space:][:upper:][:xdigit:]]'
+match 1 1 1 1 '.' '[^[:alnum:][:alpha:][:blank:][:cntrl:][:digit:][:lower:][:space:][:upper:][:xdigit:]]'
+match 1 1 1 1 '5' '[a-c[:digit:]x-z]'
+match 1 1 1 1 'b' '[a-c[:digit:]x-z]'
+match 1 1 1 1 'y' '[a-c[:digit:]x-z]'
+match 0 0 0 0 'q' '[a-c[:digit:]x-z]'
-# Test recursion and the abort code (use "wildtest -i" to see iteration counts)
-match 1 1 '-adobe-courier-bold-o-normal--12-120-75-75-m-70-iso8859-1' '-*-*-*-*-*-*-12-*-*-*-m-*-*-*'
-match 0 0 '-adobe-courier-bold-o-normal--12-120-75-75-X-70-iso8859-1' '-*-*-*-*-*-*-12-*-*-*-m-*-*-*'
-match 0 0 '-adobe-courier-bold-o-normal--12-120-75-75-/-70-iso8859-1' '-*-*-*-*-*-*-12-*-*-*-m-*-*-*'
-match 1 1 'XXX/adobe/courier/bold/o/normal//12/120/75/75/m/70/iso8859/1' 'XXX/*/*/*/*/*/*/12/*/*/*/m/*/*/*'
-match 0 0 'XXX/adobe/courier/bold/o/normal//12/120/75/75/X/70/iso8859/1' 'XXX/*/*/*/*/*/*/12/*/*/*/m/*/*/*'
-match 1 0 'abcd/abcdefg/abcdefghijk/abcdefghijklmnop.txt' '**/*a*b*g*n*t'
-match 0 0 'abcd/abcdefg/abcdefghijk/abcdefghijklmnop.txtz' '**/*a*b*g*n*t'
-match 0 x foo '*/*/*'
-match 0 x foo/bar '*/*/*'
-match 1 x foo/bba/arr '*/*/*'
-match 0 x foo/bb/aa/rr '*/*/*'
-match 1 x foo/bb/aa/rr '**/**/**'
-match 1 x abcXdefXghi '*X*i'
-match 0 x ab/cXd/efXg/hi '*X*i'
-match 1 x ab/cXd/efXg/hi '*/*X*/*/*i'
-match 1 x ab/cXd/efXg/hi '**/*X*/**/*i'
+# Additional tests, including some malformed wildmatch patterns
+match 1 1 1 1 ']' '[\\-^]'
+match 0 0 0 0 '[' '[\\-^]'
+match 1 1 1 1 '-' '[\-_]'
+match 1 1 1 1 ']' '[\]]'
+match 0 0 0 0 '\]' '[\]]'
+match 0 0 0 0 '\' '[\]]'
+match 0 0 0 0 'ab' 'a[]b'
+match 0 0 0 0 \
+ 1 1 1 1 'a[]b' 'a[]b'
+match 0 0 0 0 \
+ 1 1 1 1 'ab[' 'ab['
+match 0 0 0 0 'ab' '[!'
+match 0 0 0 0 'ab' '[-'
+match 1 1 1 1 '-' '[-]'
+match 0 0 0 0 '-' '[a-'
+match 0 0 0 0 '-' '[!a-'
+match 1 1 1 1 '-' '[--A]'
+match 1 1 1 1 '5' '[--A]'
+match 1 1 1 1 ' ' '[ --]'
+match 1 1 1 1 '$' '[ --]'
+match 1 1 1 1 '-' '[ --]'
+match 0 0 0 0 '0' '[ --]'
+match 1 1 1 1 '-' '[---]'
+match 1 1 1 1 '-' '[------]'
+match 0 0 0 0 'j' '[a-e-n]'
+match 1 1 1 1 '-' '[a-e-n]'
+match 1 1 1 1 'a' '[!------]'
+match 0 0 0 0 '[' '[]-a]'
+match 1 1 1 1 '^' '[]-a]'
+match 0 0 0 0 '^' '[!]-a]'
+match 1 1 1 1 '[' '[!]-a]'
+match 1 1 1 1 '^' '[a^bc]'
+match 1 1 1 1 '-b]' '[a-]b]'
+match 0 0 0 0 '\' '[\]'
+match 1 1 1 1 '\' '[\\]'
+match 0 0 0 0 '\' '[!\\]'
+match 1 1 1 1 'G' '[A-\\]'
+match 0 0 0 0 'aaabbb' 'b*a'
+match 0 0 0 0 'aabcaa' '*ba*'
+match 1 1 1 1 ',' '[,]'
+match 1 1 1 1 ',' '[\\,]'
+match 1 1 1 1 '\' '[\\,]'
+match 1 1 1 1 '-' '[,-.]'
+match 0 0 0 0 '+' '[,-.]'
+match 0 0 0 0 '-.]' '[,-.]'
+match 1 1 1 1 '2' '[\1-\3]'
+match 1 1 1 1 '3' '[\1-\3]'
+match 0 0 0 0 '4' '[\1-\3]'
+match 1 1 1 1 '\' '[[-\]]'
+match 1 1 1 1 '[' '[[-\]]'
+match 1 1 1 1 ']' '[[-\]]'
+match 0 0 0 0 '-' '[[-\]]'
-pathmatch 1 foo foo
-pathmatch 0 foo fo
-pathmatch 1 foo/bar foo/bar
-pathmatch 1 foo/bar 'foo/*'
-pathmatch 1 foo/bba/arr 'foo/*'
-pathmatch 1 foo/bba/arr 'foo/**'
-pathmatch 1 foo/bba/arr 'foo*'
-pathmatch 1 foo/bba/arr 'foo**'
-pathmatch 1 foo/bba/arr 'foo/*arr'
-pathmatch 1 foo/bba/arr 'foo/**arr'
-pathmatch 0 foo/bba/arr 'foo/*z'
-pathmatch 0 foo/bba/arr 'foo/**z'
-pathmatch 1 foo/bar 'foo?bar'
-pathmatch 1 foo/bar 'foo[/]bar'
-pathmatch 1 foo/bar 'foo[^a-z]bar'
-pathmatch 0 foo '*/*/*'
-pathmatch 0 foo/bar '*/*/*'
-pathmatch 1 foo/bba/arr '*/*/*'
-pathmatch 1 foo/bb/aa/rr '*/*/*'
-pathmatch 1 abcXdefXghi '*X*i'
-pathmatch 1 ab/cXd/efXg/hi '*/*X*/*/*i'
-pathmatch 1 ab/cXd/efXg/hi '*Xg*i'
+# Test recursion
+match 1 1 1 1 '-adobe-courier-bold-o-normal--12-120-75-75-m-70-iso8859-1' '-*-*-*-*-*-*-12-*-*-*-m-*-*-*'
+match 0 0 0 0 '-adobe-courier-bold-o-normal--12-120-75-75-X-70-iso8859-1' '-*-*-*-*-*-*-12-*-*-*-m-*-*-*'
+match 0 0 0 0 '-adobe-courier-bold-o-normal--12-120-75-75-/-70-iso8859-1' '-*-*-*-*-*-*-12-*-*-*-m-*-*-*'
+match 1 1 1 1 'XXX/adobe/courier/bold/o/normal//12/120/75/75/m/70/iso8859/1' 'XXX/*/*/*/*/*/*/12/*/*/*/m/*/*/*'
+match 0 0 0 0 'XXX/adobe/courier/bold/o/normal//12/120/75/75/X/70/iso8859/1' 'XXX/*/*/*/*/*/*/12/*/*/*/m/*/*/*'
+match 1 1 1 1 'abcd/abcdefg/abcdefghijk/abcdefghijklmnop.txt' '**/*a*b*g*n*t'
+match 0 0 0 0 'abcd/abcdefg/abcdefghijk/abcdefghijklmnop.txtz' '**/*a*b*g*n*t'
+match 0 0 0 0 foo '*/*/*'
+match 0 0 0 0 foo/bar '*/*/*'
+match 1 1 1 1 foo/bba/arr '*/*/*'
+match 0 0 1 1 foo/bb/aa/rr '*/*/*'
+match 1 1 1 1 foo/bb/aa/rr '**/**/**'
+match 1 1 1 1 abcXdefXghi '*X*i'
+match 0 0 1 1 ab/cXd/efXg/hi '*X*i'
+match 1 1 1 1 ab/cXd/efXg/hi '*/*X*/*/*i'
+match 1 1 1 1 ab/cXd/efXg/hi '**/*X*/**/*i'
-# Case-sensitivity features
-match 0 x 'a' '[A-Z]'
-match 1 x 'A' '[A-Z]'
-match 0 x 'A' '[a-z]'
-match 1 x 'a' '[a-z]'
-match 0 x 'a' '[[:upper:]]'
-match 1 x 'A' '[[:upper:]]'
-match 0 x 'A' '[[:lower:]]'
-match 1 x 'a' '[[:lower:]]'
-match 0 x 'A' '[B-Za]'
-match 1 x 'a' '[B-Za]'
-match 0 x 'A' '[B-a]'
-match 1 x 'a' '[B-a]'
-match 0 x 'z' '[Z-y]'
-match 1 x 'Z' '[Z-y]'
+# Extra pathmatch tests
+match 0 0 0 0 foo fo
+match 1 1 1 1 foo/bar foo/bar
+match 1 1 1 1 foo/bar 'foo/*'
+match 0 0 1 1 foo/bba/arr 'foo/*'
+match 1 1 1 1 foo/bba/arr 'foo/**'
+match 0 0 1 1 foo/bba/arr 'foo*'
+match 0 0 1 1 \
+ 1 1 1 1 foo/bba/arr 'foo**'
+match 0 0 1 1 foo/bba/arr 'foo/*arr'
+match 0 0 1 1 foo/bba/arr 'foo/**arr'
+match 0 0 0 0 foo/bba/arr 'foo/*z'
+match 0 0 0 0 foo/bba/arr 'foo/**z'
+match 0 0 1 1 foo/bar 'foo?bar'
+match 0 0 1 1 foo/bar 'foo[/]bar'
+match 0 0 1 1 foo/bar 'foo[^a-z]bar'
+match 0 0 1 1 ab/cXd/efXg/hi '*Xg*i'
-imatch 1 'a' '[A-Z]'
-imatch 1 'A' '[A-Z]'
-imatch 1 'A' '[a-z]'
-imatch 1 'a' '[a-z]'
-imatch 1 'a' '[[:upper:]]'
-imatch 1 'A' '[[:upper:]]'
-imatch 1 'A' '[[:lower:]]'
-imatch 1 'a' '[[:lower:]]'
-imatch 1 'A' '[B-Za]'
-imatch 1 'a' '[B-Za]'
-imatch 1 'A' '[B-a]'
-imatch 1 'a' '[B-a]'
-imatch 1 'z' '[Z-y]'
-imatch 1 'Z' '[Z-y]'
+# Extra case-sensitivity tests
+match 0 1 0 1 'a' '[A-Z]'
+match 1 1 1 1 'A' '[A-Z]'
+match 0 1 0 1 'A' '[a-z]'
+match 1 1 1 1 'a' '[a-z]'
+match 0 1 0 1 'a' '[[:upper:]]'
+match 1 1 1 1 'A' '[[:upper:]]'
+match 0 1 0 1 'A' '[[:lower:]]'
+match 1 1 1 1 'a' '[[:lower:]]'
+match 0 1 0 1 'A' '[B-Za]'
+match 1 1 1 1 'a' '[B-Za]'
+match 0 1 0 1 'A' '[B-a]'
+match 1 1 1 1 'a' '[B-a]'
+match 0 1 0 1 'z' '[Z-y]'
+match 1 1 1 1 'Z' '[Z-y]'
test_done
diff --git a/t/t3404-rebase-interactive.sh b/t/t3404-rebase-interactive.sh
index 481a350..ef2887b 100755
--- a/t/t3404-rebase-interactive.sh
+++ b/t/t3404-rebase-interactive.sh
@@ -453,6 +453,10 @@ test_expect_success C_LOCALE_OUTPUT 'squash and fixup generate correct log messa
git rebase -i $base &&
git cat-file commit HEAD | sed -e 1,/^\$/d > actual-squash-fixup &&
test_cmp expect-squash-fixup actual-squash-fixup &&
+ git cat-file commit HEAD@{2} |
+ grep "^# This is a combination of 3 commits\." &&
+ git cat-file commit HEAD@{3} |
+ grep "^# This is a combination of 2 commits\." &&
git checkout to-be-rebased &&
git branch -D squash-fixup
'
@@ -1336,6 +1340,16 @@ test_expect_success 'editor saves as CR/LF' '
SQ="'"
test_expect_success 'rebase -i --gpg-sign=<key-id>' '
+ test_when_finished "test_might_fail git rebase --abort" &&
+ set_fake_editor &&
+ FAKE_LINES="edit 1" git rebase -i --gpg-sign="\"S I Gner\"" HEAD^ \
+ >out 2>err &&
+ test_i18ngrep "$SQ-S\"S I Gner\"$SQ" err
+'
+
+test_expect_success 'rebase -i --gpg-sign=<key-id> overrides commit.gpgSign' '
+ test_when_finished "test_might_fail git rebase --abort" &&
+ test_config commit.gpgsign true &&
set_fake_editor &&
FAKE_LINES="edit 1" git rebase -i --gpg-sign="\"S I Gner\"" HEAD^ \
>out 2>err &&
diff --git a/t/t3501-revert-cherry-pick.sh b/t/t3501-revert-cherry-pick.sh
index 4f2a263..783bdbf 100755
--- a/t/t3501-revert-cherry-pick.sh
+++ b/t/t3501-revert-cherry-pick.sh
@@ -141,7 +141,7 @@ test_expect_success 'cherry-pick "-" works with arguments' '
test_cmp expect actual
'
-test_expect_success 'cherry-pick works with dirty renamed file' '
+test_expect_failure 'cherry-pick works with dirty renamed file' '
test_commit to-rename &&
git checkout -b unrelated &&
test_commit unrelated &&
@@ -150,7 +150,10 @@ test_expect_success 'cherry-pick works with dirty renamed file' '
test_tick &&
git commit -m renamed &&
echo modified >renamed &&
- git cherry-pick refs/heads/unrelated
+ test_must_fail git cherry-pick refs/heads/unrelated >out &&
+ test_i18ngrep "Refusing to lose dirty file at renamed" out &&
+ test $(git rev-parse :0:renamed) = $(git rev-parse HEAD^:to-rename.t) &&
+ grep -q "^modified$" renamed
'
test_done
diff --git a/t/t3512-cherry-pick-submodule.sh b/t/t3512-cherry-pick-submodule.sh
index ce48c4f..bd78287 100755
--- a/t/t3512-cherry-pick-submodule.sh
+++ b/t/t3512-cherry-pick-submodule.sh
@@ -5,7 +5,6 @@ test_description='cherry-pick can handle submodules'
. ./test-lib.sh
. "$TEST_DIRECTORY"/lib-submodule-update.sh
-KNOWN_FAILURE_CHERRY_PICK_SEES_EMPTY_COMMIT=1
KNOWN_FAILURE_NOFF_MERGE_DOESNT_CREATE_EMPTY_SUBMODULE_DIR=1
KNOWN_FAILURE_NOFF_MERGE_ATTEMPTS_TO_MERGE_REMOVED_SUBMODULE_FILES=1
test_submodule_switch "git cherry-pick"
diff --git a/t/t3513-revert-submodule.sh b/t/t3513-revert-submodule.sh
index db93781..5e39fcd 100755
--- a/t/t3513-revert-submodule.sh
+++ b/t/t3513-revert-submodule.sh
@@ -25,7 +25,6 @@ git_revert () {
git revert HEAD
}
-KNOWN_FAILURE_CHERRY_PICK_SEES_EMPTY_COMMIT=1
KNOWN_FAILURE_NOFF_MERGE_DOESNT_CREATE_EMPTY_SUBMODULE_DIR=1
test_submodule_switch "git_revert"
diff --git a/t/t5318-commit-graph.sh b/t/t5318-commit-graph.sh
new file mode 100755
index 0000000..a380419
--- /dev/null
+++ b/t/t5318-commit-graph.sh
@@ -0,0 +1,224 @@
+#!/bin/sh
+
+test_description='commit graph'
+. ./test-lib.sh
+
+test_expect_success 'setup full repo' '
+ mkdir full &&
+ cd "$TRASH_DIRECTORY/full" &&
+ git init &&
+ git config core.commitGraph true &&
+ objdir=".git/objects"
+'
+
+test_expect_success 'write graph with no packs' '
+ cd "$TRASH_DIRECTORY/full" &&
+ git commit-graph write --object-dir . &&
+ test_path_is_file info/commit-graph
+'
+
+test_expect_success 'create commits and repack' '
+ cd "$TRASH_DIRECTORY/full" &&
+ for i in $(test_seq 3)
+ do
+ test_commit $i &&
+ git branch commits/$i
+ done &&
+ git repack
+'
+
+graph_git_two_modes() {
+ git -c core.graph=true $1 >output
+ git -c core.graph=false $1 >expect
+ test_cmp output expect
+}
+
+graph_git_behavior() {
+ MSG=$1
+ DIR=$2
+ BRANCH=$3
+ COMPARE=$4
+ test_expect_success "check normal git operations: $MSG" '
+ cd "$TRASH_DIRECTORY/$DIR" &&
+ graph_git_two_modes "log --oneline $BRANCH" &&
+ graph_git_two_modes "log --topo-order $BRANCH" &&
+ graph_git_two_modes "log --graph $COMPARE..$BRANCH" &&
+ graph_git_two_modes "branch -vv" &&
+ graph_git_two_modes "merge-base -a $BRANCH $COMPARE"
+ '
+}
+
+graph_git_behavior 'no graph' full commits/3 commits/1
+
+graph_read_expect() {
+ OPTIONAL=""
+ NUM_CHUNKS=3
+ if test ! -z $2
+ then
+ OPTIONAL=" $2"
+ NUM_CHUNKS=$((3 + $(echo "$2" | wc -w)))
+ fi
+ cat >expect <<- EOF
+ header: 43475048 1 1 $NUM_CHUNKS 0
+ num_commits: $1
+ chunks: oid_fanout oid_lookup commit_metadata$OPTIONAL
+ EOF
+ git commit-graph read >output &&
+ test_cmp expect output
+}
+
+test_expect_success 'write graph' '
+ cd "$TRASH_DIRECTORY/full" &&
+ graph1=$(git commit-graph write) &&
+ test_path_is_file $objdir/info/commit-graph &&
+ graph_read_expect "3"
+'
+
+graph_git_behavior 'graph exists' full commits/3 commits/1
+
+test_expect_success 'Add more commits' '
+ cd "$TRASH_DIRECTORY/full" &&
+ git reset --hard commits/1 &&
+ for i in $(test_seq 4 5)
+ do
+ test_commit $i &&
+ git branch commits/$i
+ done &&
+ git reset --hard commits/2 &&
+ for i in $(test_seq 6 7)
+ do
+ test_commit $i &&
+ git branch commits/$i
+ done &&
+ git reset --hard commits/2 &&
+ git merge commits/4 &&
+ git branch merge/1 &&
+ git reset --hard commits/4 &&
+ git merge commits/6 &&
+ git branch merge/2 &&
+ git reset --hard commits/3 &&
+ git merge commits/5 commits/7 &&
+ git branch merge/3 &&
+ git repack
+'
+
+# Current graph structure:
+#
+# __M3___
+# / | \
+# 3 M1 5 M2 7
+# |/ \|/ \|
+# 2 4 6
+# |___/____/
+# 1
+
+test_expect_success 'write graph with merges' '
+ cd "$TRASH_DIRECTORY/full" &&
+ git commit-graph write &&
+ test_path_is_file $objdir/info/commit-graph &&
+ graph_read_expect "10" "large_edges"
+'
+
+graph_git_behavior 'merge 1 vs 2' full merge/1 merge/2
+graph_git_behavior 'merge 1 vs 3' full merge/1 merge/3
+graph_git_behavior 'merge 2 vs 3' full merge/2 merge/3
+
+test_expect_success 'Add one more commit' '
+ cd "$TRASH_DIRECTORY/full" &&
+ test_commit 8 &&
+ git branch commits/8 &&
+ ls $objdir/pack | grep idx >existing-idx &&
+ git repack &&
+ ls $objdir/pack| grep idx | grep -v --file=existing-idx >new-idx
+'
+
+# Current graph structure:
+#
+# 8
+# |
+# __M3___
+# / | \
+# 3 M1 5 M2 7
+# |/ \|/ \|
+# 2 4 6
+# |___/____/
+# 1
+
+graph_git_behavior 'mixed mode, commit 8 vs merge 1' full commits/8 merge/1
+graph_git_behavior 'mixed mode, commit 8 vs merge 2' full commits/8 merge/2
+
+test_expect_success 'write graph with new commit' '
+ cd "$TRASH_DIRECTORY/full" &&
+ git commit-graph write &&
+ test_path_is_file $objdir/info/commit-graph &&
+ graph_read_expect "11" "large_edges"
+'
+
+graph_git_behavior 'full graph, commit 8 vs merge 1' full commits/8 merge/1
+graph_git_behavior 'full graph, commit 8 vs merge 2' full commits/8 merge/2
+
+test_expect_success 'write graph with nothing new' '
+ cd "$TRASH_DIRECTORY/full" &&
+ git commit-graph write &&
+ test_path_is_file $objdir/info/commit-graph &&
+ graph_read_expect "11" "large_edges"
+'
+
+graph_git_behavior 'cleared graph, commit 8 vs merge 1' full commits/8 merge/1
+graph_git_behavior 'cleared graph, commit 8 vs merge 2' full commits/8 merge/2
+
+test_expect_success 'build graph from latest pack with closure' '
+ cd "$TRASH_DIRECTORY/full" &&
+ cat new-idx | git commit-graph write --stdin-packs &&
+ test_path_is_file $objdir/info/commit-graph &&
+ graph_read_expect "9" "large_edges"
+'
+
+graph_git_behavior 'graph from pack, commit 8 vs merge 1' full commits/8 merge/1
+graph_git_behavior 'graph from pack, commit 8 vs merge 2' full commits/8 merge/2
+
+test_expect_success 'build graph from commits with closure' '
+ cd "$TRASH_DIRECTORY/full" &&
+ git tag -a -m "merge" tag/merge merge/2 &&
+ git rev-parse tag/merge >commits-in &&
+ git rev-parse merge/1 >>commits-in &&
+ cat commits-in | git commit-graph write --stdin-commits &&
+ test_path_is_file $objdir/info/commit-graph &&
+ graph_read_expect "6"
+'
+
+graph_git_behavior 'graph from commits, commit 8 vs merge 1' full commits/8 merge/1
+graph_git_behavior 'graph from commits, commit 8 vs merge 2' full commits/8 merge/2
+
+test_expect_success 'build graph from commits with append' '
+ cd "$TRASH_DIRECTORY/full" &&
+ git rev-parse merge/3 | git commit-graph write --stdin-commits --append &&
+ test_path_is_file $objdir/info/commit-graph &&
+ graph_read_expect "10" "large_edges"
+'
+
+graph_git_behavior 'append graph, commit 8 vs merge 1' full commits/8 merge/1
+graph_git_behavior 'append graph, commit 8 vs merge 2' full commits/8 merge/2
+
+test_expect_success 'setup bare repo' '
+ cd "$TRASH_DIRECTORY" &&
+ git clone --bare --no-local full bare &&
+ cd bare &&
+ git config core.commitGraph true &&
+ baredir="./objects"
+'
+
+graph_git_behavior 'bare repo, commit 8 vs merge 1' bare commits/8 merge/1
+graph_git_behavior 'bare repo, commit 8 vs merge 2' bare commits/8 merge/2
+
+test_expect_success 'write graph in bare repo' '
+ cd "$TRASH_DIRECTORY/bare" &&
+ git commit-graph write &&
+ test_path_is_file $baredir/info/commit-graph &&
+ graph_read_expect "11" "large_edges"
+'
+
+graph_git_behavior 'bare repo with graph, commit 8 vs merge 1' bare commits/8 merge/1
+graph_git_behavior 'bare repo with graph, commit 8 vs merge 2' bare commits/8 merge/2
+
+test_done
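
For quick reference, a hedged recap of the commit-graph plumbing the new test exercises, run in a repository with core.commitGraph enabled:

	git commit-graph write                                         # scan packs under .git/objects
	git commit-graph write --object-dir <dir>                      # or an alternate object directory
	ls .git/objects/pack | grep idx | git commit-graph write --stdin-packs   # pack .idx base names
	git rev-parse HEAD | git commit-graph write --stdin-commits --append
	git commit-graph read                                          # header, num_commits and chunk list

The resulting file lands at <object dir>/info/commit-graph, which is what graph_read_expect and the test_path_is_file checks above verify.
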
diff --git a/t/t5500-fetch-pack.sh b/t/t5500-fetch-pack.sh
index 80a1a32..ec9ba9b 100755
--- a/t/t5500-fetch-pack.sh
+++ b/t/t5500-fetch-pack.sh
@@ -755,4 +755,67 @@ test_expect_success 'fetching deepen' '
)
'
+test_expect_success 'filtering by size' '
+ rm -rf server client &&
+ test_create_repo server &&
+ test_commit -C server one &&
+ test_config -C server uploadpack.allowfilter 1 &&
+
+ test_create_repo client &&
+ git -C client fetch-pack --filter=blob:limit=0 ../server HEAD &&
+
+ # Ensure that object is not inadvertently fetched
+ test_must_fail git -C client cat-file -e $(git hash-object server/one.t)
+'
+
+test_expect_success 'filtering by size has no effect if support for it is not advertised' '
+ rm -rf server client &&
+ test_create_repo server &&
+ test_commit -C server one &&
+
+ test_create_repo client &&
+ git -C client fetch-pack --filter=blob:limit=0 ../server HEAD 2> err &&
+
+ # Ensure that object is fetched
+ git -C client cat-file -e $(git hash-object server/one.t) &&
+
+ test_i18ngrep "filtering not recognized by server" err
+'
+
+fetch_filter_blob_limit_zero () {
+ SERVER="$1"
+ URL="$2"
+
+ rm -rf "$SERVER" client &&
+ test_create_repo "$SERVER" &&
+ test_commit -C "$SERVER" one &&
+ test_config -C "$SERVER" uploadpack.allowfilter 1 &&
+
+ git clone "$URL" client &&
+ test_config -C client extensions.partialclone origin &&
+
+ test_commit -C "$SERVER" two &&
+
+ git -C client fetch --filter=blob:limit=0 origin HEAD:somewhere &&
+
+ # Ensure that commit is fetched, but blob is not
+ test_config -C client extensions.partialclone "arbitrary string" &&
+ git -C client cat-file -e $(git -C "$SERVER" rev-parse two) &&
+ test_must_fail git -C client cat-file -e $(git hash-object "$SERVER/two.t")
+}
+
+test_expect_success 'fetch with --filter=blob:limit=0' '
+ fetch_filter_blob_limit_zero server server
+'
+
+. "$TEST_DIRECTORY"/lib-httpd.sh
+start_httpd
+
+test_expect_success 'fetch with --filter=blob:limit=0 and HTTP' '
+ fetch_filter_blob_limit_zero "$HTTPD_DOCUMENT_ROOT_PATH/server" "$HTTPD_URL/smart/server"
+'
+
+stop_httpd
+
+
test_done
diff --git a/t/t5551-http-fetch-smart.sh b/t/t5551-http-fetch-smart.sh
index a51b7e2..f5721b4 100755
--- a/t/t5551-http-fetch-smart.sh
+++ b/t/t5551-http-fetch-smart.sh
@@ -364,5 +364,38 @@ test_expect_success 'custom http headers' '
submodule update sub
'
+test_expect_success 'GIT_REDACT_COOKIES redacts cookies' '
+ rm -rf clone &&
+ echo "Set-Cookie: Foo=1" >cookies &&
+ echo "Set-Cookie: Bar=2" >>cookies &&
+ GIT_TRACE_CURL=true GIT_REDACT_COOKIES=Bar,Baz \
+ git -c "http.cookieFile=$(pwd)/cookies" clone \
+ $HTTPD_URL/smart/repo.git clone 2>err &&
+ grep "Cookie:.*Foo=1" err &&
+ grep "Cookie:.*Bar=<redacted>" err &&
+ ! grep "Cookie:.*Bar=2" err
+'
+
+test_expect_success 'GIT_REDACT_COOKIES handles empty values' '
+ rm -rf clone &&
+ echo "Set-Cookie: Foo=" >cookies &&
+ GIT_TRACE_CURL=true GIT_REDACT_COOKIES=Foo \
+ git -c "http.cookieFile=$(pwd)/cookies" clone \
+ $HTTPD_URL/smart/repo.git clone 2>err &&
+ grep "Cookie:.*Foo=<redacted>" err
+'
+
+test_expect_success 'GIT_TRACE_CURL_NO_DATA prevents data from being traced' '
+ rm -rf clone &&
+ GIT_TRACE_CURL=true \
+ git clone $HTTPD_URL/smart/repo.git clone 2>err &&
+ grep "=> Send data" err &&
+
+ rm -rf clone &&
+ GIT_TRACE_CURL=true GIT_TRACE_CURL_NO_DATA=1 \
+ git clone $HTTPD_URL/smart/repo.git clone 2>err &&
+ ! grep "=> Send data" err
+'
+
stop_httpd
test_done
diff --git a/t/t5570-git-daemon.sh b/t/t5570-git-daemon.sh
index 225a022..755b05a 100755
--- a/t/t5570-git-daemon.sh
+++ b/t/t5570-git-daemon.sh
@@ -167,23 +167,48 @@ test_expect_success 'access repo via interpolated hostname' '
git init --bare "$repo" &&
git push "$repo" HEAD &&
>"$repo"/git-daemon-export-ok &&
- rm -rf tmp.git &&
GIT_OVERRIDE_VIRTUAL_HOST=localhost \
- git clone --bare "$GIT_DAEMON_URL/interp.git" tmp.git &&
- rm -rf tmp.git &&
+ git ls-remote "$GIT_DAEMON_URL/interp.git" &&
GIT_OVERRIDE_VIRTUAL_HOST=LOCALHOST \
- git clone --bare "$GIT_DAEMON_URL/interp.git" tmp.git
+ git ls-remote "$GIT_DAEMON_URL/interp.git"
'
test_expect_success 'hostname cannot break out of directory' '
- rm -rf tmp.git &&
repo="$GIT_DAEMON_DOCUMENT_ROOT_PATH/../escape.git" &&
git init --bare "$repo" &&
git push "$repo" HEAD &&
>"$repo"/git-daemon-export-ok &&
test_must_fail \
env GIT_OVERRIDE_VIRTUAL_HOST=.. \
- git clone --bare "$GIT_DAEMON_URL/escape.git" tmp.git
+ git ls-remote "$GIT_DAEMON_URL/escape.git"
+'
+
+test_expect_success 'daemon log records all attributes' '
+ cat >expect <<-\EOF &&
+ Extended attribute "host": localhost
+ Extended attribute "protocol": version=1
+ EOF
+ >daemon.log &&
+ GIT_OVERRIDE_VIRTUAL_HOST=localhost \
+ git -c protocol.version=1 \
+ ls-remote "$GIT_DAEMON_URL/interp.git" &&
+ grep -i extended.attribute daemon.log | cut -d" " -f2- >actual &&
+ test_cmp expect actual
+'
+
+test_expect_success FAKENC 'hostname interpolation works after LF-stripping' '
+ {
+ printf "git-upload-pack /interp.git\n\0host=localhost" | packetize
+ printf "0000"
+ } >input &&
+ fake_nc "$GIT_DAEMON_HOST_PORT" <input >output &&
+ depacketize <output >output.raw &&
+
+ # just pick out the value of master, which avoids any protocol
+ # particulars
+ perl -lne "print \$1 if m{^(\\S+) refs/heads/master}" <output.raw >actual &&
+ git -C "$repo" rev-parse master >expect &&
+ test_cmp expect actual
'
stop_git_daemon
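
The FAKENC test hand-assembles the raw request git-daemon reads from the socket: one pkt-line carrying the service, the path and a NUL-separated host attribute, followed by a flush-pkt. The same two lines the test feeds through fake_nc, annotated (the hex length prefix is whatever packetize computes for the payload plus its own four bytes):

	printf "git-upload-pack /interp.git\n\0host=localhost" | packetize   # pkt-line with host attribute
	printf "0000"                                                        # flush-pkt ends the request
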
diff --git a/t/t5601-clone.sh b/t/t5601-clone.sh
index 8c437bf..0b62037 100755
--- a/t/t5601-clone.sh
+++ b/t/t5601-clone.sh
@@ -628,4 +628,105 @@ test_expect_success 'clone on case-insensitive fs' '
)
'
+partial_clone () {
+ SERVER="$1" &&
+ URL="$2" &&
+
+ rm -rf "$SERVER" client &&
+ test_create_repo "$SERVER" &&
+ test_commit -C "$SERVER" one &&
+ HASH1=$(git hash-object "$SERVER/one.t") &&
+ git -C "$SERVER" revert HEAD &&
+ test_commit -C "$SERVER" two &&
+ HASH2=$(git hash-object "$SERVER/two.t") &&
+ test_config -C "$SERVER" uploadpack.allowfilter 1 &&
+ test_config -C "$SERVER" uploadpack.allowanysha1inwant 1 &&
+
+ git clone --filter=blob:limit=0 "$URL" client &&
+
+ git -C client fsck &&
+
+ # Ensure that unneeded blobs are not inadvertently fetched.
+ test_config -C client extensions.partialclone "not a remote" &&
+ test_must_fail git -C client cat-file -e "$HASH1" &&
+
+ # But this blob was fetched, because clone performs an initial checkout
+ git -C client cat-file -e "$HASH2"
+}
+
+test_expect_success 'partial clone' '
+ partial_clone server "file://$(pwd)/server"
+'
+
+test_expect_success 'partial clone: warn if server does not support object filtering' '
+ rm -rf server client &&
+ test_create_repo server &&
+ test_commit -C server one &&
+
+ git clone --filter=blob:limit=0 "file://$(pwd)/server" client 2> err &&
+
+ test_i18ngrep "filtering not recognized by server" err
+'
+
+test_expect_success 'batch missing blob request during checkout' '
+ rm -rf server client &&
+
+ test_create_repo server &&
+ echo a >server/a &&
+ echo b >server/b &&
+ git -C server add a b &&
+
+ git -C server commit -m x &&
+ echo aa >server/a &&
+ echo bb >server/b &&
+ git -C server add a b &&
+ git -C server commit -m x &&
+
+ test_config -C server uploadpack.allowfilter 1 &&
+ test_config -C server uploadpack.allowanysha1inwant 1 &&
+
+ git clone --filter=blob:limit=0 "file://$(pwd)/server" client &&
+
+ # Ensure that there is only one negotiation by checking that only one
+ # "done" line is sent. ("done" marks the end of negotiation.)
+ GIT_TRACE_PACKET="$(pwd)/trace" git -C client checkout HEAD^ &&
+ grep "git> done" trace >done_lines &&
+ test_line_count = 1 done_lines
+'
+
+test_expect_success 'batch missing blob request does not inadvertently try to fetch gitlinks' '
+ rm -rf server client &&
+
+ test_create_repo repo_for_submodule &&
+ test_commit -C repo_for_submodule x &&
+
+ test_create_repo server &&
+ echo a >server/a &&
+ echo b >server/b &&
+ git -C server add a b &&
+ git -C server commit -m x &&
+
+ echo aa >server/a &&
+ echo bb >server/b &&
+ # Also add a gitlink pointing to an arbitrary repository
+ git -C server submodule add "$(pwd)/repo_for_submodule" c &&
+ git -C server add a b c &&
+ git -C server commit -m x &&
+
+ test_config -C server uploadpack.allowfilter 1 &&
+ test_config -C server uploadpack.allowanysha1inwant 1 &&
+
+ # Make sure that it succeeds
+ git clone --filter=blob:limit=0 "file://$(pwd)/server" client
+'
+
+. "$TEST_DIRECTORY"/lib-httpd.sh
+start_httpd
+
+test_expect_success 'partial clone using HTTP' '
+ partial_clone "$HTTPD_DOCUMENT_ROOT_PATH/server" "$HTTPD_URL/smart/server"
+'
+
+stop_httpd
+
test_done
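
The clone-side bookkeeping these tests depend on can be summarized in a short sketch (the URL is a placeholder; the config keys are the ones t5616 checks further below):

	git clone --no-checkout --filter=blob:none https://example.com/repo.git pc
	git -C pc config --local extensions.partialclone      # expect: origin
	git -C pc config --local core.partialclonefilter      # expect: blob:none
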
diff --git a/t/t5616-partial-clone.sh b/t/t5616-partial-clone.sh
new file mode 100755
index 0000000..29d8631
--- /dev/null
+++ b/t/t5616-partial-clone.sh
@@ -0,0 +1,146 @@
+#!/bin/sh
+
+test_description='git partial clone'
+
+. ./test-lib.sh
+
+# create a normal "src" repo where we can later create new commits.
+# expect_1.oids will contain a list of the OIDs of all blobs.
+test_expect_success 'setup normal src repo' '
+ echo "{print \$1}" >print_1.awk &&
+ echo "{print \$2}" >print_2.awk &&
+
+ git init src &&
+ for n in 1 2 3 4
+ do
+ echo "This is file: $n" > src/file.$n.txt
+ git -C src add file.$n.txt
+ git -C src commit -m "file $n"
+ git -C src ls-files -s file.$n.txt >>temp
+ done &&
+ awk -f print_2.awk <temp | sort >expect_1.oids &&
+ test_line_count = 4 expect_1.oids
+'
+
+# make a bare clone of "src" as "srv.bare" to use as our server.
+test_expect_success 'setup bare clone for server' '
+ git clone --bare "file://$(pwd)/src" srv.bare &&
+ git -C srv.bare config --local uploadpack.allowfilter 1 &&
+ git -C srv.bare config --local uploadpack.allowanysha1inwant 1
+'
+
+# do basic partial clone from "srv.bare"
+# confirm we are missing all of the known blobs.
+# confirm partial clone was registered in the local config.
+test_expect_success 'do partial clone 1' '
+ git clone --no-checkout --filter=blob:none "file://$(pwd)/srv.bare" pc1 &&
+ git -C pc1 rev-list HEAD --quiet --objects --missing=print \
+ | awk -f print_1.awk \
+ | sed "s/?//" \
+ | sort >observed.oids &&
+ test_cmp expect_1.oids observed.oids &&
+ test "$(git -C pc1 config --local core.repositoryformatversion)" = "1" &&
+ test "$(git -C pc1 config --local extensions.partialclone)" = "origin" &&
+ test "$(git -C pc1 config --local core.partialclonefilter)" = "blob:none"
+'
+
+# checkout master to force dynamic object fetch of blobs at HEAD.
+test_expect_success 'verify checkout with dynamic object fetch' '
+ git -C pc1 rev-list HEAD --quiet --objects --missing=print >observed &&
+ test_line_count = 4 observed &&
+ git -C pc1 checkout master &&
+ git -C pc1 rev-list HEAD --quiet --objects --missing=print >observed &&
+ test_line_count = 0 observed
+'
+
+# create new commits in "src" repo to establish a blame history on file.1.txt
+# and push to "srv.bare".
+test_expect_success 'push new commits to server' '
+ git -C src remote add srv "file://$(pwd)/srv.bare" &&
+ for x in a b c d e
+ do
+ echo "Mod file.1.txt $x" >>src/file.1.txt
+ git -C src add file.1.txt
+ git -C src commit -m "mod $x"
+ done &&
+ git -C src blame master -- file.1.txt >expect.blame &&
+ git -C src push -u srv master
+'
+
+# (partial) fetch in the partial clone repo from the promisor remote.
+# verify that fetch inherited the filter-spec from the config and DOES NOT
+# have the new blobs.
+test_expect_success 'partial fetch inherits filter settings' '
+ git -C pc1 fetch origin &&
+ git -C pc1 rev-list master..origin/master --quiet --objects --missing=print >observed &&
+ test_line_count = 5 observed
+'
+
+# force dynamic object fetch using diff.
+# we should only get 1 new blob (for the file in origin/master).
+test_expect_success 'verify diff causes dynamic object fetch' '
+ git -C pc1 diff master..origin/master -- file.1.txt &&
+ git -C pc1 rev-list master..origin/master --quiet --objects --missing=print >observed &&
+ test_line_count = 4 observed
+'
+
+# force full dynamic object fetch of the file's history using blame.
+# we should get the intermediate blobs for the file.
+test_expect_success 'verify blame causes dynamic object fetch' '
+ git -C pc1 blame origin/master -- file.1.txt >observed.blame &&
+ test_cmp expect.blame observed.blame &&
+ git -C pc1 rev-list master..origin/master --quiet --objects --missing=print >observed &&
+ test_line_count = 0 observed
+'
+
+# create new commits in "src" repo to establish a history on file.2.txt
+# and push to "srv.bare".
+test_expect_success 'push new commits to server for file.2.txt' '
+ for x in a b c d e f
+ do
+ echo "Mod file.2.txt $x" >>src/file.2.txt
+ git -C src add file.2.txt
+ git -C src commit -m "mod $x"
+ done &&
+ git -C src push -u srv master
+'
+
+# Do FULL fetch by disabling inherited filter-spec using --no-filter.
+# Verify we have all the new blobs.
+test_expect_success 'override inherited filter-spec using --no-filter' '
+ git -C pc1 fetch --no-filter origin &&
+ git -C pc1 rev-list master..origin/master --quiet --objects --missing=print >observed &&
+ test_line_count = 0 observed
+'
+
+# create new commits in "src" repo to establish a history on file.3.txt
+# and push to "srv.bare".
+test_expect_success 'push new commits to server for file.3.txt' '
+ for x in a b c d e f
+ do
+ echo "Mod file.3.txt $x" >>src/file.3.txt
+ git -C src add file.3.txt
+ git -C src commit -m "mod $x"
+ done &&
+ git -C src push -u srv master
+'
+
+# Do a partial fetch and then try to manually fetch the missing objects.
+# This could serve as the basis of a pre-command hook that bulk-fetches objects,
+# perhaps combined with first running the command in dry-run mode.
+test_expect_success 'manual prefetch of missing objects' '
+ git -C pc1 fetch --filter=blob:none origin &&
+ git -C pc1 rev-list master..origin/master --quiet --objects --missing=print \
+ | awk -f print_1.awk \
+ | sed "s/?//" \
+ | sort >observed.oids &&
+ test_line_count = 6 observed.oids &&
+ git -C pc1 fetch-pack --stdin "file://$(pwd)/srv.bare" <observed.oids &&
+ git -C pc1 rev-list master..origin/master --quiet --objects --missing=print \
+ | awk -f print_1.awk \
+ | sed "s/?//" \
+ | sort >observed.oids &&
+ test_line_count = 0 observed.oids
+'
+
+test_done
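
Several of the tests above lean on "git rev-list --quiet --objects --missing=print", which in this combination prints only the reachable objects that are absent locally, each OID prefixed with "?"; the sed "s/?//" step strips that marker before sorting and comparing. Roughly (the OID shown is illustrative):

	git -C pc1 rev-list HEAD --quiet --objects --missing=print
	# ?e69de29bb2d1d6434b8b29ae775ad8c2e48c5391
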
diff --git a/t/t6015-rev-list-show-all-parents.sh b/t/t6015-rev-list-show-all-parents.sh
deleted file mode 100755
index 3c73c93..0000000
--- a/t/t6015-rev-list-show-all-parents.sh
+++ /dev/null
@@ -1,31 +0,0 @@
-#!/bin/sh
-
-test_description='--show-all --parents does not rewrite TREESAME commits'
-
-. ./test-lib.sh
-
-test_expect_success 'set up --show-all --parents test' '
- test_commit one foo.txt &&
- commit1=$(git rev-list -1 HEAD) &&
- test_commit two bar.txt &&
- commit2=$(git rev-list -1 HEAD) &&
- test_commit three foo.txt &&
- commit3=$(git rev-list -1 HEAD)
- '
-
-test_expect_success '--parents rewrites TREESAME parents correctly' '
- echo $commit3 $commit1 > expected &&
- echo $commit1 >> expected &&
- git rev-list --parents HEAD -- foo.txt > actual &&
- test_cmp expected actual
- '
-
-test_expect_success '--parents --show-all does not rewrites TREESAME parents' '
- echo $commit3 $commit2 > expected &&
- echo $commit2 $commit1 >> expected &&
- echo $commit1 >> expected &&
- git rev-list --parents --show-all HEAD -- foo.txt > actual &&
- test_cmp expected actual
- '
-
-test_done
diff --git a/t/t7505-prepare-commit-msg-hook.sh b/t/t7505-prepare-commit-msg-hook.sh
index b13f729..1f43b3c 100755
--- a/t/t7505-prepare-commit-msg-hook.sh
+++ b/t/t7505-prepare-commit-msg-hook.sh
@@ -4,6 +4,38 @@ test_description='prepare-commit-msg hook'
. ./test-lib.sh
+test_expect_success 'set up commits for rebasing' '
+ test_commit root &&
+ test_commit a a a &&
+ test_commit b b b &&
+ git checkout -b rebase-me root &&
+ test_commit rebase-a a aa &&
+ test_commit rebase-b b bb &&
+ for i in $(test_seq 1 13)
+ do
+ test_commit rebase-$i c $i
+ done &&
+ git checkout master &&
+
+ cat >rebase-todo <<-EOF
+ pick $(git rev-parse rebase-a)
+ pick $(git rev-parse rebase-b)
+ fixup $(git rev-parse rebase-1)
+ fixup $(git rev-parse rebase-2)
+ pick $(git rev-parse rebase-3)
+ fixup $(git rev-parse rebase-4)
+ squash $(git rev-parse rebase-5)
+ reword $(git rev-parse rebase-6)
+ squash $(git rev-parse rebase-7)
+ fixup $(git rev-parse rebase-8)
+ fixup $(git rev-parse rebase-9)
+ edit $(git rev-parse rebase-10)
+ squash $(git rev-parse rebase-11)
+ squash $(git rev-parse rebase-12)
+ edit $(git rev-parse rebase-13)
+ EOF
+'
+
test_expect_success 'with no hook' '
echo "foo" > file &&
@@ -31,17 +63,41 @@ mkdir -p "$HOOKDIR"
echo "#!$SHELL_PATH" > "$HOOK"
cat >> "$HOOK" <<'EOF'
-if test "$2" = commit; then
- source=$(git rev-parse "$3")
+GIT_DIR=$(git rev-parse --git-dir)
+if test -d "$GIT_DIR/rebase-merge"
+then
+ rebasing=1
else
- source=${2-default}
+ rebasing=0
fi
-if test "$GIT_EDITOR" = :; then
- sed -e "1s/.*/$source (no editor)/" "$1" > msg.tmp
+
+get_last_cmd () {
+ tail -n1 "$GIT_DIR/rebase-merge/done" | {
+ read cmd id _
+ git log --pretty="[$cmd %s]" -n1 $id
+ }
+}
+
+if test "$2" = commit
+then
+ if test $rebasing = 1
+ then
+ source="$3"
+ else
+ source=$(git rev-parse "$3")
+ fi
else
- sed -e "1s/.*/$source/" "$1" > msg.tmp
+ source=${2-default}
+fi
+test "$GIT_EDITOR" = : && source="$source (no editor)"
+
+if test $rebasing = 1
+then
+ echo "$source $(get_last_cmd)" >"$1"
+else
+ sed -e "1s/.*/$source/" "$1" >msg.tmp
+ mv msg.tmp "$1"
fi
-mv msg.tmp "$1"
exit 0
EOF
chmod +x "$HOOK"
@@ -156,6 +212,63 @@ test_expect_success 'with hook and editor (merge)' '
test "$(git log -1 --pretty=format:%s)" = "merge"
'
+test_rebase () {
+ expect=$1 &&
+ mode=$2 &&
+ test_expect_$expect C_LOCALE_OUTPUT "with hook (rebase $mode)" '
+ test_when_finished "\
+ git rebase --abort
+ git checkout -f master
+ git branch -D tmp" &&
+ git checkout -b tmp rebase-me &&
+ GIT_SEQUENCE_EDITOR="cp rebase-todo" &&
+ GIT_EDITOR="\"$FAKE_EDITOR\"" &&
+ (
+ export GIT_SEQUENCE_EDITOR GIT_EDITOR &&
+ test_must_fail git rebase $mode b &&
+ echo x >a &&
+ git add a &&
+ test_must_fail git rebase --continue &&
+ echo x >b &&
+ git add b &&
+ git commit &&
+ git rebase --continue &&
+ echo y >a &&
+ git add a &&
+ git commit &&
+ git rebase --continue &&
+ echo y >b &&
+ git add b &&
+ git rebase --continue
+ ) &&
+ if test $mode = -p # reword amended after pick
+ then
+ n=18
+ else
+ n=17
+ fi &&
+ git log --pretty=%s -g -n$n HEAD@{1} >actual &&
+ test_cmp "$TEST_DIRECTORY/t7505/expected-rebase$mode" actual
+ '
+}
+
+test_rebase success -i
+test_rebase success -p
+
+test_expect_success 'with hook (cherry-pick)' '
+ test_when_finished "git checkout -f master" &&
+ git checkout -B other b &&
+ git cherry-pick rebase-1 &&
+ test "$(git log -1 --pretty=format:%s)" = "message (no editor)"
+'
+
+test_expect_success 'with hook and editor (cherry-pick)' '
+ test_when_finished "git checkout -f master" &&
+ git checkout -B other b &&
+ git cherry-pick -e rebase-1 &&
+ test "$(git log -1 --pretty=format:%s)" = merge
+'
+
cat > "$HOOK" <<'EOF'
#!/bin/sh
exit 1
@@ -197,4 +310,11 @@ test_expect_success 'with failing hook (merge)' '
'
+test_expect_success C_LOCALE_OUTPUT 'with failing hook (cherry-pick)' '
+ test_when_finished "git checkout -f master" &&
+ git checkout -B other b &&
+ test_must_fail git cherry-pick rebase-1 2>actual &&
+ test $(grep -c prepare-commit-msg actual) = 1
+'
+
test_done
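
For context on the hook rewritten above: a prepare-commit-msg hook receives the path of the commit message file as $1, optionally the source of the message as $2 ("message", "template", "merge", "squash" or "commit") and, when the source is "commit", an object name as $3. A minimal skeleton of that calling convention (not the test's hook, just the interface):

	#!/bin/sh
	# $1 = file holding the proposed commit message
	# $2 = where the message came from (may be empty)
	# $3 = commit object name, only when $2 is "commit"
	case "$2" in
	commit) echo "based on commit $3" >>"$1" ;;
	merge)  echo "this is a merge commit" >>"$1" ;;
	esac
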
diff --git a/t/t7505/expected-rebase-i b/t/t7505/expected-rebase-i
new file mode 100644
index 0000000..c514bdb
--- /dev/null
+++ b/t/t7505/expected-rebase-i
@@ -0,0 +1,17 @@
+message [edit rebase-13]
+message (no editor) [edit rebase-13]
+message [squash rebase-12]
+message (no editor) [squash rebase-11]
+default [edit rebase-10]
+message (no editor) [edit rebase-10]
+message [fixup rebase-9]
+message (no editor) [fixup rebase-8]
+message (no editor) [squash rebase-7]
+message [reword rebase-6]
+message [squash rebase-5]
+message (no editor) [fixup rebase-4]
+message (no editor) [pick rebase-3]
+message (no editor) [fixup rebase-2]
+message (no editor) [fixup rebase-1]
+merge [pick rebase-b]
+message [pick rebase-a]
diff --git a/t/t7505/expected-rebase-p b/t/t7505/expected-rebase-p
new file mode 100644
index 0000000..93bada5
--- /dev/null
+++ b/t/t7505/expected-rebase-p
@@ -0,0 +1,18 @@
+message [edit rebase-13]
+message (no editor) [edit rebase-13]
+message [squash rebase-12]
+message (no editor) [squash rebase-11]
+default [edit rebase-10]
+message (no editor) [edit rebase-10]
+message [fixup rebase-9]
+message (no editor) [fixup rebase-8]
+message (no editor) [squash rebase-7]
+HEAD [reword rebase-6]
+message (no editor) [reword rebase-6]
+message [squash rebase-5]
+message (no editor) [fixup rebase-4]
+message (no editor) [pick rebase-3]
+message (no editor) [fixup rebase-2]
+message (no editor) [fixup rebase-1]
+merge [pick rebase-b]
+message [pick rebase-a]
diff --git a/t/t7607-merge-overwrite.sh b/t/t7607-merge-overwrite.sh
index 9444d6a..9c422bc 100755
--- a/t/t7607-merge-overwrite.sh
+++ b/t/t7607-merge-overwrite.sh
@@ -97,7 +97,10 @@ test_expect_failure 'will not overwrite unstaged changes in renamed file' '
git mv c1.c other.c &&
git commit -m rename &&
cp important other.c &&
- git merge c1a &&
+ test_must_fail git merge c1a >out &&
+ test_i18ngrep "Refusing to lose dirty file at other.c" out &&
+ test_path_is_file other.c~HEAD &&
+ test $(git hash-object other.c~HEAD) = $(git rev-parse c1a:c1.c) &&
test_cmp important other.c
'
diff --git a/t/t9001-send-email.sh b/t/t9001-send-email.sh
index a06e5d7..19601fb 100755
--- a/t/t9001-send-email.sh
+++ b/t/t9001-send-email.sh
@@ -6,6 +6,12 @@ test_description='git send-email'
# May be altered later in the test
PREREQ="PERL"
+replace_variable_fields () {
+ sed -e "s/^\(Date:\).*/\1 DATE-STRING/" \
+ -e "s/^\(Message-Id:\).*/\1 MESSAGE-ID-STRING/" \
+ -e "s/^\(X-Mailer:\).*/\1 X-MAILER-STRING/"
+}
+
test_expect_success $PREREQ 'prepare reference tree' '
echo "1A quick brown fox jumps over the" >file &&
echo "lazy dog" >>file &&
@@ -315,10 +321,7 @@ test_expect_success $PREREQ 'Show all headers' '
--bcc=bcc@example.com \
--in-reply-to="<unique-message-id@example.com>" \
--smtp-server relay.example.com \
- $patches |
- sed -e "s/^\(Date:\).*/\1 DATE-STRING/" \
- -e "s/^\(Message-Id:\).*/\1 MESSAGE-ID-STRING/" \
- -e "s/^\(X-Mailer:\).*/\1 X-MAILER-STRING/" \
+ $patches | replace_variable_fields \
>actual-show-all-headers &&
test_cmp expected-show-all-headers actual-show-all-headers
'
@@ -573,12 +576,6 @@ Result: OK
EOF
"
-replace_variable_fields () {
- sed -e "s/^\(Date:\).*/\1 DATE-STRING/" \
- -e "s/^\(Message-Id:\).*/\1 MESSAGE-ID-STRING/" \
- -e "s/^\(X-Mailer:\).*/\1 X-MAILER-STRING/"
-}
-
test_suppression () {
git send-email \
--dry-run \
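
The relocated replace_variable_fields helper only normalizes headers whose values differ from run to run, so that send-email output can be compared verbatim; for instance (the input line is illustrative):

	echo "Date: Sat, 12 Jun 2010 15:53:58 +0200" | replace_variable_fields
	# -> Date: DATE-STRING
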
diff --git a/t/test-lib-functions.sh b/t/test-lib-functions.sh
index 1701fe2..a679b02 100644
--- a/t/test-lib-functions.sh
+++ b/t/test-lib-functions.sh
@@ -1020,3 +1020,37 @@ nongit () {
"$@"
)
}
+
+# convert stdin to pktline representation; note that empty input becomes an
+# empty packet, not a flush packet (for that you can just print 0000 yourself).
+packetize() {
+ cat >packetize.tmp &&
+ len=$(wc -c <packetize.tmp) &&
+ printf '%04x%s' "$(($len + 4))" &&
+ cat packetize.tmp &&
+ rm -f packetize.tmp
+}
+
+# Parse the input as a series of pktlines, writing the result to stdout.
+# Sideband markers are removed automatically, and the output is routed to
+# stderr if appropriate.
+#
+# NUL bytes are converted to "\\0" for ease of parsing with text tools.
+depacketize () {
+ perl -e '
+ while (read(STDIN, $len, 4) == 4) {
+ if ($len eq "0000") {
+ print "FLUSH\n";
+ } else {
+ read(STDIN, $buf, hex($len) - 4);
+ $buf =~ s/\0/\\0/g;
+ if ($buf =~ s/^[\x2\x3]//) {
+ print STDERR $buf;
+ } else {
+ $buf =~ s/^\x1//;
+ print $buf;
+ }
+ }
+ }
+ '
+}
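
The pkt-line framing these helpers implement is a 4-hex-digit length, which counts the prefix itself, followed by the payload. A quick illustration of both directions:

	printf "hello\n" | packetize
	# -> 000ahello   (6 payload bytes, newline included, + 4 prefix bytes = 0x000a)
	printf "000ahello\n0000" | depacketize
	# -> hello
	#    FLUSH
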
diff --git a/t/test-lib.sh b/t/test-lib.sh
index 9a0a21f..452dec1 100644
--- a/t/test-lib.sh
+++ b/t/test-lib.sh
@@ -939,7 +939,7 @@ then
fi
fi
-GITPERLLIB="$GIT_BUILD_DIR"/perl/blib/lib:"$GIT_BUILD_DIR"/perl/blib/arch/auto/Git
+GITPERLLIB="$GIT_BUILD_DIR"/perl/build/lib
export GITPERLLIB
test -d "$GIT_BUILD_DIR"/templates/blt || {
error "You haven't built things yet, have you?"
@@ -1132,6 +1132,10 @@ test_lazy_prereq EXPENSIVE '
test -n "$GIT_TEST_LONG"
'
+test_lazy_prereq EXPENSIVE_ON_WINDOWS '
+ test_have_prereq EXPENSIVE || test_have_prereq !MINGW,!CYGWIN
+'
+
test_lazy_prereq USR_BIN_TIME '
test -x /usr/bin/time
'
diff --git a/trace.c b/trace.c
index b7530b5..7f3b08e 100644
--- a/trace.c
+++ b/trace.c
@@ -131,7 +131,6 @@ static void print_trace_line(struct trace_key *key, struct strbuf *buf)
{
strbuf_complete_line(buf);
trace_write(key, buf->buf, buf->len);
- strbuf_release(buf);
}
static void trace_vprintf_fl(const char *file, int line, struct trace_key *key,
@@ -144,6 +143,7 @@ static void trace_vprintf_fl(const char *file, int line, struct trace_key *key,
strbuf_vaddf(&buf, format, ap);
print_trace_line(key, &buf);
+ strbuf_release(&buf);
}
static void trace_argv_vprintf_fl(const char *file, int line,
@@ -157,8 +157,9 @@ static void trace_argv_vprintf_fl(const char *file, int line,
strbuf_vaddf(&buf, format, ap);
- sq_quote_argv(&buf, argv, 0);
+ sq_quote_argv_pretty(&buf, argv);
print_trace_line(&trace_default_key, &buf);
+ strbuf_release(&buf);
}
void trace_strbuf_fl(const char *file, int line, struct trace_key *key,
@@ -171,6 +172,7 @@ void trace_strbuf_fl(const char *file, int line, struct trace_key *key,
strbuf_addbuf(&buf, data);
print_trace_line(key, &buf);
+ strbuf_release(&buf);
}
static void trace_performance_vprintf_fl(const char *file, int line,
@@ -190,6 +192,7 @@ static void trace_performance_vprintf_fl(const char *file, int line,
}
print_trace_line(&trace_perf_key, &buf);
+ strbuf_release(&buf);
}
#ifndef HAVE_VARIADIC_MACROS
@@ -426,6 +429,6 @@ void trace_command_performance(const char **argv)
atexit(print_command_performance_atexit);
strbuf_reset(&command_line);
- sq_quote_argv(&command_line, argv, 0);
+ sq_quote_argv_pretty(&command_line, argv);
command_start_time = getnanotime();
}
diff --git a/transport-helper.c b/transport-helper.c
index 5080150..3f380d8 100644
--- a/transport-helper.c
+++ b/transport-helper.c
@@ -672,6 +672,11 @@ static int fetch(struct transport *transport,
if (data->transport_options.update_shallow)
set_helper_option(transport, "update-shallow", "true");
+ if (data->transport_options.filter_options.choice)
+ set_helper_option(
+ transport, "filter",
+ data->transport_options.filter_options.filter_spec);
+
if (data->fetch)
return fetch_with_fetch(transport, nr_heads, to_fetch);
diff --git a/transport.c b/transport.c
index fc80226..00d48b5 100644
--- a/transport.c
+++ b/transport.c
@@ -161,6 +161,15 @@ static int set_git_option(struct git_transport_options *opts,
} else if (!strcmp(name, TRANS_OPT_DEEPEN_RELATIVE)) {
opts->deepen_relative = !!value;
return 0;
+ } else if (!strcmp(name, TRANS_OPT_FROM_PROMISOR)) {
+ opts->from_promisor = !!value;
+ return 0;
+ } else if (!strcmp(name, TRANS_OPT_NO_DEPENDENTS)) {
+ opts->no_dependents = !!value;
+ return 0;
+ } else if (!strcmp(name, TRANS_OPT_LIST_OBJECTS_FILTER)) {
+ parse_list_objects_filter(&opts->filter_options, value);
+ return 0;
}
return 1;
}
@@ -229,6 +238,9 @@ static int fetch_refs_via_pack(struct transport *transport,
data->options.check_self_contained_and_connected;
args.cloning = transport->cloning;
args.update_shallow = data->options.update_shallow;
+ args.from_promisor = data->options.from_promisor;
+ args.no_dependents = data->options.no_dependents;
+ args.filter_options = data->options.filter_options;
if (!data->got_remote_heads) {
connect_setup(transport, 0);
diff --git a/transport.h b/transport.h
index 731c78b..3c68d73 100644
--- a/transport.h
+++ b/transport.h
@@ -4,6 +4,7 @@
#include "cache.h"
#include "run-command.h"
#include "remote.h"
+#include "list-objects-filter-options.h"
struct string_list;
@@ -15,12 +16,15 @@ struct git_transport_options {
unsigned self_contained_and_connected : 1;
unsigned update_shallow : 1;
unsigned deepen_relative : 1;
+ unsigned from_promisor : 1;
+ unsigned no_dependents : 1;
int depth;
const char *deepen_since;
const struct string_list *deepen_not;
const char *uploadpack;
const char *receivepack;
struct push_cas_option *cas;
+ struct list_objects_filter_options filter_options;
};
enum transport_family {
@@ -159,6 +163,18 @@ void transport_check_allowed(const char *type);
/* Send push certificates */
#define TRANS_OPT_PUSH_CERT "pushcert"
+/* Indicate that these objects are being fetched by a promisor */
+#define TRANS_OPT_FROM_PROMISOR "from-promisor"
+
+/*
+ * Indicate that only the objects wanted need to be fetched, not their
+ * dependents
+ */
+#define TRANS_OPT_NO_DEPENDENTS "no-dependents"
+
+/* Filter objects for partial clone and fetch */
+#define TRANS_OPT_LIST_OBJECTS_FILTER "filter"
+
/**
* Returns 0 if the option was used, non-zero otherwise. Prints a
* message to stderr if the option is not used.
diff --git a/unpack-trees.c b/unpack-trees.c
index bdedabc..b2b839a 100644
--- a/unpack-trees.c
+++ b/unpack-trees.c
@@ -15,6 +15,7 @@
#include "submodule.h"
#include "submodule-config.h"
#include "fsmonitor.h"
+#include "fetch-object.h"
/*
* Error messages expected by scripts out of plumbing commands such as
@@ -370,6 +371,27 @@ static int check_updates(struct unpack_trees_options *o)
load_gitmodules_file(index, &state);
enable_delayed_checkout(&state);
+ if (repository_format_partial_clone && o->update && !o->dry_run) {
+ /*
+ * Prefetch the objects that are to be checked out in the loop
+ * below.
+ */
+ struct oid_array to_fetch = OID_ARRAY_INIT;
+ int fetch_if_missing_store = fetch_if_missing;
+ fetch_if_missing = 0;
+ for (i = 0; i < index->cache_nr; i++) {
+ struct cache_entry *ce = index->cache[i];
+ if ((ce->ce_flags & CE_UPDATE) &&
+ !S_ISGITLINK(ce->ce_mode)) {
+ if (!has_object_file(&ce->oid))
+ oid_array_append(&to_fetch, &ce->oid);
+ }
+ }
+ if (to_fetch.nr)
+ fetch_objects(repository_format_partial_clone,
+ &to_fetch);
+ fetch_if_missing = fetch_if_missing_store;
+ }
for (i = 0; i < index->cache_nr; i++) {
struct cache_entry *ce = index->cache[i];
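
The observable effect of this prefetch block is what the t5601 test earlier in this patch checks: in a partial clone, a checkout that needs many missing blobs triggers a single batched fetch instead of one round trip per blob. A rough way to confirm that by hand (paths are illustrative):

	GIT_TRACE_PACKET=/tmp/packet.trace git -C client checkout HEAD^
	grep -c "git> done" /tmp/packet.trace   # expect 1: one negotiation covers all blobs
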
diff --git a/upload-pack.c b/upload-pack.c
index d5de181..f51b6cf 100644
--- a/upload-pack.c
+++ b/upload-pack.c
@@ -10,6 +10,8 @@
#include "diff.h"
#include "revision.h"
#include "list-objects.h"
+#include "list-objects-filter.h"
+#include "list-objects-filter-options.h"
#include "run-command.h"
#include "connect.h"
#include "sigchain.h"
@@ -19,6 +21,7 @@
#include "argv-array.h"
#include "prio-queue.h"
#include "protocol.h"
+#include "quote.h"
static const char * const upload_pack_usage[] = {
N_("git upload-pack [<options>] <dir>"),
@@ -65,6 +68,10 @@ static int advertise_refs;
static int stateless_rpc;
static const char *pack_objects_hook;
+static int filter_capability_requested;
+static int filter_advertise;
+static struct list_objects_filter_options filter_options;
+
static void reset_timeout(void)
{
alarm(timeout);
@@ -132,6 +139,17 @@ static void create_pack_file(void)
argv_array_push(&pack_objects.args, "--delta-base-offset");
if (use_include_tag)
argv_array_push(&pack_objects.args, "--include-tag");
+ if (filter_options.filter_spec) {
+ if (pack_objects.use_shell) {
+ struct strbuf buf = STRBUF_INIT;
+ sq_quote_buf(&buf, filter_options.filter_spec);
+ argv_array_pushf(&pack_objects.args, "--filter=%s", buf.buf);
+ strbuf_release(&buf);
+ } else {
+ argv_array_pushf(&pack_objects.args, "--filter=%s",
+ filter_options.filter_spec);
+ }
+ }
pack_objects.in = -1;
pack_objects.out = -1;
@@ -795,6 +813,12 @@ static void receive_needs(void)
deepen_rev_list = 1;
continue;
}
+ if (skip_prefix(line, "filter ", &arg)) {
+ if (!filter_capability_requested)
+ die("git upload-pack: filtering capability not negotiated");
+ parse_list_objects_filter(&filter_options, arg);
+ continue;
+ }
if (!skip_prefix(line, "want ", &arg) ||
get_oid_hex(arg, &oid_buf))
die("git upload-pack: protocol error, "
@@ -822,6 +846,8 @@ static void receive_needs(void)
no_progress = 1;
if (parse_feature_request(features, "include-tag"))
use_include_tag = 1;
+ if (parse_feature_request(features, "filter"))
+ filter_capability_requested = 1;
o = parse_object(&oid_buf);
if (!o) {
@@ -941,7 +967,7 @@ static int send_ref(const char *refname, const struct object_id *oid,
struct strbuf symref_info = STRBUF_INIT;
format_symref_info(&symref_info, cb_data);
- packet_write_fmt(1, "%s %s%c%s%s%s%s%s agent=%s\n",
+ packet_write_fmt(1, "%s %s%c%s%s%s%s%s%s agent=%s\n",
oid_to_hex(oid), refname_nons,
0, capabilities,
(allow_unadvertised_object_request & ALLOW_TIP_SHA1) ?
@@ -950,6 +976,7 @@ static int send_ref(const char *refname, const struct object_id *oid,
" allow-reachable-sha1-in-want" : "",
stateless_rpc ? " no-done" : "",
symref_info.buf,
+ filter_advertise ? " filter" : "",
git_user_agent_sanitized());
strbuf_release(&symref_info);
} else {
@@ -1028,6 +1055,8 @@ static int upload_pack_config(const char *var, const char *value, void *unused)
} else if (current_config_scope() != CONFIG_SCOPE_REPO) {
if (!strcmp("uploadpack.packobjectshook", var))
return git_config_string(&pack_objects_hook, var, value);
+ } else if (!strcmp("uploadpack.allowfilter", var)) {
+ filter_advertise = git_config_bool(var, value);
}
return parse_hide_refs_config(var, value, "uploadpack");
}
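
On the server side the filter capability is strictly opt-in: without uploadpack.allowfilter it is neither advertised nor accepted. In practice, enabling it is a one-line configuration change (the repository path is illustrative):

	git -C /srv/git/repo.git config uploadpack.allowfilter true
	# also set by several tests above, to let clients fetch arbitrary objects:
	git -C /srv/git/repo.git config uploadpack.allowanysha1inwant true
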
diff --git a/wrap-for-bin.sh b/wrap-for-bin.sh
index 22b6e49..5842408 100644
--- a/wrap-for-bin.sh
+++ b/wrap-for-bin.sh
@@ -14,7 +14,7 @@ else
GIT_TEMPLATE_DIR='@@BUILD_DIR@@/templates/blt'
export GIT_TEMPLATE_DIR
fi
-GITPERLLIB='@@BUILD_DIR@@/perl/blib/lib'"${GITPERLLIB:+:$GITPERLLIB}"
+GITPERLLIB='@@BUILD_DIR@@/perl/build/lib'"${GITPERLLIB:+:$GITPERLLIB}"
GIT_TEXTDOMAINDIR='@@BUILD_DIR@@/po/build/locale'
PATH='@@BUILD_DIR@@/bin-wrappers:'"$PATH"