-rw-r--r--  .github/workflows/main.yml | 5
-rw-r--r--  Documentation/Makefile | 1
-rw-r--r--  Documentation/RelNotes/2.32.0.txt | 54
-rw-r--r--  Documentation/config/clone.txt | 4
-rw-r--r--  Documentation/config/pack.txt | 15
-rw-r--r--  Documentation/git-clone.txt | 7
-rw-r--r--  Documentation/git-commit.txt | 14
-rw-r--r--  Documentation/git-format-patch.txt | 5
-rw-r--r--  Documentation/git-multi-pack-index.txt | 14
-rw-r--r--  Documentation/howto/coordinate-embargoed-releases.txt | 131
-rw-r--r--  Documentation/technical/api-simple-ipc.txt | 105
-rw-r--r--  Documentation/technical/multi-pack-index.txt | 5
-rw-r--r--  Documentation/technical/pack-format.txt | 83
-rw-r--r--  Documentation/user-manual.txt | 3
-rw-r--r--  Makefile | 20
-rw-r--r--  SECURITY.md | 51
-rw-r--r--  apply.c | 1
-rw-r--r--  builtin/checkout-index.c | 1
-rw-r--r--  builtin/checkout.c | 1
-rw-r--r--  builtin/clone.c | 35
-rw-r--r--  builtin/column.c | 8
-rw-r--r--  builtin/commit.c | 22
-rw-r--r--  builtin/credential-cache--daemon.c | 3
-rw-r--r--  builtin/credential-cache.c | 2
-rw-r--r--  builtin/difftool.c | 1
-rw-r--r--  builtin/fsck.c | 14
-rw-r--r--  builtin/index-pack.c | 30
-rw-r--r--  builtin/init-db.c | 32
-rw-r--r--  builtin/log.c | 25
-rw-r--r--  builtin/ls-remote.c | 4
-rw-r--r--  builtin/mktag.c | 14
-rw-r--r--  builtin/multi-pack-index.c | 182
-rw-r--r--  builtin/pack-objects.c | 34
-rw-r--r--  builtin/range-diff.c | 2
-rw-r--r--  builtin/remote.c | 8
-rw-r--r--  builtin/repack.c | 2
-rw-r--r--  builtin/reset.c | 2
-rw-r--r--  builtin/revert.c | 4
-rw-r--r--  builtin/stash.c | 2
-rw-r--r--  builtin/symbolic-ref.c | 4
-rw-r--r--  builtin/unpack-objects.c | 3
-rw-r--r--  builtin/worktree.c | 10
-rw-r--r--  cache.h | 24
-rw-r--r--  compat/precompose_utf8.c | 9
-rw-r--r--  compat/precompose_utf8.h | 1
-rw-r--r--  compat/simple-ipc/ipc-shared.c | 28
-rw-r--r--  compat/simple-ipc/ipc-unix-socket.c | 999
-rw-r--r--  compat/simple-ipc/ipc-win32.c | 751
-rw-r--r--  config.mak.uname | 2
-rw-r--r--  contrib/buildsystems/CMakeLists.txt | 34
-rw-r--r--  convert.c | 154
-rw-r--r--  convert.h | 96
-rw-r--r--  csum-file.c | 33
-rw-r--r--  daemon.c | 8
-rw-r--r--  diffcore-rename.c | 63
-rw-r--r--  diffcore.h | 1
-rw-r--r--  entry.c | 85
-rw-r--r--  entry.h | 59
-rw-r--r--  fetch-pack.c | 43
-rw-r--r--  fetch-pack.h | 1
-rw-r--r--  fsck.c | 207
-rw-r--r--  fsck.h | 127
-rw-r--r--  git-compat-util.h | 5
-rw-r--r--  git.c | 2
-rw-r--r--  log-tree.c | 10
-rw-r--r--  merge-ort.c | 234
-rw-r--r--  midx.c | 219
-rw-r--r--  midx.h | 11
-rw-r--r--  pack-bitmap.c | 25
-rw-r--r--  pack-bitmap.h | 4
-rw-r--r--  pack-revindex.c | 126
-rw-r--r--  pack-revindex.h | 53
-rw-r--r--  pack-write.c | 36
-rw-r--r--  pack.h | 1
-rw-r--r--  packfile.c | 3
-rw-r--r--  parse-options.c | 19
-rw-r--r--  parse-options.h | 35
-rw-r--r--  pkt-line.c | 59
-rw-r--r--  pkt-line.h | 17
-rw-r--r--  ref-filter.c | 2
-rw-r--r--  revision.h | 2
-rw-r--r--  sequencer.c | 52
-rw-r--r--  sequencer.h | 6
-rw-r--r--  setup.c | 28
-rw-r--r--  simple-ipc.h | 239
-rw-r--r--  t/helper/test-bitmap.c | 24
-rw-r--r--  t/helper/test-read-midx.c | 24
-rw-r--r--  t/helper/test-simple-ipc.c | 787
-rw-r--r--  t/helper/test-tool.c | 2
-rw-r--r--  t/helper/test-tool.h | 2
-rwxr-xr-x  t/perf/p5310-pack-bitmaps.sh | 14
-rwxr-xr-x  t/t0052-simple-ipc.sh | 122
-rwxr-xr-x  t/t3206-range-diff.sh | 24
-rwxr-xr-x  t/t3510-cherry-pick-sequence.sh | 32
-rwxr-xr-x  t/t4014-format-patch.sh | 34
-rwxr-xr-x  t/t5310-pack-bitmaps.sh | 38
-rwxr-xr-x  t/t5319-multi-pack-index.sh | 42
-rwxr-xr-x  t/t5601-clone.sh | 9
-rwxr-xr-x  t/t5606-clone-options.sh | 27
-rwxr-xr-x  t/t5611-clone-config.sh | 25
-rwxr-xr-x  t/t6300-for-each-ref.sh | 10
-rwxr-xr-x  t/t6423-merge-rename-directories.sh | 71
-rwxr-xr-x  t/t7502-commit-porcelain.sh | 312
-rw-r--r--  transport.c | 6
-rw-r--r--  transport.h | 4
-rw-r--r--  unix-socket.c | 53
-rw-r--r--  unix-socket.h | 12
-rw-r--r--  unix-stream-server.c | 125
-rw-r--r--  unix-stream-server.h | 33
-rw-r--r--  unpack-trees.c | 1
110 files changed, 6077 insertions, 632 deletions
diff --git a/.github/workflows/main.yml b/.github/workflows/main.yml
index 5f2f884..73856ba 100644
--- a/.github/workflows/main.yml
+++ b/.github/workflows/main.yml
@@ -186,6 +186,11 @@ jobs:
## Unzip and remove the artifact
unzip artifacts.zip
rm artifacts.zip
+ - name: initialize vcpkg
+ uses: actions/checkout@v2
+ with:
+ repository: 'microsoft/vcpkg'
+ path: 'compat/vcbuild/vcpkg'
- name: download vcpkg artifacts
shell: powershell
run: |
diff --git a/Documentation/Makefile b/Documentation/Makefile
index 81d1bf7..874a01d 100644
--- a/Documentation/Makefile
+++ b/Documentation/Makefile
@@ -76,6 +76,7 @@ SP_ARTICLES += howto/rebuild-from-update-hook
SP_ARTICLES += howto/rebase-from-internal-branch
SP_ARTICLES += howto/keep-canonical-history-correct
SP_ARTICLES += howto/maintain-git
+SP_ARTICLES += howto/coordinate-embargoed-releases
API_DOCS = $(patsubst %.txt,%,$(filter-out technical/api-index-skel.txt technical/api-index.txt, $(wildcard technical/api-*.txt)))
SP_ARTICLES += $(API_DOCS)
diff --git a/Documentation/RelNotes/2.32.0.txt b/Documentation/RelNotes/2.32.0.txt
index f39eede..5c329d5 100644
--- a/Documentation/RelNotes/2.32.0.txt
+++ b/Documentation/RelNotes/2.32.0.txt
@@ -47,6 +47,17 @@ UI, Workflows & Features
* "git send-email" learned to honor the core.hooksPath configuration.
+ * "git format-patch -v<n>" learned to allow a reroll count that is
+ not an integer.
+
+ * "git commit" learned "--trailer <key>[=<value>]" option; together
+ with the interpret-trailers command, this will make it easier to
+ support custom trailers.
+
+ * "git clone --reject-shallow" option fails the clone as soon as we
+ notice that we are cloning from a shallow repository.
+
+
Performance, Internal Implementation, Development Support etc.
@@ -65,6 +76,27 @@ Performance, Internal Implementation, Development Support etc.
* Reorganize Makefile to allow building git.o and other essential
objects without extra stuff needed only for testing.
+ * Preparatory API changes for parallel checkout.
+
+ * A simple IPC interface gets introduced to build services like
+ fsmonitor on top.
+
+ * Fsck API clean-up.
+
+ * SECURITY.md that is facing individual contributors and end users
+ has been introduced. Also a procedure to follow when preparing
+ embargoed releases has been spelled out.
+ (merge 09420b7648 js/security-md later to maint).
+
+ * Optimize "rev-list --use-bitmap-index --objects" corner case that
+ uses negative tags as the stopping points.
+
+ * CMake update for vsbuild.
+
+ * An on-disk reverse-index to map the in-pack location of an object
+ back to its object name across multiple packfiles is introduced.
+
+ * Generate [ec]tags under $(QUIET_GEN).
Fixes since v2.31
@@ -120,6 +152,28 @@ Fixes since v2.31
symbolic links, which has been corrected.
(merge fab78a0c3d mt/checkout-remove-nofollow later to maint).
+ * A few option description strings started with capital letters,
+ which were corrected.
+ (merge 5ee90326dc cc/downcase-opt-help later to maint).
+
+ * Plug or annotate remaining leaks that trigger while running the
+ very basic set of tests.
+ (merge 68ffe095a2 ah/plugleaks later to maint).
+
+ * The hashwrite() API uses a buffering mechanism to avoid calling
+ write(2) too frequently. This logic has been refactored to be
+ easier to understand.
+ (merge ddaf1f62e3 ds/clarify-hashwrite later to maint).
+
+ * "git cherry-pick/revert" with or without "--[no-]edit" did not spawn
+ the editor as expected (e.g. "revert --no-edit" after a conflict
+ still asked to edit the message), which has been corrected.
+ (merge 39edfd5cbc en/sequencer-edit-upon-conflict-fix later to maint).
+
+ * "git daemon" has been tightened against systems that take backslash
+ as directory separator.
+ (merge 9a7f1ce8b7 rs/daemon-sanitize-dir-sep later to maint).
+
* Other code cleanup, docfix, build fix, etc.
(merge f451960708 dl/cat-file-doc-cleanup later to maint).
(merge 12604a8d0c sv/t9801-test-path-is-file-cleanup later to maint).
diff --git a/Documentation/config/clone.txt b/Documentation/config/clone.txt
index 47de36a..7bcfbd1 100644
--- a/Documentation/config/clone.txt
+++ b/Documentation/config/clone.txt
@@ -2,3 +2,7 @@ clone.defaultRemoteName::
The name of the remote to create when cloning a repository. Defaults to
`origin`, and can be overridden by passing the `--origin` command-line
option to linkgit:git-clone[1].
+
+clone.rejectShallow::
+ Reject cloning a repository if it is a shallow one; this can be
+ overridden by passing the `--reject-shallow` option on the command
+ line. See linkgit:git-clone[1].
diff --git a/Documentation/config/pack.txt b/Documentation/config/pack.txt
index 3da4ea9..c0844d8 100644
--- a/Documentation/config/pack.txt
+++ b/Documentation/config/pack.txt
@@ -122,6 +122,21 @@ pack.useSparse::
commits contain certain types of direct renames. Default is
`true`.
+pack.preferBitmapTips::
+ When selecting which commits will receive bitmaps, prefer a
+ commit at the tip of any reference that is a suffix of any value
+ of this configuration over any other commits in the "selection
+ window".
++
+Note that setting this configuration to `refs/foo` does not mean that
+the commits at the tips of `refs/foo/bar` and `refs/foo/baz` will
+necessarily be selected. This is because commits are selected for
+bitmaps from within a series of windows of variable length.
++
+If a commit at the tip of any reference which is a suffix of any value
+of this configuration is seen in a window, it is immediately given
+preference over any other commit in that window.
+
pack.writeBitmaps (deprecated)::
This is a deprecated synonym for `repack.writeBitmaps`.
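
The `pack.preferBitmapTips` behaviour described above reduces to a simple rule
applied inside each selection window. Below is a minimal, self-contained C
sketch of that rule; the struct and function names are illustrative and are
not Git's internal bitmap-selection code:

    #include <stddef.h>
    #include <stdio.h>

    /* Illustrative types only, not Git's internal bitmap-selection code. */
    struct candidate {
        const char *name;
        int preferred_tip; /* tip of a ref matching pack.preferBitmapTips */
    };

    static const struct candidate *select_from_window(const struct candidate *win,
                                                      size_t n)
    {
        size_t i;

        /* a preferred tip is taken immediately, overriding the heuristic */
        for (i = 0; i < n; i++)
            if (win[i].preferred_tip)
                return &win[i];

        /* stand-in for the real selection heuristic */
        return n ? &win[n - 1] : NULL;
    }

    int main(void)
    {
        struct candidate window[] = {
            { "commit A", 0 },
            { "commit B", 1 }, /* tip of, e.g., refs/heads/trunk */
            { "commit C", 0 },
        };
        const struct candidate *chosen = select_from_window(window, 3);

        printf("selected: %s\n", chosen ? chosen->name : "(none)");
        return 0;
    }

The real selection heuristic is more involved; the point is only that a commit
marked as a preferred tip short-circuits whatever heuristic would otherwise
run on the window.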
diff --git a/Documentation/git-clone.txt b/Documentation/git-clone.txt
index 02d9c19..3fe3810 100644
--- a/Documentation/git-clone.txt
+++ b/Documentation/git-clone.txt
@@ -15,7 +15,7 @@ SYNOPSIS
[--dissociate] [--separate-git-dir <git dir>]
[--depth <depth>] [--[no-]single-branch] [--no-tags]
[--recurse-submodules[=<pathspec>]] [--[no-]shallow-submodules]
- [--[no-]remote-submodules] [--jobs <n>] [--sparse]
+ [--[no-]remote-submodules] [--jobs <n>] [--sparse] [--[no-]reject-shallow]
[--filter=<filter>] [--] <repository>
[<directory>]
@@ -149,6 +149,11 @@ objects from the source repository into a pack in the cloned repository.
--no-checkout::
No checkout of HEAD is performed after the clone is complete.
+--[no-]reject-shallow::
+ Fail if the source repository is a shallow repository.
+ The 'clone.rejectShallow' configuration variable can be used to
+ specify the default.
+
--bare::
Make a 'bare' Git repository. That is, instead of
creating `<directory>` and placing the administrative
diff --git a/Documentation/git-commit.txt b/Documentation/git-commit.txt
index 3c69f46..340c5fb 100644
--- a/Documentation/git-commit.txt
+++ b/Documentation/git-commit.txt
@@ -14,7 +14,8 @@ SYNOPSIS
[--allow-empty-message] [--no-verify] [-e] [--author=<author>]
[--date=<date>] [--cleanup=<mode>] [--[no-]status]
[-i | -o] [--pathspec-from-file=<file> [--pathspec-file-nul]]
- [-S[<keyid>]] [--] [<pathspec>...]
+ [(--trailer <token>[(=|:)<value>])...] [-S[<keyid>]]
+ [--] [<pathspec>...]
DESCRIPTION
-----------
@@ -199,6 +200,17 @@ The `-m` option is mutually exclusive with `-c`, `-C`, and `-F`.
include::signoff-option.txt[]
+--trailer <token>[(=|:)<value>]::
+ Specify a (<token>, <value>) pair that should be applied as a
+ trailer. (e.g. `git commit --trailer "Signed-off-by:C O Mitter \
+ <committer@example.com>" --trailer "Helped-by:C O Mitter \
+ <committer@example.com>"` will add the "Signed-off-by" trailer
+ and the "Helped-by" trailer to the commit message.)
+ The `trailer.*` configuration variables
+ (linkgit:git-interpret-trailers[1]) can be used to define if
+ a duplicated trailer is omitted, where in the run of trailers
+ each trailer would appear, and other details.
+
-n::
--no-verify::
This option bypasses the pre-commit and commit-msg hooks.
diff --git a/Documentation/git-format-patch.txt b/Documentation/git-format-patch.txt
index 5cd8578..911da18 100644
--- a/Documentation/git-format-patch.txt
+++ b/Documentation/git-format-patch.txt
@@ -238,6 +238,11 @@ populated with placeholder text.
`--subject-prefix` option) has ` v<n>` appended to it. E.g.
`--reroll-count=4` may produce `v4-0001-add-makefile.patch`
file that has "Subject: [PATCH v4 1/20] Add makefile" in it.
+ `<n>` does not have to be an integer (e.g. "--reroll-count=4.4",
+ or "--reroll-count=4rev2" are allowed), but the downside of
+ using such a reroll-count is that the range-diff/interdiff
+ with the previous version does not state exactly which
+ version the new iteration is compared against.
--to=<email>::
Add a `To:` header to the email headers. This is in addition
diff --git a/Documentation/git-multi-pack-index.txt b/Documentation/git-multi-pack-index.txt
index eb0caa0..ffd601b 100644
--- a/Documentation/git-multi-pack-index.txt
+++ b/Documentation/git-multi-pack-index.txt
@@ -9,7 +9,8 @@ git-multi-pack-index - Write and verify multi-pack-indexes
SYNOPSIS
--------
[verse]
-'git multi-pack-index' [--object-dir=<dir>] [--[no-]progress] <subcommand>
+'git multi-pack-index' [--object-dir=<dir>] [--[no-]progress]
+ [--preferred-pack=<pack>] <subcommand>
DESCRIPTION
-----------
@@ -30,7 +31,16 @@ OPTIONS
The following subcommands are available:
write::
- Write a new MIDX file.
+ Write a new MIDX file. The following options are available for
+ the `write` sub-command:
++
+--
+ --preferred-pack=<pack>::
+ Optionally specify the tie-breaking pack used when
+ multiple packs contain the same object. If not given,
+ ties are broken in favor of the pack with the lowest
+ mtime.
+--
verify::
Verify the contents of the MIDX file.
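
The `--preferred-pack` tie-breaking rule above can be sketched as follows.
This is an illustrative stand-in (hypothetical names, not Git's midx code) for
the documented behaviour: an explicitly preferred pack wins, and absent one,
the pack with the lowest mtime does:

    #include <stddef.h>
    #include <stdio.h>
    #include <string.h>
    #include <time.h>

    /* Illustrative stand-in for the documented tie-breaking rule. */
    struct pack_info {
        const char *name;
        time_t mtime;
    };

    static const struct pack_info *break_tie(const struct pack_info *packs,
                                             size_t n, const char *preferred)
    {
        const struct pack_info *best = NULL;
        size_t i;

        for (i = 0; i < n; i++) {
            /* an explicitly preferred pack always wins */
            if (preferred && !strcmp(packs[i].name, preferred))
                return &packs[i];
            /* otherwise the pack with the lowest mtime wins */
            if (!best || packs[i].mtime < best->mtime)
                best = &packs[i];
        }
        return best;
    }

    int main(void)
    {
        const struct pack_info packs[] = {
            { "pack-aaa.pack", 1000 },
            { "pack-bbb.pack", 2000 },
        };

        printf("%s\n", break_tie(packs, 2, NULL)->name);            /* pack-aaa.pack */
        printf("%s\n", break_tie(packs, 2, "pack-bbb.pack")->name); /* pack-bbb.pack */
        return 0;
    }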
diff --git a/Documentation/howto/coordinate-embargoed-releases.txt b/Documentation/howto/coordinate-embargoed-releases.txt
new file mode 100644
index 0000000..601aae8
--- /dev/null
+++ b/Documentation/howto/coordinate-embargoed-releases.txt
@@ -0,0 +1,131 @@
+Content-type: text/asciidoc
+Abstract: When a critical vulnerability is discovered and fixed, we follow this
+ script to coordinate a public release.
+
+How we coordinate embargoed releases
+====================================
+
+To protect Git users from critical vulnerabilities, we do not just release
+fixed versions like regular maintenance releases. Instead, we coordinate
+releases with packagers, keeping the fixes under an embargo until the release
+date. That way, users will have a chance to upgrade on that date, no matter
+what Operating System or distribution they run.
+
+Open a Security Advisory draft
+------------------------------
+
+The first step is to https://github.com/git/git/security/advisories/new[open an
+advisory]. Technically, it is not necessary, but it is convenient and saves a
+bit of hassle. This advisory can also be used to obtain the CVE number and it
+will give us a private fork associated with it that can be used to collaborate
+on a fix.
+
+Release date of the embargoed version
+-------------------------------------
+
+If the vulnerability affects Windows users, we want to have our friends over at
+Visual Studio on board. This means we need to target a "Patch Tuesday" (i.e. a
+second Tuesday of the month), at the minimum three weeks from heads-up to
+coordinated release.
+
+If the vulnerability affects the server side, or can benefit from scans on the
+server side (i.e. if `git fsck` can detect an attack), it is important to give
+all involved Git repository hosting sites enough time to scan all of those
+repositories.
+
+Notifying the Linux distributions
+---------------------------------
+
+At most two weeks before release date, we need to send a notification to
+distros@vs.openwall.org, preferably less than 7 days before the release date.
+This will reach most (all?) Linux distributions. See an example below, and the
+guidelines for this mailing list at
+https://oss-security.openwall.org/wiki/mailing-lists/distros#how-to-use-the-lists[here].
+
+Once the version has been published, we send a note about that to oss-security.
+As an example, see https://www.openwall.com/lists/oss-security/2019/12/13/1[the
+v2.24.1 mail];
+https://oss-security.openwall.org/wiki/mailing-lists/oss-security[Here] are
+their guidelines.
+
+The mail to oss-security should also describe the exploit, and give credit to
+the reporter(s): security researchers still receive too little respect for the
+invaluable service they provide, and public credit goes a long way to keep them
+paid by their respective organizations.
+
+Technically, describing any exploit can be delayed up to 7 days, but we
+usually refrain from delaying and include the description right away.
+
+As a courtesy we typically attach a Git bundle (as `.tar.xz` because the list
+will drop `.bundle` attachments) in the mail to distros@ so that the involved
+parties can take care of integrating/backporting them. This bundle is typically
+created using a command like this:
+
+ git bundle create cve-xxx.bundle ^origin/master vA.B.C vD.E.F
+ tar cJvf cve-xxx.bundle.tar.xz cve-xxx.bundle
+
+Example mail to distros@vs.openwall.org
+---------------------------------------
+
+....
+To: distros@vs.openwall.org
+Cc: git-security@googlegroups.com, <other people involved in the report/fix>
+Subject: [vs] Upcoming Git security fix release
+
+Team,
+
+The Git project will release new versions on <date> at 10am Pacific Time or
+soon thereafter. I have attached a Git bundle (embedded in a `.tar.xz` to avoid
+it being dropped) which you can fetch into a clone of
+https://github.com/git/git via `git fetch --tags /path/to/cve-xxx.bundle`,
+containing the tags for versions <versions>.
+
+You can verify with `git tag -v <tag>` that the versions were signed by
+the Git maintainer, using the same GPG key as e.g. v2.24.0.
+
+Please use these tags to prepare `git` packages for your various
+distributions, using the appropriate tagged versions. The added test cases
+help verify the correctness.
+
+The addressed issues are:
+
+<list of CVEs with a short description, typically copy/pasted from Git's
+release notes, usually demo exploit(s), too>
+
+Credit for finding the vulnerability goes to <reporter>, credit for fixing
+it goes to <developer>.
+
+Thanks,
+<name>
+
+....
+
+Example mail to oss-security@lists.openwall.com
+-----------------------------------------------
+
+....
+To: oss-security@lists.openwall.com
+Cc: git-security@googlegroups.com, <other people involved in the report/fix>
+Subject: git: <copy from security advisory>
+
+Team,
+
+The Git project released new versions on <date>, addressing <CVE>.
+
+All supported platforms are affected in one way or another, and all Git
+versions all the way back to <version> are affected. The fixed versions are:
+<versions>.
+
+Link to the announcement: <link to lore.kernel.org/git>
+
+We highly recommend upgrading.
+
+The addressed issues are:
+* <list of CVEs and their explanations, along with demo exploits>
+
+Credit for finding the vulnerability goes to <reporter>, credit for fixing
+it goes to <developer>.
+
+Thanks,
+<name>
+....
diff --git a/Documentation/technical/api-simple-ipc.txt b/Documentation/technical/api-simple-ipc.txt
new file mode 100644
index 0000000..d79ad32
--- /dev/null
+++ b/Documentation/technical/api-simple-ipc.txt
@@ -0,0 +1,105 @@
+Simple-IPC API
+==============
+
+The Simple-IPC API is a collection of `ipc_` prefixed library routines
+and a basic communication protocol that allow an IPC-client process to
+send an application-specific IPC-request message to an IPC-server
+process and receive an application-specific IPC-response message.
+
+Communication occurs over a named pipe on Windows and a Unix domain
+socket on other platforms. IPC-clients and IPC-servers rendezvous at
+a previously agreed-to application-specific pathname (which is outside
+the scope of this design) that is local to the computer system.
+
+The IPC-server routines within the server application process create a
+thread pool to listen for connections and receive request messages
+from multiple concurrent IPC-clients. When received, these messages
+are dispatched up to the server application callbacks for handling.
+IPC-server routines then incrementally relay responses back to the
+IPC-client.
+
+The IPC-client routines within a client application process connect
+to the IPC-server and send a request message and wait for a response.
+When received, the response is returned to the caller.
+
+For example, the `fsmonitor--daemon` feature will be built as a server
+application on top of the IPC-server library routines. It will have
+threads watching for file system events and a thread pool waiting for
+client connections. Clients, such as `git status` will request a list
+of file system events since a point in time and the server will
+respond with a list of changed files and directories. The formats of
+the request and response are application-specific; the IPC-client and
+IPC-server routines treat them as opaque byte streams.
+
+
+Comparison with sub-process model
+---------------------------------
+
+The Simple-IPC mechanism differs from the existing `sub-process.c`
+model (Documentation/technical/long-running-process-protocol.txt)
+used by applications like Git-LFS. In the LFS-style sub-process model
+the helper is started by the foreground process, communication happens
+via a pair of file descriptors bound to the stdin/stdout of the
+sub-process, the sub-process only serves the current foreground
+process, and the sub-process exits when the foreground process
+terminates.
+
+In the Simple-IPC model the server is a very long-running service. It
+can service many clients at the same time and has a private socket or
+named pipe connection to each active client. It might be started
+(on-demand) by the current client process or it might have been
+started by a previous client or by the OS at boot time. The server
+process is not associated with a terminal and it persists after
+clients terminate. Clients do not have access to the stdin/stdout of
+the server process and therefore must communicate over sockets or
+named pipes.
+
+
+Server startup and shutdown
+---------------------------
+
+How an application server based upon IPC-server is started is also
+outside the scope of the Simple-IPC design and is a property of the
+application using it. For example, the server might be started or
+restarted during routine maintenance operations, or it might be
+started as a system service during the system boot-up sequence, or it
+might be started on-demand by a foreground Git command when needed.
+
+Similarly, server shutdown is a property of the application using
+the simple-ipc routines. For example, the server might decide to
+shutdown when idle or only upon explicit request.
+
+
+Simple-IPC protocol
+-------------------
+
+The Simple-IPC protocol consists of a single request message from the
+client and an optional response message from the server. Both the
+client and server messages are unlimited in length and are terminated
+with a flush packet.
+
+The pkt-line routines (Documentation/technical/protocol-common.txt)
+are used to simplify buffer management during message generation,
+transmission, and reception. A flush packet is used to mark the end
+of the message. This allows the sender to incrementally generate and
+transmit the message. It allows the receiver to incrementally receive
+the message in chunks and to know when they have received the entire
+message.
+
+The actual byte format of the client request and server response
+messages is application-specific. The IPC layer transmits and
+receives them as opaque byte buffers without any concern for the
+content within. It is the job of the calling application layer to
+understand the contents of the request and response messages.
+
+
+Summary
+-------
+
+Conceptually, the Simple-IPC protocol is similar to an HTTP REST
+request. Clients connect, make an application-specific and
+stateless request, receive an application-specific
+response, and disconnect. It is a one round trip facility for
+querying the server. The Simple-IPC routines hide the socket,
+named pipe, and thread pool details and allow the application
+layer to focus on the application at hand.
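
Since the protocol above leans on pkt-line framing with a terminating flush
packet, here is a small, self-contained sketch of that wire format. It is not
Git's pkt-line.c; it only illustrates the framing: each pkt-line starts with
four hex digits giving the total length of the line (including those four
bytes), and "0000" is the flush packet that ends a message:

    #include <stdio.h>
    #include <string.h>
    #include <unistd.h>

    /*
     * Illustration only, not Git's pkt-line.c: write one message as a
     * series of pkt-lines terminated by a flush packet ("0000").
     */
    static int write_pkt(int fd, const char *payload, size_t len)
    {
        char hdr[5];

        if (len > 65516) /* pkt-line payloads are bounded */
            return -1;
        /* the length covers the 4-byte header plus the payload */
        snprintf(hdr, sizeof(hdr), "%04x", (unsigned int)(len + 4));
        if (write(fd, hdr, 4) != 4 ||
            write(fd, payload, len) != (ssize_t)len)
            return -1;
        return 0;
    }

    static int write_flush(int fd)
    {
        /* the flush packet marks the end of the request or response */
        return write(fd, "0000", 4) == 4 ? 0 : -1;
    }

    int main(void)
    {
        const char *request = "example application-specific request";

        /* a real IPC-client would write to a socket or named pipe */
        if (write_pkt(STDOUT_FILENO, request, strlen(request)) ||
            write_flush(STDOUT_FILENO))
            return 1;
        return 0;
    }

A real IPC-client would write these packets to the rendezvous socket or named
pipe and then read the flush-terminated response the same way.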
diff --git a/Documentation/technical/multi-pack-index.txt b/Documentation/technical/multi-pack-index.txt
index e8e377a..fb68897 100644
--- a/Documentation/technical/multi-pack-index.txt
+++ b/Documentation/technical/multi-pack-index.txt
@@ -43,8 +43,9 @@ Design Details
a change in format.
- The MIDX keeps only one record per object ID. If an object appears
- in multiple packfiles, then the MIDX selects the copy in the most-
- recently modified packfile.
+ in multiple packfiles, then the MIDX selects the copy in the
+ preferred packfile, otherwise selecting from the most-recently
+ modified packfile.
- If there exist packfiles in the pack directory not registered in
the MIDX, then those packfiles are loaded into the `packed_git`
diff --git a/Documentation/technical/pack-format.txt b/Documentation/technical/pack-format.txt
index 1faa949..8d2f42f 100644
--- a/Documentation/technical/pack-format.txt
+++ b/Documentation/technical/pack-format.txt
@@ -379,3 +379,86 @@ CHUNK DATA:
TRAILER:
Index checksum of the above contents.
+
+== multi-pack-index reverse indexes
+
+Similar to the pack-based reverse index, the multi-pack index can also
+be used to generate a reverse index.
+
+Instead of mapping between offset, pack-, and index position, this
+reverse index maps between an object's position within the MIDX, and
+that object's position within a pseudo-pack that the MIDX describes
+(i.e., the ith entry of the multi-pack reverse index holds the MIDX
+position of the ith object in pseudo-pack order).
+
+To clarify the difference between these orderings, consider a multi-pack
+reachability bitmap (which does not yet exist, but is what we are
+building towards here). Each bit needs to correspond to an object in the
+MIDX, and so we need an efficient mapping from bit position to MIDX
+position.
+
+One solution is to let bits occupy the same position in the oid-sorted
+index stored by the MIDX. But because oids are effectively random, their
+resulting reachability bitmaps would have no locality, and thus compress
+poorly. (This is the reason that single-pack bitmaps use the pack
+ordering, and not the .idx ordering, for the same purpose.)
+
+So we'd like to define an ordering for the whole MIDX based around
+pack ordering, which has far better locality (and thus compresses more
+efficiently). We can think of a pseudo-pack created by the concatenation
+of all of the packs in the MIDX. E.g., if we had a MIDX with three packs
+(a, b, c), with 10, 15, and 20 objects respectively, we can imagine an
+ordering of the objects like:
+
+ |a,0|a,1|...|a,9|b,0|b,1|...|b,14|c,0|c,1|...|c,19|
+
+where the ordering of the packs is defined by the MIDX's pack list,
+and then the ordering of objects within each pack is the same as the
+order in the actual packfile.
+
+Given the list of packs and their counts of objects, you can
+naïvely reconstruct that pseudo-pack ordering (e.g., the object at
+position 27 must be (c,1) because packs "a" and "b" consumed 25 of the
+slots). But there's a catch. Objects may be duplicated between packs, in
+which case the MIDX only stores one pointer to the object (and thus we'd
+want only one slot in the bitmap).
+
+Callers could handle duplicates themselves by reading objects in order
+of their bit-position, but that's linear in the number of objects, and
+much too expensive for ordinary bitmap lookups. Building a reverse index
+solves this, since it is the logical inverse of the index, and that
+index has already removed duplicates. But, building a reverse index on
+the fly can be expensive. Since we already have an on-disk format for
+pack-based reverse indexes, let's reuse it for the MIDX's pseudo-pack,
+too.
+
+Objects from the MIDX are ordered as follows to string together the
+pseudo-pack. Let `pack(o)` return the pack from which `o` was selected
+by the MIDX, and define an ordering of packs based on their numeric ID
+(as stored by the MIDX). Let `offset(o)` return the object offset of `o`
+within `pack(o)`. Then, compare `o1` and `o2` as follows:
+
+ - If one of `pack(o1)` and `pack(o2)` is preferred and the other
+ is not, then the preferred one sorts first.
++
+(This is a detail that allows the MIDX bitmap to determine which
+pack should be used by the pack-reuse mechanism, since it can ask
+the MIDX for the pack containing the object at bit position 0).
+
+ - If `pack(o1) ≠ pack(o2)`, then sort the two objects in descending
+ order based on the pack ID.
+
+ - Otherwise, `pack(o1) = pack(o2)`, and the objects are sorted in
+ pack-order (i.e., `o1` sorts ahead of `o2` exactly when `offset(o1)
+ < offset(o2)`).
+
+In short, a MIDX's pseudo-pack is the de-duplicated concatenation of
+objects in packs stored by the MIDX, laid out in pack order, and the
+packs arranged in MIDX order (with the preferred pack coming first).
+
+Finally, note that the MIDX's reverse index is not stored as a chunk in
+the multi-pack-index itself. This is done because the reverse index
+includes the checksum of the pack or MIDX to which it belongs, which
+makes it impossible to write in the MIDX. To avoid races when rewriting
+the MIDX, a MIDX reverse index includes the MIDX's checksum in its
+filename (e.g., `multi-pack-index-xyz.rev`).
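
To make the pseudo-pack ordering concrete, here is a small, self-contained C
sketch of the naïve position lookup described above: given per-pack object
counts in MIDX pack order (preferred pack first), a pseudo-pack position maps
to a pack and a position within that pack. The names are illustrative, and the
sketch deliberately ignores the duplicated-object wrinkle that the on-disk
reverse index exists to solve:

    #include <stddef.h>
    #include <stdio.h>

    /* Illustration only: naïve pseudo-pack position lookup. */
    static int pseudo_pack_pos(const size_t *counts, size_t nr_packs,
                               size_t pos, size_t *pack, size_t *offset)
    {
        size_t i;

        for (i = 0; i < nr_packs; i++) {
            if (pos < counts[i]) {
                *pack = i;
                *offset = pos;
                return 0;
            }
            pos -= counts[i];
        }
        return -1; /* past the end of the pseudo-pack */
    }

    int main(void)
    {
        /* the example from the text: packs a, b, c with 10, 15 and 20 objects */
        const size_t counts[] = { 10, 15, 20 };
        size_t pack, offset;

        if (!pseudo_pack_pos(counts, 3, 12, &pack, &offset))
            printf("pack %zu, offset %zu\n", pack, offset); /* pack 1 (b), offset 2 */
        return 0;
    }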
diff --git a/Documentation/user-manual.txt b/Documentation/user-manual.txt
index fd480b8..f9e54b8 100644
--- a/Documentation/user-manual.txt
+++ b/Documentation/user-manual.txt
@@ -1,5 +1,8 @@
= Git User Manual
+[preface]
+== Introduction
+
Git is a fast distributed revision control system.
This manual is designed to be readable by someone with basic UNIX
diff --git a/Makefile b/Makefile
index 55c8035..21c0bf1 100644
--- a/Makefile
+++ b/Makefile
@@ -693,6 +693,7 @@ X =
PROGRAMS += $(patsubst %.o,git-%$X,$(PROGRAM_OBJS))
TEST_BUILTINS_OBJS += test-advise.o
+TEST_BUILTINS_OBJS += test-bitmap.o
TEST_BUILTINS_OBJS += test-bloom.o
TEST_BUILTINS_OBJS += test-chmtime.o
TEST_BUILTINS_OBJS += test-config.o
@@ -744,6 +745,7 @@ TEST_BUILTINS_OBJS += test-serve-v2.o
TEST_BUILTINS_OBJS += test-sha1.o
TEST_BUILTINS_OBJS += test-sha256.o
TEST_BUILTINS_OBJS += test-sigchain.o
+TEST_BUILTINS_OBJS += test-simple-ipc.o
TEST_BUILTINS_OBJS += test-strcmp-offset.o
TEST_BUILTINS_OBJS += test-string-list.o
TEST_BUILTINS_OBJS += test-submodule-config.o
@@ -1679,6 +1681,14 @@ ifdef NO_UNIX_SOCKETS
BASIC_CFLAGS += -DNO_UNIX_SOCKETS
else
LIB_OBJS += unix-socket.o
+ LIB_OBJS += unix-stream-server.o
+ LIB_OBJS += compat/simple-ipc/ipc-shared.o
+ LIB_OBJS += compat/simple-ipc/ipc-unix-socket.o
+endif
+
+ifdef USE_WIN32_IPC
+ LIB_OBJS += compat/simple-ipc/ipc-shared.o
+ LIB_OBJS += compat/simple-ipc/ipc-win32.o
endif
ifdef NO_ICONV
@@ -2690,12 +2700,14 @@ FIND_SOURCE_FILES = ( \
)
$(ETAGS_TARGET): FORCE
- $(RM) $(ETAGS_TARGET)
- $(FIND_SOURCE_FILES) | xargs etags -a -o $(ETAGS_TARGET)
+ $(QUIET_GEN)$(RM) "$(ETAGS_TARGET)+" && \
+ $(FIND_SOURCE_FILES) | xargs etags -a -o "$(ETAGS_TARGET)+" && \
+ mv "$(ETAGS_TARGET)+" "$(ETAGS_TARGET)"
tags: FORCE
- $(RM) tags
- $(FIND_SOURCE_FILES) | xargs ctags -a
+ $(QUIET_GEN)$(RM) tags+ && \
+ $(FIND_SOURCE_FILES) | xargs ctags -a -o tags+ && \
+ mv tags+ tags
cscope:
$(RM) cscope*
diff --git a/SECURITY.md b/SECURITY.md
new file mode 100644
index 0000000..c720c2a
--- /dev/null
+++ b/SECURITY.md
@@ -0,0 +1,51 @@
+# Security Policy
+
+## Reporting a vulnerability
+
+Please send a detailed mail to git-security@googlegroups.com to
+report vulnerabilities in Git.
+
+Even when unsure whether the bug in question is an exploitable
+vulnerability, it is recommended to send the report to
+git-security@googlegroups.com (and obviously not to discuss the
+issue anywhere else).
+
+Vulnerabilities are expected to be discussed _only_ on that
+list, and not in public, until the official announcement on the
+Git mailing list on the release date.
+
+Examples for details to include:
+
+- Ideally a short description (or a script) to demonstrate an
+ exploit.
+- The affected platforms and scenarios (the vulnerability might
+ only affect setups with case-sensitive file systems, for
+ example).
+- The name and affiliation of the security researchers who are
+ involved in the discovery, if any.
+- Whether the vulnerability has already been disclosed.
+- How long an embargo would be required to be safe.
+
+## Supported Versions
+
+There are no official "Long Term Support" versions in Git.
+Instead, the maintenance track (i.e. the versions based on the
+most recently published feature release, also known as ".0"
+version) sees occasional updates with bug fixes.
+
+Fixes to vulnerabilities are made for the maintenance track for
+the latest feature release and merged up to the in-development
+branches. The Git project makes no formal guarantee for any
+older maintenance tracks to receive updates. In practice,
+though, critical vulnerability fixes are applied not only to the
+most recent track, but to at least a couple more maintenance
+tracks.
+
+This is typically done by making the fix on the oldest and still
+relevant maintenance track, and merging it upwards to newer and
+newer maintenance tracks.
+
+For example, v2.24.1 was released to address a couple of
+[CVEs](https://cve.mitre.org/), and at the same time v2.14.6,
+v2.15.4, v2.16.6, v2.17.3, v2.18.2, v2.19.3, v2.20.2, v2.21.1,
+v2.22.2 and v2.23.1 were released.
diff --git a/apply.c b/apply.c
index 6695a93..466f880 100644
--- a/apply.c
+++ b/apply.c
@@ -21,6 +21,7 @@
#include "quote.h"
#include "rerere.h"
#include "apply.h"
+#include "entry.h"
struct gitdiff_data {
struct strbuf *root;
diff --git a/builtin/checkout-index.c b/builtin/checkout-index.c
index 023e49e..c0bf4ac 100644
--- a/builtin/checkout-index.c
+++ b/builtin/checkout-index.c
@@ -11,6 +11,7 @@
#include "quote.h"
#include "cache-tree.h"
#include "parse-options.h"
+#include "entry.h"
#define CHECKOUT_ALL 4
static int nul_term_line;
diff --git a/builtin/checkout.c b/builtin/checkout.c
index 0e66390..4c696ef 100644
--- a/builtin/checkout.c
+++ b/builtin/checkout.c
@@ -26,6 +26,7 @@
#include "unpack-trees.h"
#include "wt-status.h"
#include "xdiff-interface.h"
+#include "entry.h"
static const char * const checkout_usage[] = {
N_("git checkout [<options>] <branch>"),
diff --git a/builtin/clone.c b/builtin/clone.c
index 51e844a..f6b0c48 100644
--- a/builtin/clone.c
+++ b/builtin/clone.c
@@ -50,6 +50,8 @@ static int option_no_checkout, option_bare, option_mirror, option_single_branch
static int option_local = -1, option_no_hardlinks, option_shared;
static int option_no_tags;
static int option_shallow_submodules;
+static int option_reject_shallow = -1; /* unspecified */
+static int config_reject_shallow = -1; /* unspecified */
static int deepen;
static char *option_template, *option_depth, *option_since;
static char *option_origin = NULL;
@@ -90,6 +92,8 @@ static struct option builtin_clone_options[] = {
OPT__VERBOSITY(&option_verbosity),
OPT_BOOL(0, "progress", &option_progress,
N_("force progress reporting")),
+ OPT_BOOL(0, "reject-shallow", &option_reject_shallow,
+ N_("don't clone shallow repository")),
OPT_BOOL('n', "no-checkout", &option_no_checkout,
N_("don't create a checkout")),
OPT_BOOL(0, "bare", &option_bare, N_("create a bare repository")),
@@ -858,6 +862,9 @@ static int git_clone_config(const char *k, const char *v, void *cb)
free(remote_name);
remote_name = xstrdup(v);
}
+ if (!strcmp(k, "clone.rejectshallow"))
+ config_reject_shallow = git_config_bool(k, v);
+
return git_default_config(k, v, cb);
}
@@ -963,11 +970,12 @@ static int path_exists(const char *path)
int cmd_clone(int argc, const char **argv, const char *prefix)
{
int is_bundle = 0, is_local;
+ int reject_shallow = 0;
const char *repo_name, *repo, *work_tree, *git_dir;
- char *path, *dir, *display_repo = NULL;
+ char *path = NULL, *dir, *display_repo = NULL;
int dest_exists, real_dest_exists = 0;
const struct ref *refs, *remote_head;
- const struct ref *remote_head_points_at;
+ struct ref *remote_head_points_at = NULL;
const struct ref *our_head_points_at;
struct ref *mapped_refs;
const struct ref *ref;
@@ -1017,9 +1025,10 @@ int cmd_clone(int argc, const char **argv, const char *prefix)
repo_name = argv[0];
path = get_repo_path(repo_name, &is_bundle);
- if (path)
+ if (path) {
+ FREE_AND_NULL(path);
repo = absolute_pathdup(repo_name);
- else if (strchr(repo_name, ':')) {
+ } else if (strchr(repo_name, ':')) {
repo = repo_name;
display_repo = transport_anonymize_url(repo);
} else
@@ -1157,6 +1166,15 @@ int cmd_clone(int argc, const char **argv, const char *prefix)
git_config(git_clone_config, NULL);
/*
+ * If option_reject_shallow is specified from CLI option,
+ * ignore config_reject_shallow from git_clone_config.
+ */
+ if (config_reject_shallow != -1)
+ reject_shallow = config_reject_shallow;
+ if (option_reject_shallow != -1)
+ reject_shallow = option_reject_shallow;
+
+ /*
* apply the remote name provided by --origin only after this second
* call to git_config, to ensure it overrides all config-based values.
*/
@@ -1216,6 +1234,8 @@ int cmd_clone(int argc, const char **argv, const char *prefix)
if (filter_options.choice)
warning(_("--filter is ignored in local clones; use file:// instead."));
if (!access(mkpath("%s/shallow", path), F_OK)) {
+ if (reject_shallow)
+ die(_("source repository is shallow, reject to clone."));
if (option_local > 0)
warning(_("source repository is shallow, ignoring --local"));
is_local = 0;
@@ -1227,6 +1247,8 @@ int cmd_clone(int argc, const char **argv, const char *prefix)
transport_set_option(transport, TRANS_OPT_KEEP, "yes");
+ if (reject_shallow)
+ transport_set_option(transport, TRANS_OPT_REJECT_SHALLOW, "1");
if (option_depth)
transport_set_option(transport, TRANS_OPT_DEPTH,
option_depth);
@@ -1393,6 +1415,11 @@ cleanup:
strbuf_release(&reflog_msg);
strbuf_release(&branch_top);
strbuf_release(&key);
+ free_refs(mapped_refs);
+ free_refs(remote_head_points_at);
+ free(dir);
+ free(path);
+ UNLEAK(repo);
junk_mode = JUNK_LEAVE_ALL;
strvec_clear(&transport_ls_refs_options.ref_prefixes);
diff --git a/builtin/column.c b/builtin/column.c
index e815e14..40d4b3b 100644
--- a/builtin/column.c
+++ b/builtin/column.c
@@ -27,10 +27,10 @@ int cmd_column(int argc, const char **argv, const char *prefix)
OPT_STRING(0, "command", &real_command, N_("name"), N_("lookup config vars")),
OPT_COLUMN(0, "mode", &colopts, N_("layout to use")),
OPT_INTEGER(0, "raw-mode", &colopts, N_("layout to use")),
- OPT_INTEGER(0, "width", &copts.width, N_("Maximum width")),
- OPT_STRING(0, "indent", &copts.indent, N_("string"), N_("Padding space on left border")),
- OPT_INTEGER(0, "nl", &copts.nl, N_("Padding space on right border")),
- OPT_INTEGER(0, "padding", &copts.padding, N_("Padding space between columns")),
+ OPT_INTEGER(0, "width", &copts.width, N_("maximum width")),
+ OPT_STRING(0, "indent", &copts.indent, N_("string"), N_("padding space on left border")),
+ OPT_INTEGER(0, "nl", &copts.nl, N_("padding space on right border")),
+ OPT_INTEGER(0, "padding", &copts.padding, N_("padding space between columns")),
OPT_END()
};
diff --git a/builtin/commit.c b/builtin/commit.c
index d513858..55d50a8 100644
--- a/builtin/commit.c
+++ b/builtin/commit.c
@@ -114,6 +114,7 @@ static int config_commit_verbose = -1; /* unspecified */
static int no_post_rewrite, allow_empty_message, pathspec_file_nul;
static char *untracked_files_arg, *force_date, *ignore_submodule_arg, *ignored_arg;
static char *sign_commit, *pathspec_from_file;
+static struct strvec trailer_args = STRVEC_INIT;
/*
* The default commit message cleanup mode will remove the lines
@@ -132,6 +133,14 @@ static struct strbuf message = STRBUF_INIT;
static enum wt_status_format status_format = STATUS_FORMAT_UNSPECIFIED;
+static int opt_pass_trailer(const struct option *opt, const char *arg, int unset)
+{
+ BUG_ON_OPT_NEG(unset);
+
+ strvec_pushl(&trailer_args, "--trailer", arg, NULL);
+ return 0;
+}
+
static int opt_parse_porcelain(const struct option *opt, const char *arg, int unset)
{
enum wt_status_format *value = (enum wt_status_format *)opt->value;
@@ -994,6 +1003,18 @@ static int prepare_to_commit(const char *index_file, const char *prefix,
fclose(s->fp);
+ if (trailer_args.nr) {
+ struct child_process run_trailer = CHILD_PROCESS_INIT;
+
+ strvec_pushl(&run_trailer.args, "interpret-trailers",
+ "--in-place", git_path_commit_editmsg(), NULL);
+ strvec_pushv(&run_trailer.args, trailer_args.v);
+ run_trailer.git_cmd = 1;
+ if (run_command(&run_trailer))
+ die(_("unable to pass trailers to --trailers"));
+ strvec_clear(&trailer_args);
+ }
+
/*
* Reject an attempt to record a non-merge empty commit without
* explicit --allow-empty. In the cherry-pick case, it may be
@@ -1596,6 +1617,7 @@ int cmd_commit(int argc, const char **argv, const char *prefix)
OPT_STRING(0, "fixup", &fixup_message, N_("[(amend|reword):]commit"), N_("use autosquash formatted message to fixup or amend/reword specified commit")),
OPT_STRING(0, "squash", &squash_message, N_("commit"), N_("use autosquash formatted message to squash specified commit")),
OPT_BOOL(0, "reset-author", &renew_authorship, N_("the commit is authored by me now (used with -C/-c/--amend)")),
+ OPT_CALLBACK_F(0, "trailer", NULL, N_("trailer"), N_("add custom trailer(s)"), PARSE_OPT_NONEG, opt_pass_trailer),
OPT_BOOL('s', "signoff", &signoff, N_("add a Signed-off-by trailer")),
OPT_FILENAME('t', "template", &template_file, N_("use specified template file")),
OPT_BOOL('e', "edit", &edit_flag, N_("force edit of commit")),
diff --git a/builtin/credential-cache--daemon.c b/builtin/credential-cache--daemon.c
index c61f123..4c6c89a 100644
--- a/builtin/credential-cache--daemon.c
+++ b/builtin/credential-cache--daemon.c
@@ -203,9 +203,10 @@ static int serve_cache_loop(int fd)
static void serve_cache(const char *socket_path, int debug)
{
+ struct unix_stream_listen_opts opts = UNIX_STREAM_LISTEN_OPTS_INIT;
int fd;
- fd = unix_stream_listen(socket_path);
+ fd = unix_stream_listen(socket_path, &opts);
if (fd < 0)
die_errno("unable to bind to '%s'", socket_path);
diff --git a/builtin/credential-cache.c b/builtin/credential-cache.c
index 9b3f709..76a6ba3 100644
--- a/builtin/credential-cache.c
+++ b/builtin/credential-cache.c
@@ -14,7 +14,7 @@
static int send_request(const char *socket, const struct strbuf *out)
{
int got_data = 0;
- int fd = unix_stream_connect(socket);
+ int fd = unix_stream_connect(socket, 0);
if (fd < 0)
return -1;
diff --git a/builtin/difftool.c b/builtin/difftool.c
index 6e18e62..ef25729 100644
--- a/builtin/difftool.c
+++ b/builtin/difftool.c
@@ -23,6 +23,7 @@
#include "lockfile.h"
#include "object-store.h"
#include "dir.h"
+#include "entry.h"
static int trust_exit_code;
diff --git a/builtin/fsck.c b/builtin/fsck.c
index 821e779..70ff958 100644
--- a/builtin/fsck.c
+++ b/builtin/fsck.c
@@ -71,11 +71,6 @@ static const char *printable_type(const struct object_id *oid,
return ret;
}
-static int fsck_config(const char *var, const char *value, void *cb)
-{
- return fsck_config_internal(var, value, cb, &fsck_obj_options);
-}
-
static int objerror(struct object *obj, const char *err)
{
errors_found |= ERROR_OBJECT;
@@ -89,7 +84,9 @@ static int objerror(struct object *obj, const char *err)
static int fsck_error_func(struct fsck_options *o,
const struct object_id *oid,
enum object_type object_type,
- int msg_type, const char *message)
+ enum fsck_msg_type msg_type,
+ enum fsck_msg_id msg_id,
+ const char *message)
{
switch (msg_type) {
case FSCK_WARN:
@@ -197,7 +194,8 @@ static int traverse_reachable(void)
return !!result;
}
-static int mark_used(struct object *obj, int type, void *data, struct fsck_options *options)
+static int mark_used(struct object *obj, enum object_type object_type,
+ void *data, struct fsck_options *options)
{
if (!obj)
return 1;
@@ -803,7 +801,7 @@ int cmd_fsck(int argc, const char **argv, const char *prefix)
if (name_objects)
fsck_enable_object_names(&fsck_walk_options);
- git_config(fsck_config, NULL);
+ git_config(git_fsck_config, &fsck_obj_options);
if (connectivity_only) {
for_each_loose_object(mark_loose_for_connectivity, NULL, 0);
diff --git a/builtin/index-pack.c b/builtin/index-pack.c
index 2189968..15507b5 100644
--- a/builtin/index-pack.c
+++ b/builtin/index-pack.c
@@ -120,7 +120,7 @@ static int nr_threads;
static int from_stdin;
static int strict;
static int do_fsck_object;
-static struct fsck_options fsck_options = FSCK_OPTIONS_STRICT;
+static struct fsck_options fsck_options = FSCK_OPTIONS_MISSING_GITMODULES;
static int verbose;
static int show_resolving_progress;
static int show_stat;
@@ -212,7 +212,8 @@ static void cleanup_thread(void)
free(thread_data);
}
-static int mark_link(struct object *obj, int type, void *data, struct fsck_options *options)
+static int mark_link(struct object *obj, enum object_type type,
+ void *data, struct fsck_options *options)
{
if (!obj)
return -1;
@@ -1712,22 +1713,6 @@ static void show_pack_info(int stat_only)
}
}
-static int print_dangling_gitmodules(struct fsck_options *o,
- const struct object_id *oid,
- enum object_type object_type,
- int msg_type, const char *message)
-{
- /*
- * NEEDSWORK: Plumb the MSG_ID (from fsck.c) here and use it
- * instead of relying on this string check.
- */
- if (starts_with(message, "gitmodulesMissing")) {
- printf("%s\n", oid_to_hex(oid));
- return 0;
- }
- return fsck_error_function(o, oid, object_type, msg_type, message);
-}
-
int cmd_index_pack(int argc, const char **argv, const char *prefix)
{
int i, fix_thin_pack = 0, verify = 0, stat_only = 0, rev_index;
@@ -1948,13 +1933,8 @@ int cmd_index_pack(int argc, const char **argv, const char *prefix)
else
close(input_fd);
- if (do_fsck_object) {
- struct fsck_options fo = fsck_options;
-
- fo.error_func = print_dangling_gitmodules;
- if (fsck_finish(&fo))
- die(_("fsck error in pack objects"));
- }
+ if (do_fsck_object && fsck_finish(&fsck_options))
+ die(_("fsck error in pack objects"));
free(objects);
strbuf_release(&index_name_buf);
diff --git a/builtin/init-db.c b/builtin/init-db.c
index f82efe4..c19b35f 100644
--- a/builtin/init-db.c
+++ b/builtin/init-db.c
@@ -25,7 +25,6 @@
static int init_is_bare_repository = 0;
static int init_shared_repository = -1;
-static const char *init_db_template_dir;
static void copy_templates_1(struct strbuf *path, struct strbuf *template_path,
DIR *dir)
@@ -94,7 +93,7 @@ static void copy_templates_1(struct strbuf *path, struct strbuf *template_path,
}
}
-static void copy_templates(const char *template_dir)
+static void copy_templates(const char *template_dir, const char *init_template_dir)
{
struct strbuf path = STRBUF_INIT;
struct strbuf template_path = STRBUF_INIT;
@@ -107,7 +106,7 @@ static void copy_templates(const char *template_dir)
if (!template_dir)
template_dir = getenv(TEMPLATE_DIR_ENVIRONMENT);
if (!template_dir)
- template_dir = init_db_template_dir;
+ template_dir = init_template_dir;
if (!template_dir)
template_dir = to_free = system_path(DEFAULT_GIT_TEMPLATE_DIR);
if (!template_dir[0]) {
@@ -154,17 +153,6 @@ free_return:
clear_repository_format(&template_format);
}
-static int git_init_db_config(const char *k, const char *v, void *cb)
-{
- if (!strcmp(k, "init.templatedir"))
- return git_config_pathname(&init_db_template_dir, k, v);
-
- if (starts_with(k, "core."))
- return platform_core_config(k, v, cb);
-
- return 0;
-}
-
/*
* If the git_dir is not directly inside the working tree, then git will not
* find it by default, and we need to set the worktree explicitly.
@@ -212,12 +200,9 @@ static int create_default_files(const char *template_path,
int reinit;
int filemode;
struct strbuf err = STRBUF_INIT;
+ const char *init_template_dir = NULL;
const char *work_tree = get_git_work_tree();
- /* Just look for `init.templatedir` */
- init_db_template_dir = NULL; /* re-set in case it was set before */
- git_config(git_init_db_config, NULL);
-
/*
* First copy the templates -- we might have the default
* config file there, in which case we would want to read
@@ -227,7 +212,8 @@ static int create_default_files(const char *template_path,
* values (since we've just potentially changed what's available on
* disk).
*/
- copy_templates(template_path);
+ git_config_get_value("init.templatedir", &init_template_dir);
+ copy_templates(template_path, init_template_dir);
git_config_clear();
reset_shared_repository();
git_config(git_default_config, NULL);
@@ -422,8 +408,8 @@ int init_db(const char *git_dir, const char *real_git_dir,
}
startup_info->have_repository = 1;
- /* Just look for `core.hidedotfiles` */
- git_config(git_init_db_config, NULL);
+ /* Ensure `core.hidedotfiles` is processed */
+ git_config(platform_core_config, NULL);
safe_create_dir(git_dir, 0);
@@ -575,8 +561,10 @@ int cmd_init_db(int argc, const char **argv, const char *prefix)
if (real_git_dir && !is_absolute_path(real_git_dir))
real_git_dir = real_pathdup(real_git_dir, 1);
- if (template_dir && *template_dir && !is_absolute_path(template_dir))
+ if (template_dir && *template_dir && !is_absolute_path(template_dir)) {
template_dir = absolute_pathdup(template_dir);
+ UNLEAK(template_dir);
+ }
if (argc == 1) {
int mkdir_tried = 0;
diff --git a/builtin/log.c b/builtin/log.c
index 980de59..8acd285 100644
--- a/builtin/log.c
+++ b/builtin/log.c
@@ -1662,13 +1662,19 @@ static void print_bases(struct base_tree_info *bases, FILE *file)
oidclr(&bases->base_commit);
}
-static const char *diff_title(struct strbuf *sb, int reroll_count,
- const char *generic, const char *rerolled)
+static const char *diff_title(struct strbuf *sb,
+ const char *reroll_count,
+ const char *generic,
+ const char *rerolled)
{
- if (reroll_count <= 0)
+ int v;
+
+ /* RFC may be v0, so allow -v1 to diff against v0 */
+ if (reroll_count && !strtol_i(reroll_count, 10, &v) &&
+ v >= 1)
+ strbuf_addf(sb, rerolled, v - 1);
+ else
strbuf_addstr(sb, generic);
- else /* RFC may be v0, so allow -v1 to diff against v0 */
- strbuf_addf(sb, rerolled, reroll_count - 1);
return sb->buf;
}
@@ -1717,7 +1723,7 @@ int cmd_format_patch(int argc, const char **argv, const char *prefix)
struct strbuf buf = STRBUF_INIT;
int use_patch_format = 0;
int quiet = 0;
- int reroll_count = -1;
+ const char *reroll_count = NULL;
char *cover_from_description_arg = NULL;
char *branch_name = NULL;
char *base_commit = NULL;
@@ -1751,7 +1757,7 @@ int cmd_format_patch(int argc, const char **argv, const char *prefix)
N_("use <sfx> instead of '.patch'")),
OPT_INTEGER(0, "start-number", &start_number,
N_("start numbering patches at <n> instead of 1")),
- OPT_INTEGER('v', "reroll-count", &reroll_count,
+ OPT_STRING('v', "reroll-count", &reroll_count, N_("reroll-count"),
N_("mark the series as Nth re-roll")),
OPT_INTEGER(0, "filename-max-length", &fmt_patch_name_max,
N_("max length of output filename")),
@@ -1862,9 +1868,10 @@ int cmd_format_patch(int argc, const char **argv, const char *prefix)
if (cover_from_description_arg)
cover_from_description_mode = parse_cover_from_description(cover_from_description_arg);
- if (0 < reroll_count) {
+ if (reroll_count) {
struct strbuf sprefix = STRBUF_INIT;
- strbuf_addf(&sprefix, "%s v%d",
+
+ strbuf_addf(&sprefix, "%s v%s",
rev.subject_prefix, reroll_count);
rev.reroll_count = reroll_count;
rev.subject_prefix = strbuf_detach(&sprefix, NULL);
diff --git a/builtin/ls-remote.c b/builtin/ls-remote.c
index abfa984..1794548 100644
--- a/builtin/ls-remote.c
+++ b/builtin/ls-remote.c
@@ -124,8 +124,6 @@ int cmd_ls_remote(int argc, const char **argv, const char *prefix)
int hash_algo = hash_algo_by_ptr(transport_get_hash_algo(transport));
repo_set_hash_algo(the_repository, hash_algo);
}
- if (transport_disconnect(transport))
- return 1;
if (!dest && !quiet)
fprintf(stderr, "From %s\n", *remote->url);
@@ -151,5 +149,7 @@ int cmd_ls_remote(int argc, const char **argv, const char *prefix)
}
ref_array_clear(&ref_array);
+ if (transport_disconnect(transport))
+ return 1;
return status;
}
diff --git a/builtin/mktag.c b/builtin/mktag.c
index 41a399a..dddcccd 100644
--- a/builtin/mktag.c
+++ b/builtin/mktag.c
@@ -14,15 +14,12 @@ static int option_strict = 1;
static struct fsck_options fsck_options = FSCK_OPTIONS_STRICT;
-static int mktag_config(const char *var, const char *value, void *cb)
-{
- return fsck_config_internal(var, value, cb, &fsck_options);
-}
-
static int mktag_fsck_error_func(struct fsck_options *o,
const struct object_id *oid,
enum object_type object_type,
- int msg_type, const char *message)
+ enum fsck_msg_type msg_type,
+ enum fsck_msg_id msg_id,
+ const char *message)
{
switch (msg_type) {
case FSCK_WARN:
@@ -91,9 +88,10 @@ int cmd_mktag(int argc, const char **argv, const char *prefix)
die_errno(_("could not read from stdin"));
fsck_options.error_func = mktag_fsck_error_func;
- fsck_set_msg_type(&fsck_options, "extraheaderentry", "warn");
+ fsck_set_msg_type_from_ids(&fsck_options, FSCK_MSG_EXTRA_HEADER_ENTRY,
+ FSCK_WARN);
/* config might set fsck.extraHeaderEntry=* again */
- git_config(mktag_config, NULL);
+ git_config(git_fsck_config, &fsck_options);
if (fsck_tag_standalone(NULL, buf.buf, buf.len, &fsck_options,
&tagged_oid, &tagged_type))
die(_("tag on stdin did not pass our strict fsck check"));
diff --git a/builtin/multi-pack-index.c b/builtin/multi-pack-index.c
index 5bf88cd..5d3ea44 100644
--- a/builtin/multi-pack-index.c
+++ b/builtin/multi-pack-index.c
@@ -4,67 +4,181 @@
#include "parse-options.h"
#include "midx.h"
#include "trace2.h"
+#include "object-store.h"
+#define BUILTIN_MIDX_WRITE_USAGE \
+ N_("git multi-pack-index [<options>] write [--preferred-pack=<pack>]")
+
+#define BUILTIN_MIDX_VERIFY_USAGE \
+ N_("git multi-pack-index [<options>] verify")
+
+#define BUILTIN_MIDX_EXPIRE_USAGE \
+ N_("git multi-pack-index [<options>] expire")
+
+#define BUILTIN_MIDX_REPACK_USAGE \
+ N_("git multi-pack-index [<options>] repack [--batch-size=<size>]")
+
+static char const * const builtin_multi_pack_index_write_usage[] = {
+ BUILTIN_MIDX_WRITE_USAGE,
+ NULL
+};
+static char const * const builtin_multi_pack_index_verify_usage[] = {
+ BUILTIN_MIDX_VERIFY_USAGE,
+ NULL
+};
+static char const * const builtin_multi_pack_index_expire_usage[] = {
+ BUILTIN_MIDX_EXPIRE_USAGE,
+ NULL
+};
+static char const * const builtin_multi_pack_index_repack_usage[] = {
+ BUILTIN_MIDX_REPACK_USAGE,
+ NULL
+};
static char const * const builtin_multi_pack_index_usage[] = {
- N_("git multi-pack-index [<options>] (write|verify|expire|repack --batch-size=<size>)"),
+ BUILTIN_MIDX_WRITE_USAGE,
+ BUILTIN_MIDX_VERIFY_USAGE,
+ BUILTIN_MIDX_EXPIRE_USAGE,
+ BUILTIN_MIDX_REPACK_USAGE,
NULL
};
static struct opts_multi_pack_index {
const char *object_dir;
+ const char *preferred_pack;
unsigned long batch_size;
- int progress;
+ unsigned flags;
} opts;
-int cmd_multi_pack_index(int argc, const char **argv,
- const char *prefix)
+static struct option common_opts[] = {
+ OPT_FILENAME(0, "object-dir", &opts.object_dir,
+ N_("object directory containing set of packfile and pack-index pairs")),
+ OPT_BIT(0, "progress", &opts.flags, N_("force progress reporting"), MIDX_PROGRESS),
+ OPT_END(),
+};
+
+static struct option *add_common_options(struct option *prev)
{
- unsigned flags = 0;
+ return parse_options_concat(common_opts, prev);
+}
+
+static int cmd_multi_pack_index_write(int argc, const char **argv)
+{
+ struct option *options;
+ static struct option builtin_multi_pack_index_write_options[] = {
+ OPT_STRING(0, "preferred-pack", &opts.preferred_pack,
+ N_("preferred-pack"),
+ N_("pack for reuse when computing a multi-pack bitmap")),
+ OPT_END(),
+ };
+
+ options = add_common_options(builtin_multi_pack_index_write_options);
+
+ trace2_cmd_mode(argv[0]);
- static struct option builtin_multi_pack_index_options[] = {
- OPT_FILENAME(0, "object-dir", &opts.object_dir,
- N_("object directory containing set of packfile and pack-index pairs")),
- OPT_BOOL(0, "progress", &opts.progress, N_("force progress reporting")),
+ argc = parse_options(argc, argv, NULL,
+ options, builtin_multi_pack_index_write_usage,
+ PARSE_OPT_KEEP_UNKNOWN);
+ if (argc)
+ usage_with_options(builtin_multi_pack_index_write_usage,
+ options);
+
+ FREE_AND_NULL(options);
+
+ return write_midx_file(opts.object_dir, opts.preferred_pack,
+ opts.flags);
+}
+
+static int cmd_multi_pack_index_verify(int argc, const char **argv)
+{
+ struct option *options = common_opts;
+
+ trace2_cmd_mode(argv[0]);
+
+ argc = parse_options(argc, argv, NULL,
+ options, builtin_multi_pack_index_verify_usage,
+ PARSE_OPT_KEEP_UNKNOWN);
+ if (argc)
+ usage_with_options(builtin_multi_pack_index_verify_usage,
+ options);
+
+ return verify_midx_file(the_repository, opts.object_dir, opts.flags);
+}
+
+static int cmd_multi_pack_index_expire(int argc, const char **argv)
+{
+ struct option *options = common_opts;
+
+ trace2_cmd_mode(argv[0]);
+
+ argc = parse_options(argc, argv, NULL,
+ options, builtin_multi_pack_index_expire_usage,
+ PARSE_OPT_KEEP_UNKNOWN);
+ if (argc)
+ usage_with_options(builtin_multi_pack_index_expire_usage,
+ options);
+
+ return expire_midx_packs(the_repository, opts.object_dir, opts.flags);
+}
+
+static int cmd_multi_pack_index_repack(int argc, const char **argv)
+{
+ struct option *options;
+ static struct option builtin_multi_pack_index_repack_options[] = {
OPT_MAGNITUDE(0, "batch-size", &opts.batch_size,
N_("during repack, collect pack-files of smaller size into a batch that is larger than this size")),
OPT_END(),
};
+ options = add_common_options(builtin_multi_pack_index_repack_options);
+
+ trace2_cmd_mode(argv[0]);
+
+ argc = parse_options(argc, argv, NULL,
+ options,
+ builtin_multi_pack_index_repack_usage,
+ PARSE_OPT_KEEP_UNKNOWN);
+ if (argc)
+ usage_with_options(builtin_multi_pack_index_repack_usage,
+ options);
+
+ FREE_AND_NULL(options);
+
+ return midx_repack(the_repository, opts.object_dir,
+ (size_t)opts.batch_size, opts.flags);
+}
+
+int cmd_multi_pack_index(int argc, const char **argv,
+ const char *prefix)
+{
+ struct option *builtin_multi_pack_index_options = common_opts;
+
git_config(git_default_config, NULL);
- opts.progress = isatty(2);
+ if (isatty(2))
+ opts.flags |= MIDX_PROGRESS;
argc = parse_options(argc, argv, prefix,
builtin_multi_pack_index_options,
- builtin_multi_pack_index_usage, 0);
+ builtin_multi_pack_index_usage,
+ PARSE_OPT_STOP_AT_NON_OPTION);
if (!opts.object_dir)
opts.object_dir = get_object_directory();
- if (opts.progress)
- flags |= MIDX_PROGRESS;
if (argc == 0)
+ goto usage;
+
+ if (!strcmp(argv[0], "repack"))
+ return cmd_multi_pack_index_repack(argc, argv);
+ else if (!strcmp(argv[0], "write"))
+ return cmd_multi_pack_index_write(argc, argv);
+ else if (!strcmp(argv[0], "verify"))
+ return cmd_multi_pack_index_verify(argc, argv);
+ else if (!strcmp(argv[0], "expire"))
+ return cmd_multi_pack_index_expire(argc, argv);
+ else {
+ error(_("unrecognized subcommand: %s"), argv[0]);
+usage:
usage_with_options(builtin_multi_pack_index_usage,
builtin_multi_pack_index_options);
-
- if (argc > 1) {
- die(_("too many arguments"));
- return 1;
}
-
- trace2_cmd_mode(argv[0]);
-
- if (!strcmp(argv[0], "repack"))
- return midx_repack(the_repository, opts.object_dir,
- (size_t)opts.batch_size, flags);
- if (opts.batch_size)
- die(_("--batch-size option is only for 'repack' subcommand"));
-
- if (!strcmp(argv[0], "write"))
- return write_midx_file(opts.object_dir, flags);
- if (!strcmp(argv[0], "verify"))
- return verify_midx_file(the_repository, opts.object_dir, flags);
- if (!strcmp(argv[0], "expire"))
- return expire_midx_packs(the_repository, opts.object_dir, flags);
-
- die(_("unrecognized subcommand: %s"), argv[0]);
}
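
The hunk above turns cmd_multi_pack_index() into a thin dispatcher: shared options live in common_opts, each subcommand concatenates its own options onto them via parse_options_concat(), and the top-level parse stops at the first non-option so the subcommand can re-parse argv itself. As an illustrative aside (not part of the patch), a minimal sketch of the same pattern, with hypothetical names (cmd_demo, demo_verbose, a "run" subcommand), might look like this:

#include "builtin.h"
#include "parse-options.h"

static int demo_verbose;

static struct option demo_common_opts[] = {
	OPT_BOOL(0, "verbose", &demo_verbose, N_("be chatty")),
	OPT_END(),
};

static const char * const demo_usage[] = {
	N_("git demo [<options>] run [--label=<label>]"),
	NULL
};

static int cmd_demo_run(int argc, const char **argv)
{
	const char *label = NULL;
	struct option run_opts[] = {
		OPT_STRING(0, "label", &label, N_("label"), N_("label this run")),
		OPT_END(),
	};
	/* shared options + subcommand-specific options in one array */
	struct option *options = parse_options_concat(run_opts, demo_common_opts);

	argc = parse_options(argc, argv, NULL, options, demo_usage,
			     PARSE_OPT_KEEP_UNKNOWN);
	if (argc)
		usage_with_options(demo_usage, options);
	FREE_AND_NULL(options);

	if (demo_verbose)
		printf("run%s%s\n", label ? ": " : "", label ? label : "");
	return 0;
}

int cmd_demo(int argc, const char **argv, const char *prefix)
{
	/* stop at the first non-option so the subcommand can re-parse argv */
	argc = parse_options(argc, argv, prefix, demo_common_opts, demo_usage,
			     PARSE_OPT_STOP_AT_NON_OPTION);
	if (argc && !strcmp(argv[0], "run"))
		return cmd_demo_run(argc, argv);

	usage_with_options(demo_usage, demo_common_opts);
}

Keeping OPT_END() in every array matters here: parse_options_concat() walks each array up to its terminating entry when building the combined list.
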
diff --git a/builtin/pack-objects.c b/builtin/pack-objects.c
index 525c2d8..a1e33d7 100644
--- a/builtin/pack-objects.c
+++ b/builtin/pack-objects.c
@@ -3547,6 +3547,37 @@ static void record_recent_commit(struct commit *commit, void *data)
oid_array_append(&recent_objects, &commit->object.oid);
}
+static int mark_bitmap_preferred_tip(const char *refname,
+ const struct object_id *oid, int flags,
+ void *_data)
+{
+ struct object_id peeled;
+ struct object *object;
+
+ if (!peel_iterated_oid(oid, &peeled))
+ oid = &peeled;
+
+ object = parse_object_or_die(oid, refname);
+ if (object->type == OBJ_COMMIT)
+ object->flags |= NEEDS_BITMAP;
+
+ return 0;
+}
+
+static void mark_bitmap_preferred_tips(void)
+{
+ struct string_list_item *item;
+ const struct string_list *preferred_tips;
+
+ preferred_tips = bitmap_preferred_tips(the_repository);
+ if (!preferred_tips)
+ return;
+
+ for_each_string_list_item(item, preferred_tips) {
+ for_each_ref_in(item->string, mark_bitmap_preferred_tip, NULL);
+ }
+}
+
static void get_object_list(int ac, const char **av)
{
struct rev_info revs;
@@ -3601,6 +3632,9 @@ static void get_object_list(int ac, const char **av)
if (use_delta_islands)
load_delta_islands(the_repository, progress);
+ if (write_bitmap_index)
+ mark_bitmap_preferred_tips();
+
if (prepare_revision_walk(&revs))
die(_("revision walk setup failed"));
mark_edges_uninteresting(&revs, show_edge, sparse);
diff --git a/builtin/range-diff.c b/builtin/range-diff.c
index 78bc9fa..5031884 100644
--- a/builtin/range-diff.c
+++ b/builtin/range-diff.c
@@ -25,7 +25,7 @@ int cmd_range_diff(int argc, const char **argv, const char *prefix)
struct option range_diff_options[] = {
OPT_INTEGER(0, "creation-factor",
&range_diff_opts.creation_factor,
- N_("Percentage by which creation is weighted")),
+ N_("percentage by which creation is weighted")),
OPT_BOOL(0, "no-dual-color", &simple_color,
N_("use simple diff colors")),
OPT_PASSTHRU_ARGV(0, "notes", &other_arg,
diff --git a/builtin/remote.c b/builtin/remote.c
index 717b662..7f88e6c 100644
--- a/builtin/remote.c
+++ b/builtin/remote.c
@@ -938,9 +938,6 @@ static int get_remote_ref_states(const char *name,
struct ref_states *states,
int query)
{
- struct transport *transport;
- const struct ref *remote_refs;
-
states->remote = remote_get(name);
if (!states->remote)
return error(_("No such remote: '%s'"), name);
@@ -948,10 +945,12 @@ static int get_remote_ref_states(const char *name,
read_branches();
if (query) {
+ struct transport *transport;
+ const struct ref *remote_refs;
+
transport = transport_get(states->remote, states->remote->url_nr > 0 ?
states->remote->url[0] : NULL);
remote_refs = transport_get_remote_refs(transport, NULL);
- transport_disconnect(transport);
states->queried = 1;
if (query & GET_REF_STATES)
@@ -960,6 +959,7 @@ static int get_remote_ref_states(const char *name,
get_head_names(remote_refs, states);
if (query & GET_PUSH_REF_STATES)
get_push_ref_states(remote_refs, states);
+ transport_disconnect(transport);
} else {
for_each_ref(append_ref_to_tracked_list, states);
string_list_sort(&states->tracked);
diff --git a/builtin/repack.c b/builtin/repack.c
index 6ce2556..2847fdf 100644
--- a/builtin/repack.c
+++ b/builtin/repack.c
@@ -721,7 +721,7 @@ int cmd_repack(int argc, const char **argv, const char *prefix)
remove_temporary_files();
if (git_env_bool(GIT_TEST_MULTI_PACK_INDEX, 0))
- write_midx_file(get_object_directory(), 0);
+ write_midx_file(get_object_directory(), NULL, 0);
string_list_clear(&names, 0);
string_list_clear(&rollback, 0);
diff --git a/builtin/reset.c b/builtin/reset.c
index c635b06..43e855c 100644
--- a/builtin/reset.c
+++ b/builtin/reset.c
@@ -425,7 +425,7 @@ int cmd_reset(int argc, const char **argv, const char *prefix)
dwim_ref(rev, strlen(rev), &dummy, &ref, 0);
if (ref && !starts_with(ref, "refs/"))
- ref = NULL;
+ FREE_AND_NULL(ref);
err = reset_index(ref, &oid, reset_type, quiet);
if (reset_type == KEEP && !err)
diff --git a/builtin/revert.c b/builtin/revert.c
index 314a86c..237f2f1 100644
--- a/builtin/revert.c
+++ b/builtin/revert.c
@@ -182,7 +182,7 @@ static int run_sequencer(int argc, const char **argv, struct replay_opts *opts)
"--signoff", opts->signoff,
"--no-commit", opts->no_commit,
"-x", opts->record_origin,
- "--edit", opts->edit,
+ "--edit", opts->edit > 0,
NULL);
if (cmd) {
@@ -230,8 +230,6 @@ int cmd_revert(int argc, const char **argv, const char *prefix)
struct replay_opts opts = REPLAY_OPTS_INIT;
int res;
- if (isatty(0))
- opts.edit = 1;
opts.action = REPLAY_REVERT;
sequencer_init_config(&opts);
res = run_sequencer(argc, argv, &opts);
diff --git a/builtin/stash.c b/builtin/stash.c
index 3477e94..c56fed3 100644
--- a/builtin/stash.c
+++ b/builtin/stash.c
@@ -10,11 +10,12 @@
#include "strvec.h"
#include "run-command.h"
#include "dir.h"
+#include "entry.h"
#include "rerere.h"
#include "revision.h"
#include "log-tree.h"
#include "diffcore.h"
#include "exec-cmd.h"
#define INCLUDE_ALL_FILES 2
diff --git a/builtin/symbolic-ref.c b/builtin/symbolic-ref.c
index 80237f0..e547a08 100644
--- a/builtin/symbolic-ref.c
+++ b/builtin/symbolic-ref.c
@@ -24,9 +24,11 @@ static int check_symref(const char *HEAD, int quiet, int shorten, int print)
return 1;
}
if (print) {
+ char *to_free = NULL;
if (shorten)
- refname = shorten_unambiguous_ref(refname, 0);
+ refname = to_free = shorten_unambiguous_ref(refname, 0);
puts(refname);
+ free(to_free);
}
return 0;
}
diff --git a/builtin/unpack-objects.c b/builtin/unpack-objects.c
index a4ba2eb..4a70b17 100644
--- a/builtin/unpack-objects.c
+++ b/builtin/unpack-objects.c
@@ -187,7 +187,8 @@ static void write_cached_object(struct object *obj, struct obj_buffer *obj_buf)
* that have reachability requirements and calls this function.
* Verify its reachability and validity recursively and write it out.
*/
-static int check_object(struct object *obj, int type, void *data, struct fsck_options *options)
+static int check_object(struct object *obj, enum object_type type,
+ void *data, struct fsck_options *options)
{
struct obj_buffer *obj_buf;
diff --git a/builtin/worktree.c b/builtin/worktree.c
index 1cd5c20..8771453 100644
--- a/builtin/worktree.c
+++ b/builtin/worktree.c
@@ -446,16 +446,18 @@ static void print_preparing_worktree_line(int detach,
static const char *dwim_branch(const char *path, const char **new_branch)
{
int n;
+ int branch_exists;
const char *s = worktree_basename(path, &n);
const char *branchname = xstrndup(s, n);
struct strbuf ref = STRBUF_INIT;
UNLEAK(branchname);
- if (!strbuf_check_branch_ref(&ref, branchname) &&
- ref_exists(ref.buf)) {
- strbuf_release(&ref);
+
+ branch_exists = !strbuf_check_branch_ref(&ref, branchname) &&
+ ref_exists(ref.buf);
+ strbuf_release(&ref);
+ if (branch_exists)
return branchname;
- }
*new_branch = branchname;
if (guess_remote) {
diff --git a/cache.h b/cache.h
index 57f2285..148d9ab 100644
--- a/cache.h
+++ b/cache.h
@@ -1621,30 +1621,6 @@ const char *show_ident_date(const struct ident_split *id,
*/
int ident_cmp(const struct ident_split *, const struct ident_split *);
-struct checkout {
- struct index_state *istate;
- const char *base_dir;
- int base_dir_len;
- struct delayed_checkout *delayed_checkout;
- struct checkout_metadata meta;
- unsigned force:1,
- quiet:1,
- not_new:1,
- clone:1,
- refresh_cache:1;
-};
-#define CHECKOUT_INIT { NULL, "" }
-
-#define TEMPORARY_FILENAME_LENGTH 25
-int checkout_entry(struct cache_entry *ce, const struct checkout *state, char *topath, int *nr_checkouts);
-void enable_delayed_checkout(struct checkout *state);
-int finish_delayed_checkout(struct checkout *state, int *nr_checkouts);
-/*
- * Unlink the last component and schedule the leading directories for
- * removal, such that empty directories get removed.
- */
-void unlink_entry(const struct cache_entry *ce);
-
struct cache_def {
struct strbuf path;
int flags;
diff --git a/compat/precompose_utf8.c b/compat/precompose_utf8.c
index ec56056..cce1d57 100644
--- a/compat/precompose_utf8.c
+++ b/compat/precompose_utf8.c
@@ -60,10 +60,12 @@ void probe_utf8_pathname_composition(void)
strbuf_release(&path);
}
-static inline const char *precompose_string_if_needed(const char *in)
+const char *precompose_string_if_needed(const char *in)
{
size_t inlen;
size_t outlen;
+ if (!in)
+ return NULL;
if (has_non_ascii(in, (size_t)-1, &inlen)) {
iconv_t ic_prec;
char *out;
@@ -96,10 +98,7 @@ const char *precompose_argv_prefix(int argc, const char **argv, const char *pref
argv[i] = precompose_string_if_needed(argv[i]);
i++;
}
- if (prefix) {
- prefix = precompose_string_if_needed(prefix);
- }
- return prefix;
+ return precompose_string_if_needed(prefix);
}
diff --git a/compat/precompose_utf8.h b/compat/precompose_utf8.h
index d70b846..fea06cf 100644
--- a/compat/precompose_utf8.h
+++ b/compat/precompose_utf8.h
@@ -29,6 +29,7 @@ typedef struct {
} PREC_DIR;
const char *precompose_argv_prefix(int argc, const char **argv, const char *prefix);
+const char *precompose_string_if_needed(const char *in);
void probe_utf8_pathname_composition(void);
PREC_DIR *precompose_utf8_opendir(const char *dirname);
diff --git a/compat/simple-ipc/ipc-shared.c b/compat/simple-ipc/ipc-shared.c
new file mode 100644
index 0000000..1edec81
--- /dev/null
+++ b/compat/simple-ipc/ipc-shared.c
@@ -0,0 +1,28 @@
+#include "cache.h"
+#include "simple-ipc.h"
+#include "strbuf.h"
+#include "pkt-line.h"
+#include "thread-utils.h"
+
+#ifdef SUPPORTS_SIMPLE_IPC
+
+int ipc_server_run(const char *path, const struct ipc_server_opts *opts,
+ ipc_server_application_cb *application_cb,
+ void *application_data)
+{
+ struct ipc_server_data *server_data = NULL;
+ int ret;
+
+ ret = ipc_server_run_async(&server_data, path, opts,
+ application_cb, application_data);
+ if (ret)
+ return ret;
+
+ ret = ipc_server_await(server_data);
+
+ ipc_server_free(server_data);
+
+ return ret;
+}
+
+#endif /* SUPPORTS_SIMPLE_IPC */
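
ipc_server_run() above is the blocking convenience wrapper around the async start/await/free trio. As a hedged, illustrative sketch (not part of the patch, assuming the callback typedefs in simple-ipc.h match the call sites shown later in this series: the request arrives as a NUL-terminated string and the reply callback may be invoked repeatedly to chunk a response), an application built on top of it could look roughly like this, using a made-up "echo"/"quit" protocol:

#include "cache.h"
#include "simple-ipc.h"

#ifdef SUPPORTS_SIMPLE_IPC
/*
 * Application callback: echo every request back to the client and ask
 * the server to stop when a client sends "quit". Returning
 * SIMPLE_IPC_QUIT queues an async shutdown of the thread pool.
 */
static int echo_app_cb(void *application_data, const char *request,
		       ipc_server_reply_cb *reply_cb,
		       struct ipc_server_reply_data *reply_data)
{
	if (!strcmp(request, "quit"))
		return SIMPLE_IPC_QUIT;

	/* reply_cb() may be called more than once to chunk the response */
	return reply_cb(reply_data, request, strlen(request));
}

static int run_echo_daemon(const char *path)
{
	struct ipc_server_opts opts = { .nr_threads = 4 };

	/* blocks until some client sends "quit" (or an error occurs) */
	return ipc_server_run(path, &opts, echo_app_cb, NULL);
}
#endif /* SUPPORTS_SIMPLE_IPC */
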
diff --git a/compat/simple-ipc/ipc-unix-socket.c b/compat/simple-ipc/ipc-unix-socket.c
new file mode 100644
index 0000000..38689b2
--- /dev/null
+++ b/compat/simple-ipc/ipc-unix-socket.c
@@ -0,0 +1,999 @@
+#include "cache.h"
+#include "simple-ipc.h"
+#include "strbuf.h"
+#include "pkt-line.h"
+#include "thread-utils.h"
+#include "unix-socket.h"
+#include "unix-stream-server.h"
+
+#ifdef NO_UNIX_SOCKETS
+#error compat/simple-ipc/ipc-unix-socket.c requires Unix sockets
+#endif
+
+enum ipc_active_state ipc_get_active_state(const char *path)
+{
+ enum ipc_active_state state = IPC_STATE__OTHER_ERROR;
+ struct ipc_client_connect_options options
+ = IPC_CLIENT_CONNECT_OPTIONS_INIT;
+ struct stat st;
+ struct ipc_client_connection *connection_test = NULL;
+
+ options.wait_if_busy = 0;
+ options.wait_if_not_found = 0;
+
+ if (lstat(path, &st) == -1) {
+ switch (errno) {
+ case ENOENT:
+ case ENOTDIR:
+ return IPC_STATE__NOT_LISTENING;
+ default:
+ return IPC_STATE__INVALID_PATH;
+ }
+ }
+
+ /* also complain if a plain file is in the way */
+ if ((st.st_mode & S_IFMT) != S_IFSOCK)
+ return IPC_STATE__INVALID_PATH;
+
+ /*
+ * Just because the filesystem has a S_IFSOCK type inode
+ * at `path`, doesn't mean that there is a server listening.
+ * Ping it to be sure.
+ */
+ state = ipc_client_try_connect(path, &options, &connection_test);
+ ipc_client_close_connection(connection_test);
+
+ return state;
+}
+
+/*
+ * Retry frequency when trying to connect to a server.
+ *
+ * This value should be short enough that we don't seriously delay our
+ * caller, but not so short that our spinning puts pressure on the
+ * system.
+ */
+#define WAIT_STEP_MS (50)
+
+/*
+ * Try to connect to the server. If the server is just starting up or
+ * is very busy, we may not get a connection the first time.
+ */
+static enum ipc_active_state connect_to_server(
+ const char *path,
+ int timeout_ms,
+ const struct ipc_client_connect_options *options,
+ int *pfd)
+{
+ int k;
+
+ *pfd = -1;
+
+ for (k = 0; k < timeout_ms; k += WAIT_STEP_MS) {
+ int fd = unix_stream_connect(path, options->uds_disallow_chdir);
+
+ if (fd != -1) {
+ *pfd = fd;
+ return IPC_STATE__LISTENING;
+ }
+
+ if (errno == ENOENT) {
+ if (!options->wait_if_not_found)
+ return IPC_STATE__PATH_NOT_FOUND;
+
+ goto sleep_and_try_again;
+ }
+
+ if (errno == ETIMEDOUT) {
+ if (!options->wait_if_busy)
+ return IPC_STATE__NOT_LISTENING;
+
+ goto sleep_and_try_again;
+ }
+
+ if (errno == ECONNREFUSED) {
+ if (!options->wait_if_busy)
+ return IPC_STATE__NOT_LISTENING;
+
+ goto sleep_and_try_again;
+ }
+
+ return IPC_STATE__OTHER_ERROR;
+
+ sleep_and_try_again:
+ sleep_millisec(WAIT_STEP_MS);
+ }
+
+ return IPC_STATE__NOT_LISTENING;
+}
+
+/*
+ * The total amount of time that we are willing to wait when trying to
+ * connect to a server.
+ *
+ * When the server is first started, it might take a little while for
+ * it to become ready to service requests. Likewise, the server may
+ * be very (temporarily) busy and not respond to our connections.
+ *
+ * We should gracefully and silently handle those conditions and try
+ * again for a reasonable time period.
+ *
+ * The value chosen here should be long enough for the server
+ * to reliably heal from the above conditions.
+ */
+#define MY_CONNECTION_TIMEOUT_MS (1000)
+
+enum ipc_active_state ipc_client_try_connect(
+ const char *path,
+ const struct ipc_client_connect_options *options,
+ struct ipc_client_connection **p_connection)
+{
+ enum ipc_active_state state = IPC_STATE__OTHER_ERROR;
+ int fd = -1;
+
+ *p_connection = NULL;
+
+ trace2_region_enter("ipc-client", "try-connect", NULL);
+ trace2_data_string("ipc-client", NULL, "try-connect/path", path);
+
+ state = connect_to_server(path, MY_CONNECTION_TIMEOUT_MS,
+ options, &fd);
+
+ trace2_data_intmax("ipc-client", NULL, "try-connect/state",
+ (intmax_t)state);
+ trace2_region_leave("ipc-client", "try-connect", NULL);
+
+ if (state == IPC_STATE__LISTENING) {
+ (*p_connection) = xcalloc(1, sizeof(struct ipc_client_connection));
+ (*p_connection)->fd = fd;
+ }
+
+ return state;
+}
+
+void ipc_client_close_connection(struct ipc_client_connection *connection)
+{
+ if (!connection)
+ return;
+
+ if (connection->fd != -1)
+ close(connection->fd);
+
+ free(connection);
+}
+
+int ipc_client_send_command_to_connection(
+ struct ipc_client_connection *connection,
+ const char *message, struct strbuf *answer)
+{
+ int ret = 0;
+
+ strbuf_setlen(answer, 0);
+
+ trace2_region_enter("ipc-client", "send-command", NULL);
+
+ if (write_packetized_from_buf_no_flush(message, strlen(message),
+ connection->fd) < 0 ||
+ packet_flush_gently(connection->fd) < 0) {
+ ret = error(_("could not send IPC command"));
+ goto done;
+ }
+
+ if (read_packetized_to_strbuf(
+ connection->fd, answer,
+ PACKET_READ_GENTLE_ON_EOF | PACKET_READ_GENTLE_ON_READ_ERROR) < 0) {
+ ret = error(_("could not read IPC response"));
+ goto done;
+ }
+
+done:
+ trace2_region_leave("ipc-client", "send-command", NULL);
+ return ret;
+}
+
+int ipc_client_send_command(const char *path,
+ const struct ipc_client_connect_options *options,
+ const char *message, struct strbuf *answer)
+{
+ int ret = -1;
+ enum ipc_active_state state;
+ struct ipc_client_connection *connection = NULL;
+
+ state = ipc_client_try_connect(path, options, &connection);
+
+ if (state != IPC_STATE__LISTENING)
+ return ret;
+
+ ret = ipc_client_send_command_to_connection(connection, message, answer);
+
+ ipc_client_close_connection(connection);
+
+ return ret;
+}
+
+static int set_socket_blocking_flag(int fd, int make_nonblocking)
+{
+ int flags;
+
+ flags = fcntl(fd, F_GETFL, NULL);
+
+ if (flags < 0)
+ return -1;
+
+ if (make_nonblocking)
+ flags |= O_NONBLOCK;
+ else
+ flags &= ~O_NONBLOCK;
+
+ return fcntl(fd, F_SETFL, flags);
+}
+
+/*
+ * Magic numbers used to annotate callback instance data.
+ * These are used to help guard against accidentally passing the
+ * wrong instance data across multiple levels of callbacks (which
+ * is easy to do if there are `void*` arguments).
+ */
+enum magic {
+ MAGIC_SERVER_REPLY_DATA,
+ MAGIC_WORKER_THREAD_DATA,
+ MAGIC_ACCEPT_THREAD_DATA,
+ MAGIC_SERVER_DATA,
+};
+
+struct ipc_server_reply_data {
+ enum magic magic;
+ int fd;
+ struct ipc_worker_thread_data *worker_thread_data;
+};
+
+struct ipc_worker_thread_data {
+ enum magic magic;
+ struct ipc_worker_thread_data *next_thread;
+ struct ipc_server_data *server_data;
+ pthread_t pthread_id;
+};
+
+struct ipc_accept_thread_data {
+ enum magic magic;
+ struct ipc_server_data *server_data;
+
+ struct unix_ss_socket *server_socket;
+
+ int fd_send_shutdown;
+ int fd_wait_shutdown;
+ pthread_t pthread_id;
+};
+
+/*
+ * With unix-sockets, the conceptual "ipc-server" is implemented as a single
+ * controller "accept-thread" thread and a pool of "worker-thread" threads.
+ * The former does the usual `accept()` loop and dispatches connections
+ * to an idle worker thread. The worker threads wait in an idle loop for
+ * a new connection, communicate with the client and relay data to/from
+ * the `application_cb` and then wait for another connection from the
+ * server thread. This avoids the overhead of constantly creating and
+ * destroying threads.
+ */
+struct ipc_server_data {
+ enum magic magic;
+ ipc_server_application_cb *application_cb;
+ void *application_data;
+ struct strbuf buf_path;
+
+ struct ipc_accept_thread_data *accept_thread;
+ struct ipc_worker_thread_data *worker_thread_list;
+
+ pthread_mutex_t work_available_mutex;
+ pthread_cond_t work_available_cond;
+
+ /*
+ * Accepted but not yet processed client connections are kept
+ * in a circular buffer FIFO. The queue is empty when the
+ * positions are equal.
+ */
+ int *fifo_fds;
+ int queue_size;
+ int back_pos;
+ int front_pos;
+
+ int shutdown_requested;
+ int is_stopped;
+};
+
+/*
+ * Remove and return the oldest queued connection.
+ *
+ * Returns -1 if empty.
+ */
+static int fifo_dequeue(struct ipc_server_data *server_data)
+{
+ /* ASSERT holding mutex */
+
+ int fd;
+
+ if (server_data->back_pos == server_data->front_pos)
+ return -1;
+
+ fd = server_data->fifo_fds[server_data->front_pos];
+ server_data->fifo_fds[server_data->front_pos] = -1;
+
+ server_data->front_pos++;
+ if (server_data->front_pos == server_data->queue_size)
+ server_data->front_pos = 0;
+
+ return fd;
+}
+
+/*
+ * Push a new fd onto the back of the queue.
+ *
+ * Drop it and return -1 if queue is already full.
+ */
+static int fifo_enqueue(struct ipc_server_data *server_data, int fd)
+{
+ /* ASSERT holding mutex */
+
+ int next_back_pos;
+
+ next_back_pos = server_data->back_pos + 1;
+ if (next_back_pos == server_data->queue_size)
+ next_back_pos = 0;
+
+ if (next_back_pos == server_data->front_pos) {
+ /* Queue is full. Just drop it. */
+ close(fd);
+ return -1;
+ }
+
+ server_data->fifo_fds[server_data->back_pos] = fd;
+ server_data->back_pos = next_back_pos;
+
+ return fd;
+}
+
+/*
+ * Wait for a connection to be queued to the FIFO and return it.
+ *
+ * Returns -1 if someone has already requested a shutdown.
+ */
+static int worker_thread__wait_for_connection(
+ struct ipc_worker_thread_data *worker_thread_data)
+{
+ /* ASSERT NOT holding mutex */
+
+ struct ipc_server_data *server_data = worker_thread_data->server_data;
+ int fd = -1;
+
+ pthread_mutex_lock(&server_data->work_available_mutex);
+ for (;;) {
+ if (server_data->shutdown_requested)
+ break;
+
+ fd = fifo_dequeue(server_data);
+ if (fd >= 0)
+ break;
+
+ pthread_cond_wait(&server_data->work_available_cond,
+ &server_data->work_available_mutex);
+ }
+ pthread_mutex_unlock(&server_data->work_available_mutex);
+
+ return fd;
+}
+
+/*
+ * Forward declare our reply callback function so that any compiler
+ * errors are reported when we actually define the function (in addition
+ * to any errors reported when we try to pass this callback function as
+ * a parameter in a function call). The former are easier to understand.
+ */
+static ipc_server_reply_cb do_io_reply_callback;
+
+/*
+ * Relay application's response message to the client process.
+ * (We do not flush at this point because we allow the caller
+ * to chunk data to the client thru us.)
+ */
+static int do_io_reply_callback(struct ipc_server_reply_data *reply_data,
+ const char *response, size_t response_len)
+{
+ if (reply_data->magic != MAGIC_SERVER_REPLY_DATA)
+ BUG("reply_cb called with wrong instance data");
+
+ return write_packetized_from_buf_no_flush(response, response_len,
+ reply_data->fd);
+}
+
+/* A randomly chosen value. */
+#define MY_WAIT_POLL_TIMEOUT_MS (10)
+
+/*
+ * If the client hangs up without sending any data on the wire, just
+ * quietly close the socket and ignore this client.
+ *
+ * This worker thread is committed to reading the IPC request data
+ * from the client at the other end of this fd. Wait here for the
+ * client to actually put something on the wire -- because if the
+ * client just does a ping (connect and hangup without sending any
+ * data), our use of the pkt-line read routines will spew an error
+ * message.
+ *
+ * Return -1 if the client hung up.
+ * Return 0 if data (possibly incomplete) is ready.
+ */
+static int worker_thread__wait_for_io_start(
+ struct ipc_worker_thread_data *worker_thread_data,
+ int fd)
+{
+ struct ipc_server_data *server_data = worker_thread_data->server_data;
+ struct pollfd pollfd[1];
+ int result;
+
+ for (;;) {
+ pollfd[0].fd = fd;
+ pollfd[0].events = POLLIN;
+
+ result = poll(pollfd, 1, MY_WAIT_POLL_TIMEOUT_MS);
+ if (result < 0) {
+ if (errno == EINTR)
+ continue;
+ goto cleanup;
+ }
+
+ if (result == 0) {
+ /* a timeout */
+
+ int in_shutdown;
+
+ pthread_mutex_lock(&server_data->work_available_mutex);
+ in_shutdown = server_data->shutdown_requested;
+ pthread_mutex_unlock(&server_data->work_available_mutex);
+
+ /*
+ * If a shutdown is already in progress and this
+ * client has not started talking yet, just drop it.
+ */
+ if (in_shutdown)
+ goto cleanup;
+ continue;
+ }
+
+ if (pollfd[0].revents & POLLHUP)
+ goto cleanup;
+
+ if (pollfd[0].revents & POLLIN)
+ return 0;
+
+ goto cleanup;
+ }
+
+cleanup:
+ close(fd);
+ return -1;
+}
+
+/*
+ * Receive the request/command from the client and pass it to the
+ * registered request-callback. The request-callback will compose
+ * a response and call our reply-callback to send it to the client.
+ */
+static int worker_thread__do_io(
+ struct ipc_worker_thread_data *worker_thread_data,
+ int fd)
+{
+ /* ASSERT NOT holding lock */
+
+ struct strbuf buf = STRBUF_INIT;
+ struct ipc_server_reply_data reply_data;
+ int ret = 0;
+
+ reply_data.magic = MAGIC_SERVER_REPLY_DATA;
+ reply_data.worker_thread_data = worker_thread_data;
+
+ reply_data.fd = fd;
+
+ ret = read_packetized_to_strbuf(
+ reply_data.fd, &buf,
+ PACKET_READ_GENTLE_ON_EOF | PACKET_READ_GENTLE_ON_READ_ERROR);
+ if (ret >= 0) {
+ ret = worker_thread_data->server_data->application_cb(
+ worker_thread_data->server_data->application_data,
+ buf.buf, do_io_reply_callback, &reply_data);
+
+ packet_flush_gently(reply_data.fd);
+ }
+ else {
+ /*
+ * The client probably disconnected/shutdown before it
+ * could send a well-formed message. Ignore it.
+ */
+ }
+
+ strbuf_release(&buf);
+ close(reply_data.fd);
+
+ return ret;
+}
+
+/*
+ * Block SIGPIPE on the current thread (so that we get EPIPE from
+ * write() rather than an actual signal).
+ *
+ * Note that using sigchain_push() and _pop() to control SIGPIPE
+ * around our IO calls is not thread safe:
+ * [] It uses a global stack of handler frames.
+ * [] It uses ALLOC_GROW() to resize it.
+ * [] Finally, according to the `signal(2)` man-page:
+ * "The effects of `signal()` in a multithreaded process are unspecified."
+ */
+static void thread_block_sigpipe(sigset_t *old_set)
+{
+ sigset_t new_set;
+
+ sigemptyset(&new_set);
+ sigaddset(&new_set, SIGPIPE);
+
+ sigemptyset(old_set);
+ pthread_sigmask(SIG_BLOCK, &new_set, old_set);
+}
+
+/*
+ * Thread proc for an IPC worker thread. It handles a series of
+ * connections from clients. It pulls the next fd from the queue,
+ * processes it, and then waits for the next client.
+ *
+ * Block SIGPIPE in this worker thread for the life of the thread.
+ * This avoids stray (and sometimes delayed) SIGPIPE signals caused
+ * by client errors and/or when we are under extremely heavy IO load.
+ *
+ * This means that the application callback will have SIGPIPE blocked.
+ * The callback should not change it.
+ */
+static void *worker_thread_proc(void *_worker_thread_data)
+{
+ struct ipc_worker_thread_data *worker_thread_data = _worker_thread_data;
+ struct ipc_server_data *server_data = worker_thread_data->server_data;
+ sigset_t old_set;
+ int fd, io;
+ int ret;
+
+ trace2_thread_start("ipc-worker");
+
+ thread_block_sigpipe(&old_set);
+
+ for (;;) {
+ fd = worker_thread__wait_for_connection(worker_thread_data);
+ if (fd == -1)
+ break; /* in shutdown */
+
+ io = worker_thread__wait_for_io_start(worker_thread_data, fd);
+ if (io == -1)
+ continue; /* client hung up without sending anything */
+
+ ret = worker_thread__do_io(worker_thread_data, fd);
+
+ if (ret == SIMPLE_IPC_QUIT) {
+ trace2_data_string("ipc-worker", NULL, "queue_stop_async",
+ "application_quit");
+ /*
+ * The application layer is telling the ipc-server
+ * layer to shutdown.
+ *
+ * We DO NOT have a response to send to the client.
+ *
+ * Queue an async stop (to stop the other threads) and
+ * allow this worker thread to exit now (no sense waiting
+ * for the thread-pool shutdown signal).
+ *
+ * Other non-idle worker threads are allowed to finish
+ * responding to their current clients.
+ */
+ ipc_server_stop_async(server_data);
+ break;
+ }
+ }
+
+ trace2_thread_exit();
+ return NULL;
+}
+
+/* A randomly chosen value. */
+#define MY_ACCEPT_POLL_TIMEOUT_MS (60 * 1000)
+
+/*
+ * Accept a new client connection on our socket. This uses non-blocking
+ * IO so that we can also wait for shutdown requests on our socket-pair
+ * without actually spinning on a fast timeout.
+ */
+static int accept_thread__wait_for_connection(
+ struct ipc_accept_thread_data *accept_thread_data)
+{
+ struct pollfd pollfd[2];
+ int result;
+
+ for (;;) {
+ pollfd[0].fd = accept_thread_data->fd_wait_shutdown;
+ pollfd[0].events = POLLIN;
+
+ pollfd[1].fd = accept_thread_data->server_socket->fd_socket;
+ pollfd[1].events = POLLIN;
+
+ result = poll(pollfd, 2, MY_ACCEPT_POLL_TIMEOUT_MS);
+ if (result < 0) {
+ if (errno == EINTR)
+ continue;
+ return result;
+ }
+
+ if (result == 0) {
+ /* a timeout */
+
+ /*
+ * If someone deletes or force-creates a new unix
+ * domain socket at our path, all future clients
+ * will be routed elsewhere and we silently starve.
+ * If that happens, just queue a shutdown.
+ */
+ if (unix_ss_was_stolen(
+ accept_thread_data->server_socket)) {
+ trace2_data_string("ipc-accept", NULL,
+ "queue_stop_async",
+ "socket_stolen");
+ ipc_server_stop_async(
+ accept_thread_data->server_data);
+ }
+ continue;
+ }
+
+ if (pollfd[0].revents & POLLIN) {
+ /* shutdown message queued to socketpair */
+ return -1;
+ }
+
+ if (pollfd[1].revents & POLLIN) {
+ /* a connection is available on server_socket */
+
+ int client_fd =
+ accept(accept_thread_data->server_socket->fd_socket,
+ NULL, NULL);
+ if (client_fd >= 0)
+ return client_fd;
+
+ /*
+ * An error here is unlikely -- it probably
+ * indicates that the connecting process has
+ * already dropped the connection.
+ */
+ continue;
+ }
+
+ BUG("unhandled poll result errno=%d r[0]=%d r[1]=%d",
+ errno, pollfd[0].revents, pollfd[1].revents);
+ }
+}
+
+/*
+ * Thread proc for the IPC server "accept thread". This waits for
+ * an incoming socket connection, appends it to the queue of available
+ * connections, and notifies a worker thread to process it.
+ *
+ * Block SIGPIPE in this thread for the life of the thread. This
+ * avoids any stray SIGPIPE signals when closing pipe fds under
+ * extremely heavy loads (such as when the fifo queue is full and we
+ * drop incoming connections).
+ */
+static void *accept_thread_proc(void *_accept_thread_data)
+{
+ struct ipc_accept_thread_data *accept_thread_data = _accept_thread_data;
+ struct ipc_server_data *server_data = accept_thread_data->server_data;
+ sigset_t old_set;
+
+ trace2_thread_start("ipc-accept");
+
+ thread_block_sigpipe(&old_set);
+
+ for (;;) {
+ int client_fd = accept_thread__wait_for_connection(
+ accept_thread_data);
+
+ pthread_mutex_lock(&server_data->work_available_mutex);
+ if (server_data->shutdown_requested) {
+ pthread_mutex_unlock(&server_data->work_available_mutex);
+ if (client_fd >= 0)
+ close(client_fd);
+ break;
+ }
+
+ if (client_fd < 0) {
+ /* ignore transient accept() errors */
+ }
+ else {
+ fifo_enqueue(server_data, client_fd);
+ pthread_cond_broadcast(&server_data->work_available_cond);
+ }
+ pthread_mutex_unlock(&server_data->work_available_mutex);
+ }
+
+ trace2_thread_exit();
+ return NULL;
+}
+
+/*
+ * We can't predict the connection arrival rate relative to the worker
+ * processing rate, so we allow the "accept-thread" to queue up
+ * a generous number of connections, since we'd rather have the client
+ * not time out unnecessarily if we can avoid it. (The assumption is
+ * that this will be used for FSMonitor and a few-second wait on a
+ * connection is better than having the client time out and do the full
+ * computation itself.)
+ *
+ * The FIFO queue size is set to a multiple of the worker pool size.
+ * This value was chosen at random.
+ */
+#define FIFO_SCALE (100)
+
+/*
+ * The backlog value for `listen(2)`. This doesn't need to be huge,
+ * rather just large enough for our "accept-thread" to wake up and
+ * queue incoming connections onto the FIFO without the kernel
+ * dropping any.
+ *
+ * This value was chosen at random.
+ */
+#define LISTEN_BACKLOG (50)
+
+static int create_listener_socket(
+ const char *path,
+ const struct ipc_server_opts *ipc_opts,
+ struct unix_ss_socket **new_server_socket)
+{
+ struct unix_ss_socket *server_socket = NULL;
+ struct unix_stream_listen_opts uslg_opts = UNIX_STREAM_LISTEN_OPTS_INIT;
+ int ret;
+
+ uslg_opts.listen_backlog_size = LISTEN_BACKLOG;
+ uslg_opts.disallow_chdir = ipc_opts->uds_disallow_chdir;
+
+ ret = unix_ss_create(path, &uslg_opts, -1, &server_socket);
+ if (ret)
+ return ret;
+
+ if (set_socket_blocking_flag(server_socket->fd_socket, 1)) {
+ int saved_errno = errno;
+ unix_ss_free(server_socket);
+ errno = saved_errno;
+ return -1;
+ }
+
+ *new_server_socket = server_socket;
+
+ trace2_data_string("ipc-server", NULL, "listen-with-lock", path);
+ return 0;
+}
+
+static int setup_listener_socket(
+ const char *path,
+ const struct ipc_server_opts *ipc_opts,
+ struct unix_ss_socket **new_server_socket)
+{
+ int ret, saved_errno;
+
+ trace2_region_enter("ipc-server", "create-listener_socket", NULL);
+
+ ret = create_listener_socket(path, ipc_opts, new_server_socket);
+
+ saved_errno = errno;
+ trace2_region_leave("ipc-server", "create-listener_socket", NULL);
+ errno = saved_errno;
+
+ return ret;
+}
+
+/*
+ * Start IPC server in a pool of background threads.
+ */
+int ipc_server_run_async(struct ipc_server_data **returned_server_data,
+ const char *path, const struct ipc_server_opts *opts,
+ ipc_server_application_cb *application_cb,
+ void *application_data)
+{
+ struct unix_ss_socket *server_socket = NULL;
+ struct ipc_server_data *server_data;
+ int sv[2];
+ int k;
+ int ret;
+ int nr_threads = opts->nr_threads;
+
+ *returned_server_data = NULL;
+
+ /*
+ * Create a socketpair and set sv[1] to non-blocking. This
+ * will be used to send a shutdown message to the accept-thread
+ * and allows the accept-thread to wait on EITHER a client
+ * connection or a shutdown request without spinning.
+ */
+ if (socketpair(AF_UNIX, SOCK_STREAM, 0, sv) < 0)
+ return -1;
+
+ if (set_socket_blocking_flag(sv[1], 1)) {
+ int saved_errno = errno;
+ close(sv[0]);
+ close(sv[1]);
+ errno = saved_errno;
+ return -1;
+ }
+
+ ret = setup_listener_socket(path, opts, &server_socket);
+ if (ret) {
+ int saved_errno = errno;
+ close(sv[0]);
+ close(sv[1]);
+ errno = saved_errno;
+ return ret;
+ }
+
+ server_data = xcalloc(1, sizeof(*server_data));
+ server_data->magic = MAGIC_SERVER_DATA;
+ server_data->application_cb = application_cb;
+ server_data->application_data = application_data;
+ strbuf_init(&server_data->buf_path, 0);
+ strbuf_addstr(&server_data->buf_path, path);
+
+ if (nr_threads < 1)
+ nr_threads = 1;
+
+ pthread_mutex_init(&server_data->work_available_mutex, NULL);
+ pthread_cond_init(&server_data->work_available_cond, NULL);
+
+ server_data->queue_size = nr_threads * FIFO_SCALE;
+ CALLOC_ARRAY(server_data->fifo_fds, server_data->queue_size);
+
+ server_data->accept_thread =
+ xcalloc(1, sizeof(*server_data->accept_thread));
+ server_data->accept_thread->magic = MAGIC_ACCEPT_THREAD_DATA;
+ server_data->accept_thread->server_data = server_data;
+ server_data->accept_thread->server_socket = server_socket;
+ server_data->accept_thread->fd_send_shutdown = sv[0];
+ server_data->accept_thread->fd_wait_shutdown = sv[1];
+
+ if (pthread_create(&server_data->accept_thread->pthread_id, NULL,
+ accept_thread_proc, server_data->accept_thread))
+ die_errno(_("could not start accept_thread '%s'"), path);
+
+ for (k = 0; k < nr_threads; k++) {
+ struct ipc_worker_thread_data *wtd;
+
+ wtd = xcalloc(1, sizeof(*wtd));
+ wtd->magic = MAGIC_WORKER_THREAD_DATA;
+ wtd->server_data = server_data;
+
+ if (pthread_create(&wtd->pthread_id, NULL, worker_thread_proc,
+ wtd)) {
+ if (k == 0)
+ die(_("could not start worker[0] for '%s'"),
+ path);
+ /*
+ * Limp along with the thread pool that we have.
+ */
+ break;
+ }
+
+ wtd->next_thread = server_data->worker_thread_list;
+ server_data->worker_thread_list = wtd;
+ }
+
+ *returned_server_data = server_data;
+ return 0;
+}
+
+/*
+ * Gently tell the IPC server threads to shut down.
+ * Can be run on any thread.
+ */
+int ipc_server_stop_async(struct ipc_server_data *server_data)
+{
+ /* ASSERT NOT holding mutex */
+
+ int fd;
+
+ if (!server_data)
+ return 0;
+
+ trace2_region_enter("ipc-server", "server-stop-async", NULL);
+
+ pthread_mutex_lock(&server_data->work_available_mutex);
+
+ server_data->shutdown_requested = 1;
+
+ /*
+ * Write a byte to the shutdown socket pair to wake up the
+ * accept-thread.
+ */
+ if (write(server_data->accept_thread->fd_send_shutdown, "Q", 1) < 0)
+ error_errno("could not write to fd_send_shutdown");
+
+ /*
+ * Drain the queue of existing connections.
+ */
+ while ((fd = fifo_dequeue(server_data)) != -1)
+ close(fd);
+
+ /*
+ * Gently tell worker threads to stop processing new connections
+ * and exit. (This does not abort in-progress conversations.)
+ */
+ pthread_cond_broadcast(&server_data->work_available_cond);
+
+ pthread_mutex_unlock(&server_data->work_available_mutex);
+
+ trace2_region_leave("ipc-server", "server-stop-async", NULL);
+
+ return 0;
+}
+
+/*
+ * Wait for all IPC server threads to stop.
+ */
+int ipc_server_await(struct ipc_server_data *server_data)
+{
+ pthread_join(server_data->accept_thread->pthread_id, NULL);
+
+ if (!server_data->shutdown_requested)
+ BUG("ipc-server: accept-thread stopped for '%s'",
+ server_data->buf_path.buf);
+
+ while (server_data->worker_thread_list) {
+ struct ipc_worker_thread_data *wtd =
+ server_data->worker_thread_list;
+
+ pthread_join(wtd->pthread_id, NULL);
+
+ server_data->worker_thread_list = wtd->next_thread;
+ free(wtd);
+ }
+
+ server_data->is_stopped = 1;
+
+ return 0;
+}
+
+void ipc_server_free(struct ipc_server_data *server_data)
+{
+ struct ipc_accept_thread_data * accept_thread_data;
+
+ if (!server_data)
+ return;
+
+ if (!server_data->is_stopped)
+ BUG("cannot free ipc-server while running for '%s'",
+ server_data->buf_path.buf);
+
+ accept_thread_data = server_data->accept_thread;
+ if (accept_thread_data) {
+ unix_ss_free(accept_thread_data->server_socket);
+
+ if (accept_thread_data->fd_send_shutdown != -1)
+ close(accept_thread_data->fd_send_shutdown);
+ if (accept_thread_data->fd_wait_shutdown != -1)
+ close(accept_thread_data->fd_wait_shutdown);
+
+ free(server_data->accept_thread);
+ }
+
+ while (server_data->worker_thread_list) {
+ struct ipc_worker_thread_data *wtd =
+ server_data->worker_thread_list;
+
+ server_data->worker_thread_list = wtd->next_thread;
+ free(wtd);
+ }
+
+ pthread_cond_destroy(&server_data->work_available_cond);
+ pthread_mutex_destroy(&server_data->work_available_mutex);
+
+ strbuf_release(&server_data->buf_path);
+
+ free(server_data->fifo_fds);
+ free(server_data);
+}
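
From the client side, the whole round trip implemented above (connect with retry, pkt-line framed request, framed response) is wrapped by ipc_client_send_command(). A minimal caller sketch follows; it is illustrative only, and the socket path and "ping" request are made up for the example:

#include "cache.h"
#include "simple-ipc.h"
#include "strbuf.h"

#ifdef SUPPORTS_SIMPLE_IPC
static int ping_daemon(const char *path)
{
	struct ipc_client_connect_options options =
		IPC_CLIENT_CONNECT_OPTIONS_INIT;
	struct strbuf answer = STRBUF_INIT;
	int ret;

	options.wait_if_busy = 1;      /* retry while the server is saturated */
	options.wait_if_not_found = 0; /* fail fast if no daemon is running */

	ret = ipc_client_send_command(path, &options, "ping", &answer);
	if (!ret)
		printf("daemon said: %s\n", answer.buf);

	strbuf_release(&answer);
	return ret;
}
#endif /* SUPPORTS_SIMPLE_IPC */
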
diff --git a/compat/simple-ipc/ipc-win32.c b/compat/simple-ipc/ipc-win32.c
new file mode 100644
index 0000000..8f89c02
--- /dev/null
+++ b/compat/simple-ipc/ipc-win32.c
@@ -0,0 +1,751 @@
+#include "cache.h"
+#include "simple-ipc.h"
+#include "strbuf.h"
+#include "pkt-line.h"
+#include "thread-utils.h"
+
+#ifndef GIT_WINDOWS_NATIVE
+#error This file can only be compiled on Windows
+#endif
+
+static int initialize_pipe_name(const char *path, wchar_t *wpath, size_t alloc)
+{
+ int off = 0;
+ struct strbuf realpath = STRBUF_INIT;
+
+ if (!strbuf_realpath(&realpath, path, 0))
+ return -1;
+
+ off = swprintf(wpath, alloc, L"\\\\.\\pipe\\");
+ if (xutftowcs(wpath + off, realpath.buf, alloc - off) < 0)
+ return -1;
+
+ /* Handle drive prefix */
+ if (wpath[off] && wpath[off + 1] == L':') {
+ wpath[off + 1] = L'_';
+ off += 2;
+ }
+
+ for (; wpath[off]; off++)
+ if (wpath[off] == L'/')
+ wpath[off] = L'\\';
+
+ strbuf_release(&realpath);
+ return 0;
+}
+
+static enum ipc_active_state get_active_state(wchar_t *pipe_path)
+{
+ if (WaitNamedPipeW(pipe_path, NMPWAIT_USE_DEFAULT_WAIT))
+ return IPC_STATE__LISTENING;
+
+ if (GetLastError() == ERROR_SEM_TIMEOUT)
+ return IPC_STATE__NOT_LISTENING;
+
+ if (GetLastError() == ERROR_FILE_NOT_FOUND)
+ return IPC_STATE__PATH_NOT_FOUND;
+
+ return IPC_STATE__OTHER_ERROR;
+}
+
+enum ipc_active_state ipc_get_active_state(const char *path)
+{
+ wchar_t pipe_path[MAX_PATH];
+
+ if (initialize_pipe_name(path, pipe_path, ARRAY_SIZE(pipe_path)) < 0)
+ return IPC_STATE__INVALID_PATH;
+
+ return get_active_state(pipe_path);
+}
+
+#define WAIT_STEP_MS (50)
+
+static enum ipc_active_state connect_to_server(
+ const wchar_t *wpath,
+ DWORD timeout_ms,
+ const struct ipc_client_connect_options *options,
+ int *pfd)
+{
+ DWORD t_start_ms, t_waited_ms;
+ DWORD step_ms;
+ HANDLE hPipe = INVALID_HANDLE_VALUE;
+ DWORD mode = PIPE_READMODE_BYTE;
+ DWORD gle;
+
+ *pfd = -1;
+
+ for (;;) {
+ hPipe = CreateFileW(wpath, GENERIC_READ | GENERIC_WRITE,
+ 0, NULL, OPEN_EXISTING, 0, NULL);
+ if (hPipe != INVALID_HANDLE_VALUE)
+ break;
+
+ gle = GetLastError();
+
+ switch (gle) {
+ case ERROR_FILE_NOT_FOUND:
+ if (!options->wait_if_not_found)
+ return IPC_STATE__PATH_NOT_FOUND;
+ if (!timeout_ms)
+ return IPC_STATE__PATH_NOT_FOUND;
+
+ step_ms = (timeout_ms < WAIT_STEP_MS) ?
+ timeout_ms : WAIT_STEP_MS;
+ sleep_millisec(step_ms);
+
+ timeout_ms -= step_ms;
+ break; /* try again */
+
+ case ERROR_PIPE_BUSY:
+ if (!options->wait_if_busy)
+ return IPC_STATE__NOT_LISTENING;
+ if (!timeout_ms)
+ return IPC_STATE__NOT_LISTENING;
+
+ t_start_ms = (DWORD)(getnanotime() / 1000000);
+
+ if (!WaitNamedPipeW(wpath, timeout_ms)) {
+ if (GetLastError() == ERROR_SEM_TIMEOUT)
+ return IPC_STATE__NOT_LISTENING;
+
+ return IPC_STATE__OTHER_ERROR;
+ }
+
+ /*
+ * A pipe server instance became available.
+ * Race other client processes to connect to
+ * it.
+ *
+ * But first decrement our overall timeout so
+ * that we don't starve if we keep losing the
+ * race. But also guard against special
+ * NMPWAIT_ values (0 and -1).
+ */
+ t_waited_ms = (DWORD)(getnanotime() / 1000000) - t_start_ms;
+ if (t_waited_ms < timeout_ms)
+ timeout_ms -= t_waited_ms;
+ else
+ timeout_ms = 1;
+ break; /* try again */
+
+ default:
+ return IPC_STATE__OTHER_ERROR;
+ }
+ }
+
+ if (!SetNamedPipeHandleState(hPipe, &mode, NULL, NULL)) {
+ CloseHandle(hPipe);
+ return IPC_STATE__OTHER_ERROR;
+ }
+
+ *pfd = _open_osfhandle((intptr_t)hPipe, O_RDWR|O_BINARY);
+ if (*pfd < 0) {
+ CloseHandle(hPipe);
+ return IPC_STATE__OTHER_ERROR;
+ }
+
+ /* fd now owns hPipe */
+
+ return IPC_STATE__LISTENING;
+}
+
+/*
+ * The default connection timeout for Windows clients.
+ *
+ * This is not currently part of the ipc_ API (nor the config settings)
+ * because of differences between Windows and other platforms.
+ *
+ * This value was chosen at random.
+ */
+#define WINDOWS_CONNECTION_TIMEOUT_MS (30000)
+
+enum ipc_active_state ipc_client_try_connect(
+ const char *path,
+ const struct ipc_client_connect_options *options,
+ struct ipc_client_connection **p_connection)
+{
+ wchar_t wpath[MAX_PATH];
+ enum ipc_active_state state = IPC_STATE__OTHER_ERROR;
+ int fd = -1;
+
+ *p_connection = NULL;
+
+ trace2_region_enter("ipc-client", "try-connect", NULL);
+ trace2_data_string("ipc-client", NULL, "try-connect/path", path);
+
+ if (initialize_pipe_name(path, wpath, ARRAY_SIZE(wpath)) < 0)
+ state = IPC_STATE__INVALID_PATH;
+ else
+ state = connect_to_server(wpath, WINDOWS_CONNECTION_TIMEOUT_MS,
+ options, &fd);
+
+ trace2_data_intmax("ipc-client", NULL, "try-connect/state",
+ (intmax_t)state);
+ trace2_region_leave("ipc-client", "try-connect", NULL);
+
+ if (state == IPC_STATE__LISTENING) {
+ (*p_connection) = xcalloc(1, sizeof(struct ipc_client_connection));
+ (*p_connection)->fd = fd;
+ }
+
+ return state;
+}
+
+void ipc_client_close_connection(struct ipc_client_connection *connection)
+{
+ if (!connection)
+ return;
+
+ if (connection->fd != -1)
+ close(connection->fd);
+
+ free(connection);
+}
+
+int ipc_client_send_command_to_connection(
+ struct ipc_client_connection *connection,
+ const char *message, struct strbuf *answer)
+{
+ int ret = 0;
+
+ strbuf_setlen(answer, 0);
+
+ trace2_region_enter("ipc-client", "send-command", NULL);
+
+ if (write_packetized_from_buf_no_flush(message, strlen(message),
+ connection->fd) < 0 ||
+ packet_flush_gently(connection->fd) < 0) {
+ ret = error(_("could not send IPC command"));
+ goto done;
+ }
+
+ FlushFileBuffers((HANDLE)_get_osfhandle(connection->fd));
+
+ if (read_packetized_to_strbuf(
+ connection->fd, answer,
+ PACKET_READ_GENTLE_ON_EOF | PACKET_READ_GENTLE_ON_READ_ERROR) < 0) {
+ ret = error(_("could not read IPC response"));
+ goto done;
+ }
+
+done:
+ trace2_region_leave("ipc-client", "send-command", NULL);
+ return ret;
+}
+
+int ipc_client_send_command(const char *path,
+ const struct ipc_client_connect_options *options,
+ const char *message, struct strbuf *response)
+{
+ int ret = -1;
+ enum ipc_active_state state;
+ struct ipc_client_connection *connection = NULL;
+
+ state = ipc_client_try_connect(path, options, &connection);
+
+ if (state != IPC_STATE__LISTENING)
+ return ret;
+
+ ret = ipc_client_send_command_to_connection(connection, message, response);
+
+ ipc_client_close_connection(connection);
+
+ return ret;
+}
+
+/*
+ * Duplicate the given pipe handle and wrap it in a file descriptor so
+ * that we can use pkt-line on it.
+ */
+static int dup_fd_from_pipe(const HANDLE pipe)
+{
+ HANDLE process = GetCurrentProcess();
+ HANDLE handle;
+ int fd;
+
+ if (!DuplicateHandle(process, pipe, process, &handle, 0, FALSE,
+ DUPLICATE_SAME_ACCESS)) {
+ errno = err_win_to_posix(GetLastError());
+ return -1;
+ }
+
+ fd = _open_osfhandle((intptr_t)handle, O_RDWR|O_BINARY);
+ if (fd < 0) {
+ errno = err_win_to_posix(GetLastError());
+ CloseHandle(handle);
+ return -1;
+ }
+
+ /*
+ * `handle` is now owned by `fd` and will be automatically closed
+ * when the descriptor is closed.
+ */
+
+ return fd;
+}
+
+/*
+ * Magic numbers used to annotate callback instance data.
+ * These are used to help guard against accidentally passing the
+ * wrong instance data across multiple levels of callbacks (which
+ * is easy to do if there are `void*` arguments).
+ */
+enum magic {
+ MAGIC_SERVER_REPLY_DATA,
+ MAGIC_SERVER_THREAD_DATA,
+ MAGIC_SERVER_DATA,
+};
+
+struct ipc_server_reply_data {
+ enum magic magic;
+ int fd;
+ struct ipc_server_thread_data *server_thread_data;
+};
+
+struct ipc_server_thread_data {
+ enum magic magic;
+ struct ipc_server_thread_data *next_thread;
+ struct ipc_server_data *server_data;
+ pthread_t pthread_id;
+ HANDLE hPipe;
+};
+
+/*
+ * On Windows, the conceptual "ipc-server" is implemented as a pool of
+ * n identical/peer "server-thread" threads. That is, there is no
+ * hierarchy of threads; and therefore no controller thread managing
+ * the pool. Each thread has an independent handle to the named pipe,
+ * receives incoming connections, processes the client, and re-uses
+ * the pipe for the next client connection.
+ *
+ * Therefore, the "ipc-server" only needs to maintain a list of the
+ * spawned threads for eventual "join" purposes.
+ *
+ * A single "stop-event" is visible to all of the server threads to
+ * tell them to shut down (when idle).
+ */
+struct ipc_server_data {
+ enum magic magic;
+ ipc_server_application_cb *application_cb;
+ void *application_data;
+ struct strbuf buf_path;
+ wchar_t wpath[MAX_PATH];
+
+ HANDLE hEventStopRequested;
+ struct ipc_server_thread_data *thread_list;
+ int is_stopped;
+};
+
+enum connect_result {
+ CR_CONNECTED = 0,
+ CR_CONNECT_PENDING,
+ CR_CONNECT_ERROR,
+ CR_WAIT_ERROR,
+ CR_SHUTDOWN,
+};
+
+static enum connect_result queue_overlapped_connect(
+ struct ipc_server_thread_data *server_thread_data,
+ OVERLAPPED *lpo)
+{
+ if (ConnectNamedPipe(server_thread_data->hPipe, lpo))
+ goto failed;
+
+ switch (GetLastError()) {
+ case ERROR_IO_PENDING:
+ return CR_CONNECT_PENDING;
+
+ case ERROR_PIPE_CONNECTED:
+ SetEvent(lpo->hEvent);
+ return CR_CONNECTED;
+
+ default:
+ break;
+ }
+
+failed:
+ error(_("ConnectNamedPipe failed for '%s' (%lu)"),
+ server_thread_data->server_data->buf_path.buf,
+ GetLastError());
+ return CR_CONNECT_ERROR;
+}
+
+/*
+ * Use Windows Overlapped IO to wait for a connection or for our event
+ * to be signalled.
+ */
+static enum connect_result wait_for_connection(
+ struct ipc_server_thread_data *server_thread_data,
+ OVERLAPPED *lpo)
+{
+ enum connect_result r;
+ HANDLE waitHandles[2];
+ DWORD dwWaitResult;
+
+ r = queue_overlapped_connect(server_thread_data, lpo);
+ if (r != CR_CONNECT_PENDING)
+ return r;
+
+ waitHandles[0] = server_thread_data->server_data->hEventStopRequested;
+ waitHandles[1] = lpo->hEvent;
+
+ dwWaitResult = WaitForMultipleObjects(2, waitHandles, FALSE, INFINITE);
+ switch (dwWaitResult) {
+ case WAIT_OBJECT_0 + 0:
+ return CR_SHUTDOWN;
+
+ case WAIT_OBJECT_0 + 1:
+ ResetEvent(lpo->hEvent);
+ return CR_CONNECTED;
+
+ default:
+ return CR_WAIT_ERROR;
+ }
+}
+
+/*
+ * Forward declare our reply callback function so that any compiler
+ * errors are reported when we actually define the function (in addition
+ * to any errors reported when we try to pass this callback function as
+ * a parameter in a function call). The former are easier to understand.
+ */
+static ipc_server_reply_cb do_io_reply_callback;
+
+/*
+ * Relay application's response message to the client process.
+ * (We do not flush at this point because we allow the caller
+ * to chunk data to the client thru us.)
+ */
+static int do_io_reply_callback(struct ipc_server_reply_data *reply_data,
+ const char *response, size_t response_len)
+{
+ if (reply_data->magic != MAGIC_SERVER_REPLY_DATA)
+ BUG("reply_cb called with wrong instance data");
+
+ return write_packetized_from_buf_no_flush(response, response_len,
+ reply_data->fd);
+}
+
+/*
+ * Receive the request/command from the client and pass it to the
+ * registered request-callback. The request-callback will compose
+ * a response and call our reply-callback to send it to the client.
+ *
+ * Simple-IPC only contains one round trip, so we flush and close
+ * here after the response.
+ */
+static int do_io(struct ipc_server_thread_data *server_thread_data)
+{
+ struct strbuf buf = STRBUF_INIT;
+ struct ipc_server_reply_data reply_data;
+ int ret = 0;
+
+ reply_data.magic = MAGIC_SERVER_REPLY_DATA;
+ reply_data.server_thread_data = server_thread_data;
+
+ reply_data.fd = dup_fd_from_pipe(server_thread_data->hPipe);
+ if (reply_data.fd < 0)
+ return error(_("could not create fd from pipe for '%s'"),
+ server_thread_data->server_data->buf_path.buf);
+
+ ret = read_packetized_to_strbuf(
+ reply_data.fd, &buf,
+ PACKET_READ_GENTLE_ON_EOF | PACKET_READ_GENTLE_ON_READ_ERROR);
+ if (ret >= 0) {
+ ret = server_thread_data->server_data->application_cb(
+ server_thread_data->server_data->application_data,
+ buf.buf, do_io_reply_callback, &reply_data);
+
+ packet_flush_gently(reply_data.fd);
+
+ FlushFileBuffers((HANDLE)_get_osfhandle((reply_data.fd)));
+ }
+ else {
+ /*
+ * The client probably disconnected/shutdown before it
+ * could send a well-formed message. Ignore it.
+ */
+ }
+
+ strbuf_release(&buf);
+ close(reply_data.fd);
+
+ return ret;
+}
+
+/*
+ * Handle the IPC request and response with this connected client, and
+ * reset the pipe to prepare for the next client.
+ */
+static int use_connection(struct ipc_server_thread_data *server_thread_data)
+{
+ int ret;
+
+ ret = do_io(server_thread_data);
+
+ FlushFileBuffers(server_thread_data->hPipe);
+ DisconnectNamedPipe(server_thread_data->hPipe);
+
+ return ret;
+}
+
+/*
+ * Thread proc for an IPC server worker thread. It handles a series of
+ * connections from clients. It cleans and reuses the hPipe between
+ * clients.
+ */
+static void *server_thread_proc(void *_server_thread_data)
+{
+ struct ipc_server_thread_data *server_thread_data = _server_thread_data;
+ HANDLE hEventConnected = INVALID_HANDLE_VALUE;
+ OVERLAPPED oConnect;
+ enum connect_result cr;
+ int ret;
+
+ assert(server_thread_data->hPipe != INVALID_HANDLE_VALUE);
+
+ trace2_thread_start("ipc-server");
+ trace2_data_string("ipc-server", NULL, "pipe",
+ server_thread_data->server_data->buf_path.buf);
+
+ hEventConnected = CreateEventW(NULL, TRUE, FALSE, NULL);
+
+ memset(&oConnect, 0, sizeof(oConnect));
+ oConnect.hEvent = hEventConnected;
+
+ for (;;) {
+ cr = wait_for_connection(server_thread_data, &oConnect);
+
+ switch (cr) {
+ case CR_SHUTDOWN:
+ goto finished;
+
+ case CR_CONNECTED:
+ ret = use_connection(server_thread_data);
+ if (ret == SIMPLE_IPC_QUIT) {
+ ipc_server_stop_async(
+ server_thread_data->server_data);
+ goto finished;
+ }
+ if (ret > 0) {
+ /*
+ * Ignore (transient) IO errors with this
+ * client and reset for the next client.
+ */
+ }
+ break;
+
+ case CR_CONNECT_PENDING:
+ /* By construction, this should not happen. */
+ BUG("ipc-server[%s]: unexpected CR_CONNECT_PENDING",
+ server_thread_data->server_data->buf_path.buf);
+
+ case CR_CONNECT_ERROR:
+ case CR_WAIT_ERROR:
+ /*
+ * Ignore these theoretical errors.
+ */
+ DisconnectNamedPipe(server_thread_data->hPipe);
+ break;
+
+ default:
+ BUG("unhandled case after wait_for_connection");
+ }
+ }
+
+finished:
+ CloseHandle(server_thread_data->hPipe);
+ CloseHandle(hEventConnected);
+
+ trace2_thread_exit();
+ return NULL;
+}
+
+static HANDLE create_new_pipe(wchar_t *wpath, int is_first)
+{
+ HANDLE hPipe;
+ DWORD dwOpenMode, dwPipeMode;
+ LPSECURITY_ATTRIBUTES lpsa = NULL;
+
+ dwOpenMode = PIPE_ACCESS_INBOUND | PIPE_ACCESS_OUTBOUND |
+ FILE_FLAG_OVERLAPPED;
+
+ dwPipeMode = PIPE_TYPE_MESSAGE | PIPE_READMODE_BYTE | PIPE_WAIT |
+ PIPE_REJECT_REMOTE_CLIENTS;
+
+ if (is_first) {
+ dwOpenMode |= FILE_FLAG_FIRST_PIPE_INSTANCE;
+
+ /*
+ * On Windows, the first server pipe instance gets to
+ * set the ACL / Security Attributes on the named
+ * pipe; subsequent instances inherit and cannot
+ * change them.
+ *
+ * TODO Should we allow the application layer to
+ * specify security attributes, such as `LocalService`
+ * or `LocalSystem`, when we create the named pipe?
+ * This question is probably not important when the
+ * daemon is started by a foreground user process and
+ * only needs to talk to the current user, but may matter
+ * if the daemon is run via the Control Panel as a
+ * System Service.
+ */
+ }
+
+ hPipe = CreateNamedPipeW(wpath, dwOpenMode, dwPipeMode,
+ PIPE_UNLIMITED_INSTANCES, 1024, 1024, 0, lpsa);
+
+ return hPipe;
+}
+
+int ipc_server_run_async(struct ipc_server_data **returned_server_data,
+ const char *path, const struct ipc_server_opts *opts,
+ ipc_server_application_cb *application_cb,
+ void *application_data)
+{
+ struct ipc_server_data *server_data;
+ wchar_t wpath[MAX_PATH];
+ HANDLE hPipeFirst = INVALID_HANDLE_VALUE;
+ int k;
+ int ret = 0;
+ int nr_threads = opts->nr_threads;
+
+ *returned_server_data = NULL;
+
+ ret = initialize_pipe_name(path, wpath, ARRAY_SIZE(wpath));
+ if (ret < 0) {
+ errno = EINVAL;
+ return -1;
+ }
+
+ hPipeFirst = create_new_pipe(wpath, 1);
+ if (hPipeFirst == INVALID_HANDLE_VALUE) {
+ errno = EADDRINUSE;
+ return -2;
+ }
+
+ server_data = xcalloc(1, sizeof(*server_data));
+ server_data->magic = MAGIC_SERVER_DATA;
+ server_data->application_cb = application_cb;
+ server_data->application_data = application_data;
+ server_data->hEventStopRequested = CreateEvent(NULL, TRUE, FALSE, NULL);
+ strbuf_init(&server_data->buf_path, 0);
+ strbuf_addstr(&server_data->buf_path, path);
+ wcscpy(server_data->wpath, wpath);
+
+ if (nr_threads < 1)
+ nr_threads = 1;
+
+ for (k = 0; k < nr_threads; k++) {
+ struct ipc_server_thread_data *std;
+
+ std = xcalloc(1, sizeof(*std));
+ std->magic = MAGIC_SERVER_THREAD_DATA;
+ std->server_data = server_data;
+ std->hPipe = INVALID_HANDLE_VALUE;
+
+ std->hPipe = (k == 0)
+ ? hPipeFirst
+ : create_new_pipe(server_data->wpath, 0);
+
+ if (std->hPipe == INVALID_HANDLE_VALUE) {
+ /*
+ * If we've reached a pipe instance limit for
+ * this path, just use fewer threads.
+ */
+ free(std);
+ break;
+ }
+
+ if (pthread_create(&std->pthread_id, NULL,
+ server_thread_proc, std)) {
+ /*
+ * Likewise, if we're out of threads, just use
+ * fewer threads than requested.
+ *
+ * However, we just give up if we can't even get
+ * one thread. This should not happen.
+ */
+ if (k == 0)
+ die(_("could not start thread[0] for '%s'"),
+ path);
+
+ CloseHandle(std->hPipe);
+ free(std);
+ break;
+ }
+
+ std->next_thread = server_data->thread_list;
+ server_data->thread_list = std;
+ }
+
+ *returned_server_data = server_data;
+ return 0;
+}
+
+int ipc_server_stop_async(struct ipc_server_data *server_data)
+{
+ if (!server_data)
+ return 0;
+
+ /*
+ * Gently tell all of the ipc_server threads to shut down.
+ * This will be seen the next time they are idle (and waiting
+ * for a connection).
+ *
+ * We DO NOT attempt to force them to drop an active connection.
+ */
+ SetEvent(server_data->hEventStopRequested);
+ return 0;
+}
+
+int ipc_server_await(struct ipc_server_data *server_data)
+{
+ DWORD dwWaitResult;
+
+ if (!server_data)
+ return 0;
+
+ dwWaitResult = WaitForSingleObject(server_data->hEventStopRequested, INFINITE);
+ if (dwWaitResult != WAIT_OBJECT_0)
+ return error(_("wait for hEvent failed for '%s'"),
+ server_data->buf_path.buf);
+
+ while (server_data->thread_list) {
+ struct ipc_server_thread_data *std = server_data->thread_list;
+
+ pthread_join(std->pthread_id, NULL);
+
+ server_data->thread_list = std->next_thread;
+ free(std);
+ }
+
+ server_data->is_stopped = 1;
+
+ return 0;
+}
+
+void ipc_server_free(struct ipc_server_data *server_data)
+{
+ if (!server_data)
+ return;
+
+ if (!server_data->is_stopped)
+ BUG("cannot free ipc-server while running for '%s'",
+ server_data->buf_path.buf);
+
+ strbuf_release(&server_data->buf_path);
+
+ if (server_data->hEventStopRequested != INVALID_HANDLE_VALUE)
+ CloseHandle(server_data->hEventStopRequested);
+
+ while (server_data->thread_list) {
+ struct ipc_server_thread_data *std = server_data->thread_list;
+
+ server_data->thread_list = std->next_thread;
+ free(std);
+ }
+
+ free(server_data);
+}
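
The four functions above form the whole server lifecycle: ipc_server_run_async() starts the pipe threads, ipc_server_stop_async() signals hEventStopRequested, ipc_server_await() joins the threads, and ipc_server_free() releases the bookkeeping. Below is a minimal sketch of how a caller might drive that lifecycle, assuming the declarations in simple-ipc.h from this series; handle_request is a hypothetical application callback and the thread count is arbitrary.

/*
 * Illustrative sketch (not part of this patch): driving the async
 * server lifecycle. handle_request is a hypothetical callback whose
 * signature is the ipc_server_application_cb typedef in simple-ipc.h.
 */
#include "cache.h"
#include "simple-ipc.h"

static ipc_server_application_cb handle_request; /* defined elsewhere */

static int run_ipc_daemon(const char *path)
{
	struct ipc_server_opts opts = { .nr_threads = 4 };
	struct ipc_server_data *server = NULL;

	if (ipc_server_run_async(&server, path, &opts, handle_request, NULL))
		return error_errno(_("could not start ipc server on '%s'"), path);

	/*
	 * ... do other work; eventually some thread calls
	 * ipc_server_stop_async(server) to request shutdown ...
	 */

	ipc_server_await(server);	/* joins all pipe threads */
	ipc_server_free(server);
	return 0;
}
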
diff --git a/config.mak.uname b/config.mak.uname
index d204c20..cb443b4 100644
--- a/config.mak.uname
+++ b/config.mak.uname
@@ -424,6 +424,7 @@ ifeq ($(uname_S),Windows)
RUNTIME_PREFIX = YesPlease
HAVE_WPGMPTR = YesWeDo
NO_ST_BLOCKS_IN_STRUCT_STAT = YesPlease
+ USE_WIN32_IPC = YesPlease
USE_WIN32_MMAP = YesPlease
MMAP_PREVENTS_DELETE = UnfortunatelyYes
# USE_NED_ALLOCATOR = YesPlease
@@ -600,6 +601,7 @@ ifneq (,$(findstring MINGW,$(uname_S)))
RUNTIME_PREFIX = YesPlease
HAVE_WPGMPTR = YesWeDo
NO_ST_BLOCKS_IN_STRUCT_STAT = YesPlease
+ USE_WIN32_IPC = YesPlease
USE_WIN32_MMAP = YesPlease
MMAP_PREVENTS_DELETE = UnfortunatelyYes
USE_NED_ALLOCATOR = YesPlease
diff --git a/contrib/buildsystems/CMakeLists.txt b/contrib/buildsystems/CMakeLists.txt
index ac3dbc0..75ed198 100644
--- a/contrib/buildsystems/CMakeLists.txt
+++ b/contrib/buildsystems/CMakeLists.txt
@@ -58,6 +58,10 @@ if(WIN32)
# In the vcpkg edition, we need this to be able to link to libcurl
set(CURL_NO_CURL_CMAKE ON)
+
+ # Copy the necessary vcpkg DLLs (like iconv) to the install dir
+ set(X_VCPKG_APPLOCAL_DEPS_INSTALL ON)
+ set(CMAKE_TOOLCHAIN_FILE ${VCPKG_DIR}/scripts/buildsystems/vcpkg.cmake CACHE STRING "Vcpkg toolchain file")
endif()
find_program(SH_EXE sh PATHS "C:/Program Files/Git/bin")
@@ -243,7 +247,13 @@ if(CMAKE_SYSTEM_NAME STREQUAL "Windows")
elseif(CMAKE_SYSTEM_NAME STREQUAL "Linux")
add_compile_definitions(PROCFS_EXECUTABLE_PATH="/proc/self/exe" HAVE_DEV_TTY )
- list(APPEND compat_SOURCES unix-socket.c)
+ list(APPEND compat_SOURCES unix-socket.c unix-stream-server.c)
+endif()
+
+if(CMAKE_SYSTEM_NAME STREQUAL "Windows")
+ list(APPEND compat_SOURCES compat/simple-ipc/ipc-shared.c compat/simple-ipc/ipc-win32.c)
+else()
+ list(APPEND compat_SOURCES compat/simple-ipc/ipc-shared.c compat/simple-ipc/ipc-unix-socket.c)
endif()
set(EXE_EXTENSION ${CMAKE_EXECUTABLE_SUFFIX})
@@ -685,13 +695,17 @@ endif()
parse_makefile_for_executables(git_builtin_extra "BUILT_INS")
+option(SKIP_DASHED_BUILT_INS "Skip hardlinking the dashed versions of the built-ins")
+
#Creating hardlinks
+if(NOT SKIP_DASHED_BUILT_INS)
foreach(s ${git_SOURCES} ${git_builtin_extra})
string(REPLACE "${CMAKE_SOURCE_DIR}/builtin/" "" s ${s})
string(REPLACE ".c" "" s ${s})
file(APPEND ${CMAKE_BINARY_DIR}/CreateLinks.cmake "file(CREATE_LINK git${EXE_EXTENSION} git-${s}${EXE_EXTENSION})\n")
list(APPEND git_links ${CMAKE_BINARY_DIR}/git-${s}${EXE_EXTENSION})
endforeach()
+endif()
if(CURL_FOUND)
set(remote_exes
@@ -807,15 +821,19 @@ list(TRANSFORM git_shell_scripts PREPEND "${CMAKE_BINARY_DIR}/")
list(TRANSFORM git_perl_scripts PREPEND "${CMAKE_BINARY_DIR}/")
#install
-install(TARGETS git git-shell
+foreach(program ${PROGRAMS_BUILT})
+if(program STREQUAL "git" OR program STREQUAL "git-shell")
+install(TARGETS ${program}
RUNTIME DESTINATION bin)
+else()
+install(TARGETS ${program}
+ RUNTIME DESTINATION libexec/git-core)
+endif()
+endforeach()
+
install(PROGRAMS ${CMAKE_BINARY_DIR}/git-cvsserver
DESTINATION bin)
-list(REMOVE_ITEM PROGRAMS_BUILT git git-shell)
-install(TARGETS ${PROGRAMS_BUILT}
- RUNTIME DESTINATION libexec/git-core)
-
set(bin_links
git-receive-pack git-upload-archive git-upload-pack)
@@ -828,12 +846,12 @@ install(CODE "file(CREATE_LINK ${CMAKE_INSTALL_PREFIX}/bin/git-shell${EXE_EXTENS
foreach(b ${git_links})
string(REPLACE "${CMAKE_BINARY_DIR}" "" b ${b})
- install(CODE "file(CREATE_LINK ${CMAKE_INSTALL_PREFIX}/bin/git${EXE_EXTENSION} ${CMAKE_INSTALL_PREFIX}/libexec/git-core/${b}${EXE_EXTENSION})")
+ install(CODE "file(CREATE_LINK ${CMAKE_INSTALL_PREFIX}/bin/git${EXE_EXTENSION} ${CMAKE_INSTALL_PREFIX}/libexec/git-core/${b})")
endforeach()
foreach(b ${git_http_links})
string(REPLACE "${CMAKE_BINARY_DIR}" "" b ${b})
- install(CODE "file(CREATE_LINK ${CMAKE_INSTALL_PREFIX}/libexec/git-core/git-remote-http${EXE_EXTENSION} ${CMAKE_INSTALL_PREFIX}/libexec/git-core/${b}${EXE_EXTENSION})")
+ install(CODE "file(CREATE_LINK ${CMAKE_INSTALL_PREFIX}/libexec/git-core/git-remote-http${EXE_EXTENSION} ${CMAKE_INSTALL_PREFIX}/libexec/git-core/${b})")
endforeach()
install(PROGRAMS ${git_shell_scripts} ${git_perl_scripts} ${CMAKE_BINARY_DIR}/git-p4
diff --git a/convert.c b/convert.c
index 2d3a5a7..45ac75f 100644
--- a/convert.c
+++ b/convert.c
@@ -24,17 +24,6 @@
#define CONVERT_STAT_BITS_TXT_CRLF 0x2
#define CONVERT_STAT_BITS_BIN 0x4
-enum crlf_action {
- CRLF_UNDEFINED,
- CRLF_BINARY,
- CRLF_TEXT,
- CRLF_TEXT_INPUT,
- CRLF_TEXT_CRLF,
- CRLF_AUTO,
- CRLF_AUTO_INPUT,
- CRLF_AUTO_CRLF
-};
-
struct text_stat {
/* NUL, CR, LF and CRLF counts */
unsigned nul, lonecr, lonelf, crlf;
@@ -172,7 +161,7 @@ static int text_eol_is_crlf(void)
return 0;
}
-static enum eol output_eol(enum crlf_action crlf_action)
+static enum eol output_eol(enum convert_crlf_action crlf_action)
{
switch (crlf_action) {
case CRLF_BINARY:
@@ -246,7 +235,7 @@ static int has_crlf_in_index(const struct index_state *istate, const char *path)
}
static int will_convert_lf_to_crlf(struct text_stat *stats,
- enum crlf_action crlf_action)
+ enum convert_crlf_action crlf_action)
{
if (output_eol(crlf_action) != EOL_CRLF)
return 0;
@@ -499,7 +488,7 @@ static int encode_to_worktree(const char *path, const char *src, size_t src_len,
static int crlf_to_git(const struct index_state *istate,
const char *path, const char *src, size_t len,
struct strbuf *buf,
- enum crlf_action crlf_action, int conv_flags)
+ enum convert_crlf_action crlf_action, int conv_flags)
{
struct text_stat stats;
char *dst;
@@ -585,8 +574,8 @@ static int crlf_to_git(const struct index_state *istate,
return 1;
}
-static int crlf_to_worktree(const char *src, size_t len,
- struct strbuf *buf, enum crlf_action crlf_action)
+static int crlf_to_worktree(const char *src, size_t len, struct strbuf *buf,
+ enum convert_crlf_action crlf_action)
{
char *to_free = NULL;
struct text_stat stats;
@@ -884,9 +873,13 @@ static int apply_multi_file_filter(const char *path, const char *src, size_t len
goto done;
if (fd >= 0)
- err = write_packetized_from_fd(fd, process->in);
+ err = write_packetized_from_fd_no_flush(fd, process->in);
else
- err = write_packetized_from_buf(src, len, process->in);
+ err = write_packetized_from_buf_no_flush(src, len, process->in);
+ if (err)
+ goto done;
+
+ err = packet_flush_gently(process->in);
if (err)
goto done;
@@ -903,7 +896,8 @@ static int apply_multi_file_filter(const char *path, const char *src, size_t len
if (err)
goto done;
- err = read_packetized_to_strbuf(process->out, &nbuf) < 0;
+ err = read_packetized_to_strbuf(process->out, &nbuf,
+ PACKET_READ_GENTLE_ON_EOF) < 0;
if (err)
goto done;
@@ -1247,7 +1241,7 @@ static const char *git_path_check_encoding(struct attr_check_item *check)
return value;
}
-static enum crlf_action git_path_check_crlf(struct attr_check_item *check)
+static enum convert_crlf_action git_path_check_crlf(struct attr_check_item *check)
{
const char *value = check->value;
@@ -1297,18 +1291,10 @@ static int git_path_check_ident(struct attr_check_item *check)
return !!ATTR_TRUE(value);
}
-struct conv_attrs {
- struct convert_driver *drv;
- enum crlf_action attr_action; /* What attr says */
- enum crlf_action crlf_action; /* When no attr is set, use core.autocrlf */
- int ident;
- const char *working_tree_encoding; /* Supported encoding or default encoding if NULL */
-};
-
static struct attr_check *check;
-static void convert_attrs(const struct index_state *istate,
- struct conv_attrs *ca, const char *path)
+void convert_attrs(const struct index_state *istate,
+ struct conv_attrs *ca, const char *path)
{
struct attr_check_item *ccheck = NULL;
@@ -1465,19 +1451,16 @@ void convert_to_git_filter_fd(const struct index_state *istate,
ident_to_git(dst->buf, dst->len, dst, ca.ident);
}
-static int convert_to_working_tree_internal(const struct index_state *istate,
- const char *path, const char *src,
- size_t len, struct strbuf *dst,
- int normalizing,
- const struct checkout_metadata *meta,
- struct delayed_checkout *dco)
+static int convert_to_working_tree_ca_internal(const struct conv_attrs *ca,
+ const char *path, const char *src,
+ size_t len, struct strbuf *dst,
+ int normalizing,
+ const struct checkout_metadata *meta,
+ struct delayed_checkout *dco)
{
int ret = 0, ret_filter = 0;
- struct conv_attrs ca;
- convert_attrs(istate, &ca, path);
-
- ret |= ident_to_worktree(src, len, dst, ca.ident);
+ ret |= ident_to_worktree(src, len, dst, ca->ident);
if (ret) {
src = dst->buf;
len = dst->len;
@@ -1487,49 +1470,56 @@ static int convert_to_working_tree_internal(const struct index_state *istate,
* is a smudge or process filter (even if the process filter doesn't
* support smudge). The filters might expect CRLFs.
*/
- if ((ca.drv && (ca.drv->smudge || ca.drv->process)) || !normalizing) {
- ret |= crlf_to_worktree(src, len, dst, ca.crlf_action);
+ if ((ca->drv && (ca->drv->smudge || ca->drv->process)) || !normalizing) {
+ ret |= crlf_to_worktree(src, len, dst, ca->crlf_action);
if (ret) {
src = dst->buf;
len = dst->len;
}
}
- ret |= encode_to_worktree(path, src, len, dst, ca.working_tree_encoding);
+ ret |= encode_to_worktree(path, src, len, dst, ca->working_tree_encoding);
if (ret) {
src = dst->buf;
len = dst->len;
}
ret_filter = apply_filter(
- path, src, len, -1, dst, ca.drv, CAP_SMUDGE, meta, dco);
- if (!ret_filter && ca.drv && ca.drv->required)
- die(_("%s: smudge filter %s failed"), path, ca.drv->name);
+ path, src, len, -1, dst, ca->drv, CAP_SMUDGE, meta, dco);
+ if (!ret_filter && ca->drv && ca->drv->required)
+ die(_("%s: smudge filter %s failed"), path, ca->drv->name);
return ret | ret_filter;
}
-int async_convert_to_working_tree(const struct index_state *istate,
- const char *path, const char *src,
- size_t len, struct strbuf *dst,
- const struct checkout_metadata *meta,
- void *dco)
+int async_convert_to_working_tree_ca(const struct conv_attrs *ca,
+ const char *path, const char *src,
+ size_t len, struct strbuf *dst,
+ const struct checkout_metadata *meta,
+ void *dco)
{
- return convert_to_working_tree_internal(istate, path, src, len, dst, 0, meta, dco);
+ return convert_to_working_tree_ca_internal(ca, path, src, len, dst, 0,
+ meta, dco);
}
-int convert_to_working_tree(const struct index_state *istate,
- const char *path, const char *src,
- size_t len, struct strbuf *dst,
- const struct checkout_metadata *meta)
+int convert_to_working_tree_ca(const struct conv_attrs *ca,
+ const char *path, const char *src,
+ size_t len, struct strbuf *dst,
+ const struct checkout_metadata *meta)
{
- return convert_to_working_tree_internal(istate, path, src, len, dst, 0, meta, NULL);
+ return convert_to_working_tree_ca_internal(ca, path, src, len, dst, 0,
+ meta, NULL);
}
int renormalize_buffer(const struct index_state *istate, const char *path,
const char *src, size_t len, struct strbuf *dst)
{
- int ret = convert_to_working_tree_internal(istate, path, src, len, dst, 1, NULL, NULL);
+ struct conv_attrs ca;
+ int ret;
+
+ convert_attrs(istate, &ca, path);
+ ret = convert_to_working_tree_ca_internal(&ca, path, src, len, dst, 1,
+ NULL, NULL);
if (ret) {
src = dst->buf;
len = dst->len;
@@ -1956,34 +1946,25 @@ static struct stream_filter *ident_filter(const struct object_id *oid)
}
/*
- * Return an appropriately constructed filter for the path, or NULL if
+ * Return an appropriately constructed filter for the given ca, or NULL if
* the contents cannot be filtered without reading the whole thing
* in-core.
*
* Note that you would be crazy to set CRLF, smudge/clean or ident to a
* large binary blob you would want us not to slurp into the memory!
*/
-struct stream_filter *get_stream_filter(const struct index_state *istate,
- const char *path,
- const struct object_id *oid)
+struct stream_filter *get_stream_filter_ca(const struct conv_attrs *ca,
+ const struct object_id *oid)
{
- struct conv_attrs ca;
struct stream_filter *filter = NULL;
- convert_attrs(istate, &ca, path);
- if (ca.drv && (ca.drv->process || ca.drv->smudge || ca.drv->clean))
+ if (classify_conv_attrs(ca) != CA_CLASS_STREAMABLE)
return NULL;
- if (ca.working_tree_encoding)
- return NULL;
-
- if (ca.crlf_action == CRLF_AUTO || ca.crlf_action == CRLF_AUTO_CRLF)
- return NULL;
-
- if (ca.ident)
+ if (ca->ident)
filter = ident_filter(oid);
- if (output_eol(ca.crlf_action) == EOL_CRLF)
+ if (output_eol(ca->crlf_action) == EOL_CRLF)
filter = cascade_filter(filter, lf_to_crlf_filter());
else
filter = cascade_filter(filter, &null_filter_singleton);
@@ -1991,6 +1972,15 @@ struct stream_filter *get_stream_filter(const struct index_state *istate,
return filter;
}
+struct stream_filter *get_stream_filter(const struct index_state *istate,
+ const char *path,
+ const struct object_id *oid)
+{
+ struct conv_attrs ca;
+ convert_attrs(istate, &ca, path);
+ return get_stream_filter_ca(&ca, oid);
+}
+
void free_stream_filter(struct stream_filter *filter)
{
filter->vtbl->free(filter);
@@ -2024,3 +2014,21 @@ void clone_checkout_metadata(struct checkout_metadata *dst,
if (blob)
oidcpy(&dst->blob, blob);
}
+
+enum conv_attrs_classification classify_conv_attrs(const struct conv_attrs *ca)
+{
+ if (ca->drv) {
+ if (ca->drv->process)
+ return CA_CLASS_INCORE_PROCESS;
+ if (ca->drv->smudge || ca->drv->clean)
+ return CA_CLASS_INCORE_FILTER;
+ }
+
+ if (ca->working_tree_encoding)
+ return CA_CLASS_INCORE;
+
+ if (ca->crlf_action == CRLF_AUTO || ca->crlf_action == CRLF_AUTO_CRLF)
+ return CA_CLASS_INCORE;
+
+ return CA_CLASS_STREAMABLE;
+}
diff --git a/convert.h b/convert.h
index e29d102..43e567a 100644
--- a/convert.h
+++ b/convert.h
@@ -63,6 +63,30 @@ struct checkout_metadata {
struct object_id blob;
};
+enum convert_crlf_action {
+ CRLF_UNDEFINED,
+ CRLF_BINARY,
+ CRLF_TEXT,
+ CRLF_TEXT_INPUT,
+ CRLF_TEXT_CRLF,
+ CRLF_AUTO,
+ CRLF_AUTO_INPUT,
+ CRLF_AUTO_CRLF
+};
+
+struct convert_driver;
+
+struct conv_attrs {
+ struct convert_driver *drv;
+ enum convert_crlf_action attr_action; /* What attr says */
+ enum convert_crlf_action crlf_action; /* When no attr is set, use core.autocrlf */
+ int ident;
+ const char *working_tree_encoding; /* Supported encoding or default encoding if NULL */
+};
+
+void convert_attrs(const struct index_state *istate,
+ struct conv_attrs *ca, const char *path);
+
extern enum eol core_eol;
extern char *check_roundtrip_encoding;
const char *get_cached_convert_stats_ascii(const struct index_state *istate,
@@ -75,15 +99,34 @@ const char *get_convert_attr_ascii(const struct index_state *istate,
int convert_to_git(const struct index_state *istate,
const char *path, const char *src, size_t len,
struct strbuf *dst, int conv_flags);
-int convert_to_working_tree(const struct index_state *istate,
- const char *path, const char *src,
- size_t len, struct strbuf *dst,
- const struct checkout_metadata *meta);
-int async_convert_to_working_tree(const struct index_state *istate,
- const char *path, const char *src,
- size_t len, struct strbuf *dst,
- const struct checkout_metadata *meta,
- void *dco);
+int convert_to_working_tree_ca(const struct conv_attrs *ca,
+ const char *path, const char *src,
+ size_t len, struct strbuf *dst,
+ const struct checkout_metadata *meta);
+int async_convert_to_working_tree_ca(const struct conv_attrs *ca,
+ const char *path, const char *src,
+ size_t len, struct strbuf *dst,
+ const struct checkout_metadata *meta,
+ void *dco);
+static inline int convert_to_working_tree(const struct index_state *istate,
+ const char *path, const char *src,
+ size_t len, struct strbuf *dst,
+ const struct checkout_metadata *meta)
+{
+ struct conv_attrs ca;
+ convert_attrs(istate, &ca, path);
+ return convert_to_working_tree_ca(&ca, path, src, len, dst, meta);
+}
+static inline int async_convert_to_working_tree(const struct index_state *istate,
+ const char *path, const char *src,
+ size_t len, struct strbuf *dst,
+ const struct checkout_metadata *meta,
+ void *dco)
+{
+ struct conv_attrs ca;
+ convert_attrs(istate, &ca, path);
+ return async_convert_to_working_tree_ca(&ca, path, src, len, dst, meta, dco);
+}
int async_query_available_blobs(const char *cmd,
struct string_list *available_paths);
int renormalize_buffer(const struct index_state *istate,
@@ -136,6 +179,8 @@ struct stream_filter; /* opaque */
struct stream_filter *get_stream_filter(const struct index_state *istate,
const char *path,
const struct object_id *);
+struct stream_filter *get_stream_filter_ca(const struct conv_attrs *ca,
+ const struct object_id *oid);
void free_stream_filter(struct stream_filter *);
int is_null_stream_filter(struct stream_filter *);
@@ -155,4 +200,37 @@ int stream_filter(struct stream_filter *,
const char *input, size_t *isize_p,
char *output, size_t *osize_p);
+enum conv_attrs_classification {
+ /*
+ * The blob must be loaded into a buffer before it can be
+ * smudged. All smudging is done in-proc.
+ */
+ CA_CLASS_INCORE,
+
+ /*
+ * The blob must be loaded into a buffer, but uses a
+ * single-file driver filter, such as rot13.
+ */
+ CA_CLASS_INCORE_FILTER,
+
+ /*
+ * The blob must be loaded into a buffer, but uses a
+ * long-running driver process, such as LFS. This might or
+ * might not use delayed operations. (The important thing is
+ * that there is a single subordinate long-running process that
+ * handles all associated blobs and, in the case of delayed
+ * operations, may hold per-blob state.)
+ */
+ CA_CLASS_INCORE_PROCESS,
+
+ /*
+ * The blob can be streamed and smudged without needing to
+ * completely read it into a buffer.
+ */
+ CA_CLASS_STREAMABLE,
+};
+
+enum conv_attrs_classification classify_conv_attrs(
+ const struct conv_attrs *ca);
+
#endif /* CONVERT_H */
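
Taken together, the new header entries let a caller run convert_attrs() once, classify the result, and feed the same struct to the conversion entry points. A minimal sketch of that pattern follows; the path, blob and output strbuf are placeholders.

/*
 * Illustrative sketch (not part of this patch): load the conversion
 * attributes once, then reuse them for classification and for the
 * actual smudging instead of re-running the attribute lookup.
 */
#include "cache.h"
#include "convert.h"

static void smudge_in_core_if_needed(struct index_state *istate,
				     const char *path,
				     const char *blob, size_t len,
				     struct strbuf *out)
{
	struct conv_attrs ca;

	convert_attrs(istate, &ca, path);

	/* CA_CLASS_STREAMABLE entries could go through get_stream_filter_ca() */
	if (classify_conv_attrs(&ca) != CA_CLASS_STREAMABLE)
		convert_to_working_tree_ca(&ca, path, blob, len, out, NULL);
}
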
diff --git a/csum-file.c b/csum-file.c
index 0f35fa5..7510950 100644
--- a/csum-file.c
+++ b/csum-file.c
@@ -89,32 +89,35 @@ int finalize_hashfile(struct hashfile *f, unsigned char *result, unsigned int fl
void hashwrite(struct hashfile *f, const void *buf, unsigned int count)
{
while (count) {
- unsigned offset = f->offset;
- unsigned left = sizeof(f->buffer) - offset;
+ unsigned left = sizeof(f->buffer) - f->offset;
unsigned nr = count > left ? left : count;
- const void *data;
if (f->do_crc)
f->crc32 = crc32(f->crc32, buf, nr);
if (nr == sizeof(f->buffer)) {
- /* process full buffer directly without copy */
- data = buf;
+ /*
+ * Flush a full batch worth of data directly
+ * from the input, skipping the memcpy() to
+ * the hashfile's buffer. In this block,
+ * f->offset is necessarily zero.
+ */
+ the_hash_algo->update_fn(&f->ctx, buf, nr);
+ flush(f, buf, nr);
} else {
- memcpy(f->buffer + offset, buf, nr);
- data = f->buffer;
+ /*
+ * Copy to the hashfile's buffer, flushing only
+ * if it became full.
+ */
+ memcpy(f->buffer + f->offset, buf, nr);
+ f->offset += nr;
+ left -= nr;
+ if (!left)
+ hashflush(f);
}
count -= nr;
- offset += nr;
buf = (char *) buf + nr;
- left -= nr;
- if (!left) {
- the_hash_algo->update_fn(&f->ctx, data, offset);
- flush(f, data, offset);
- offset = 0;
- }
- f->offset = offset;
}
}
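
The rewritten loop separates two cases: a chunk that is exactly one buffer in size bypasses the staging buffer (it is hashed and flushed straight from the caller's memory), while smaller chunks are accumulated and flushed only once the buffer fills. The stand-alone sketch below illustrates the same two-path idea with a generic sink callback in place of the hashing and write that hashfile performs; the buffer size and names are arbitrary.

/*
 * Illustrative sketch (not part of this patch): the same two-path
 * buffering as hashwrite() above, in isolation. A full-buffer chunk
 * is handed to the sink directly; smaller chunks are staged and
 * flushed only when the staging buffer becomes full. (A real writer
 * also needs a finalize step for the last partial buffer.)
 */
#include <stddef.h>
#include <string.h>

struct toy_writer {
	char buffer[8192];
	size_t offset;
	void (*sink)(const void *buf, size_t len); /* e.g. hash + write(2) */
};

static void toy_write(struct toy_writer *w, const void *buf, size_t count)
{
	while (count) {
		size_t left = sizeof(w->buffer) - w->offset;
		size_t nr = count > left ? left : count;

		if (nr == sizeof(w->buffer)) {
			/* w->offset is necessarily zero here */
			w->sink(buf, nr);
		} else {
			memcpy(w->buffer + w->offset, buf, nr);
			w->offset += nr;
			if (w->offset == sizeof(w->buffer)) {
				w->sink(w->buffer, w->offset);
				w->offset = 0;
			}
		}
		count -= nr;
		buf = (const char *)buf + nr;
	}
}
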
diff --git a/daemon.c b/daemon.c
index 3435319..5c4cbad 100644
--- a/daemon.c
+++ b/daemon.c
@@ -566,14 +566,14 @@ static void parse_host_and_port(char *hostport, char **host,
/*
* Sanitize a string from the client so that it's OK to be inserted into a
- * filesystem path. Specifically, we disallow slashes, runs of "..", and
- * trailing and leading dots, which means that the client cannot escape
- * our base path via ".." traversal.
+ * filesystem path. Specifically, we disallow directory separators, runs
+ * of "..", and trailing and leading dots, which means that the client
+ * cannot escape our base path via ".." traversal.
*/
static void sanitize_client(struct strbuf *out, const char *in)
{
for (; *in; in++) {
- if (*in == '/')
+ if (is_dir_sep(*in))
continue;
if (*in == '.' && (!out->len || out->buf[out->len - 1] == '.'))
continue;
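
Since only the separator test changed, the effect is easiest to see on a Windows-style path. The self-contained sketch below mirrors the loop shown in this hunk (the trailing-dot handling in the unchanged remainder of the function is omitted), with a stand-in for is_dir_sep() that, like the Windows build, also matches backslash.

/*
 * Illustrative sketch (not part of this patch): the sanitizing loop
 * in isolation, with a plain buffer instead of a strbuf.
 */
#include <stdio.h>

/* stand-in for is_dir_sep(); the backslash case matters on Windows */
static int dir_sep(char c) { return c == '/' || c == '\\'; }

static void sanitize(char *out, const char *in)
{
	size_t len = 0;

	for (; *in; in++) {
		if (dir_sep(*in))
			continue;
		if (*in == '.' && (!len || out[len - 1] == '.'))
			continue;
		out[len++] = *in;
	}
	out[len] = '\0';
}

int main(void)
{
	char buf[64];

	sanitize(buf, "foo\\..\\bar");
	printf("%s\n", buf); /* "foo.bar"; the old '/'-only test kept "foo\.\bar" */
	return 0;
}
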
diff --git a/diffcore-rename.c b/diffcore-rename.c
index e2ed648..36a98f9 100644
--- a/diffcore-rename.c
+++ b/diffcore-rename.c
@@ -527,6 +527,7 @@ static void update_dir_rename_counts(struct dir_rename_info *info,
}
static void initialize_dir_rename_info(struct dir_rename_info *info,
+ struct strset *relevant_sources,
struct strset *dirs_removed,
struct strmap *dir_rename_count)
{
@@ -534,7 +535,7 @@ static void initialize_dir_rename_info(struct dir_rename_info *info,
struct strmap_entry *entry;
int i;
- if (!dirs_removed) {
+ if (!dirs_removed && !relevant_sources) {
info->setup = 0;
return;
}
@@ -549,7 +550,20 @@ static void initialize_dir_rename_info(struct dir_rename_info *info,
strmap_init_with_options(&info->dir_rename_guess, NULL, 0);
/* Setup info->relevant_source_dirs */
- info->relevant_source_dirs = dirs_removed;
+ info->relevant_source_dirs = NULL;
+ if (dirs_removed || !relevant_sources) {
+ info->relevant_source_dirs = dirs_removed; /* might be NULL */
+ } else {
+ info->relevant_source_dirs = xmalloc(sizeof(struct strintmap));
+ strset_init(info->relevant_source_dirs);
+ strset_for_each_entry(relevant_sources, &iter, entry) {
+ char *dirname = get_dirname(entry->key);
+ if (!dirs_removed ||
+ strset_contains(dirs_removed, dirname))
+ strset_add(info->relevant_source_dirs, dirname);
+ free(dirname);
+ }
+ }
/*
* Loop setting up both info->idx_map, and doing setup of
@@ -627,6 +641,13 @@ static void cleanup_dir_rename_info(struct dir_rename_info *info,
/* dir_rename_guess */
strmap_clear(&info->dir_rename_guess, 1);
+ /* relevant_source_dirs */
+ if (info->relevant_source_dirs &&
+ info->relevant_source_dirs != dirs_removed) {
+ strset_clear(info->relevant_source_dirs);
+ FREE_AND_NULL(info->relevant_source_dirs);
+ }
+
/* dir_rename_count */
if (!keep_dir_rename_count) {
partial_clear_dir_rename_count(info->dir_rename_count);
@@ -749,6 +770,7 @@ static int idx_possible_rename(char *filename, struct dir_rename_info *info)
static int find_basename_matches(struct diff_options *options,
int minimum_score,
struct dir_rename_info *info,
+ struct strset *relevant_sources,
struct strset *dirs_removed)
{
/*
@@ -839,6 +861,11 @@ static int find_basename_matches(struct diff_options *options,
intptr_t src_index;
intptr_t dst_index;
+ /* Skip irrelevant sources */
+ if (relevant_sources &&
+ !strset_contains(relevant_sources, filename))
+ continue;
+
/*
* If the basename is unique among remaining sources, then
* src_index will equal 'i' and we can attempt to match it
@@ -991,11 +1018,12 @@ static int find_renames(struct diff_score *mx,
return count;
}
-static void remove_unneeded_paths_from_src(int detecting_copies)
+static void remove_unneeded_paths_from_src(int detecting_copies,
+ struct strset *interesting)
{
int i, new_num_src;
- if (detecting_copies)
+ if (detecting_copies && !interesting)
return; /* nothing to remove */
if (break_idx)
return; /* culling incompatible with break detection */
@@ -1022,12 +1050,18 @@ static void remove_unneeded_paths_from_src(int detecting_copies)
* from rename_src here.
*/
for (i = 0, new_num_src = 0; i < rename_src_nr; i++) {
+ struct diff_filespec *one = rename_src[i].p->one;
+
/*
* renames are stored in rename_dst, so if a rename has
* already been detected using this source, we can just
* remove the source knowing rename_dst has its info.
*/
- if (rename_src[i].p->one->rename_used)
+ if (!detecting_copies && one->rename_used)
+ continue;
+
+ /* If we don't care about the source path, skip it */
+ if (interesting && !strset_contains(interesting, one->path))
continue;
if (new_num_src < i)
@@ -1040,6 +1074,7 @@ static void remove_unneeded_paths_from_src(int detecting_copies)
}
void diffcore_rename_extended(struct diff_options *options,
+ struct strset *relevant_sources,
struct strset *dirs_removed,
struct strmap *dir_rename_count)
{
@@ -1060,6 +1095,8 @@ void diffcore_rename_extended(struct diff_options *options,
want_copies = (detect_rename == DIFF_DETECT_COPY);
if (dirs_removed && (break_idx || want_copies))
BUG("dirs_removed incompatible with break/copy detection");
+ if (break_idx && relevant_sources)
+ BUG("break detection incompatible with source specification");
if (!minimum_score)
minimum_score = DEFAULT_RENAME_SCORE;
@@ -1127,9 +1164,10 @@ void diffcore_rename_extended(struct diff_options *options,
/*
* Cull sources:
* - remove ones corresponding to exact renames
+ * - remove ones not found in relevant_sources
*/
trace2_region_enter("diff", "cull after exact", options->repo);
- remove_unneeded_paths_from_src(want_copies);
+ remove_unneeded_paths_from_src(want_copies, relevant_sources);
trace2_region_leave("diff", "cull after exact", options->repo);
} else {
/* Determine minimum score to match basenames */
@@ -1148,12 +1186,12 @@ void diffcore_rename_extended(struct diff_options *options,
* - remove ones involved in renames (found via exact match)
*/
trace2_region_enter("diff", "cull after exact", options->repo);
- remove_unneeded_paths_from_src(want_copies);
+ remove_unneeded_paths_from_src(want_copies, NULL);
trace2_region_leave("diff", "cull after exact", options->repo);
/* Preparation for basename-driven matching. */
trace2_region_enter("diff", "dir rename setup", options->repo);
- initialize_dir_rename_info(&info,
+ initialize_dir_rename_info(&info, relevant_sources,
dirs_removed, dir_rename_count);
trace2_region_leave("diff", "dir rename setup", options->repo);
@@ -1161,15 +1199,18 @@ void diffcore_rename_extended(struct diff_options *options,
trace2_region_enter("diff", "basename matches", options->repo);
rename_count += find_basename_matches(options,
min_basename_score,
- &info, dirs_removed);
+ &info,
+ relevant_sources,
+ dirs_removed);
trace2_region_leave("diff", "basename matches", options->repo);
/*
* Cull sources, again:
* - remove ones involved in renames (found via basenames)
+ * - remove ones not found in relevant_sources
*/
trace2_region_enter("diff", "cull basename", options->repo);
- remove_unneeded_paths_from_src(want_copies);
+ remove_unneeded_paths_from_src(want_copies, relevant_sources);
trace2_region_leave("diff", "cull basename", options->repo);
}
@@ -1341,5 +1382,5 @@ void diffcore_rename_extended(struct diff_options *options,
void diffcore_rename(struct diff_options *options)
{
- diffcore_rename_extended(options, NULL, NULL);
+ diffcore_rename_extended(options, NULL, NULL, NULL);
}
diff --git a/diffcore.h b/diffcore.h
index b9a230a..d76982f 100644
--- a/diffcore.h
+++ b/diffcore.h
@@ -166,6 +166,7 @@ void partial_clear_dir_rename_count(struct strmap *dir_rename_count);
void diffcore_break(struct repository *, int);
void diffcore_rename(struct diff_options *);
void diffcore_rename_extended(struct diff_options *options,
+ struct strset *relevant_sources,
struct strset *dirs_removed,
struct strmap *dir_rename_count);
void diffcore_merge_broken(void);
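
The extra parameter threads a caller-provided set of still-interesting deleted paths through the culling and basename-matching stages. Below is a minimal sketch of a call site, assuming the strset helpers from strmap.h; the path added to the set is purely illustrative (merge-ort fills it with the deleted paths for which rename detection is still needed).

/*
 * Illustrative sketch (not part of this patch): restrict inexact
 * rename detection to an explicit set of deleted source paths.
 */
#include "cache.h"
#include "diff.h"
#include "diffcore.h"
#include "strmap.h"

static void detect_relevant_renames(struct diff_options *opts)
{
	struct strset relevant_sources;

	strset_init(&relevant_sources);
	strset_add(&relevant_sources, "src/old-name.c"); /* illustrative path */

	/* NULL dirs_removed / dir_rename_count: no directory-rename work */
	diffcore_rename_extended(opts, &relevant_sources, NULL, NULL);

	strset_clear(&relevant_sources);
}
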
diff --git a/entry.c b/entry.c
index 33ce80c..2dc94ba 100644
--- a/entry.c
+++ b/entry.c
@@ -6,6 +6,7 @@
#include "submodule.h"
#include "progress.h"
#include "fsmonitor.h"
+#include "entry.h"
static void create_directories(const char *path, int path_len,
const struct checkout *state)
@@ -83,7 +84,7 @@ static int create_file(const char *path, unsigned int mode)
return open(path, O_WRONLY | O_CREAT | O_EXCL, mode);
}
-static void *read_blob_entry(const struct cache_entry *ce, unsigned long *size)
+void *read_blob_entry(const struct cache_entry *ce, unsigned long *size)
{
enum object_type type;
void *blob_data = read_object_file(&ce->oid, &type, size);
@@ -108,7 +109,7 @@ static int open_output_fd(char *path, const struct cache_entry *ce, int to_tempf
}
}
-static int fstat_output(int fd, const struct checkout *state, struct stat *st)
+int fstat_checkout_output(int fd, const struct checkout *state, struct stat *st)
{
/* use fstat() only when path == ce->name */
if (fstat_is_reliable() &&
@@ -131,7 +132,7 @@ static int streaming_write_entry(const struct cache_entry *ce, char *path,
return -1;
result |= stream_blob_to_fd(fd, &ce->oid, filter, 1);
- *fstat_done = fstat_output(fd, state, statbuf);
+ *fstat_done = fstat_checkout_output(fd, state, statbuf);
result |= close(fd);
if (result)
@@ -250,8 +251,21 @@ int finish_delayed_checkout(struct checkout *state, int *nr_checkouts)
return errs;
}
-static int write_entry(struct cache_entry *ce,
- char *path, const struct checkout *state, int to_tempfile)
+void update_ce_after_write(const struct checkout *state, struct cache_entry *ce,
+ struct stat *st)
+{
+ if (state->refresh_cache) {
+ assert(state->istate);
+ fill_stat_cache_info(state->istate, ce, st);
+ ce->ce_flags |= CE_UPDATE_IN_BASE;
+ mark_fsmonitor_invalid(state->istate, ce);
+ state->istate->cache_changed |= CE_ENTRY_CHANGED;
+ }
+}
+
+/* Note: ca is used (and required) iff the entry refers to a regular file. */
+static int write_entry(struct cache_entry *ce, char *path, struct conv_attrs *ca,
+ const struct checkout *state, int to_tempfile)
{
unsigned int ce_mode_s_ifmt = ce->ce_mode & S_IFMT;
struct delayed_checkout *dco = state->delayed_checkout;
@@ -268,8 +282,7 @@ static int write_entry(struct cache_entry *ce,
clone_checkout_metadata(&meta, &state->meta, &ce->oid);
if (ce_mode_s_ifmt == S_IFREG) {
- struct stream_filter *filter = get_stream_filter(state->istate, ce->name,
- &ce->oid);
+ struct stream_filter *filter = get_stream_filter_ca(ca, &ce->oid);
if (filter &&
!streaming_write_entry(ce, path, filter,
state, to_tempfile,
@@ -316,14 +329,17 @@ static int write_entry(struct cache_entry *ce,
* Convert from git internal format to working tree format
*/
if (dco && dco->state != CE_NO_DELAY) {
- ret = async_convert_to_working_tree(state->istate, ce->name, new_blob,
- size, &buf, &meta, dco);
+ ret = async_convert_to_working_tree_ca(ca, ce->name,
+ new_blob, size,
+ &buf, &meta, dco);
if (ret && string_list_has_string(&dco->paths, ce->name)) {
free(new_blob);
goto delayed;
}
- } else
- ret = convert_to_working_tree(state->istate, ce->name, new_blob, size, &buf, &meta);
+ } else {
+ ret = convert_to_working_tree_ca(ca, ce->name, new_blob,
+ size, &buf, &meta);
+ }
if (ret) {
free(new_blob);
@@ -345,7 +361,7 @@ static int write_entry(struct cache_entry *ce,
wrote = write_in_full(fd, new_blob, size);
if (!to_tempfile)
- fstat_done = fstat_output(fd, state, &st);
+ fstat_done = fstat_checkout_output(fd, state, &st);
close(fd);
free(new_blob);
if (wrote < 0)
@@ -370,15 +386,10 @@ static int write_entry(struct cache_entry *ce,
finish:
if (state->refresh_cache) {
- assert(state->istate);
- if (!fstat_done)
- if (lstat(ce->name, &st) < 0)
- return error_errno("unable to stat just-written file %s",
- ce->name);
- fill_stat_cache_info(state->istate, ce, &st);
- ce->ce_flags |= CE_UPDATE_IN_BASE;
- mark_fsmonitor_invalid(state->istate, ce);
- state->istate->cache_changed |= CE_ENTRY_CHANGED;
+ if (!fstat_done && lstat(ce->name, &st) < 0)
+ return error_errno("unable to stat just-written file %s",
+ ce->name);
+ update_ce_after_write(state, ce, &st);
}
delayed:
return 0;
@@ -429,19 +440,13 @@ static void mark_colliding_entries(const struct checkout *state,
}
}
-/*
- * Write the contents from ce out to the working tree.
- *
- * When topath[] is not NULL, instead of writing to the working tree
- * file named by ce, a temporary file is created by this function and
- * its name is returned in topath[], which must be able to hold at
- * least TEMPORARY_FILENAME_LENGTH bytes long.
- */
-int checkout_entry(struct cache_entry *ce, const struct checkout *state,
- char *topath, int *nr_checkouts)
+int checkout_entry_ca(struct cache_entry *ce, struct conv_attrs *ca,
+ const struct checkout *state, char *topath,
+ int *nr_checkouts)
{
static struct strbuf path = STRBUF_INIT;
struct stat st;
+ struct conv_attrs ca_buf;
if (ce->ce_flags & CE_WT_REMOVE) {
if (topath)
@@ -454,8 +459,13 @@ int checkout_entry(struct cache_entry *ce, const struct checkout *state,
return 0;
}
- if (topath)
- return write_entry(ce, topath, state, 1);
+ if (topath) {
+ if (S_ISREG(ce->ce_mode) && !ca) {
+ convert_attrs(state->istate, &ca_buf, ce->name);
+ ca = &ca_buf;
+ }
+ return write_entry(ce, topath, ca, state, 1);
+ }
strbuf_reset(&path);
strbuf_add(&path, state->base_dir, state->base_dir_len);
@@ -517,9 +527,16 @@ int checkout_entry(struct cache_entry *ce, const struct checkout *state,
return 0;
create_directories(path.buf, path.len, state);
+
if (nr_checkouts)
(*nr_checkouts)++;
- return write_entry(ce, path.buf, state, 0);
+
+ if (S_ISREG(ce->ce_mode) && !ca) {
+ convert_attrs(state->istate, &ca_buf, ce->name);
+ ca = &ca_buf;
+ }
+
+ return write_entry(ce, path.buf, ca, state, 0);
}
void unlink_entry(const struct cache_entry *ce)
diff --git a/entry.h b/entry.h
new file mode 100644
index 0000000..b8c0e17
--- /dev/null
+++ b/entry.h
@@ -0,0 +1,59 @@
+#ifndef ENTRY_H
+#define ENTRY_H
+
+#include "cache.h"
+#include "convert.h"
+
+struct checkout {
+ struct index_state *istate;
+ const char *base_dir;
+ int base_dir_len;
+ struct delayed_checkout *delayed_checkout;
+ struct checkout_metadata meta;
+ unsigned force:1,
+ quiet:1,
+ not_new:1,
+ clone:1,
+ refresh_cache:1;
+};
+#define CHECKOUT_INIT { NULL, "" }
+
+#define TEMPORARY_FILENAME_LENGTH 25
+/*
+ * Write the contents from ce out to the working tree.
+ *
+ * When topath[] is not NULL, instead of writing to the working tree
+ * file named by ce, a temporary file is created by this function and
+ * its name is returned in topath[], which must be able to hold at
+ * least TEMPORARY_FILENAME_LENGTH bytes.
+ *
+ * With checkout_entry_ca(), callers can optionally pass a preloaded
+ * conv_attrs struct (to avoid reloading it), when ce refers to a
+ * regular file. If ca is NULL, the attributes will be loaded
+ * internally when (and if) needed.
+ */
+int checkout_entry_ca(struct cache_entry *ce, struct conv_attrs *ca,
+ const struct checkout *state, char *topath,
+ int *nr_checkouts);
+static inline int checkout_entry(struct cache_entry *ce,
+ const struct checkout *state, char *topath,
+ int *nr_checkouts)
+{
+ return checkout_entry_ca(ce, NULL, state, topath, nr_checkouts);
+}
+
+void enable_delayed_checkout(struct checkout *state);
+int finish_delayed_checkout(struct checkout *state, int *nr_checkouts);
+
+/*
+ * Unlink the last component and schedule the leading directories for
+ * removal, such that empty directories get removed.
+ */
+void unlink_entry(const struct cache_entry *ce);
+
+void *read_blob_entry(const struct cache_entry *ce, unsigned long *size);
+int fstat_checkout_output(int fd, const struct checkout *state, struct stat *st);
+void update_ce_after_write(const struct checkout *state, struct cache_entry *ce,
+ struct stat *st);
+
+#endif /* ENTRY_H */
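
A minimal sketch of the pattern checkout_entry_ca() enables: the caller loads the attributes once (for example to classify the entry first) and hands the same struct to the checkout, so write_entry() does not have to query them again. The classification comment is a placeholder for what a parallel-checkout caller might do.

/*
 * Illustrative sketch (not part of this patch): check out one cache
 * entry while reusing conversion attributes the caller already loaded.
 */
#include "cache.h"
#include "convert.h"
#include "entry.h"

static int checkout_one_entry(struct index_state *istate,
			      struct cache_entry *ce)
{
	struct checkout state = CHECKOUT_INIT;
	struct conv_attrs ca;

	state.istate = istate;

	convert_attrs(istate, &ca, ce->name);
	/*
	 * A parallel-checkout caller could use classify_conv_attrs(&ca)
	 * here to decide whether to defer the entry; we check it out
	 * directly.
	 */

	/* only pass ca for regular files, as checkout_entry_ca() expects */
	return checkout_entry_ca(ce, S_ISREG(ce->ce_mode) ? &ca : NULL,
				 &state, NULL, NULL);
}
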
diff --git a/fetch-pack.c b/fetch-pack.c
index fb04a76..6e68276 100644
--- a/fetch-pack.c
+++ b/fetch-pack.c
@@ -38,6 +38,7 @@ static int server_supports_filtering;
static int advertise_sid;
static struct shallow_lock shallow_lock;
static const char *alternate_shallow_file;
+static struct fsck_options fsck_options = FSCK_OPTIONS_MISSING_GITMODULES;
static struct strbuf fsck_msg_types = STRBUF_INIT;
static struct string_list uri_protocols = STRING_LIST_INIT_DUP;
@@ -987,22 +988,6 @@ static int cmp_ref_by_name(const void *a_, const void *b_)
return strcmp(a->name, b->name);
}
-static void fsck_gitmodules_oids(struct oidset *gitmodules_oids)
-{
- struct oidset_iter iter;
- const struct object_id *oid;
- struct fsck_options fo = FSCK_OPTIONS_STRICT;
-
- if (!oidset_size(gitmodules_oids))
- return;
-
- oidset_iter_init(gitmodules_oids, &iter);
- while ((oid = oidset_iter_next(&iter)))
- register_found_gitmodules(oid);
- if (fsck_finish(&fo))
- die("fsck failed");
-}
-
static struct ref *do_fetch_pack(struct fetch_pack_args *args,
int fd[2],
const struct ref *orig_ref,
@@ -1017,7 +1002,6 @@ static struct ref *do_fetch_pack(struct fetch_pack_args *args,
int agent_len;
struct fetch_negotiator negotiator_alloc;
struct fetch_negotiator *negotiator;
- struct oidset gitmodules_oids = OIDSET_INIT;
negotiator = &negotiator_alloc;
fetch_negotiator_init(r, negotiator);
@@ -1129,14 +1113,17 @@ static struct ref *do_fetch_pack(struct fetch_pack_args *args,
if (args->deepen)
setup_alternate_shallow(&shallow_lock, &alternate_shallow_file,
NULL);
- else if (si->nr_ours || si->nr_theirs)
+ else if (si->nr_ours || si->nr_theirs) {
+ if (args->reject_shallow_remote)
+ die(_("source repository is shallow, reject to clone."));
alternate_shallow_file = setup_temporary_shallow(si->shallow);
- else
+ } else
alternate_shallow_file = NULL;
if (get_pack(args, fd, pack_lockfiles, NULL, sought, nr_sought,
- &gitmodules_oids))
+ &fsck_options.gitmodules_found))
die(_("git fetch-pack: fetch failed."));
- fsck_gitmodules_oids(&gitmodules_oids);
+ if (fsck_finish(&fsck_options))
+ die("fsck failed");
all_done:
if (negotiator)
@@ -1498,10 +1485,12 @@ static void receive_shallow_info(struct fetch_pack_args *args,
* rejected (unless --update-shallow is set); do the same.
*/
prepare_shallow_info(si, shallows);
- if (si->nr_ours || si->nr_theirs)
+ if (si->nr_ours || si->nr_theirs) {
+ if (args->reject_shallow_remote)
+ die(_("source repository is shallow, reject to clone."));
alternate_shallow_file =
setup_temporary_shallow(si->shallow);
- else
+ } else
alternate_shallow_file = NULL;
} else {
alternate_shallow_file = NULL;
@@ -1587,7 +1576,6 @@ static struct ref *do_fetch_pack_v2(struct fetch_pack_args *args,
struct string_list packfile_uris = STRING_LIST_INIT_DUP;
int i;
struct strvec index_pack_args = STRVEC_INIT;
- struct oidset gitmodules_oids = OIDSET_INIT;
negotiator = &negotiator_alloc;
fetch_negotiator_init(r, negotiator);
@@ -1678,7 +1666,7 @@ static struct ref *do_fetch_pack_v2(struct fetch_pack_args *args,
process_section_header(&reader, "packfile", 0);
if (get_pack(args, fd, pack_lockfiles,
packfile_uris.nr ? &index_pack_args : NULL,
- sought, nr_sought, &gitmodules_oids))
+ sought, nr_sought, &fsck_options.gitmodules_found))
die(_("git fetch-pack: fetch failed."));
do_check_stateless_delimiter(args, &reader);
@@ -1721,7 +1709,7 @@ static struct ref *do_fetch_pack_v2(struct fetch_pack_args *args,
packname[the_hash_algo->hexsz] = '\0';
- parse_gitmodules_oids(cmd.out, &gitmodules_oids);
+ parse_gitmodules_oids(cmd.out, &fsck_options.gitmodules_found);
close(cmd.out);
@@ -1742,7 +1730,8 @@ static struct ref *do_fetch_pack_v2(struct fetch_pack_args *args,
string_list_clear(&packfile_uris, 0);
strvec_clear(&index_pack_args);
- fsck_gitmodules_oids(&gitmodules_oids);
+ if (fsck_finish(&fsck_options))
+ die("fsck failed");
if (negotiator)
negotiator->release(negotiator);
diff --git a/fetch-pack.h b/fetch-pack.h
index 736a3da..f114d72 100644
--- a/fetch-pack.h
+++ b/fetch-pack.h
@@ -39,6 +39,7 @@ struct fetch_pack_args {
unsigned self_contained_and_connected:1;
unsigned cloning:1;
unsigned update_shallow:1;
+ unsigned reject_shallow_remote:1;
unsigned deepen:1;
/*
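
The net effect in fetch-pack is that it no longer carries its own oidset of candidate .gitmodules blobs: it inserts them into the fsck_options instance and lets fsck_finish() do the verification. Below is a minimal sketch of that pattern in isolation; the array of object ids is hypothetical.

/*
 * Illustrative sketch (not part of this patch): collect candidate
 * .gitmodules blob ids directly in the fsck_options instance, then
 * let fsck_finish() verify them.
 */
#include "cache.h"
#include "fsck.h"

static void check_fetched_gitmodules(const struct object_id *oids, size_t nr)
{
	struct fsck_options fo = FSCK_OPTIONS_MISSING_GITMODULES;
	size_t i;

	for (i = 0; i < nr; i++)
		oidset_insert(&fo.gitmodules_found, &oids[i]);

	if (fsck_finish(&fo))
		die("fsck failed");
}
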
diff --git a/fsck.c b/fsck.c
index e3030f3..f5ed6a2 100644
--- a/fsck.c
+++ b/fsck.c
@@ -19,90 +19,19 @@
#include "credential.h"
#include "help.h"
-static struct oidset gitmodules_found = OIDSET_INIT;
-static struct oidset gitmodules_done = OIDSET_INIT;
-
-#define FSCK_FATAL -1
-#define FSCK_INFO -2
-
-#define FOREACH_MSG_ID(FUNC) \
- /* fatal errors */ \
- FUNC(NUL_IN_HEADER, FATAL) \
- FUNC(UNTERMINATED_HEADER, FATAL) \
- /* errors */ \
- FUNC(BAD_DATE, ERROR) \
- FUNC(BAD_DATE_OVERFLOW, ERROR) \
- FUNC(BAD_EMAIL, ERROR) \
- FUNC(BAD_NAME, ERROR) \
- FUNC(BAD_OBJECT_SHA1, ERROR) \
- FUNC(BAD_PARENT_SHA1, ERROR) \
- FUNC(BAD_TAG_OBJECT, ERROR) \
- FUNC(BAD_TIMEZONE, ERROR) \
- FUNC(BAD_TREE, ERROR) \
- FUNC(BAD_TREE_SHA1, ERROR) \
- FUNC(BAD_TYPE, ERROR) \
- FUNC(DUPLICATE_ENTRIES, ERROR) \
- FUNC(MISSING_AUTHOR, ERROR) \
- FUNC(MISSING_COMMITTER, ERROR) \
- FUNC(MISSING_EMAIL, ERROR) \
- FUNC(MISSING_NAME_BEFORE_EMAIL, ERROR) \
- FUNC(MISSING_OBJECT, ERROR) \
- FUNC(MISSING_SPACE_BEFORE_DATE, ERROR) \
- FUNC(MISSING_SPACE_BEFORE_EMAIL, ERROR) \
- FUNC(MISSING_TAG, ERROR) \
- FUNC(MISSING_TAG_ENTRY, ERROR) \
- FUNC(MISSING_TREE, ERROR) \
- FUNC(MISSING_TREE_OBJECT, ERROR) \
- FUNC(MISSING_TYPE, ERROR) \
- FUNC(MISSING_TYPE_ENTRY, ERROR) \
- FUNC(MULTIPLE_AUTHORS, ERROR) \
- FUNC(TREE_NOT_SORTED, ERROR) \
- FUNC(UNKNOWN_TYPE, ERROR) \
- FUNC(ZERO_PADDED_DATE, ERROR) \
- FUNC(GITMODULES_MISSING, ERROR) \
- FUNC(GITMODULES_BLOB, ERROR) \
- FUNC(GITMODULES_LARGE, ERROR) \
- FUNC(GITMODULES_NAME, ERROR) \
- FUNC(GITMODULES_SYMLINK, ERROR) \
- FUNC(GITMODULES_URL, ERROR) \
- FUNC(GITMODULES_PATH, ERROR) \
- FUNC(GITMODULES_UPDATE, ERROR) \
- /* warnings */ \
- FUNC(BAD_FILEMODE, WARN) \
- FUNC(EMPTY_NAME, WARN) \
- FUNC(FULL_PATHNAME, WARN) \
- FUNC(HAS_DOT, WARN) \
- FUNC(HAS_DOTDOT, WARN) \
- FUNC(HAS_DOTGIT, WARN) \
- FUNC(NULL_SHA1, WARN) \
- FUNC(ZERO_PADDED_FILEMODE, WARN) \
- FUNC(NUL_IN_COMMIT, WARN) \
- /* infos (reported as warnings, but ignored by default) */ \
- FUNC(GITMODULES_PARSE, INFO) \
- FUNC(BAD_TAG_NAME, INFO) \
- FUNC(MISSING_TAGGER_ENTRY, INFO) \
- /* ignored (elevated when requested) */ \
- FUNC(EXTRA_HEADER_ENTRY, IGNORE)
-
-#define MSG_ID(id, msg_type) FSCK_MSG_##id,
-enum fsck_msg_id {
- FOREACH_MSG_ID(MSG_ID)
- FSCK_MSG_MAX
-};
-#undef MSG_ID
-
#define STR(x) #x
#define MSG_ID(id, msg_type) { STR(id), NULL, NULL, FSCK_##msg_type },
static struct {
const char *id_string;
const char *downcased;
const char *camelcased;
- int msg_type;
+ enum fsck_msg_type msg_type;
} msg_id_info[FSCK_MSG_MAX + 1] = {
- FOREACH_MSG_ID(MSG_ID)
+ FOREACH_FSCK_MSG_ID(MSG_ID)
{ NULL, NULL, NULL, -1 }
};
#undef MSG_ID
+#undef STR
static void prepare_msg_ids(void)
{
@@ -164,25 +93,23 @@ void list_config_fsck_msg_ids(struct string_list *list, const char *prefix)
list_config_item(list, prefix, msg_id_info[i].camelcased);
}
-static int fsck_msg_type(enum fsck_msg_id msg_id,
+static enum fsck_msg_type fsck_msg_type(enum fsck_msg_id msg_id,
struct fsck_options *options)
{
- int msg_type;
-
assert(msg_id >= 0 && msg_id < FSCK_MSG_MAX);
- if (options->msg_type)
- msg_type = options->msg_type[msg_id];
- else {
- msg_type = msg_id_info[msg_id].msg_type;
+ if (!options->msg_type) {
+ enum fsck_msg_type msg_type = msg_id_info[msg_id].msg_type;
+
if (options->strict && msg_type == FSCK_WARN)
msg_type = FSCK_ERROR;
+ return msg_type;
}
- return msg_type;
+ return options->msg_type[msg_id];
}
-static int parse_msg_type(const char *str)
+static enum fsck_msg_type parse_msg_type(const char *str)
{
if (!strcmp(str, "error"))
return FSCK_ERROR;
@@ -202,28 +129,35 @@ int is_valid_msg_type(const char *msg_id, const char *msg_type)
return 1;
}
-void fsck_set_msg_type(struct fsck_options *options,
- const char *msg_id, const char *msg_type)
+void fsck_set_msg_type_from_ids(struct fsck_options *options,
+ enum fsck_msg_id msg_id,
+ enum fsck_msg_type msg_type)
{
- int id = parse_msg_id(msg_id), type;
-
- if (id < 0)
- die("Unhandled message id: %s", msg_id);
- type = parse_msg_type(msg_type);
-
- if (type != FSCK_ERROR && msg_id_info[id].msg_type == FSCK_FATAL)
- die("Cannot demote %s to %s", msg_id, msg_type);
-
if (!options->msg_type) {
int i;
- int *msg_type;
- ALLOC_ARRAY(msg_type, FSCK_MSG_MAX);
+ enum fsck_msg_type *severity;
+ ALLOC_ARRAY(severity, FSCK_MSG_MAX);
for (i = 0; i < FSCK_MSG_MAX; i++)
- msg_type[i] = fsck_msg_type(i, options);
- options->msg_type = msg_type;
+ severity[i] = fsck_msg_type(i, options);
+ options->msg_type = severity;
}
- options->msg_type[id] = type;
+ options->msg_type[msg_id] = msg_type;
+}
+
+void fsck_set_msg_type(struct fsck_options *options,
+ const char *msg_id_str, const char *msg_type_str)
+{
+ int msg_id = parse_msg_id(msg_id_str);
+ enum fsck_msg_type msg_type = parse_msg_type(msg_type_str);
+
+ if (msg_id < 0)
+ die("Unhandled message id: %s", msg_id_str);
+
+ if (msg_type != FSCK_ERROR && msg_id_info[msg_id].msg_type == FSCK_FATAL)
+ die("Cannot demote %s to %s", msg_id_str, msg_type_str);
+
+ fsck_set_msg_type_from_ids(options, msg_id, msg_type);
}
void fsck_set_msg_types(struct fsck_options *options, const char *values)
@@ -264,24 +198,6 @@ void fsck_set_msg_types(struct fsck_options *options, const char *values)
free(to_free);
}
-static void append_msg_id(struct strbuf *sb, const char *msg_id)
-{
- for (;;) {
- char c = *(msg_id)++;
-
- if (!c)
- break;
- if (c != '_')
- strbuf_addch(sb, tolower(c));
- else {
- assert(*msg_id);
- strbuf_addch(sb, *(msg_id)++);
- }
- }
-
- strbuf_addstr(sb, ": ");
-}
-
static int object_on_skiplist(struct fsck_options *opts,
const struct object_id *oid)
{
@@ -291,11 +207,12 @@ static int object_on_skiplist(struct fsck_options *opts,
__attribute__((format (printf, 5, 6)))
static int report(struct fsck_options *options,
const struct object_id *oid, enum object_type object_type,
- enum fsck_msg_id id, const char *fmt, ...)
+ enum fsck_msg_id msg_id, const char *fmt, ...)
{
va_list ap;
struct strbuf sb = STRBUF_INIT;
- int msg_type = fsck_msg_type(id, options), result;
+ enum fsck_msg_type msg_type = fsck_msg_type(msg_id, options);
+ int result;
if (msg_type == FSCK_IGNORE)
return 0;
@@ -308,12 +225,13 @@ static int report(struct fsck_options *options,
else if (msg_type == FSCK_INFO)
msg_type = FSCK_WARN;
- append_msg_id(&sb, msg_id_info[id].id_string);
+ prepare_msg_ids();
+ strbuf_addf(&sb, "%s: ", msg_id_info[msg_id].camelcased);
va_start(ap, fmt);
strbuf_vaddf(&sb, fmt, ap);
result = options->error_func(options, oid, object_type,
- msg_type, sb.buf);
+ msg_type, msg_id, sb.buf);
strbuf_release(&sb);
va_end(ap);
@@ -685,7 +603,7 @@ static int fsck_tree(const struct object_id *oid,
if (is_hfs_dotgitmodules(name) || is_ntfs_dotgitmodules(name)) {
if (!S_ISLNK(mode))
- oidset_insert(&gitmodules_found, oid);
+ oidset_insert(&options->gitmodules_found, oid);
else
retval += report(options,
oid, OBJ_TREE,
@@ -699,7 +617,7 @@ static int fsck_tree(const struct object_id *oid,
has_dotgit |= is_ntfs_dotgit(backslash);
if (is_ntfs_dotgitmodules(backslash)) {
if (!S_ISLNK(mode))
- oidset_insert(&gitmodules_found, oid);
+ oidset_insert(&options->gitmodules_found, oid);
else
retval += report(options, oid, OBJ_TREE,
FSCK_MSG_GITMODULES_SYMLINK,
@@ -1211,9 +1129,9 @@ static int fsck_blob(const struct object_id *oid, const char *buf,
struct fsck_gitmodules_data data;
struct config_options config_opts = { 0 };
- if (!oidset_contains(&gitmodules_found, oid))
+ if (!oidset_contains(&options->gitmodules_found, oid))
return 0;
- oidset_insert(&gitmodules_done, oid);
+ oidset_insert(&options->gitmodules_done, oid);
if (object_on_skiplist(options, oid))
return 0;
@@ -1266,7 +1184,9 @@ int fsck_object(struct object *obj, void *data, unsigned long size,
int fsck_error_function(struct fsck_options *o,
const struct object_id *oid,
enum object_type object_type,
- int msg_type, const char *message)
+ enum fsck_msg_type msg_type,
+ enum fsck_msg_id msg_id,
+ const char *message)
{
if (msg_type == FSCK_WARN) {
warning("object %s: %s", fsck_describe_object(o, oid), message);
@@ -1276,24 +1196,19 @@ int fsck_error_function(struct fsck_options *o,
return 1;
}
-void register_found_gitmodules(const struct object_id *oid)
-{
- oidset_insert(&gitmodules_found, oid);
-}
-
int fsck_finish(struct fsck_options *options)
{
int ret = 0;
struct oidset_iter iter;
const struct object_id *oid;
- oidset_iter_init(&gitmodules_found, &iter);
+ oidset_iter_init(&options->gitmodules_found, &iter);
while ((oid = oidset_iter_next(&iter))) {
enum object_type type;
unsigned long size;
char *buf;
- if (oidset_contains(&gitmodules_done, oid))
+ if (oidset_contains(&options->gitmodules_done, oid))
continue;
buf = read_object_file(oid, &type, &size);
@@ -1318,14 +1233,14 @@ int fsck_finish(struct fsck_options *options)
}
- oidset_clear(&gitmodules_found);
- oidset_clear(&gitmodules_done);
+ oidset_clear(&options->gitmodules_found);
+ oidset_clear(&options->gitmodules_done);
return ret;
}
-int fsck_config_internal(const char *var, const char *value, void *cb,
- struct fsck_options *options)
+int git_fsck_config(const char *var, const char *value, void *cb)
{
+ struct fsck_options *options = cb;
if (strcmp(var, "fsck.skiplist") == 0) {
const char *path;
struct strbuf sb = STRBUF_INIT;
@@ -1346,3 +1261,21 @@ int fsck_config_internal(const char *var, const char *value, void *cb,
return git_default_config(var, value, cb);
}
+
+/*
+ * Custom error callbacks that are used in more than one place.
+ */
+
+int fsck_error_cb_print_missing_gitmodules(struct fsck_options *o,
+ const struct object_id *oid,
+ enum object_type object_type,
+ enum fsck_msg_type msg_type,
+ enum fsck_msg_id msg_id,
+ const char *message)
+{
+ if (msg_id == FSCK_MSG_GITMODULES_MISSING) {
+ puts(oid_to_hex(oid));
+ return 0;
+ }
+ return fsck_error_function(o, oid, object_type, msg_type, msg_id, message);
+}
diff --git a/fsck.h b/fsck.h
index 733378f..7202c3c 100644
--- a/fsck.h
+++ b/fsck.h
@@ -3,15 +3,90 @@
#include "oidset.h"
-#define FSCK_ERROR 1
-#define FSCK_WARN 2
-#define FSCK_IGNORE 3
+enum fsck_msg_type {
+ /* for internal use only */
+ FSCK_IGNORE,
+ FSCK_INFO,
+ FSCK_FATAL,
+ /* "public", fed to e.g. error_func callbacks */
+ FSCK_ERROR,
+ FSCK_WARN,
+};
+
+#define FOREACH_FSCK_MSG_ID(FUNC) \
+ /* fatal errors */ \
+ FUNC(NUL_IN_HEADER, FATAL) \
+ FUNC(UNTERMINATED_HEADER, FATAL) \
+ /* errors */ \
+ FUNC(BAD_DATE, ERROR) \
+ FUNC(BAD_DATE_OVERFLOW, ERROR) \
+ FUNC(BAD_EMAIL, ERROR) \
+ FUNC(BAD_NAME, ERROR) \
+ FUNC(BAD_OBJECT_SHA1, ERROR) \
+ FUNC(BAD_PARENT_SHA1, ERROR) \
+ FUNC(BAD_TAG_OBJECT, ERROR) \
+ FUNC(BAD_TIMEZONE, ERROR) \
+ FUNC(BAD_TREE, ERROR) \
+ FUNC(BAD_TREE_SHA1, ERROR) \
+ FUNC(BAD_TYPE, ERROR) \
+ FUNC(DUPLICATE_ENTRIES, ERROR) \
+ FUNC(MISSING_AUTHOR, ERROR) \
+ FUNC(MISSING_COMMITTER, ERROR) \
+ FUNC(MISSING_EMAIL, ERROR) \
+ FUNC(MISSING_NAME_BEFORE_EMAIL, ERROR) \
+ FUNC(MISSING_OBJECT, ERROR) \
+ FUNC(MISSING_SPACE_BEFORE_DATE, ERROR) \
+ FUNC(MISSING_SPACE_BEFORE_EMAIL, ERROR) \
+ FUNC(MISSING_TAG, ERROR) \
+ FUNC(MISSING_TAG_ENTRY, ERROR) \
+ FUNC(MISSING_TREE, ERROR) \
+ FUNC(MISSING_TREE_OBJECT, ERROR) \
+ FUNC(MISSING_TYPE, ERROR) \
+ FUNC(MISSING_TYPE_ENTRY, ERROR) \
+ FUNC(MULTIPLE_AUTHORS, ERROR) \
+ FUNC(TREE_NOT_SORTED, ERROR) \
+ FUNC(UNKNOWN_TYPE, ERROR) \
+ FUNC(ZERO_PADDED_DATE, ERROR) \
+ FUNC(GITMODULES_MISSING, ERROR) \
+ FUNC(GITMODULES_BLOB, ERROR) \
+ FUNC(GITMODULES_LARGE, ERROR) \
+ FUNC(GITMODULES_NAME, ERROR) \
+ FUNC(GITMODULES_SYMLINK, ERROR) \
+ FUNC(GITMODULES_URL, ERROR) \
+ FUNC(GITMODULES_PATH, ERROR) \
+ FUNC(GITMODULES_UPDATE, ERROR) \
+ /* warnings */ \
+ FUNC(BAD_FILEMODE, WARN) \
+ FUNC(EMPTY_NAME, WARN) \
+ FUNC(FULL_PATHNAME, WARN) \
+ FUNC(HAS_DOT, WARN) \
+ FUNC(HAS_DOTDOT, WARN) \
+ FUNC(HAS_DOTGIT, WARN) \
+ FUNC(NULL_SHA1, WARN) \
+ FUNC(ZERO_PADDED_FILEMODE, WARN) \
+ FUNC(NUL_IN_COMMIT, WARN) \
+ /* infos (reported as warnings, but ignored by default) */ \
+ FUNC(GITMODULES_PARSE, INFO) \
+ FUNC(BAD_TAG_NAME, INFO) \
+ FUNC(MISSING_TAGGER_ENTRY, INFO) \
+ /* ignored (elevated when requested) */ \
+ FUNC(EXTRA_HEADER_ENTRY, IGNORE)
+
+#define MSG_ID(id, msg_type) FSCK_MSG_##id,
+enum fsck_msg_id {
+ FOREACH_FSCK_MSG_ID(MSG_ID)
+ FSCK_MSG_MAX
+};
+#undef MSG_ID
struct fsck_options;
struct object;
+void fsck_set_msg_type_from_ids(struct fsck_options *options,
+ enum fsck_msg_id msg_id,
+ enum fsck_msg_type msg_type);
void fsck_set_msg_type(struct fsck_options *options,
- const char *msg_id, const char *msg_type);
+ const char *msg_id, const char *msg_type);
void fsck_set_msg_types(struct fsck_options *options, const char *values);
int is_valid_msg_type(const char *msg_id, const char *msg_type);
@@ -23,28 +98,55 @@ int is_valid_msg_type(const char *msg_id, const char *msg_type);
* <0 error signaled and abort
* >0 error signaled and do not abort
*/
-typedef int (*fsck_walk_func)(struct object *obj, int type, void *data, struct fsck_options *options);
+typedef int (*fsck_walk_func)(struct object *obj, enum object_type object_type,
+ void *data, struct fsck_options *options);
/* callback for fsck_object, type is FSCK_ERROR or FSCK_WARN */
typedef int (*fsck_error)(struct fsck_options *o,
const struct object_id *oid, enum object_type object_type,
- int msg_type, const char *message);
+ enum fsck_msg_type msg_type, enum fsck_msg_id msg_id,
+ const char *message);
int fsck_error_function(struct fsck_options *o,
const struct object_id *oid, enum object_type object_type,
- int msg_type, const char *message);
+ enum fsck_msg_type msg_type, enum fsck_msg_id msg_id,
+ const char *message);
+int fsck_error_cb_print_missing_gitmodules(struct fsck_options *o,
+ const struct object_id *oid,
+ enum object_type object_type,
+ enum fsck_msg_type msg_type,
+ enum fsck_msg_id msg_id,
+ const char *message);
struct fsck_options {
fsck_walk_func walk;
fsck_error error_func;
unsigned strict:1;
- int *msg_type;
+ enum fsck_msg_type *msg_type;
struct oidset skiplist;
+ struct oidset gitmodules_found;
+ struct oidset gitmodules_done;
kh_oid_map_t *object_names;
};
-#define FSCK_OPTIONS_DEFAULT { NULL, fsck_error_function, 0, NULL, OIDSET_INIT }
-#define FSCK_OPTIONS_STRICT { NULL, fsck_error_function, 1, NULL, OIDSET_INIT }
+#define FSCK_OPTIONS_DEFAULT { \
+ .skiplist = OIDSET_INIT, \
+ .gitmodules_found = OIDSET_INIT, \
+ .gitmodules_done = OIDSET_INIT, \
+ .error_func = fsck_error_function \
+}
+#define FSCK_OPTIONS_STRICT { \
+ .strict = 1, \
+ .gitmodules_found = OIDSET_INIT, \
+ .gitmodules_done = OIDSET_INIT, \
+ .error_func = fsck_error_function, \
+}
+#define FSCK_OPTIONS_MISSING_GITMODULES { \
+ .strict = 1, \
+ .gitmodules_found = OIDSET_INIT, \
+ .gitmodules_done = OIDSET_INIT, \
+ .error_func = fsck_error_cb_print_missing_gitmodules, \
+}
/* descend in all linked child objects
* the return value is:
@@ -62,8 +164,6 @@ int fsck_walk(struct object *obj, void *data, struct fsck_options *options);
int fsck_object(struct object *obj, void *data, unsigned long size,
struct fsck_options *options);
-void register_found_gitmodules(const struct object_id *oid);
-
/*
* fsck a tag, and pass info about it back to the caller. This is
* exposed fsck_object() internals for git-mktag(1).
@@ -109,7 +209,6 @@ const char *fsck_describe_object(struct fsck_options *options,
* git_config() callback for use by fsck-y tools that want to support
* fsck.<msg> fsck.skipList etc.
*/
-int fsck_config_internal(const char *var, const char *value, void *cb,
- struct fsck_options *options);
+int git_fsck_config(const char *var, const char *value, void *cb);
#endif
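
With the message ids and severities now exposed as enums, callers can adjust severities without going through string parsing, while the string-based entry point remains for configuration values. A minimal sketch using one of each follows; the particular message ids chosen here are arbitrary.

/*
 * Illustrative sketch (not part of this patch): tune fsck severities
 * via the newly public enums, or via the existing string interface.
 */
#include "cache.h"
#include "fsck.h"

static void relax_fsck_checks(struct fsck_options *fo)
{
	/* enum form: no string parsing needed */
	fsck_set_msg_type_from_ids(fo, FSCK_MSG_MISSING_EMAIL, FSCK_WARN);

	/* string form, as used when reading configuration values */
	fsck_set_msg_type(fo, "missingtaggerentry", "ignore");
}
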
diff --git a/git-compat-util.h b/git-compat-util.h
index 9ddf9d7..a508dbe 100644
--- a/git-compat-util.h
+++ b/git-compat-util.h
@@ -256,6 +256,11 @@ static inline const char *precompose_argv_prefix(int argc, const char **argv, co
{
return prefix;
}
+static inline const char *precompose_string_if_needed(const char *in)
+{
+ return in;
+}
+
#define probe_utf8_pathname_composition()
#endif
diff --git a/git.c b/git.c
index 9bc077a..b53e665 100644
--- a/git.c
+++ b/git.c
@@ -423,7 +423,7 @@ static int run_builtin(struct cmd_struct *p, int argc, const char **argv)
int nongit_ok;
prefix = setup_git_directory_gently(&nongit_ok);
}
- prefix = precompose_argv_prefix(argc, argv, prefix);
+ precompose_argv_prefix(argc, argv, NULL);
if (use_pager == -1 && p->option & (RUN_SETUP | RUN_SETUP_GENTLY) &&
!(p->option & DELAY_PAGER_CONFIG))
use_pager = check_pager_config(p->cmd);
diff --git a/log-tree.c b/log-tree.c
index 4531ceb..f3178a6 100644
--- a/log-tree.c
+++ b/log-tree.c
@@ -369,8 +369,14 @@ void fmt_output_subject(struct strbuf *filename,
int start_len = filename->len;
int max_len = start_len + info->patch_name_max - (strlen(suffix) + 1);
- if (0 < info->reroll_count)
- strbuf_addf(filename, "v%d-", info->reroll_count);
+ if (info->reroll_count) {
+ struct strbuf temp = STRBUF_INIT;
+
+ strbuf_addf(&temp, "v%s", info->reroll_count);
+ format_sanitized_subject(filename, temp.buf, temp.len);
+ strbuf_addstr(filename, "-");
+ strbuf_release(&temp);
+ }
strbuf_addf(filename, "%04d-%s", nr, subject);
if (max_len < filename->len)
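
Since reroll_count is now carried as a string rather than an int, a version such as "2.1" survives into the file name, and the new code runs it through the same subject sanitizing so that characters unsafe in file names cannot leak in. A worked example of the resulting name (illustrative only):

/*
 * Illustrative example (not part of this patch): with a string reroll
 * count, "git format-patch -v2.1" can now produce
 *
 *     v2.1-0001-some-subject.patch
 *
 * where the "2.1" has gone through format_sanitized_subject(), so a
 * value containing e.g. '/' is sanitized rather than ending up as a
 * path component.
 */
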
diff --git a/merge-ort.c b/merge-ort.c
index ba35600..5e118a8 100644
--- a/merge-ort.c
+++ b/merge-ort.c
@@ -51,6 +51,12 @@ enum merge_side {
MERGE_SIDE2 = 2
};
+struct traversal_callback_data {
+ unsigned long mask;
+ unsigned long dirmask;
+ struct name_entry names[3];
+};
+
struct rename_info {
/*
* All variables that are arrays of size 3 correspond to data tracked
@@ -89,6 +95,44 @@ struct rename_info {
struct strmap dir_renames[3];
/*
+ * relevant_sources: deleted paths for which we need rename detection
+ *
+ * relevant_sources is a set of deleted paths on each side of
+ * history for which we need rename detection. If a path is deleted
+ * on one side of history, we need to detect if it is part of a
+ * rename if either
+ * * we need to detect renames for an ancestor directory
+ * * the file is modified/deleted on the other side of history
+	 * If neither of those is true, we can skip rename detection for

+ * that path.
+ */
+ struct strset relevant_sources[3];
+
+ /*
+ * dir_rename_mask:
+ * 0: optimization removing unmodified potential rename source okay
+ * 2 or 4: optimization okay, but must check for files added to dir
+ * 7: optimization forbidden; need rename source in case of dir rename
+ */
+ unsigned dir_rename_mask:3;
+
+ /*
+ * callback_data_*: supporting data structures for alternate traversal
+ *
+ * We sometimes need to be able to traverse through all the files
+ * in a given tree before all immediate subdirectories within that
+ * tree. Since traverse_trees() doesn't do that naturally, we have
+ * a traverse_trees_wrapper() that stores any immediate
+ * subdirectories while traversing files, then traverses the
+ * immediate subdirectories later. These callback_data* variables
+ * store the information for the subdirectories so that we can do
+ * that traversal order.
+ */
+ struct traversal_callback_data *callback_data;
+ int callback_data_nr, callback_data_alloc;
+ char *callback_data_traverse_path;
+
+ /*
* needed_limit: value needed for inexact rename detection to run
*
* If the current rename limit wasn't high enough for inexact
@@ -358,6 +402,8 @@ static void clear_or_reinit_internal_opts(struct merge_options_internal *opti,
strmap_clear(&renames->dir_rename_count[i], 1);
strmap_func(&renames->dir_renames[i], 0);
+
+ strset_func(&renames->relevant_sources[i]);
}
if (!reinitialize) {
@@ -380,6 +426,12 @@ static void clear_or_reinit_internal_opts(struct merge_options_internal *opti,
}
strmap_clear(&opti->output, 0);
}
+
+ renames->dir_rename_mask = 0;
+
+ /* Clean out callback_data as well. */
+ FREE_AND_NULL(renames->callback_data);
+ renames->callback_data_nr = renames->callback_data_alloc = 0;
}
static int err(struct merge_options *opt, const char *err, ...)
@@ -470,6 +522,82 @@ static char *unique_path(struct strmap *existing_paths,
/*** Function Grouping: functions related to collect_merge_info() ***/
+static int traverse_trees_wrapper_callback(int n,
+ unsigned long mask,
+ unsigned long dirmask,
+ struct name_entry *names,
+ struct traverse_info *info)
+{
+ struct merge_options *opt = info->data;
+ struct rename_info *renames = &opt->priv->renames;
+ unsigned filemask = mask & ~dirmask;
+
+ assert(n==3);
+
+ if (!renames->callback_data_traverse_path)
+ renames->callback_data_traverse_path = xstrdup(info->traverse_path);
+
+ if (filemask && filemask == renames->dir_rename_mask)
+ renames->dir_rename_mask = 0x07;
+
+ ALLOC_GROW(renames->callback_data, renames->callback_data_nr + 1,
+ renames->callback_data_alloc);
+ renames->callback_data[renames->callback_data_nr].mask = mask;
+ renames->callback_data[renames->callback_data_nr].dirmask = dirmask;
+ COPY_ARRAY(renames->callback_data[renames->callback_data_nr].names,
+ names, 3);
+ renames->callback_data_nr++;
+
+ return mask;
+}
+
+/*
+ * Much like traverse_trees(), BUT:
+ * - read all the tree entries FIRST, saving them
+ * - note that the above step provides an opportunity to compute necessary
+ * additional details before the "real" traversal
+ * - loop through the saved entries and call the original callback on them
+ */
+static int traverse_trees_wrapper(struct index_state *istate,
+ int n,
+ struct tree_desc *t,
+ struct traverse_info *info)
+{
+ int ret, i, old_offset;
+ traverse_callback_t old_fn;
+ char *old_callback_data_traverse_path;
+ struct merge_options *opt = info->data;
+ struct rename_info *renames = &opt->priv->renames;
+
+ assert(renames->dir_rename_mask == 2 || renames->dir_rename_mask == 4);
+
+ old_callback_data_traverse_path = renames->callback_data_traverse_path;
+ old_fn = info->fn;
+ old_offset = renames->callback_data_nr;
+
+ renames->callback_data_traverse_path = NULL;
+ info->fn = traverse_trees_wrapper_callback;
+ ret = traverse_trees(istate, n, t, info);
+ if (ret < 0)
+ return ret;
+
+ info->traverse_path = renames->callback_data_traverse_path;
+ info->fn = old_fn;
+ for (i = old_offset; i < renames->callback_data_nr; ++i) {
+ info->fn(n,
+ renames->callback_data[i].mask,
+ renames->callback_data[i].dirmask,
+ renames->callback_data[i].names,
+ info);
+ }
+
+ renames->callback_data_nr = old_offset;
+ free(renames->callback_data_traverse_path);
+ renames->callback_data_traverse_path = old_callback_data_traverse_path;
+ info->traverse_path = NULL;
+ return 0;
+}
+
static void setup_path_info(struct merge_options *opt,
struct string_list_item *result,
const char *current_dir_name,
@@ -533,12 +661,22 @@ static void add_pair(struct merge_options *opt,
struct name_entry *names,
const char *pathname,
unsigned side,
- unsigned is_add /* if false, is_delete */)
+ unsigned is_add /* if false, is_delete */,
+ unsigned match_mask,
+ unsigned dir_rename_mask)
{
struct diff_filespec *one, *two;
struct rename_info *renames = &opt->priv->renames;
int names_idx = is_add ? side : 0;
+ if (!is_add) {
+ unsigned content_relevant = (match_mask == 0);
+ unsigned location_relevant = (dir_rename_mask == 0x07);
+
+ if (content_relevant || location_relevant)
+ strset_add(&renames->relevant_sources[side], pathname);
+ }
+
one = alloc_filespec(pathname);
two = alloc_filespec(pathname);
fill_filespec(is_add ? two : one,
@@ -557,6 +695,36 @@ static void collect_rename_info(struct merge_options *opt,
struct rename_info *renames = &opt->priv->renames;
unsigned side;
+ /*
+ * Update dir_rename_mask (determines ignore-rename-source validity)
+ *
+ * dir_rename_mask helps us keep track of when directory rename
+	 * detection may be relevant.  Basically, whenever a directory is
+ * removed on one side of history, and a file is added to that
+ * directory on the other side of history, directory rename
+ * detection is relevant (meaning we have to detect renames for all
+ * files within that directory to deduce where the directory
+ * moved). Also, whenever a directory needs directory rename
+ * detection, due to the "majority rules" choice for where to move
+ * it (see t6423 testcase 1f), we also need to detect renames for
+	 * all files within subdirectories of that directory.
+ *
+	 * Here we haven't looked at files within the directory yet; we are
+ * just looking at the directory itself. So, if we aren't yet in
+ * a case where a parent directory needed directory rename detection
+ * (i.e. dir_rename_mask != 0x07), and if the directory was removed
+ * on one side of history, record the mask of the other side of
+ * history in dir_rename_mask.
+ */
+ if (renames->dir_rename_mask != 0x07 &&
+ (dirmask == 3 || dirmask == 5)) {
+ /* simple sanity check */
+ assert(renames->dir_rename_mask == 0 ||
+ renames->dir_rename_mask == (dirmask & ~1));
+ /* update dir_rename_mask; have it record mask of new side */
+ renames->dir_rename_mask = (dirmask & ~1);
+ }
+
/* Update dirs_removed, as needed */
if (dirmask == 1 || dirmask == 3 || dirmask == 5) {
/* absent_mask = 0x07 - dirmask; sides = absent_mask/2 */
@@ -575,11 +743,15 @@ static void collect_rename_info(struct merge_options *opt,
/* Check for deletion on side */
if ((filemask & 1) && !(filemask & side_mask))
- add_pair(opt, names, fullname, side, 0 /* delete */);
+ add_pair(opt, names, fullname, side, 0 /* delete */,
+ match_mask & filemask,
+ renames->dir_rename_mask);
/* Check for addition on side */
if (!(filemask & 1) && (filemask & side_mask))
- add_pair(opt, names, fullname, side, 1 /* add */);
+ add_pair(opt, names, fullname, side, 1 /* add */,
+ match_mask & filemask,
+ renames->dir_rename_mask);
}
}
@@ -597,12 +769,14 @@ static int collect_merge_info_callback(int n,
*/
struct merge_options *opt = info->data;
struct merge_options_internal *opti = opt->priv;
+ struct rename_info *renames = &opt->priv->renames;
struct string_list_item pi; /* Path Info */
struct conflict_info *ci; /* typed alias to pi.util (which is void*) */
struct name_entry *p;
size_t len;
char *fullpath;
const char *dirname = opti->current_dir_name;
+ unsigned prev_dir_rename_mask = renames->dir_rename_mask;
unsigned filemask = mask & ~dirmask;
unsigned match_mask = 0; /* will be updated below */
unsigned mbase_null = !(mask & 1);
@@ -743,8 +917,13 @@ static int collect_merge_info_callback(int n,
original_dir_name = opti->current_dir_name;
opti->current_dir_name = pi.string;
- ret = traverse_trees(NULL, 3, t, &newinfo);
+ if (renames->dir_rename_mask == 0 ||
+ renames->dir_rename_mask == 0x07)
+ ret = traverse_trees(NULL, 3, t, &newinfo);
+ else
+ ret = traverse_trees_wrapper(NULL, 3, t, &newinfo);
opti->current_dir_name = original_dir_name;
+ renames->dir_rename_mask = prev_dir_rename_mask;
for (i = MERGE_BASE; i <= MERGE_SIDE2; i++)
free(buf[i]);
@@ -1977,6 +2156,19 @@ static int process_renames(struct merge_options *opt,
return clean_merge;
}
+static inline int possible_side_renames(struct rename_info *renames,
+ unsigned side_index)
+{
+ return renames->pairs[side_index].nr > 0 &&
+ !strset_empty(&renames->relevant_sources[side_index]);
+}
+
+static inline int possible_renames(struct rename_info *renames)
+{
+ return possible_side_renames(renames, 1) ||
+ possible_side_renames(renames, 2);
+}
+
static void resolve_diffpair_statuses(struct diff_queue_struct *q)
{
/*
@@ -2013,6 +2205,16 @@ static void detect_regular_renames(struct merge_options *opt,
struct diff_options diff_opts;
struct rename_info *renames = &opt->priv->renames;
+ if (!possible_side_renames(renames, side_index)) {
+ /*
+ * No rename detection needed for this side, but we still need
+ * to make sure 'adds' are marked correctly in case the other
+ * side had directory renames.
+ */
+ resolve_diffpair_statuses(&renames->pairs[side_index]);
+ return;
+ }
+
repo_diff_setup(opt->repo, &diff_opts);
diff_opts.flags.recursive = 1;
diff_opts.flags.rename_empty = 0;
@@ -2028,6 +2230,7 @@ static void detect_regular_renames(struct merge_options *opt,
diff_queued_diff = renames->pairs[side_index];
trace2_region_enter("diff", "diffcore_rename", opt->repo);
diffcore_rename_extended(&diff_opts,
+ &renames->relevant_sources[side_index],
&renames->dirs_removed[side_index],
&renames->dir_rename_count[side_index]);
trace2_region_leave("diff", "diffcore_rename", opt->repo);
@@ -2129,6 +2332,8 @@ static int detect_and_process_renames(struct merge_options *opt,
int need_dir_renames, s, clean = 1;
memset(&combined, 0, sizeof(combined));
+ if (!possible_renames(renames))
+ goto cleanup;
trace2_region_enter("merge", "regular renames", opt->repo);
detect_regular_renames(opt, MERGE_SIDE1);
@@ -2163,6 +2368,25 @@ static int detect_and_process_renames(struct merge_options *opt,
clean &= process_renames(opt, &combined);
trace2_region_leave("merge", "process renames", opt->repo);
+ goto simple_cleanup; /* collect_renames() handles some of cleanup */
+
+cleanup:
+ /*
+ * Free now unneeded filepairs, which would have been handled
+ * in collect_renames() normally but we skipped that code.
+ */
+ for (s = MERGE_SIDE1; s <= MERGE_SIDE2; s++) {
+ struct diff_queue_struct *side_pairs;
+ int i;
+
+ side_pairs = &renames->pairs[s];
+ for (i = 0; i < side_pairs->nr; ++i) {
+ struct diff_filepair *p = side_pairs->queue[i];
+ diff_free_filepair(p);
+ }
+ }
+
+simple_cleanup:
/* Free memory for renames->pairs[] and combined */
for (s = MERGE_SIDE1; s <= MERGE_SIDE2; s++) {
free(renames->pairs[s].queue);
@@ -3226,6 +3450,8 @@ static void merge_start(struct merge_options *opt, struct merge_result *result)
NULL, 1);
strmap_init_with_options(&renames->dir_renames[i],
NULL, 0);
+ strset_init_with_options(&renames->relevant_sources[i],
+ NULL, 0);
}
/*
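
A worked example of the dir_rename_mask bookkeeping introduced above, using the usual bit assignment (1 = merge base, 2 = side 1, 4 = side 2): if side 1 deletes directory d/ while the base and side 2 keep it, the dirmask for d/ is 5 and dir_rename_mask records 4, the side that still has the directory. If the wrapped traversal then finds a file under d/ whose filemask is also 4 (a file present only on side 2), the mask is promoted to 0x07; directory rename detection is now required there, so every path deleted under d/ is added to relevant_sources instead of being skipped by the rename-source optimization.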
diff --git a/midx.c b/midx.c
index becfafe..9e86583 100644
--- a/midx.c
+++ b/midx.c
@@ -12,6 +12,7 @@
#include "run-command.h"
#include "repository.h"
#include "chunk-format.h"
+#include "pack.h"
#define MIDX_SIGNATURE 0x4d494458 /* "MIDX" */
#define MIDX_VERSION 1
@@ -47,11 +48,22 @@ static uint8_t oid_version(void)
}
}
+static const unsigned char *get_midx_checksum(struct multi_pack_index *m)
+{
+ return m->data + m->data_len - the_hash_algo->rawsz;
+}
+
static char *get_midx_filename(const char *object_dir)
{
return xstrfmt("%s/pack/multi-pack-index", object_dir);
}
+char *get_midx_rev_filename(struct multi_pack_index *m)
+{
+ return xstrfmt("%s/pack/multi-pack-index-%s.rev",
+ m->object_dir, hash_to_hex(get_midx_checksum(m)));
+}
+
static int midx_read_oid_fanout(const unsigned char *chunk_start,
size_t chunk_size, void *data)
{
@@ -239,7 +251,7 @@ struct object_id *nth_midxed_object_oid(struct object_id *oid,
return oid;
}
-static off_t nth_midxed_offset(struct multi_pack_index *m, uint32_t pos)
+off_t nth_midxed_offset(struct multi_pack_index *m, uint32_t pos)
{
const unsigned char *offset_data;
uint32_t offset32;
@@ -258,7 +270,7 @@ static off_t nth_midxed_offset(struct multi_pack_index *m, uint32_t pos)
return offset32;
}
-static uint32_t nth_midxed_pack_int_id(struct multi_pack_index *m, uint32_t pos)
+uint32_t nth_midxed_pack_int_id(struct multi_pack_index *m, uint32_t pos)
{
return get_be32(m->chunk_object_offsets +
(off_t)pos * MIDX_CHUNK_OFFSET_WIDTH);
@@ -431,6 +443,14 @@ static int pack_info_compare(const void *_a, const void *_b)
return strcmp(a->pack_name, b->pack_name);
}
+static int idx_or_pack_name_cmp(const void *_va, const void *_vb)
+{
+ const char *pack_name = _va;
+ const struct pack_info *compar = _vb;
+
+ return cmp_idx_or_pack_name(pack_name, compar->pack_name);
+}
+
struct write_midx_context {
struct pack_info *info;
uint32_t nr;
@@ -443,8 +463,11 @@ struct write_midx_context {
uint32_t entries_nr;
uint32_t *pack_perm;
+ uint32_t *pack_order;
unsigned large_offsets_needed:1;
uint32_t num_large_offsets;
+
+ int preferred_pack_idx;
};
static void add_pack_to_midx(const char *full_path, size_t full_path_len,
@@ -489,6 +512,7 @@ struct pack_midx_entry {
uint32_t pack_int_id;
time_t pack_mtime;
uint64_t offset;
+ unsigned preferred : 1;
};
static int midx_oid_compare(const void *_a, const void *_b)
@@ -500,6 +524,12 @@ static int midx_oid_compare(const void *_a, const void *_b)
if (cmp)
return cmp;
+ /* Sort objects in a preferred pack first when multiple copies exist. */
+ if (a->preferred > b->preferred)
+ return -1;
+ if (a->preferred < b->preferred)
+ return 1;
+
if (a->pack_mtime > b->pack_mtime)
return -1;
else if (a->pack_mtime < b->pack_mtime)
@@ -527,7 +557,8 @@ static int nth_midxed_pack_midx_entry(struct multi_pack_index *m,
static void fill_pack_entry(uint32_t pack_int_id,
struct packed_git *p,
uint32_t cur_object,
- struct pack_midx_entry *entry)
+ struct pack_midx_entry *entry,
+ int preferred)
{
if (nth_packed_object_id(&entry->oid, p, cur_object) < 0)
die(_("failed to locate object %d in packfile"), cur_object);
@@ -536,6 +567,7 @@ static void fill_pack_entry(uint32_t pack_int_id,
entry->pack_mtime = p->mtime;
entry->offset = nth_packed_object_offset(p, cur_object);
+ entry->preferred = !!preferred;
}
/*
@@ -552,7 +584,8 @@ static void fill_pack_entry(uint32_t pack_int_id,
static struct pack_midx_entry *get_sorted_entries(struct multi_pack_index *m,
struct pack_info *info,
uint32_t nr_packs,
- uint32_t *nr_objects)
+ uint32_t *nr_objects,
+ int preferred_pack)
{
uint32_t cur_fanout, cur_pack, cur_object;
uint32_t alloc_fanout, alloc_objects, total_objects = 0;
@@ -589,12 +622,17 @@ static struct pack_midx_entry *get_sorted_entries(struct multi_pack_index *m,
nth_midxed_pack_midx_entry(m,
&entries_by_fanout[nr_fanout],
cur_object);
+ if (nth_midxed_pack_int_id(m, cur_object) == preferred_pack)
+ entries_by_fanout[nr_fanout].preferred = 1;
+ else
+ entries_by_fanout[nr_fanout].preferred = 0;
nr_fanout++;
}
}
for (cur_pack = start_pack; cur_pack < nr_packs; cur_pack++) {
uint32_t start = 0, end;
+ int preferred = cur_pack == preferred_pack;
if (cur_fanout)
start = get_pack_fanout(info[cur_pack].p, cur_fanout - 1);
@@ -602,7 +640,11 @@ static struct pack_midx_entry *get_sorted_entries(struct multi_pack_index *m,
for (cur_object = start; cur_object < end; cur_object++) {
ALLOC_GROW(entries_by_fanout, nr_fanout + 1, alloc_fanout);
- fill_pack_entry(cur_pack, info[cur_pack].p, cur_object, &entries_by_fanout[nr_fanout]);
+ fill_pack_entry(cur_pack,
+ info[cur_pack].p,
+ cur_object,
+ &entries_by_fanout[nr_fanout],
+ preferred);
nr_fanout++;
}
}
@@ -776,10 +818,80 @@ static int write_midx_large_offsets(struct hashfile *f,
return 0;
}
+struct midx_pack_order_data {
+ uint32_t nr;
+ uint32_t pack;
+ off_t offset;
+};
+
+static int midx_pack_order_cmp(const void *va, const void *vb)
+{
+ const struct midx_pack_order_data *a = va, *b = vb;
+ if (a->pack < b->pack)
+ return -1;
+ else if (a->pack > b->pack)
+ return 1;
+ else if (a->offset < b->offset)
+ return -1;
+ else if (a->offset > b->offset)
+ return 1;
+ else
+ return 0;
+}
+
+static uint32_t *midx_pack_order(struct write_midx_context *ctx)
+{
+ struct midx_pack_order_data *data;
+ uint32_t *pack_order;
+ uint32_t i;
+
+ ALLOC_ARRAY(data, ctx->entries_nr);
+ for (i = 0; i < ctx->entries_nr; i++) {
+ struct pack_midx_entry *e = &ctx->entries[i];
+ data[i].nr = i;
+ data[i].pack = ctx->pack_perm[e->pack_int_id];
+ if (!e->preferred)
+ data[i].pack |= (1U << 31);
+ data[i].offset = e->offset;
+ }
+
+ QSORT(data, ctx->entries_nr, midx_pack_order_cmp);
+
+ ALLOC_ARRAY(pack_order, ctx->entries_nr);
+ for (i = 0; i < ctx->entries_nr; i++)
+ pack_order[i] = data[i].nr;
+ free(data);
+
+ return pack_order;
+}
+
+static void write_midx_reverse_index(char *midx_name, unsigned char *midx_hash,
+ struct write_midx_context *ctx)
+{
+ struct strbuf buf = STRBUF_INIT;
+ const char *tmp_file;
+
+ strbuf_addf(&buf, "%s-%s.rev", midx_name, hash_to_hex(midx_hash));
+
+ tmp_file = write_rev_file_order(NULL, ctx->pack_order, ctx->entries_nr,
+ midx_hash, WRITE_REV);
+
+ if (finalize_object_file(tmp_file, buf.buf))
+ die(_("cannot store reverse index file"));
+
+ strbuf_release(&buf);
+}
+
+static void clear_midx_files_ext(struct repository *r, const char *ext,
+ unsigned char *keep_hash);
+
static int write_midx_internal(const char *object_dir, struct multi_pack_index *m,
- struct string_list *packs_to_drop, unsigned flags)
+ struct string_list *packs_to_drop,
+ const char *preferred_pack_name,
+ unsigned flags)
{
char *midx_name;
+ unsigned char midx_hash[GIT_MAX_RAWSZ];
uint32_t i;
struct hashfile *f = NULL;
struct lock_file lk;
@@ -828,7 +940,19 @@ static int write_midx_internal(const char *object_dir, struct multi_pack_index *
if (ctx.m && ctx.nr == ctx.m->num_packs && !packs_to_drop)
goto cleanup;
- ctx.entries = get_sorted_entries(ctx.m, ctx.info, ctx.nr, &ctx.entries_nr);
+ ctx.preferred_pack_idx = -1;
+ if (preferred_pack_name) {
+ for (i = 0; i < ctx.nr; i++) {
+ if (!cmp_idx_or_pack_name(preferred_pack_name,
+ ctx.info[i].pack_name)) {
+ ctx.preferred_pack_idx = i;
+ break;
+ }
+ }
+ }
+
+ ctx.entries = get_sorted_entries(ctx.m, ctx.info, ctx.nr, &ctx.entries_nr,
+ ctx.preferred_pack_idx);
ctx.large_offsets_needed = 0;
for (i = 0; i < ctx.entries_nr; i++) {
@@ -889,13 +1013,30 @@ static int write_midx_internal(const char *object_dir, struct multi_pack_index *
pack_name_concat_len += strlen(ctx.info[i].pack_name) + 1;
}
+ /* Check that the preferred pack wasn't expired (if given). */
+ if (preferred_pack_name) {
+ struct pack_info *preferred = bsearch(preferred_pack_name,
+ ctx.info, ctx.nr,
+ sizeof(*ctx.info),
+ idx_or_pack_name_cmp);
+
+ if (!preferred)
+ warning(_("unknown preferred pack: '%s'"),
+ preferred_pack_name);
+ else {
+ uint32_t perm = ctx.pack_perm[preferred->orig_pack_int_id];
+ if (perm == PACK_EXPIRED)
+ warning(_("preferred pack '%s' is expired"),
+ preferred_pack_name);
+ }
+ }
+
if (pack_name_concat_len % MIDX_CHUNK_ALIGNMENT)
pack_name_concat_len += MIDX_CHUNK_ALIGNMENT -
(pack_name_concat_len % MIDX_CHUNK_ALIGNMENT);
hold_lock_file_for_update(&lk, midx_name, LOCK_DIE_ON_ERROR);
f = hashfd(get_lock_file_fd(&lk), get_lock_file_path(&lk));
- FREE_AND_NULL(midx_name);
if (ctx.m)
close_midx(ctx.m);
@@ -927,8 +1068,16 @@ static int write_midx_internal(const char *object_dir, struct multi_pack_index *
write_midx_header(f, get_num_chunks(cf), ctx.nr - dropped_packs);
write_chunkfile(cf, &ctx);
- finalize_hashfile(f, NULL, CSUM_FSYNC | CSUM_HASH_IN_STREAM);
+ finalize_hashfile(f, midx_hash, CSUM_FSYNC | CSUM_HASH_IN_STREAM);
free_chunkfile(cf);
+
+ if (flags & MIDX_WRITE_REV_INDEX)
+ ctx.pack_order = midx_pack_order(&ctx);
+
+ if (flags & MIDX_WRITE_REV_INDEX)
+ write_midx_reverse_index(midx_name, midx_hash, &ctx);
+ clear_midx_files_ext(the_repository, ".rev", midx_hash);
+
commit_lock_file(&lk);
cleanup:
@@ -943,13 +1092,55 @@ cleanup:
free(ctx.info);
free(ctx.entries);
free(ctx.pack_perm);
+ free(ctx.pack_order);
free(midx_name);
return result;
}
-int write_midx_file(const char *object_dir, unsigned flags)
+int write_midx_file(const char *object_dir,
+ const char *preferred_pack_name,
+ unsigned flags)
{
- return write_midx_internal(object_dir, NULL, NULL, flags);
+ return write_midx_internal(object_dir, NULL, NULL, preferred_pack_name,
+ flags);
+}
+
+struct clear_midx_data {
+ char *keep;
+ const char *ext;
+};
+
+static void clear_midx_file_ext(const char *full_path, size_t full_path_len,
+ const char *file_name, void *_data)
+{
+ struct clear_midx_data *data = _data;
+
+ if (!(starts_with(file_name, "multi-pack-index-") &&
+ ends_with(file_name, data->ext)))
+ return;
+ if (data->keep && !strcmp(data->keep, file_name))
+ return;
+
+ if (unlink(full_path))
+ die_errno(_("failed to remove %s"), full_path);
+}
+
+static void clear_midx_files_ext(struct repository *r, const char *ext,
+ unsigned char *keep_hash)
+{
+ struct clear_midx_data data;
+ memset(&data, 0, sizeof(struct clear_midx_data));
+
+ if (keep_hash)
+ data.keep = xstrfmt("multi-pack-index-%s%s",
+ hash_to_hex(keep_hash), ext);
+ data.ext = ext;
+
+ for_each_file_in_pack_dir(r->objects->odb->path,
+ clear_midx_file_ext,
+ &data);
+
+ free(data.keep);
}
void clear_midx_file(struct repository *r)
@@ -964,6 +1155,8 @@ void clear_midx_file(struct repository *r)
if (remove_path(midx))
die(_("failed to clear multi-pack-index at %s"), midx);
+ clear_midx_files_ext(r, ".rev", NULL);
+
free(midx);
}
@@ -1184,7 +1377,7 @@ int expire_midx_packs(struct repository *r, const char *object_dir, unsigned fla
free(count);
if (packs_to_drop.nr)
- result = write_midx_internal(object_dir, m, &packs_to_drop, flags);
+ result = write_midx_internal(object_dir, m, &packs_to_drop, NULL, flags);
string_list_clear(&packs_to_drop, 0);
return result;
@@ -1373,7 +1566,7 @@ int midx_repack(struct repository *r, const char *object_dir, size_t batch_size,
goto cleanup;
}
- result = write_midx_internal(object_dir, m, NULL, flags);
+ result = write_midx_internal(object_dir, m, NULL, NULL, flags);
m = NULL;
cleanup:
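
A brief note on the sort key used by midx_pack_order() above: non-preferred entries have bit 31 set, so an object from the preferred pack (say pack_perm 5) sorts on the key 5 while every non-preferred object sorts on 0x80000000 or above; the whole preferred pack therefore comes first, and the secondary offset comparison keeps each pack's objects in on-disk order. That sequence is exactly the pseudo-pack order that write_midx_reverse_index() stores in the new multi-pack-index-<hash>.rev file.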
diff --git a/midx.h b/midx.h
index b18cf53..8684cf0 100644
--- a/midx.h
+++ b/midx.h
@@ -15,6 +15,10 @@ struct multi_pack_index {
const unsigned char *data;
size_t data_len;
+ const uint32_t *revindex_data;
+ const uint32_t *revindex_map;
+ size_t revindex_len;
+
uint32_t signature;
unsigned char version;
unsigned char hash_len;
@@ -36,10 +40,15 @@ struct multi_pack_index {
};
#define MIDX_PROGRESS (1 << 0)
+#define MIDX_WRITE_REV_INDEX (1 << 1)
+
+char *get_midx_rev_filename(struct multi_pack_index *m);
struct multi_pack_index *load_multi_pack_index(const char *object_dir, int local);
int prepare_midx_pack(struct repository *r, struct multi_pack_index *m, uint32_t pack_int_id);
int bsearch_midx(const struct object_id *oid, struct multi_pack_index *m, uint32_t *result);
+off_t nth_midxed_offset(struct multi_pack_index *m, uint32_t pos);
+uint32_t nth_midxed_pack_int_id(struct multi_pack_index *m, uint32_t pos);
struct object_id *nth_midxed_object_oid(struct object_id *oid,
struct multi_pack_index *m,
uint32_t n);
@@ -47,7 +56,7 @@ int fill_midx_entry(struct repository *r, const struct object_id *oid, struct pa
int midx_contains_pack(struct multi_pack_index *m, const char *idx_or_pack_name);
int prepare_multi_pack_index_one(struct repository *r, const char *object_dir, int local);
-int write_midx_file(const char *object_dir, unsigned flags);
+int write_midx_file(const char *object_dir, const char *preferred_pack_name, unsigned flags);
void clear_midx_file(struct repository *r);
int verify_midx_file(struct repository *r, const char *object_dir, unsigned flags);
int expire_midx_packs(struct repository *r, const char *object_dir, unsigned flags);
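
A minimal sketch of the updated entry point as declared above; the pack name is a placeholder, and passing NULL for it preserves the old behavior of having no preferred pack:

#include "midx.h"

static int rewrite_midx(const char *object_dir)
{
	/* prefer a (hypothetical) pack and also write the new .rev file */
	return write_midx_file(object_dir, "pack-1234abcd.idx",
			       MIDX_PROGRESS | MIDX_WRITE_REV_INDEX);
}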
diff --git a/pack-bitmap.c b/pack-bitmap.c
index 1ebe0c8..3ed1543 100644
--- a/pack-bitmap.c
+++ b/pack-bitmap.c
@@ -13,6 +13,7 @@
#include "repository.h"
#include "object-store.h"
#include "list-objects-filter-options.h"
+#include "config.h"
/*
* An entry on the bitmap index, representing the bitmap for a given
@@ -997,6 +998,7 @@ struct bitmap_index *prepare_bitmap_walk(struct rev_info *revs,
object_list_insert(object, &wants);
object = parse_object_or_die(get_tagged_oid(tag), NULL);
+ object->flags |= (tag->object.flags & UNINTERESTING);
}
if (object->flags & UNINTERESTING)
@@ -1350,6 +1352,24 @@ void test_bitmap_walk(struct rev_info *revs)
free_bitmap_index(bitmap_git);
}
+int test_bitmap_commits(struct repository *r)
+{
+ struct bitmap_index *bitmap_git = prepare_bitmap_git(r);
+ struct object_id oid;
+ MAYBE_UNUSED void *value;
+
+ if (!bitmap_git)
+ die("failed to load bitmap indexes");
+
+ kh_foreach(bitmap_git->bitmaps, oid, value, {
+ printf("%s\n", oid_to_hex(&oid));
+ });
+
+ free_bitmap_index(bitmap_git);
+
+ return 0;
+}
+
int rebuild_bitmap(const uint32_t *reposition,
struct ewah_bitmap *source,
struct bitmap *dest)
@@ -1511,3 +1531,8 @@ off_t get_disk_usage_from_bitmap(struct bitmap_index *bitmap_git,
return total;
}
+
+const struct string_list *bitmap_preferred_tips(struct repository *r)
+{
+ return repo_config_get_value_multi(r, "pack.preferbitmaptips");
+}
diff --git a/pack-bitmap.h b/pack-bitmap.h
index 36d9993..78f2b3f 100644
--- a/pack-bitmap.h
+++ b/pack-bitmap.h
@@ -5,6 +5,7 @@
#include "khash.h"
#include "pack.h"
#include "pack-objects.h"
+#include "string-list.h"
struct commit;
struct repository;
@@ -49,6 +50,7 @@ void traverse_bitmap_commit_list(struct bitmap_index *,
struct rev_info *revs,
show_reachable_fn show_reachable);
void test_bitmap_walk(struct rev_info *revs);
+int test_bitmap_commits(struct repository *r);
struct bitmap_index *prepare_bitmap_walk(struct rev_info *revs,
struct list_objects_filter_options *filter);
int reuse_partial_packfile_from_bitmap(struct bitmap_index *,
@@ -90,4 +92,6 @@ void bitmap_writer_finish(struct pack_idx_entry **index,
const char *filename,
uint16_t options);
+const struct string_list *bitmap_preferred_tips(struct repository *r);
+
#endif
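
A small sketch of how a consumer might walk the new pack.preferBitmapTips values; for_each_string_list_item() and the_repository are existing helpers, and the printing is purely illustrative:

#include "cache.h"
#include "pack-bitmap.h"

static void report_preferred_tips(void)
{
	const struct string_list *tips = bitmap_preferred_tips(the_repository);
	struct string_list_item *item;

	if (!tips)
		return; /* no pack.preferBitmapTips configured */

	for_each_string_list_item(item, tips)
		printf("bitmap-preferred ref prefix: %s\n", item->string);
}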
diff --git a/pack-revindex.c b/pack-revindex.c
index 4262530..0e4a31d 100644
--- a/pack-revindex.c
+++ b/pack-revindex.c
@@ -3,6 +3,7 @@
#include "object-store.h"
#include "packfile.h"
#include "config.h"
+#include "midx.h"
struct revindex_entry {
off_t offset;
@@ -293,6 +294,43 @@ int load_pack_revindex(struct packed_git *p)
return -1;
}
+int load_midx_revindex(struct multi_pack_index *m)
+{
+ char *revindex_name;
+ int ret;
+ if (m->revindex_data)
+ return 0;
+
+ revindex_name = get_midx_rev_filename(m);
+
+ ret = load_revindex_from_disk(revindex_name,
+ m->num_objects,
+ &m->revindex_map,
+ &m->revindex_len);
+ if (ret)
+ goto cleanup;
+
+ m->revindex_data = (const uint32_t *)((const char *)m->revindex_map + RIDX_HEADER_SIZE);
+
+cleanup:
+ free(revindex_name);
+ return ret;
+}
+
+int close_midx_revindex(struct multi_pack_index *m)
+{
+ if (!m || !m->revindex_map)
+ return 0;
+
+ munmap((void*)m->revindex_map, m->revindex_len);
+
+ m->revindex_map = NULL;
+ m->revindex_data = NULL;
+ m->revindex_len = 0;
+
+ return 0;
+}
+
int offset_to_pack_pos(struct packed_git *p, off_t ofs, uint32_t *pos)
{
unsigned lo, hi;
@@ -347,3 +385,91 @@ off_t pack_pos_to_offset(struct packed_git *p, uint32_t pos)
else
return nth_packed_object_offset(p, pack_pos_to_index(p, pos));
}
+
+uint32_t pack_pos_to_midx(struct multi_pack_index *m, uint32_t pos)
+{
+ if (!m->revindex_data)
+ BUG("pack_pos_to_midx: reverse index not yet loaded");
+ if (m->num_objects <= pos)
+ BUG("pack_pos_to_midx: out-of-bounds object at %"PRIu32, pos);
+ return get_be32(m->revindex_data + pos);
+}
+
+struct midx_pack_key {
+ uint32_t pack;
+ off_t offset;
+
+ uint32_t preferred_pack;
+ struct multi_pack_index *midx;
+};
+
+static int midx_pack_order_cmp(const void *va, const void *vb)
+{
+ const struct midx_pack_key *key = va;
+ struct multi_pack_index *midx = key->midx;
+
+ uint32_t versus = pack_pos_to_midx(midx, (uint32_t*)vb - (const uint32_t *)midx->revindex_data);
+ uint32_t versus_pack = nth_midxed_pack_int_id(midx, versus);
+ off_t versus_offset;
+
+ uint32_t key_preferred = key->pack == key->preferred_pack;
+ uint32_t versus_preferred = versus_pack == key->preferred_pack;
+
+ /*
+ * First, compare the preferred-ness, noting that the preferred pack
+ * comes first.
+ */
+ if (key_preferred && !versus_preferred)
+ return -1;
+ else if (!key_preferred && versus_preferred)
+ return 1;
+
+ /* Then, break ties first by comparing the pack IDs. */
+ if (key->pack < versus_pack)
+ return -1;
+ else if (key->pack > versus_pack)
+ return 1;
+
+ /* Finally, break ties by comparing offsets within a pack. */
+ versus_offset = nth_midxed_offset(midx, versus);
+ if (key->offset < versus_offset)
+ return -1;
+ else if (key->offset > versus_offset)
+ return 1;
+
+ return 0;
+}
+
+int midx_to_pack_pos(struct multi_pack_index *m, uint32_t at, uint32_t *pos)
+{
+ struct midx_pack_key key;
+ uint32_t *found;
+
+ if (!m->revindex_data)
+ BUG("midx_to_pack_pos: reverse index not yet loaded");
+ if (m->num_objects <= at)
+ BUG("midx_to_pack_pos: out-of-bounds object at %"PRIu32, at);
+
+ key.pack = nth_midxed_pack_int_id(m, at);
+ key.offset = nth_midxed_offset(m, at);
+ key.midx = m;
+ /*
+ * The preferred pack sorts first, so determine its identifier by
+ * looking at the first object in pseudo-pack order.
+ *
+ * Note that if no --preferred-pack is explicitly given when writing a
+	 * multi-pack index, then whichever pack has the lowest identifier is
+	 * implicitly preferred (and all of its objects are included, since
+	 * ties between duplicate objects are broken first by pack identifier).
+ */
+ key.preferred_pack = nth_midxed_pack_int_id(m, pack_pos_to_midx(m, 0));
+
+ found = bsearch(&key, m->revindex_data, m->num_objects,
+ sizeof(*m->revindex_data), midx_pack_order_cmp);
+
+ if (!found)
+ return error("bad offset for revindex");
+
+ *pos = found - m->revindex_data;
+ return 0;
+}
diff --git a/pack-revindex.h b/pack-revindex.h
index ba7c82c..479b8f2 100644
--- a/pack-revindex.h
+++ b/pack-revindex.h
@@ -14,6 +14,20 @@
*
* - offset: the byte offset within the .pack file at which the object contents
* can be found
+ *
+ * The revindex can also be used with a multi-pack index (MIDX). In this
+ * setting:
+ *
+ * - index position refers to an object's numeric position within the MIDX
+ *
+ * - pack position refers to an object's position within a non-existent pack
+ * described by the MIDX. The pack structure is described in
+ * Documentation/technical/pack-format.txt.
+ *
+ * It is effectively a concatenation of all packs in the MIDX (ordered by
+ * their numeric ID within the MIDX), with the objects of each pack kept
+ * in their original order, with duplicates removed, and with the
+ * preferred pack (if any) placed first.
*/
@@ -24,6 +38,7 @@
#define GIT_TEST_REV_INDEX_DIE_IN_MEMORY "GIT_TEST_REV_INDEX_DIE_IN_MEMORY"
struct packed_git;
+struct multi_pack_index;
/*
* load_pack_revindex populates the revindex's internal data-structures for the
@@ -35,6 +50,22 @@ struct packed_git;
int load_pack_revindex(struct packed_git *p);
/*
+ * load_midx_revindex loads the '.rev' file corresponding to the given
+ * multi-pack index by mmap-ing it and assigning pointers in the
+ * multi_pack_index to point at it.
+ *
+ * A negative number is returned on error.
+ */
+int load_midx_revindex(struct multi_pack_index *m);
+
+/*
+ * Frees resources associated with a multi-pack reverse index.
+ *
+ * A negative number is returned on error.
+ */
+int close_midx_revindex(struct multi_pack_index *m);
+
+/*
* offset_to_pack_pos converts an object offset to a pack position. This
* function returns zero on success, and a negative number otherwise. The
* parameter 'pos' is usable only on success.
@@ -71,4 +102,26 @@ uint32_t pack_pos_to_index(struct packed_git *p, uint32_t pos);
*/
off_t pack_pos_to_offset(struct packed_git *p, uint32_t pos);
+/*
+ * pack_pos_to_midx converts the object at position "pos" within the MIDX
+ * pseudo-pack into a MIDX position.
+ *
+ * If the reverse index has not yet been loaded, or the position is out of
+ * bounds, this function aborts.
+ *
+ * This function runs in constant time.
+ */
+uint32_t pack_pos_to_midx(struct multi_pack_index *m, uint32_t pos);
+
+/*
+ * midx_to_pack_pos converts from the MIDX-relative position at "at" to the
+ * corresponding pack position.
+ *
+ * If the reverse index has not yet been loaded, or the position is out of
+ * bounds, this function aborts.
+ *
+ * This function runs in time O(log N) with the number of objects in the MIDX.
+ */
+int midx_to_pack_pos(struct multi_pack_index *midx, uint32_t at, uint32_t *pos);
+
#endif
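
A hedged sketch tying the new declarations together: load the MIDX's .rev file, map each pseudo-pack position back to its MIDX position, and print which pack and offset it refers to (the output format is made up for illustration):

#include "cache.h"
#include "midx.h"
#include "pack-revindex.h"

static void dump_pseudo_pack_order(struct multi_pack_index *m)
{
	uint32_t pos;

	if (load_midx_revindex(m) < 0)
		return;

	for (pos = 0; pos < m->num_objects; pos++) {
		uint32_t nr = pack_pos_to_midx(m, pos); /* MIDX position */

		printf("%"PRIu32": pack=%"PRIu32" offset=%"PRIuMAX"\n",
		       nr, nth_midxed_pack_int_id(m, nr),
		       (uintmax_t)nth_midxed_offset(m, nr));
	}

	close_midx_revindex(m);
}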
diff --git a/pack-write.c b/pack-write.c
index 2ca85a9..f1fc3ec 100644
--- a/pack-write.c
+++ b/pack-write.c
@@ -201,21 +201,12 @@ static void write_rev_header(struct hashfile *f)
}
static void write_rev_index_positions(struct hashfile *f,
- struct pack_idx_entry **objects,
+ uint32_t *pack_order,
uint32_t nr_objects)
{
- uint32_t *pack_order;
uint32_t i;
-
- ALLOC_ARRAY(pack_order, nr_objects);
- for (i = 0; i < nr_objects; i++)
- pack_order[i] = i;
- QSORT_S(pack_order, nr_objects, pack_order_cmp, objects);
-
for (i = 0; i < nr_objects; i++)
hashwrite_be32(f, pack_order[i]);
-
- free(pack_order);
}
static void write_rev_trailer(struct hashfile *f, const unsigned char *hash)
@@ -229,6 +220,29 @@ const char *write_rev_file(const char *rev_name,
const unsigned char *hash,
unsigned flags)
{
+ uint32_t *pack_order;
+ uint32_t i;
+ const char *ret;
+
+ ALLOC_ARRAY(pack_order, nr_objects);
+ for (i = 0; i < nr_objects; i++)
+ pack_order[i] = i;
+ QSORT_S(pack_order, nr_objects, pack_order_cmp, objects);
+
+ ret = write_rev_file_order(rev_name, pack_order, nr_objects, hash,
+ flags);
+
+ free(pack_order);
+
+ return ret;
+}
+
+const char *write_rev_file_order(const char *rev_name,
+ uint32_t *pack_order,
+ uint32_t nr_objects,
+ const unsigned char *hash,
+ unsigned flags)
+{
struct hashfile *f;
int fd;
@@ -262,7 +276,7 @@ const char *write_rev_file(const char *rev_name,
write_rev_header(f);
- write_rev_index_positions(f, objects, nr_objects);
+ write_rev_index_positions(f, pack_order, nr_objects);
write_rev_trailer(f, hash);
if (rev_name && adjust_shared_perm(rev_name) < 0)
diff --git a/pack.h b/pack.h
index 857cbd5..fa13954 100644
--- a/pack.h
+++ b/pack.h
@@ -94,6 +94,7 @@ struct ref;
void write_promisor_file(const char *promisor_name, struct ref **sought, int nr_sought);
const char *write_rev_file(const char *rev_name, struct pack_idx_entry **objects, uint32_t nr_objects, const unsigned char *hash, unsigned flags);
+const char *write_rev_file_order(const char *rev_name, uint32_t *pack_order, uint32_t nr_objects, const unsigned char *hash, unsigned flags);
/*
* The "hdr" output buffer should be at least this big, which will handle sizes
diff --git a/packfile.c b/packfile.c
index 6661f33..8668345 100644
--- a/packfile.c
+++ b/packfile.c
@@ -862,6 +862,9 @@ static void prepare_pack(const char *full_name, size_t full_name_len,
if (!strcmp(file_name, "multi-pack-index"))
return;
+ if (starts_with(file_name, "multi-pack-index") &&
+ ends_with(file_name, ".rev"))
+ return;
if (ends_with(file_name, ".idx") ||
ends_with(file_name, ".rev") ||
ends_with(file_name, ".pack") ||
diff --git a/parse-options.c b/parse-options.c
index fbea16e..e6f5676 100644
--- a/parse-options.c
+++ b/parse-options.c
@@ -625,6 +625,8 @@ static int show_gitcomp(const struct option *opts, int show_all)
*
* Right now this is only used to preprocess and substitute
* OPTION_ALIAS.
+ *
+ * The returned options should be freed using free_preprocessed_options.
*/
static struct option *preprocess_options(struct parse_opt_ctx_t *ctx,
const struct option *options)
@@ -678,6 +680,7 @@ static struct option *preprocess_options(struct parse_opt_ctx_t *ctx,
newopt[i].short_name = short_name;
newopt[i].long_name = long_name;
newopt[i].help = strbuf_detach(&help, NULL);
+ newopt[i].flags |= PARSE_OPT_FROM_ALIAS;
break;
}
@@ -693,6 +696,20 @@ static struct option *preprocess_options(struct parse_opt_ctx_t *ctx,
return newopt;
}
+static void free_preprocessed_options(struct option *options)
+{
+ int i;
+
+ if (!options)
+ return;
+
+ for (i = 0; options[i].type != OPTION_END; i++) {
+ if (options[i].flags & PARSE_OPT_FROM_ALIAS)
+ free((void *)options[i].help);
+ }
+ free(options);
+}
+
static int usage_with_options_internal(struct parse_opt_ctx_t *,
const char * const *,
const struct option *, int, int);
@@ -870,7 +887,7 @@ int parse_options(int argc, const char **argv, const char *prefix,
}
precompose_argv_prefix(argc, argv, NULL);
- free(real_options);
+ free_preprocessed_options(real_options);
free(ctx.alias_groups);
return parse_options_end(&ctx);
}
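
For context on why preprocess_options() duplicates help strings at all, here is a rough sketch of an aliased option (the option names are hypothetical); the preprocessed copy of "msg" gets a strdup'd help text, which the new PARSE_OPT_FROM_ALIAS flag lets free_preprocessed_options() release:

static const char *message;
static struct option options[] = {
	OPT_STRING('m', "message", &message, N_("message"),
		   N_("use the given message")),
	OPT_ALIAS(0, "msg", "message"),
	OPT_END()
};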
diff --git a/parse-options.h b/parse-options.h
index ff6506a..a845a9d 100644
--- a/parse-options.h
+++ b/parse-options.h
@@ -28,26 +28,27 @@ enum parse_opt_type {
};
enum parse_opt_flags {
- PARSE_OPT_KEEP_DASHDASH = 1,
- PARSE_OPT_STOP_AT_NON_OPTION = 2,
- PARSE_OPT_KEEP_ARGV0 = 4,
- PARSE_OPT_KEEP_UNKNOWN = 8,
- PARSE_OPT_NO_INTERNAL_HELP = 16,
- PARSE_OPT_ONE_SHOT = 32
+ PARSE_OPT_KEEP_DASHDASH = 1 << 0,
+ PARSE_OPT_STOP_AT_NON_OPTION = 1 << 1,
+ PARSE_OPT_KEEP_ARGV0 = 1 << 2,
+ PARSE_OPT_KEEP_UNKNOWN = 1 << 3,
+ PARSE_OPT_NO_INTERNAL_HELP = 1 << 4,
+ PARSE_OPT_ONE_SHOT = 1 << 5,
};
enum parse_opt_option_flags {
- PARSE_OPT_OPTARG = 1,
- PARSE_OPT_NOARG = 2,
- PARSE_OPT_NONEG = 4,
- PARSE_OPT_HIDDEN = 8,
- PARSE_OPT_LASTARG_DEFAULT = 16,
- PARSE_OPT_NODASH = 32,
- PARSE_OPT_LITERAL_ARGHELP = 64,
- PARSE_OPT_SHELL_EVAL = 256,
- PARSE_OPT_NOCOMPLETE = 512,
- PARSE_OPT_COMP_ARG = 1024,
- PARSE_OPT_CMDMODE = 2048
+ PARSE_OPT_OPTARG = 1 << 0,
+ PARSE_OPT_NOARG = 1 << 1,
+ PARSE_OPT_NONEG = 1 << 2,
+ PARSE_OPT_HIDDEN = 1 << 3,
+ PARSE_OPT_LASTARG_DEFAULT = 1 << 4,
+ PARSE_OPT_NODASH = 1 << 5,
+ PARSE_OPT_LITERAL_ARGHELP = 1 << 6,
+ PARSE_OPT_FROM_ALIAS = 1 << 7,
+ PARSE_OPT_SHELL_EVAL = 1 << 8,
+ PARSE_OPT_NOCOMPLETE = 1 << 9,
+ PARSE_OPT_COMP_ARG = 1 << 10,
+ PARSE_OPT_CMDMODE = 1 << 11,
};
enum parse_opt_result {
diff --git a/pkt-line.c b/pkt-line.c
index d633005..0194137 100644
--- a/pkt-line.c
+++ b/pkt-line.c
@@ -196,17 +196,26 @@ int packet_write_fmt_gently(int fd, const char *fmt, ...)
static int packet_write_gently(const int fd_out, const char *buf, size_t size)
{
- static char packet_write_buffer[LARGE_PACKET_MAX];
+ char header[4];
size_t packet_size;
- if (size > sizeof(packet_write_buffer) - 4)
+ if (size > LARGE_PACKET_DATA_MAX)
return error(_("packet write failed - data exceeds max packet size"));
packet_trace(buf, size, 1);
packet_size = size + 4;
- set_packet_header(packet_write_buffer, packet_size);
- memcpy(packet_write_buffer + 4, buf, size);
- if (write_in_full(fd_out, packet_write_buffer, packet_size) < 0)
+
+ set_packet_header(header, packet_size);
+
+ /*
+	 * Write the header and the buffer in 2 parts so that we do
+	 * not need to allocate a temporary buffer, rely on a static
+	 * buffer (which is not thread-safe), or put a large buffer
+	 * on the stack (which is risky for threads with small stacks).
+ */
+
+ if (write_in_full(fd_out, header, 4) < 0 ||
+ write_in_full(fd_out, buf, size) < 0)
return error(_("packet write failed"));
return 0;
}
@@ -242,26 +251,27 @@ void packet_buf_write_len(struct strbuf *buf, const char *data, size_t len)
packet_trace(data, len, 1);
}
-int write_packetized_from_fd(int fd_in, int fd_out)
+int write_packetized_from_fd_no_flush(int fd_in, int fd_out)
{
- static char buf[LARGE_PACKET_DATA_MAX];
+ char *buf = xmalloc(LARGE_PACKET_DATA_MAX);
int err = 0;
ssize_t bytes_to_write;
while (!err) {
- bytes_to_write = xread(fd_in, buf, sizeof(buf));
- if (bytes_to_write < 0)
+ bytes_to_write = xread(fd_in, buf, LARGE_PACKET_DATA_MAX);
+ if (bytes_to_write < 0) {
+ free(buf);
return COPY_READ_ERROR;
+ }
if (bytes_to_write == 0)
break;
err = packet_write_gently(fd_out, buf, bytes_to_write);
}
- if (!err)
- err = packet_flush_gently(fd_out);
+ free(buf);
return err;
}
-int write_packetized_from_buf(const char *src_in, size_t len, int fd_out)
+int write_packetized_from_buf_no_flush(const char *src_in, size_t len, int fd_out)
{
int err = 0;
size_t bytes_written = 0;
@@ -277,8 +287,6 @@ int write_packetized_from_buf(const char *src_in, size_t len, int fd_out)
err = packet_write_gently(fd_out, src_in + bytes_written, bytes_to_write);
bytes_written += bytes_to_write;
}
- if (!err)
- err = packet_flush_gently(fd_out);
return err;
}
@@ -298,8 +306,11 @@ static int get_packet_data(int fd, char **src_buf, size_t *src_size,
*src_size -= ret;
} else {
ret = read_in_full(fd, dst, size);
- if (ret < 0)
+ if (ret < 0) {
+ if (options & PACKET_READ_GENTLE_ON_READ_ERROR)
+ return error_errno(_("read error"));
die_errno(_("read error"));
+ }
}
/* And complain if we didn't get enough bytes to satisfy the read. */
@@ -307,6 +318,8 @@ static int get_packet_data(int fd, char **src_buf, size_t *src_size,
if (options & PACKET_READ_GENTLE_ON_EOF)
return -1;
+ if (options & PACKET_READ_GENTLE_ON_READ_ERROR)
+ return error(_("the remote end hung up unexpectedly"));
die(_("the remote end hung up unexpectedly"));
}
@@ -335,6 +348,9 @@ enum packet_read_status packet_read_with_status(int fd, char **src_buffer,
len = packet_length(linelen);
if (len < 0) {
+ if (options & PACKET_READ_GENTLE_ON_READ_ERROR)
+ return error(_("protocol error: bad line length "
+ "character: %.4s"), linelen);
die(_("protocol error: bad line length character: %.4s"), linelen);
} else if (!len) {
packet_trace("0000", 4, 0);
@@ -349,12 +365,19 @@ enum packet_read_status packet_read_with_status(int fd, char **src_buffer,
*pktlen = 0;
return PACKET_READ_RESPONSE_END;
} else if (len < 4) {
+ if (options & PACKET_READ_GENTLE_ON_READ_ERROR)
+ return error(_("protocol error: bad line length %d"),
+ len);
die(_("protocol error: bad line length %d"), len);
}
len -= 4;
- if ((unsigned)len >= size)
+ if ((unsigned)len >= size) {
+ if (options & PACKET_READ_GENTLE_ON_READ_ERROR)
+ return error(_("protocol error: bad line length %d"),
+ len);
die(_("protocol error: bad line length %d"), len);
+ }
if (get_packet_data(fd, src_buffer, src_len, buffer, len, options) < 0) {
*pktlen = -1;
@@ -421,7 +444,7 @@ char *packet_read_line_buf(char **src, size_t *src_len, int *dst_len)
return packet_read_line_generic(-1, src, src_len, dst_len);
}
-ssize_t read_packetized_to_strbuf(int fd_in, struct strbuf *sb_out)
+ssize_t read_packetized_to_strbuf(int fd_in, struct strbuf *sb_out, int options)
{
int packet_len;
@@ -437,7 +460,7 @@ ssize_t read_packetized_to_strbuf(int fd_in, struct strbuf *sb_out)
* that there is already room for the extra byte.
*/
sb_out->buf + sb_out->len, LARGE_PACKET_DATA_MAX+1,
- PACKET_READ_GENTLE_ON_EOF);
+ options);
if (packet_len <= 0)
break;
sb_out->len += packet_len;
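
Since the writers above no longer emit the terminating flush themselves, callers are now expected to flush explicitly, roughly like this (a sketch; send_payload, fd_out and the error message are hypothetical):

static int send_payload(const char *buf, size_t len, int fd_out)
{
	if (write_packetized_from_buf_no_flush(buf, len, fd_out) ||
	    packet_flush_gently(fd_out))
		return error(_("failed to send payload"));
	return 0;
}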
diff --git a/pkt-line.h b/pkt-line.h
index 8c90daa..5af5f45 100644
--- a/pkt-line.h
+++ b/pkt-line.h
@@ -32,8 +32,8 @@ void packet_buf_write(struct strbuf *buf, const char *fmt, ...) __attribute__((f
void packet_buf_write_len(struct strbuf *buf, const char *data, size_t len);
int packet_flush_gently(int fd);
int packet_write_fmt_gently(int fd, const char *fmt, ...) __attribute__((format (printf, 2, 3)));
-int write_packetized_from_fd(int fd_in, int fd_out);
-int write_packetized_from_buf(const char *src_in, size_t len, int fd_out);
+int write_packetized_from_fd_no_flush(int fd_in, int fd_out);
+int write_packetized_from_buf_no_flush(const char *src_in, size_t len, int fd_out);
/*
* Read a packetized line into the buffer, which must be at least size bytes
@@ -68,10 +68,15 @@ int write_packetized_from_buf(const char *src_in, size_t len, int fd_out);
*
* If options contains PACKET_READ_DIE_ON_ERR_PACKET, it dies when it sees an
* ERR packet.
+ *
+ * If options contains PACKET_READ_GENTLE_ON_READ_ERROR, we will not die
+ * on read errors, but instead return -1. However, we may still die on an
+ * ERR packet (if requested).
*/
-#define PACKET_READ_GENTLE_ON_EOF (1u<<0)
-#define PACKET_READ_CHOMP_NEWLINE (1u<<1)
-#define PACKET_READ_DIE_ON_ERR_PACKET (1u<<2)
+#define PACKET_READ_GENTLE_ON_EOF (1u<<0)
+#define PACKET_READ_CHOMP_NEWLINE (1u<<1)
+#define PACKET_READ_DIE_ON_ERR_PACKET (1u<<2)
+#define PACKET_READ_GENTLE_ON_READ_ERROR (1u<<3)
int packet_read(int fd, char **src_buffer, size_t *src_len, char
*buffer, unsigned size, int options);
@@ -131,7 +136,7 @@ char *packet_read_line_buf(char **src_buf, size_t *src_len, int *size);
/*
* Reads a stream of variable sized packets until a flush packet is detected.
*/
-ssize_t read_packetized_to_strbuf(int fd_in, struct strbuf *sb_out);
+ssize_t read_packetized_to_strbuf(int fd_in, struct strbuf *sb_out, int options);
/*
* Receive multiplexed output stream over git native protocol.
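
A small sketch of a reader that wants to survive protocol errors instead of dying, combining the existing gentle-EOF flag with the new gentle-read-error flag (the wrapper itself is hypothetical):

static int read_one_packet(int fd, char *buf, unsigned size)
{
	int len = packet_read(fd, NULL, NULL, buf, size,
			      PACKET_READ_CHOMP_NEWLINE |
			      PACKET_READ_GENTLE_ON_EOF |
			      PACKET_READ_GENTLE_ON_READ_ERROR);

	if (len < 0)
		return -1; /* EOF, or error() has already reported the problem */
	return len;
}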
diff --git a/ref-filter.c b/ref-filter.c
index f0bd32f..a0adb45 100644
--- a/ref-filter.c
+++ b/ref-filter.c
@@ -1608,7 +1608,7 @@ static int get_object(struct ref_array_item *ref, int deref, struct object **obj
if (oi->info.contentp) {
*obj = parse_object_buffer(the_repository, &oi->oid, oi->type, oi->size, oi->content, &eaten);
- if (!obj) {
+ if (!*obj) {
if (!eaten)
free(oi->content);
return strbuf_addf_ret(err, -1, _("parse_object_buffer failed on %s for %s"),
diff --git a/revision.h b/revision.h
index a20a530..a24f72d 100644
--- a/revision.h
+++ b/revision.h
@@ -236,7 +236,7 @@ struct rev_info {
const char *mime_boundary;
const char *patch_suffix;
int numbered_files;
- int reroll_count;
+ const char *reroll_count;
char *message_id;
struct ident_split from_ident;
struct string_list *ref_message_ids;
diff --git a/sequencer.c b/sequencer.c
index b4c63ae..fd183b5 100644
--- a/sequencer.c
+++ b/sequencer.c
@@ -2032,13 +2032,24 @@ static void record_in_rewritten(struct object_id *oid,
flush_rewritten_pending();
}
+static int should_edit(struct replay_opts *opts) {
+ if (opts->edit < 0)
+ /*
+ * Note that we only handle the case of non-conflicted
+ * commits; continue_single_pick() handles the conflicted
+ * commits itself instead of calling this function.
+ */
+ return (opts->action == REPLAY_REVERT && isatty(0)) ? 1 : 0;
+ return opts->edit;
+}
+
static int do_pick_commit(struct repository *r,
struct todo_item *item,
struct replay_opts *opts,
int final_fixup, int *check_todo)
{
- unsigned int flags = opts->edit ? EDIT_MSG : 0;
- const char *msg_file = opts->edit ? NULL : git_path_merge_msg(r);
+ unsigned int flags = should_edit(opts) ? EDIT_MSG : 0;
+ const char *msg_file = should_edit(opts) ? NULL : git_path_merge_msg(r);
struct object_id head;
struct commit *base, *next, *parent;
const char *base_label, *next_label;
@@ -3283,9 +3294,9 @@ static int save_opts(struct replay_opts *opts)
if (opts->no_commit)
res |= git_config_set_in_file_gently(opts_file,
"options.no-commit", "true");
- if (opts->edit)
- res |= git_config_set_in_file_gently(opts_file,
- "options.edit", "true");
+ if (opts->edit >= 0)
+ res |= git_config_set_in_file_gently(opts_file, "options.edit",
+ opts->edit ? "true" : "false");
if (opts->allow_empty)
res |= git_config_set_in_file_gently(opts_file,
"options.allow-empty", "true");
@@ -4259,7 +4270,7 @@ static int pick_commits(struct repository *r,
prev_reflog_action = xstrdup(getenv(GIT_REFLOG_ACTION));
if (opts->allow_ff)
assert(!(opts->signoff || opts->no_commit ||
- opts->record_origin || opts->edit ||
+ opts->record_origin || should_edit(opts) ||
opts->committer_date_is_author_date ||
opts->ignore_date));
if (read_and_refresh_cache(r, opts))
@@ -4552,14 +4563,33 @@ cleanup_head_ref:
return sequencer_remove_state(opts);
}
-static int continue_single_pick(struct repository *r)
+static int continue_single_pick(struct repository *r, struct replay_opts *opts)
{
- const char *argv[] = { "commit", NULL };
+ struct strvec argv = STRVEC_INIT;
+ int ret;
if (!refs_ref_exists(get_main_ref_store(r), "CHERRY_PICK_HEAD") &&
!refs_ref_exists(get_main_ref_store(r), "REVERT_HEAD"))
return error(_("no cherry-pick or revert in progress"));
- return run_command_v_opt(argv, RUN_GIT_CMD);
+
+ strvec_push(&argv, "commit");
+
+ /*
+ * continue_single_pick() handles the case of recovering from a
+ * conflict. should_edit() doesn't handle that case; for a conflict,
+ * we want to edit if the user asked for it, or if they didn't specify
+ * and stdin is a tty.
+ */
+ if (!opts->edit || (opts->edit < 0 && !isatty(0)))
+ /*
+ * Include --cleanup=strip as well because we don't want the
+ * "# Conflicts:" messages.
+ */
+ strvec_pushl(&argv, "--no-edit", "--cleanup=strip", NULL);
+
+ ret = run_command_v_opt(argv.v, RUN_GIT_CMD);
+ strvec_clear(&argv);
+ return ret;
}
static int commit_staged_changes(struct repository *r,
@@ -4729,7 +4759,7 @@ int sequencer_continue(struct repository *r, struct replay_opts *opts)
goto release_todo_list;
}
} else if (!file_exists(get_todo_path(opts)))
- return continue_single_pick(r);
+ return continue_single_pick(r, opts);
else if ((res = read_populate_todo(r, &todo_list, opts)))
goto release_todo_list;
@@ -4738,7 +4768,7 @@ int sequencer_continue(struct repository *r, struct replay_opts *opts)
if (refs_ref_exists(get_main_ref_store(r),
"CHERRY_PICK_HEAD") ||
refs_ref_exists(get_main_ref_store(r), "REVERT_HEAD")) {
- res = continue_single_pick(r);
+ res = continue_single_pick(r, opts);
if (res)
goto release_todo_list;
}
diff --git a/sequencer.h b/sequencer.h
index f8b2e4a..d57d8ea 100644
--- a/sequencer.h
+++ b/sequencer.h
@@ -31,8 +31,10 @@ enum commit_msg_cleanup_mode {
struct replay_opts {
enum replay_action action;
- /* Boolean options */
+ /* Tri-state options: unspecified, false, or true */
int edit;
+
+ /* Boolean options */
int record_origin;
int no_commit;
int signoff;
@@ -71,7 +73,7 @@ struct replay_opts {
/* Only used by REPLAY_NONE */
struct rev_info *revs;
};
-#define REPLAY_OPTS_INIT { .action = -1, .current_fixups = STRBUF_INIT }
+#define REPLAY_OPTS_INIT { .edit = -1, .action = -1, .current_fixups = STRBUF_INIT }
/*
* Note that ordering matters in this enum. Not only must it match the mapping
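
A minimal sketch of how a front-end keeps the tri-state intact: REPLAY_OPTS_INIT seeds edit with -1, and OPT_BOOL() only overwrites it with 0 or 1 when --edit or --no-edit is actually given, so should_edit() can still tell "unspecified" apart (the snippet mirrors how the cherry-pick/revert builtins are expected to wire this up, but is only illustrative):

struct replay_opts opts = REPLAY_OPTS_INIT; /* opts.edit == -1 */
struct option options[] = {
	OPT_BOOL('e', "edit", &opts.edit, N_("edit the commit message")),
	OPT_END()
};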
diff --git a/setup.c b/setup.c
index c04cd25..59e2fac 100644
--- a/setup.c
+++ b/setup.c
@@ -1274,18 +1274,10 @@ const char *setup_git_directory_gently(int *nongit_ok)
* the GIT_PREFIX environment variable must always match. For details
* see Documentation/config/alias.txt.
*/
- if (nongit_ok && *nongit_ok) {
+ if (nongit_ok && *nongit_ok)
startup_info->have_repository = 0;
- startup_info->prefix = NULL;
- setenv(GIT_PREFIX_ENVIRONMENT, "", 1);
- } else {
+ else
startup_info->have_repository = 1;
- startup_info->prefix = prefix;
- if (prefix)
- setenv(GIT_PREFIX_ENVIRONMENT, prefix, 1);
- else
- setenv(GIT_PREFIX_ENVIRONMENT, "", 1);
- }
/*
* Not all paths through the setup code will call 'set_git_dir()' (which
@@ -1311,6 +1303,22 @@ const char *setup_git_directory_gently(int *nongit_ok)
if (startup_info->have_repository)
repo_set_hash_algo(the_repository, repo_fmt.hash_algo);
}
+ /*
+	 * Since precompose_string_if_needed() needs to look at the
+	 * core.precomposeunicode configuration, this has to happen
+	 * after the above block that finds out where the repository
+	 * is, i.e. after the preparation that makes it possible to
+	 * call git_config_get_bool().
+ */
+ if (prefix) {
+ prefix = precompose_string_if_needed(prefix);
+ startup_info->prefix = prefix;
+ setenv(GIT_PREFIX_ENVIRONMENT, prefix, 1);
+ } else {
+ startup_info->prefix = NULL;
+ setenv(GIT_PREFIX_ENVIRONMENT, "", 1);
+ }
+
strbuf_release(&dir);
strbuf_release(&gitdir);
diff --git a/simple-ipc.h b/simple-ipc.h
new file mode 100644
index 0000000..dc3606e
--- /dev/null
+++ b/simple-ipc.h
@@ -0,0 +1,239 @@
+#ifndef GIT_SIMPLE_IPC_H
+#define GIT_SIMPLE_IPC_H
+
+/*
+ * See Documentation/technical/api-simple-ipc.txt
+ */
+
+#if defined(GIT_WINDOWS_NATIVE) || !defined(NO_UNIX_SOCKETS)
+#define SUPPORTS_SIMPLE_IPC
+#endif
+
+#ifdef SUPPORTS_SIMPLE_IPC
+#include "pkt-line.h"
+
+/*
+ * Simple IPC Client Side API.
+ */
+
+enum ipc_active_state {
+ /*
+ * The pipe/socket exists and the daemon is waiting for connections.
+ */
+ IPC_STATE__LISTENING = 0,
+
+ /*
+ * The pipe/socket exists, but the daemon is not listening.
+ * Perhaps it is very busy.
+ * Perhaps the daemon died without deleting the path.
+ * Perhaps it is shutting down and draining existing clients.
+ * Perhaps it is dead, but other clients are lingering and
+ * still holding a reference to the pathname.
+ */
+ IPC_STATE__NOT_LISTENING,
+
+ /*
+ * The requested pathname is bogus and no amount of retries
+ * will fix that.
+ */
+ IPC_STATE__INVALID_PATH,
+
+ /*
+ * The requested pathname is not found. This usually means
+ * that there is no daemon present.
+ */
+ IPC_STATE__PATH_NOT_FOUND,
+
+ IPC_STATE__OTHER_ERROR,
+};
+
+struct ipc_client_connect_options {
+ /*
+ * Spin under timeout if the server is running but can't
+ * accept our connection yet. This should always be set
+ * unless you just want to poke the server and see if it
+ * is alive.
+ */
+ unsigned int wait_if_busy:1;
+
+ /*
+ * Spin under timeout if the pipe/socket is not yet present
+ * on the file system. This is useful if we just started
+ * the service and need to wait for it to become ready.
+ */
+ unsigned int wait_if_not_found:1;
+
+ /*
+ * Disallow chdir() when creating a Unix domain socket.
+ */
+ unsigned int uds_disallow_chdir:1;
+};
+
+#define IPC_CLIENT_CONNECT_OPTIONS_INIT { \
+ .wait_if_busy = 0, \
+ .wait_if_not_found = 0, \
+ .uds_disallow_chdir = 0, \
+}
+
+/*
+ * Determine if a server is listening on this named pipe or socket using
+ * platform-specific logic. This might just probe the filesystem or it
+ * might make a trivial connection to the server using this pathname.
+ */
+enum ipc_active_state ipc_get_active_state(const char *path);
+
+struct ipc_client_connection {
+ int fd;
+};
+
+/*
+ * Try to connect to the daemon on the named pipe or socket.
+ *
+ * Returns IPC_STATE__LISTENING and a connection handle.
+ *
+ * Otherwise, returns info to help decide whether to retry or to
+ * spawn/respawn the server.
+ */
+enum ipc_active_state ipc_client_try_connect(
+ const char *path,
+ const struct ipc_client_connect_options *options,
+ struct ipc_client_connection **p_connection);
+
+void ipc_client_close_connection(struct ipc_client_connection *connection);
+
+/*
+ * Used by the client to synchronously send and receive a message with
+ * the server on the provided client connection.
+ *
+ * Returns 0 when successful.
+ *
+ * Calls error() and returns non-zero otherwise.
+ */
+int ipc_client_send_command_to_connection(
+ struct ipc_client_connection *connection,
+ const char *message, struct strbuf *answer);
+
+/*
+ * Used by the client to synchronously connect and send and receive a
+ * message to the server listening at the given path.
+ *
+ * Returns 0 when successful.
+ *
+ * Calls error() and returns non-zero otherwise.
+ */
+int ipc_client_send_command(const char *path,
+ const struct ipc_client_connect_options *options,
+ const char *message, struct strbuf *answer);
+
+/*
+ * Simple IPC Server Side API.
+ */
+
+struct ipc_server_reply_data;
+
+typedef int (ipc_server_reply_cb)(struct ipc_server_reply_data *,
+ const char *response,
+ size_t response_len);
+
+/*
+ * Prototype for an application-supplied callback to process incoming
+ * client IPC messages and compose a reply. The `application_cb` should
+ * use the provided `reply_cb` and `reply_data` to send an IPC response
+ * back to the client. The `reply_cb` callback can be called multiple
+ * times for chunking purposes. A reply message is optional and may be
+ * omitted if not necessary for the application.
+ *
+ * The return value from the application callback is ignored, except
+ * that returning the value `SIMPLE_IPC_QUIT` asks the server to shut down.
+ */
+typedef int (ipc_server_application_cb)(void *application_data,
+ const char *request,
+ ipc_server_reply_cb *reply_cb,
+ struct ipc_server_reply_data *reply_data);
+
+#define SIMPLE_IPC_QUIT -2
+
+/*
+ * Opaque instance data to represent an IPC server instance.
+ */
+struct ipc_server_data;
+
+/*
+ * Control parameters for the IPC server instance.
+ * Use this to hide platform-specific settings.
+ */
+struct ipc_server_opts
+{
+ int nr_threads;
+
+ /*
+ * Disallow chdir() when creating a Unix domain socket.
+ */
+ unsigned int uds_disallow_chdir:1;
+};
+
+/*
+ * Start an IPC server instance in one or more background threads
+ * and return a handle to the pool.
+ *
+ * Returns 0 if the asynchronous server pool was started successfully.
+ * Returns -1 if not.
+ * Returns -2 if we could not start up because another server is using
+ * the socket or named pipe.
+ *
+ * When a client IPC message is received, the `application_cb` will be
+ * called (possibly on a random thread) to handle the message and
+ * optionally compose a reply message.
+ */
+int ipc_server_run_async(struct ipc_server_data **returned_server_data,
+ const char *path, const struct ipc_server_opts *opts,
+ ipc_server_application_cb *application_cb,
+ void *application_data);
+
+/*
+ * Gently signal the IPC server pool to shut down. No new client
+ * connections will be accepted, but existing connections will be
+ * allowed to complete.
+ */
+int ipc_server_stop_async(struct ipc_server_data *server_data);
+
+/*
+ * Block the calling thread until all threads in the IPC server pool
+ * have completed and been joined.
+ */
+int ipc_server_await(struct ipc_server_data *server_data);
+
+/*
+ * Close and free all resource handles associated with the IPC server
+ * pool.
+ */
+void ipc_server_free(struct ipc_server_data *server_data);
+
+/*
+ * Run an IPC server instance and block the calling thread of the
+ * current process. It does not return until the IPC server has
+ * either shut down or encountered an unrecoverable error.
+ *
+ * The IPC server handles incoming IPC messages from client processes
+ * and may use one or more background threads as necessary.
+ *
+ * Returns 0 after the server has completed successfully.
+ * Returns -1 if the server cannot be started.
+ * Returns -2 if we could not start up because another server is using
+ * the socket or named pipe.
+ *
+ * When a client IPC message is received, the `application_cb` will be
+ * called (possibly on a random thread) to handle the message and
+ * optionally compose a reply message.
+ *
+ * Note that `ipc_server_run()` is a synchronous wrapper around the
+ * above asynchronous routines. It effectively hides all of the
+ * server state and thread details from the caller and presents a
+ * simple synchronous interface.
+ */
+int ipc_server_run(const char *path, const struct ipc_server_opts *opts,
+ ipc_server_application_cb *application_cb,
+ void *application_data);
+
+#endif /* SUPPORTS_SIMPLE_IPC */
+#endif /* GIT_SIMPLE_IPC_H */
diff --git a/t/helper/test-bitmap.c b/t/helper/test-bitmap.c
new file mode 100644
index 0000000..134a1e9
--- /dev/null
+++ b/t/helper/test-bitmap.c
@@ -0,0 +1,24 @@
+#include "test-tool.h"
+#include "cache.h"
+#include "pack-bitmap.h"
+
+static int bitmap_list_commits(void)
+{
+ return test_bitmap_commits(the_repository);
+}
+
+int cmd__bitmap(int argc, const char **argv)
+{
+ setup_git_directory();
+
+ if (argc != 2)
+ goto usage;
+
+ if (!strcmp(argv[1], "list-commits"))
+ return bitmap_list_commits();
+
+usage:
+ usage("\ttest-tool bitmap list-commits");
+
+ return -1;
+}
diff --git a/t/helper/test-read-midx.c b/t/helper/test-read-midx.c
index 2430880..7c2eb11 100644
--- a/t/helper/test-read-midx.c
+++ b/t/helper/test-read-midx.c
@@ -4,7 +4,7 @@
#include "repository.h"
#include "object-store.h"
-static int read_midx_file(const char *object_dir)
+static int read_midx_file(const char *object_dir, int show_objects)
{
uint32_t i;
struct multi_pack_index *m;
@@ -43,13 +43,29 @@ static int read_midx_file(const char *object_dir)
printf("object-dir: %s\n", m->object_dir);
+ if (show_objects) {
+ struct object_id oid;
+ struct pack_entry e;
+
+ for (i = 0; i < m->num_objects; i++) {
+ nth_midxed_object_oid(&oid, m, i);
+ fill_midx_entry(the_repository, &oid, &e, m);
+
+ printf("%s %"PRIu64"\t%s\n",
+ oid_to_hex(&oid), e.offset, e.p->pack_name);
+ }
+ return 0;
+ }
+
return 0;
}
int cmd__read_midx(int argc, const char **argv)
{
- if (argc != 2)
- usage("read-midx <object-dir>");
+ if (!(argc == 2 || argc == 3))
+ usage("read-midx [--show-objects] <object-dir>");
- return read_midx_file(argv[1]);
+ if (!strcmp(argv[1], "--show-objects"))
+ return read_midx_file(argv[2], 1);
+ return read_midx_file(argv[1], 0);
}
diff --git a/t/helper/test-simple-ipc.c b/t/helper/test-simple-ipc.c
new file mode 100644
index 0000000..42040ef
--- /dev/null
+++ b/t/helper/test-simple-ipc.c
@@ -0,0 +1,787 @@
+/*
+ * test-simple-ipc.c: verify that the simple-ipc (inter-process communication) layer works.
+ */
+
+#include "test-tool.h"
+#include "cache.h"
+#include "strbuf.h"
+#include "simple-ipc.h"
+#include "parse-options.h"
+#include "thread-utils.h"
+#include "strvec.h"
+
+#ifndef SUPPORTS_SIMPLE_IPC
+int cmd__simple_ipc(int argc, const char **argv)
+{
+ die("simple IPC not available on this platform");
+}
+#else
+
+/*
+ * The test daemon defines an "application callback" that supports a
+ * series of commands (see `test_app_cb()`).
+ *
+ * Unknown commands are caught here and we send an error message back
+ * to the client process.
+ */
+static int app__unhandled_command(const char *command,
+ ipc_server_reply_cb *reply_cb,
+ struct ipc_server_reply_data *reply_data)
+{
+ struct strbuf buf = STRBUF_INIT;
+ int ret;
+
+ strbuf_addf(&buf, "unhandled command: %s", command);
+ ret = reply_cb(reply_data, buf.buf, buf.len);
+ strbuf_release(&buf);
+
+ return ret;
+}
+
+/*
+ * Reply with a single very large buffer. This is to ensure that
+ * long responses are properly handled -- whether the chunking occurs
+ * in the kernel or in the (probably pkt-line) layer.
+ */
+#define BIG_ROWS (10000)
+static int app__big_command(ipc_server_reply_cb *reply_cb,
+ struct ipc_server_reply_data *reply_data)
+{
+ struct strbuf buf = STRBUF_INIT;
+ int row;
+ int ret;
+
+ for (row = 0; row < BIG_ROWS; row++)
+ strbuf_addf(&buf, "big: %.75d\n", row);
+
+ ret = reply_cb(reply_data, buf.buf, buf.len);
+ strbuf_release(&buf);
+
+ return ret;
+}
+
+/*
+ * Reply with a series of lines. This is to ensure that we can incrementally
+ * compute the response and chunk it to the client.
+ */
+#define CHUNK_ROWS (10000)
+static int app__chunk_command(ipc_server_reply_cb *reply_cb,
+ struct ipc_server_reply_data *reply_data)
+{
+ struct strbuf buf = STRBUF_INIT;
+ int row;
+ int ret;
+
+ for (row = 0; row < CHUNK_ROWS; row++) {
+ strbuf_setlen(&buf, 0);
+ strbuf_addf(&buf, "big: %.75d\n", row);
+ ret = reply_cb(reply_data, buf.buf, buf.len);
+ }
+
+ strbuf_release(&buf);
+
+ return ret;
+}
+
+/*
+ * Slowly reply with a series of lines. This is to model an expensive-to-compute
+ * chunked response (which might happen if this callback is running
+ * in a thread and is fighting for a lock with other threads).
+ */
+#define SLOW_ROWS (1000)
+#define SLOW_DELAY_MS (10)
+static int app__slow_command(ipc_server_reply_cb *reply_cb,
+ struct ipc_server_reply_data *reply_data)
+{
+ struct strbuf buf = STRBUF_INIT;
+ int row;
+ int ret;
+
+ for (row = 0; row < SLOW_ROWS; row++) {
+ strbuf_setlen(&buf, 0);
+ strbuf_addf(&buf, "big: %.75d\n", row);
+ ret = reply_cb(reply_data, buf.buf, buf.len);
+ sleep_millisec(SLOW_DELAY_MS);
+ }
+
+ strbuf_release(&buf);
+
+ return ret;
+}
+
+/*
+ * The client sent a command followed by a (possibly very) large buffer.
+ */
+static int app__sendbytes_command(const char *received,
+ ipc_server_reply_cb *reply_cb,
+ struct ipc_server_reply_data *reply_data)
+{
+ struct strbuf buf_resp = STRBUF_INIT;
+ const char *p = "?";
+ int len_ballast = 0;
+ int k;
+ int errs = 0;
+ int ret;
+
+ if (skip_prefix(received, "sendbytes ", &p))
+ len_ballast = strlen(p);
+
+ /*
+ * Verify that the ballast is n copies of a single letter.
+ * And that the multi-threaded IO layer didn't cross the streams.
+ */
+ for (k = 1; k < len_ballast; k++)
+ if (p[k] != p[0])
+ errs++;
+
+ if (errs)
+ strbuf_addf(&buf_resp, "errs:%d\n", errs);
+ else
+ strbuf_addf(&buf_resp, "rcvd:%c%08d\n", p[0], len_ballast);
+
+ ret = reply_cb(reply_data, buf_resp.buf, buf_resp.len);
+
+ strbuf_release(&buf_resp);
+
+ return ret;
+}
+
+/*
+ * An arbitrary fixed address to verify that the application instance
+ * data is handled properly.
+ */
+static int my_app_data = 42;
+
+static ipc_server_application_cb test_app_cb;
+
+/*
+ * This is the "application callback" that sits on top of the
+ * "ipc-server". It completely defines the set of commands supported
+ * by this application.
+ */
+static int test_app_cb(void *application_data,
+ const char *command,
+ ipc_server_reply_cb *reply_cb,
+ struct ipc_server_reply_data *reply_data)
+{
+ /*
+ * Verify that we received the application-data that we passed
+ * when we started the ipc-server. (We have several layers of
+ * callbacks calling callbacks and it's easy to get things mixed
+ * up (especially when some are "void*").)
+ */
+ if (application_data != (void*)&my_app_data)
+ BUG("application_cb: application_data pointer wrong");
+
+ if (!strcmp(command, "quit")) {
+ /*
+ * The client sent a "quit" command. This is an async
+ * request for the server to shut down.
+ *
+ * We DO NOT send the client a response message
+ * (because we have nothing to say and the other
+ * server threads have not yet stopped).
+ *
+ * Tell the ipc-server layer to start shutting down.
+ * This includes: stop listening for new connections
+ * on the socket/pipe and telling all worker threads
+ * to finish/drain their outgoing responses to other
+ * clients.
+ *
+ * This DOES NOT force an immediate sync shutdown.
+ */
+ return SIMPLE_IPC_QUIT;
+ }
+
+ if (!strcmp(command, "ping")) {
+ const char *answer = "pong";
+ return reply_cb(reply_data, answer, strlen(answer));
+ }
+
+ if (!strcmp(command, "big"))
+ return app__big_command(reply_cb, reply_data);
+
+ if (!strcmp(command, "chunk"))
+ return app__chunk_command(reply_cb, reply_data);
+
+ if (!strcmp(command, "slow"))
+ return app__slow_command(reply_cb, reply_data);
+
+ if (starts_with(command, "sendbytes "))
+ return app__sendbytes_command(command, reply_cb, reply_data);
+
+ return app__unhandled_command(command, reply_cb, reply_data);
+}
+
+struct cl_args
+{
+ const char *subcommand;
+ const char *path;
+ const char *token;
+
+ int nr_threads;
+ int max_wait_sec;
+ int bytecount;
+ int batchsize;
+
+ char bytevalue;
+};
+
+static struct cl_args cl_args = {
+ .subcommand = NULL,
+ .path = "ipc-test",
+ .token = NULL,
+
+ .nr_threads = 5,
+ .max_wait_sec = 60,
+ .bytecount = 1024,
+ .batchsize = 10,
+
+ .bytevalue = 'x',
+};
+
+/*
+ * This process will run as a simple-ipc server and listen for IPC commands
+ * from client processes.
+ */
+static int daemon__run_server(void)
+{
+ int ret;
+
+ struct ipc_server_opts opts = {
+ .nr_threads = cl_args.nr_threads,
+ };
+
+ /*
+ * Synchronously run the ipc-server. We don't need any application
+ * instance data, so pass an arbitrary pointer (that we'll later
+ * verify made the round trip).
+ */
+ ret = ipc_server_run(cl_args.path, &opts, test_app_cb, (void*)&my_app_data);
+ if (ret == -2)
+ error(_("socket/pipe already in use: '%s'"), cl_args.path);
+ else if (ret == -1)
+ error_errno(_("could not start server on: '%s'"), cl_args.path);
+
+ return ret;
+}
+
+#ifndef GIT_WINDOWS_NATIVE
+/*
+ * This is adapted from `daemonize()`. Use `fork()` to directly create and
+ * run the daemon in a child process.
+ */
+static int spawn_server(pid_t *pid)
+{
+ struct ipc_server_opts opts = {
+ .nr_threads = cl_args.nr_threads,
+ };
+
+ *pid = fork();
+
+ switch (*pid) {
+ case 0:
+ if (setsid() == -1)
+ error_errno(_("setsid failed"));
+ close(0);
+ close(1);
+ close(2);
+ sanitize_stdfds();
+
+ return ipc_server_run(cl_args.path, &opts, test_app_cb,
+ (void*)&my_app_data);
+
+ case -1:
+ return error_errno(_("could not spawn daemon in the background"));
+
+ default:
+ return 0;
+ }
+}
+#else
+/*
+ * Conceptually like `daemonize()` but different because Windows does not
+ * have `fork(2)`. Spawn a normal Windows child process but without the
+ * limitations of `start_command()` and `finish_command()`.
+ */
+static int spawn_server(pid_t *pid)
+{
+ char test_tool_exe[MAX_PATH];
+ struct strvec args = STRVEC_INIT;
+ int in, out;
+
+ GetModuleFileNameA(NULL, test_tool_exe, MAX_PATH);
+
+ in = open("/dev/null", O_RDONLY);
+ out = open("/dev/null", O_WRONLY);
+
+ strvec_push(&args, test_tool_exe);
+ strvec_push(&args, "simple-ipc");
+ strvec_push(&args, "run-daemon");
+ strvec_pushf(&args, "--name=%s", cl_args.path);
+ strvec_pushf(&args, "--threads=%d", cl_args.nr_threads);
+
+ *pid = mingw_spawnvpe(args.v[0], args.v, NULL, NULL, in, out, out);
+ close(in);
+ close(out);
+
+ strvec_clear(&args);
+
+ if (*pid < 0)
+ return error(_("could not spawn daemon in the background"));
+
+ return 0;
+}
+#endif
+
+/*
+ * This is adapted from `wait_or_whine()`. Watch the child process and
+ * let it get started and begin listening for requests on the socket
+ * before reporting our success.
+ */
+static int wait_for_server_startup(pid_t pid_child)
+{
+ int status;
+ pid_t pid_seen;
+ enum ipc_active_state s;
+ time_t time_limit, now;
+
+ time(&time_limit);
+ time_limit += cl_args.max_wait_sec;
+
+ for (;;) {
+ pid_seen = waitpid(pid_child, &status, WNOHANG);
+
+ if (pid_seen == -1)
+ return error_errno(_("waitpid failed"));
+
+ else if (pid_seen == 0) {
+ /*
+ * The child is still running (this should be
+ * the normal case). Try to connect to it on
+ * the socket and see if it is ready for
+ * business.
+ *
+ * If there is another daemon already running,
+ * our child will fail to start (possibly
+ * after a timeout on the lock), but we don't
+ * care (who responds) if the socket is live.
+ */
+ s = ipc_get_active_state(cl_args.path);
+ if (s == IPC_STATE__LISTENING)
+ return 0;
+
+ time(&now);
+ if (now > time_limit)
+ return error(_("daemon not online yet"));
+
+ continue;
+ }
+
+ else if (pid_seen == pid_child) {
+ /*
+ * The new child daemon process shut down while
+ * it was starting up, so it is not listening
+ * on the socket.
+ *
+ * Try to ping the socket in the odd chance
+ * that another daemon started (or was already
+ * running) while our child was starting.
+ *
+ * Again, we don't care who services the socket.
+ */
+ s = ipc_get_active_state(cl_args.path);
+ if (s == IPC_STATE__LISTENING)
+ return 0;
+
+ /*
+ * We don't care about the WEXITSTATUS() nor
+ * any of the WIF*(status) values because
+ * `cmd__simple_ipc()` does the `!!result`
+ * trick on all function return values.
+ *
+ * So it is sufficient to just report the
+ * early shutdown as an error.
+ */
+ return error(_("daemon failed to start"));
+ }
+
+ else
+ return error(_("waitpid is confused"));
+ }
+}
+
+/*
+ * This process will start a simple-ipc server in a background process and
+ * wait for it to become ready. This is like `daemonize()` but gives us
+ * more control and better error reporting (and makes it easier to write
+ * unit tests).
+ */
+static int daemon__start_server(void)
+{
+ pid_t pid_child;
+ int ret;
+
+ /*
+ * Run the actual daemon in a background process.
+ */
+ ret = spawn_server(&pid_child);
+ if (pid_child <= 0)
+ return ret;
+
+ /*
+ * Let the parent wait for the child process to get started
+ * and begin listening for requests on the socket.
+ */
+ ret = wait_for_server_startup(pid_child);
+
+ return ret;
+}
+
+/*
+ * This process will run a quick probe to see if a simple-ipc server
+ * is active on this path.
+ *
+ * Returns 0 if the server is alive.
+ */
+static int client__probe_server(void)
+{
+ enum ipc_active_state s;
+
+ s = ipc_get_active_state(cl_args.path);
+ switch (s) {
+ case IPC_STATE__LISTENING:
+ return 0;
+
+ case IPC_STATE__NOT_LISTENING:
+ return error("no server listening at '%s'", cl_args.path);
+
+ case IPC_STATE__PATH_NOT_FOUND:
+ return error("path not found '%s'", cl_args.path);
+
+ case IPC_STATE__INVALID_PATH:
+ return error("invalid pipe/socket name '%s'", cl_args.path);
+
+ case IPC_STATE__OTHER_ERROR:
+ default:
+ return error("other error for '%s'", cl_args.path);
+ }
+}
+
+/*
+ * Send an IPC command token to an already-running server daemon and
+ * print the response.
+ *
+ * This is a simple one-word command/token that `test_app_cb()` (in the
+ * daemon process) will understand.
+ */
+static int client__send_ipc(void)
+{
+ const char *command = "(no-command)";
+ struct strbuf buf = STRBUF_INIT;
+ struct ipc_client_connect_options options
+ = IPC_CLIENT_CONNECT_OPTIONS_INIT;
+
+ if (cl_args.token && *cl_args.token)
+ command = cl_args.token;
+
+ options.wait_if_busy = 1;
+ options.wait_if_not_found = 0;
+
+ if (!ipc_client_send_command(cl_args.path, &options, command, &buf)) {
+ if (buf.len) {
+ printf("%s\n", buf.buf);
+ fflush(stdout);
+ }
+ strbuf_release(&buf);
+
+ return 0;
+ }
+
+ return error("failed to send '%s' to '%s'", command, cl_args.path);
+}
+
+/*
+ * Send an IPC command to an already-running server and ask it to
+ * shut down. "send quit" is an async request and queues a shutdown
+ * event in the server, so we spin and wait here for it to actually
+ * shut down to make the unit tests a little easier to write.
+ */
+static int client__stop_server(void)
+{
+ int ret;
+ time_t time_limit, now;
+ enum ipc_active_state s;
+
+ time(&time_limit);
+ time_limit += cl_args.max_wait_sec;
+
+ cl_args.token = "quit";
+
+ ret = client__send_ipc();
+ if (ret)
+ return ret;
+
+ for (;;) {
+ sleep_millisec(100);
+
+ s = ipc_get_active_state(cl_args.path);
+
+ if (s != IPC_STATE__LISTENING) {
+ /*
+ * The socket/pipe is gone and/or has stopped
+ * responding. Let's assume that the daemon
+ * process has exited too.
+ */
+ return 0;
+ }
+
+ time(&now);
+ if (now > time_limit)
+ return error(_("daemon has not shutdown yet"));
+ }
+}
+
+/*
+ * Send an IPC command followed by ballast to confirm that a large
+ * message can be sent and that the kernel or pkt-line layers will
+ * properly chunk it and that the daemon receives the entire message.
+ */
+static int do_sendbytes(int bytecount, char byte, const char *path,
+ const struct ipc_client_connect_options *options)
+{
+ struct strbuf buf_send = STRBUF_INIT;
+ struct strbuf buf_resp = STRBUF_INIT;
+
+ strbuf_addstr(&buf_send, "sendbytes ");
+ strbuf_addchars(&buf_send, byte, bytecount);
+
+ if (!ipc_client_send_command(path, options, buf_send.buf, &buf_resp)) {
+ strbuf_rtrim(&buf_resp);
+ printf("sent:%c%08d %s\n", byte, bytecount, buf_resp.buf);
+ fflush(stdout);
+ strbuf_release(&buf_send);
+ strbuf_release(&buf_resp);
+
+ return 0;
+ }
+
+ return error("client failed to sendbytes(%d, '%c') to '%s'",
+ bytecount, byte, path);
+}
+
+/*
+ * Send an IPC command with ballast to an already-running server daemon.
+ */
+static int client__sendbytes(void)
+{
+ struct ipc_client_connect_options options
+ = IPC_CLIENT_CONNECT_OPTIONS_INIT;
+
+ options.wait_if_busy = 1;
+ options.wait_if_not_found = 0;
+ options.uds_disallow_chdir = 0;
+
+ return do_sendbytes(cl_args.bytecount, cl_args.bytevalue, cl_args.path,
+ &options);
+}
+
+struct multiple_thread_data {
+ pthread_t pthread_id;
+ struct multiple_thread_data *next;
+ const char *path;
+ int bytecount;
+ int batchsize;
+ int sum_errors;
+ int sum_good;
+ char letter;
+};
+
+static void *multiple_thread_proc(void *_multiple_thread_data)
+{
+ struct multiple_thread_data *d = _multiple_thread_data;
+ int k;
+ struct ipc_client_connect_options options
+ = IPC_CLIENT_CONNECT_OPTIONS_INIT;
+
+ options.wait_if_busy = 1;
+ options.wait_if_not_found = 0;
+ /*
+ * A multi-threaded client should not be randomly calling chdir().
+ * The test will pass without this restriction because the test is
+ * not otherwise accessing the filesystem, but it makes us honest.
+ */
+ options.uds_disallow_chdir = 1;
+
+ trace2_thread_start("multiple");
+
+ for (k = 0; k < d->batchsize; k++) {
+ if (do_sendbytes(d->bytecount + k, d->letter, d->path, &options))
+ d->sum_errors++;
+ else
+ d->sum_good++;
+ }
+
+ trace2_thread_exit();
+ return NULL;
+}
+
+/*
+ * Start a client-side thread pool. Each thread sends a series of
+ * IPC requests. Each request is on a new connection to the server.
+ */
+static int client__multiple(void)
+{
+ struct multiple_thread_data *list = NULL;
+ int k;
+ int sum_join_errors = 0;
+ int sum_thread_errors = 0;
+ int sum_good = 0;
+
+ for (k = 0; k < cl_args.nr_threads; k++) {
+ struct multiple_thread_data *d = xcalloc(1, sizeof(*d));
+ d->next = list;
+ d->path = cl_args.path;
+ d->bytecount = cl_args.bytecount + cl_args.batchsize*(k/26);
+ d->batchsize = cl_args.batchsize;
+ d->sum_errors = 0;
+ d->sum_good = 0;
+ d->letter = 'A' + (k % 26);
+
+ if (pthread_create(&d->pthread_id, NULL, multiple_thread_proc, d)) {
+ warning("failed to create thread[%d] skipping remainder", k);
+ free(d);
+ break;
+ }
+
+ list = d;
+ }
+
+ while (list) {
+ struct multiple_thread_data *d = list;
+
+ if (pthread_join(d->pthread_id, NULL))
+ sum_join_errors++;
+
+ sum_thread_errors += d->sum_errors;
+ sum_good += d->sum_good;
+
+ list = d->next;
+ free(d);
+ }
+
+ printf("client (good %d) (join %d), (errors %d)\n",
+ sum_good, sum_join_errors, sum_thread_errors);
+
+ return (sum_join_errors + sum_thread_errors) ? 1 : 0;
+}
+
+int cmd__simple_ipc(int argc, const char **argv)
+{
+ const char * const simple_ipc_usage[] = {
+ N_("test-helper simple-ipc is-active [<name>] [<options>]"),
+ N_("test-helper simple-ipc run-daemon [<name>] [<threads>]"),
+ N_("test-helper simple-ipc start-daemon [<name>] [<threads>] [<max-wait>]"),
+ N_("test-helper simple-ipc stop-daemon [<name>] [<max-wait>]"),
+ N_("test-helper simple-ipc send [<name>] [<token>]"),
+ N_("test-helper simple-ipc sendbytes [<name>] [<bytecount>] [<byte>]"),
+ N_("test-helper simple-ipc multiple [<name>] [<threads>] [<bytecount>] [<batchsize>]"),
+ NULL
+ };
+
+ const char *bytevalue = NULL;
+
+ struct option options[] = {
+#ifndef GIT_WINDOWS_NATIVE
+ OPT_STRING(0, "name", &cl_args.path, N_("name"), N_("name or pathname of unix domain socket")),
+#else
+ OPT_STRING(0, "name", &cl_args.path, N_("name"), N_("named-pipe name")),
+#endif
+ OPT_INTEGER(0, "threads", &cl_args.nr_threads, N_("number of threads in server thread pool")),
+ OPT_INTEGER(0, "max-wait", &cl_args.max_wait_sec, N_("seconds to wait for daemon to start or stop")),
+
+ OPT_INTEGER(0, "bytecount", &cl_args.bytecount, N_("number of bytes")),
+ OPT_INTEGER(0, "batchsize", &cl_args.batchsize, N_("number of requests per thread")),
+
+ OPT_STRING(0, "byte", &bytevalue, N_("byte"), N_("ballast character")),
+ OPT_STRING(0, "token", &cl_args.token, N_("token"), N_("command token to send to the server")),
+
+ OPT_END()
+ };
+
+ if (argc < 2)
+ usage_with_options(simple_ipc_usage, options);
+
+ if (argc == 2 && !strcmp(argv[1], "-h"))
+ usage_with_options(simple_ipc_usage, options);
+
+ if (argc == 2 && !strcmp(argv[1], "SUPPORTS_SIMPLE_IPC"))
+ return 0;
+
+ cl_args.subcommand = argv[1];
+
+ argc--;
+ argv++;
+
+ argc = parse_options(argc, argv, NULL, options, simple_ipc_usage, 0);
+
+ if (cl_args.nr_threads < 1)
+ cl_args.nr_threads = 1;
+ if (cl_args.max_wait_sec < 0)
+ cl_args.max_wait_sec = 0;
+ if (cl_args.bytecount < 1)
+ cl_args.bytecount = 1;
+ if (cl_args.batchsize < 1)
+ cl_args.batchsize = 1;
+
+ if (bytevalue && *bytevalue)
+ cl_args.bytevalue = bytevalue[0];
+
+ /*
+ * Use '!!' on all dispatch functions to map from `error()` style
+ * (returns -1) to `test_must_fail` style (expects 1). This
+ * makes shell error messages less confusing.
+ */
+
+ if (!strcmp(cl_args.subcommand, "is-active"))
+ return !!client__probe_server();
+
+ if (!strcmp(cl_args.subcommand, "run-daemon"))
+ return !!daemon__run_server();
+
+ if (!strcmp(cl_args.subcommand, "start-daemon"))
+ return !!daemon__start_server();
+
+ /*
+ * Client commands follow. Ensure a server is running before
+ * sending any data. This might be overkill, but then again
+ * this is a test harness.
+ */
+
+ if (!strcmp(cl_args.subcommand, "stop-daemon")) {
+ if (client__probe_server())
+ return 1;
+ return !!client__stop_server();
+ }
+
+ if (!strcmp(cl_args.subcommand, "send")) {
+ if (client__probe_server())
+ return 1;
+ return !!client__send_ipc();
+ }
+
+ if (!strcmp(cl_args.subcommand, "sendbytes")) {
+ if (client__probe_server())
+ return 1;
+ return !!client__sendbytes();
+ }
+
+ if (!strcmp(cl_args.subcommand, "multiple")) {
+ if (client__probe_server())
+ return 1;
+ return !!client__multiple();
+ }
+
+ die("Unhandled subcommand: '%s'", cl_args.subcommand);
+}
+#endif
diff --git a/t/helper/test-tool.c b/t/helper/test-tool.c
index f97cd9f..25c6a37 100644
--- a/t/helper/test-tool.c
+++ b/t/helper/test-tool.c
@@ -15,6 +15,7 @@ struct test_cmd {
static struct test_cmd cmds[] = {
{ "advise", cmd__advise_if_enabled },
+ { "bitmap", cmd__bitmap },
{ "bloom", cmd__bloom },
{ "chmtime", cmd__chmtime },
{ "config", cmd__config },
@@ -65,6 +66,7 @@ static struct test_cmd cmds[] = {
{ "sha1", cmd__sha1 },
{ "sha256", cmd__sha256 },
{ "sigchain", cmd__sigchain },
+ { "simple-ipc", cmd__simple_ipc },
{ "strcmp-offset", cmd__strcmp_offset },
{ "string-list", cmd__string_list },
{ "submodule-config", cmd__submodule_config },
diff --git a/t/helper/test-tool.h b/t/helper/test-tool.h
index 28072c0..f03c598 100644
--- a/t/helper/test-tool.h
+++ b/t/helper/test-tool.h
@@ -5,6 +5,7 @@
#include "git-compat-util.h"
int cmd__advise_if_enabled(int argc, const char **argv);
+int cmd__bitmap(int argc, const char **argv);
int cmd__bloom(int argc, const char **argv);
int cmd__chmtime(int argc, const char **argv);
int cmd__config(int argc, const char **argv);
@@ -55,6 +56,7 @@ int cmd__sha1(int argc, const char **argv);
int cmd__oid_array(int argc, const char **argv);
int cmd__sha256(int argc, const char **argv);
int cmd__sigchain(int argc, const char **argv);
+int cmd__simple_ipc(int argc, const char **argv);
int cmd__strcmp_offset(int argc, const char **argv);
int cmd__string_list(int argc, const char **argv);
int cmd__submodule_config(int argc, const char **argv);
diff --git a/t/perf/p5310-pack-bitmaps.sh b/t/perf/p5310-pack-bitmaps.sh
index b3e725f..452be01 100755
--- a/t/perf/p5310-pack-bitmaps.sh
+++ b/t/perf/p5310-pack-bitmaps.sh
@@ -15,6 +15,12 @@ test_expect_success 'setup bitmap config' '
git config pack.writebitmaps true
'
+# we need to create the tag up front such that it is covered by the repack and
+# thus by generated bitmaps.
+test_expect_success 'create tags' '
+ git tag --message="tag pointing to HEAD" perf-tag HEAD
+'
+
test_perf 'repack to disk' '
git repack -ad
'
@@ -43,6 +49,14 @@ test_perf 'rev-list (objects)' '
git rev-list --all --use-bitmap-index --objects >/dev/null
'
+test_perf 'rev-list with tag negated via --not --all (objects)' '
+ git rev-list perf-tag --not --all --use-bitmap-index --objects >/dev/null
+'
+
+test_perf 'rev-list with negative tag (objects)' '
+ git rev-list HEAD --not perf-tag --use-bitmap-index --objects >/dev/null
+'
+
test_perf 'rev-list count with blob:none' '
git rev-list --use-bitmap-index --count --objects --all \
--filter=blob:none >/dev/null
diff --git a/t/t0052-simple-ipc.sh b/t/t0052-simple-ipc.sh
new file mode 100755
index 0000000..ff98be3
--- /dev/null
+++ b/t/t0052-simple-ipc.sh
@@ -0,0 +1,122 @@
+#!/bin/sh
+
+test_description='simple command server'
+
+. ./test-lib.sh
+
+test-tool simple-ipc SUPPORTS_SIMPLE_IPC || {
+ skip_all='simple IPC not supported on this platform'
+ test_done
+}
+
+stop_simple_IPC_server () {
+ test-tool simple-ipc stop-daemon
+}
+
+test_expect_success 'start simple command server' '
+ test_atexit stop_simple_IPC_server &&
+ test-tool simple-ipc start-daemon --threads=8 &&
+ test-tool simple-ipc is-active
+'
+
+test_expect_success 'simple command server' '
+ test-tool simple-ipc send --token=ping >actual &&
+ echo pong >expect &&
+ test_cmp expect actual
+'
+
+test_expect_success 'servers cannot share the same path' '
+ test_must_fail test-tool simple-ipc run-daemon &&
+ test-tool simple-ipc is-active
+'
+
+test_expect_success 'big response' '
+ test-tool simple-ipc send --token=big >actual &&
+ test_line_count -ge 10000 actual &&
+ grep -q "big: [0]*9999\$" actual
+'
+
+test_expect_success 'chunk response' '
+ test-tool simple-ipc send --token=chunk >actual &&
+ test_line_count -ge 10000 actual &&
+ grep -q "big: [0]*9999\$" actual
+'
+
+test_expect_success 'slow response' '
+ test-tool simple-ipc send --token=slow >actual &&
+ test_line_count -ge 100 actual &&
+ grep -q "big: [0]*99\$" actual
+'
+
+# Send an IPC with n=100,000 bytes of ballast. This should be large enough
+# to force both the kernel and the pkt-line layer to chunk the message to the
+# daemon and for the daemon to receive it in chunks.
+#
+test_expect_success 'sendbytes' '
+ test-tool simple-ipc sendbytes --bytecount=100000 --byte=A >actual &&
+ grep "sent:A00100000 rcvd:A00100000" actual
+'
+
+# Start a series of <threads> client threads that each make <batchsize>
+# IPC requests to the server. Each (<threads> * <batchsize>) request
+# will open a new connection to the server and randomly bind to a server
+# thread. Each client thread exits after completing its batch, so the
+# number of live client threads will always be smaller than the total
+# number of requests.
+# Each request will send a message containing at least <bytecount> bytes
+# of ballast. (Responses are small.)
+#
+# The purpose here is to test threading in the server and responding to
+# many concurrent client requests (regardless of whether they come from
+# 1 client process or many). And to test that the server side of the
+# named pipe/socket is stable. (On Windows this means that the server
+# pipe is properly recycled.)
+#
+# On Windows it also lets us adjust the connection timeout in
+# `ipc_client_send_command()`.
+#
+# Note it is easy to drive the system into failure by requesting an
+# insane number of threads on client or server and/or increasing the
+# per-thread batchsize or the per-request bytecount (ballast).
+# On Windows these failures look like "pipe is busy" errors.
+# So I've chosen fairly conservative values for now.
+#
+# We expect output of the form "sent:<letter><length> ..."
+# With terms (7, 19, 13) we expect:
+# <letter> in [A-G]
+# <length> in [19+0 .. 19+(13-1)]
+# and (7 * 13) successful responses.
+#
+test_expect_success 'stress test threads' '
+ test-tool simple-ipc multiple \
+ --threads=7 \
+ --bytecount=19 \
+ --batchsize=13 \
+ >actual &&
+ test_line_count = 92 actual &&
+ grep "good 91" actual &&
+ grep "sent:A" <actual >actual_a &&
+ cat >expect_a <<-EOF &&
+ sent:A00000019 rcvd:A00000019
+ sent:A00000020 rcvd:A00000020
+ sent:A00000021 rcvd:A00000021
+ sent:A00000022 rcvd:A00000022
+ sent:A00000023 rcvd:A00000023
+ sent:A00000024 rcvd:A00000024
+ sent:A00000025 rcvd:A00000025
+ sent:A00000026 rcvd:A00000026
+ sent:A00000027 rcvd:A00000027
+ sent:A00000028 rcvd:A00000028
+ sent:A00000029 rcvd:A00000029
+ sent:A00000030 rcvd:A00000030
+ sent:A00000031 rcvd:A00000031
+ EOF
+ test_cmp expect_a actual_a
+'
+
+test_expect_success 'stop-daemon works' '
+ test-tool simple-ipc stop-daemon &&
+ test_must_fail test-tool simple-ipc is-active &&
+ test_must_fail test-tool simple-ipc send --token=ping
+'
+
+test_done
diff --git a/t/t3206-range-diff.sh b/t/t3206-range-diff.sh
index 1b26c4c..e30bc48 100755
--- a/t/t3206-range-diff.sh
+++ b/t/t3206-range-diff.sh
@@ -521,6 +521,30 @@ test_expect_success 'format-patch --range-diff as commentary' '
grep "> 1: .* new message" 0001-*
'
+test_expect_success 'format-patch --range-diff reroll-count with a non-integer' '
+ git format-patch --range-diff=HEAD~1 -v2.9 HEAD~1 >actual &&
+ test_when_finished "rm v2.9-0001-*" &&
+ test_line_count = 1 actual &&
+ test_i18ngrep "^Range-diff:$" v2.9-0001-* &&
+ grep "> 1: .* new message" v2.9-0001-*
+'
+
+test_expect_success 'format-patch --range-diff reroll-count with an integer' '
+ git format-patch --range-diff=HEAD~1 -v2 HEAD~1 >actual &&
+ test_when_finished "rm v2-0001-*" &&
+ test_line_count = 1 actual &&
+ test_i18ngrep "^Range-diff ..* v1:$" v2-0001-* &&
+ grep "> 1: .* new message" v2-0001-*
+'
+
+test_expect_success 'format-patch --range-diff with v0' '
+ git format-patch --range-diff=HEAD~1 -v0 HEAD~1 >actual &&
+ test_when_finished "rm v0-0001-*" &&
+ test_line_count = 1 actual &&
+ test_i18ngrep "^Range-diff:$" v0-0001-* &&
+ grep "> 1: .* new message" v0-0001-*
+'
+
test_expect_success 'range-diff overrides diff.noprefix internally' '
git -c diff.noprefix=true range-diff HEAD^...
'
diff --git a/t/t3510-cherry-pick-sequence.sh b/t/t3510-cherry-pick-sequence.sh
index b76cb6d..49010aa 100755
--- a/t/t3510-cherry-pick-sequence.sh
+++ b/t/t3510-cherry-pick-sequence.sh
@@ -65,7 +65,7 @@ test_expect_success 'cherry-pick persists opts correctly' '
# gets interrupted, use a high-enough number that is larger
# than the number of parents of any commit we have created
mainline=4 &&
- test_expect_code 128 git cherry-pick -s -m $mainline --strategy=recursive -X patience -X ours initial..anotherpick &&
+ test_expect_code 128 git cherry-pick -s -m $mainline --strategy=recursive -X patience -X ours --edit initial..anotherpick &&
test_path_is_dir .git/sequencer &&
test_path_is_file .git/sequencer/head &&
test_path_is_file .git/sequencer/todo &&
@@ -84,6 +84,36 @@ test_expect_success 'cherry-pick persists opts correctly' '
ours
EOF
git config --file=.git/sequencer/opts --get-all options.strategy-option >actual &&
+ test_cmp expect actual &&
+ echo "true" >expect &&
+ git config --file=.git/sequencer/opts --get-all options.edit >actual &&
+ test_cmp expect actual
+'
+
+test_expect_success 'revert persists opts correctly' '
+ pristine_detach initial &&
+ # to make sure that the session to revert a sequence
+ # gets interrupted, revert commits that are not in the history
+ # of HEAD.
+ test_expect_code 1 git revert -s --strategy=recursive -X patience -X ours --no-edit picked yetanotherpick &&
+ test_path_is_dir .git/sequencer &&
+ test_path_is_file .git/sequencer/head &&
+ test_path_is_file .git/sequencer/todo &&
+ test_path_is_file .git/sequencer/opts &&
+ echo "true" >expect &&
+ git config --file=.git/sequencer/opts --get-all options.signoff >actual &&
+ test_cmp expect actual &&
+ echo "recursive" >expect &&
+ git config --file=.git/sequencer/opts --get-all options.strategy >actual &&
+ test_cmp expect actual &&
+ cat >expect <<-\EOF &&
+ patience
+ ours
+ EOF
+ git config --file=.git/sequencer/opts --get-all options.strategy-option >actual &&
+ test_cmp expect actual &&
+ echo "false" >expect &&
+ git config --file=.git/sequencer/opts --get-all options.edit >actual &&
test_cmp expect actual
'
diff --git a/t/t4014-format-patch.sh b/t/t4014-format-patch.sh
index cdd3154..712d4b5 100755
--- a/t/t4014-format-patch.sh
+++ b/t/t4014-format-patch.sh
@@ -386,6 +386,30 @@ test_expect_success 'reroll count (-v)' '
! grep -v "^Subject: \[PATCH v4 [0-3]/3\] " subjects
'
+test_expect_success 'reroll count (-v) with a fractional number' '
+ rm -fr patches &&
+ git format-patch -o patches --cover-letter -v4.4 main..side >list &&
+ ! grep -v "^patches/v4.4-000[0-3]-" list &&
+ sed -n -e "/^Subject: /p" $(cat list) >subjects &&
+ ! grep -v "^Subject: \[PATCH v4.4 [0-3]/3\] " subjects
+'
+
+test_expect_success 'reroll (-v) count with a non-number' '
+ rm -fr patches &&
+ git format-patch -o patches --cover-letter -v4rev2 main..side >list &&
+ ! grep -v "^patches/v4rev2-000[0-3]-" list &&
+ sed -n -e "/^Subject: /p" $(cat list) >subjects &&
+ ! grep -v "^Subject: \[PATCH v4rev2 [0-3]/3\] " subjects
+'
+
+test_expect_success 'reroll (-v) count with a non-pathname character' '
+ rm -fr patches &&
+ git format-patch -o patches --cover-letter -v4---..././../--1/.2// main..side >list &&
+ ! grep -v "patches/v4-\.-\.-\.-1-\.2-000[0-3]-" list &&
+ sed -n -e "/^Subject: /p" $(cat list) >subjects &&
+ ! grep -v "^Subject: \[PATCH v4---\.\.\./\./\.\./--1/\.2// [0-3]/3\] " subjects
+'
+
check_threading () {
expect="$1" &&
shift &&
@@ -2255,6 +2279,16 @@ test_expect_success 'interdiff: reroll-count' '
test_i18ngrep "^Interdiff ..* v1:$" v2-0000-cover-letter.patch
'
+test_expect_success 'interdiff: reroll-count with a non-integer' '
+ git format-patch --cover-letter --interdiff=boop~2 -v2.2 -1 boop &&
+ test_i18ngrep "^Interdiff:$" v2.2-0000-cover-letter.patch
+'
+
+test_expect_success 'interdiff: reroll-count with an integer' '
+ git format-patch --cover-letter --interdiff=boop~2 -v2 -1 boop &&
+ test_i18ngrep "^Interdiff ..* v1:$" v2-0000-cover-letter.patch
+'
+
test_expect_success 'interdiff: solo-patch' '
cat >expect <<-\EOF &&
+fleep
diff --git a/t/t5310-pack-bitmaps.sh b/t/t5310-pack-bitmaps.sh
index 40b9f63..f53efc8 100755
--- a/t/t5310-pack-bitmaps.sh
+++ b/t/t5310-pack-bitmaps.sh
@@ -554,4 +554,42 @@ test_expect_success 'fetch with bitmaps can reuse old base' '
)
'
+test_expect_success 'pack.preferBitmapTips' '
+ git init repo &&
+ test_when_finished "rm -fr repo" &&
+ (
+ cd repo &&
+
+ # create enough commits that not all of them receive bitmap
+ # coverage even if they are all at the tip of some reference.
+ test_commit_bulk --message="%s" 103 &&
+
+ git rev-list HEAD >commits.raw &&
+ sort <commits.raw >commits &&
+
+ git log --format="create refs/tags/%s %H" HEAD >refs &&
+ git update-ref --stdin <refs &&
+
+ git repack -adb &&
+ test-tool bitmap list-commits | sort >bitmaps &&
+
+ # remember which commits did not receive bitmaps
+ comm -13 bitmaps commits >before &&
+ test_file_not_empty before &&
+
+ # mark the commits which did not receive bitmaps as preferred,
+ # and generate the bitmap again
+ perl -pe "s{^}{create refs/tags/include/$. }" <before |
+ git update-ref --stdin &&
+ git -c pack.preferBitmapTips=refs/tags/include repack -adb &&
+
+ # finally, check that the commit(s) without bitmap coverage
+ # are not the same ones as before
+ test-tool bitmap list-commits | sort >bitmaps &&
+ comm -13 bitmaps commits >after &&
+
+ ! test_cmp before after
+ )
+'
+
test_done
diff --git a/t/t5319-multi-pack-index.sh b/t/t5319-multi-pack-index.sh
index b4afab1..5641d15 100755
--- a/t/t5319-multi-pack-index.sh
+++ b/t/t5319-multi-pack-index.sh
@@ -234,6 +234,48 @@ test_expect_success 'warn on improper hash version' '
)
'
+test_expect_success 'midx picks objects from preferred pack' '
+ test_when_finished rm -rf preferred.git &&
+ git init --bare preferred.git &&
+ (
+ cd preferred.git &&
+
+ a=$(echo "a" | git hash-object -w --stdin) &&
+ b=$(echo "b" | git hash-object -w --stdin) &&
+ c=$(echo "c" | git hash-object -w --stdin) &&
+
+ # Set up two packs, duplicating the object "B" at different
+ # offsets.
+ #
+ # Note that the "BC" pack (the one we choose as preferred) sorts
+ # lexically after the "AB" pack, meaning that omitting the
+ # --preferred-pack argument would cause this test to fail (since
+ # the MIDX code would select the copy of "b" in the "AB" pack).
+ git pack-objects objects/pack/test-AB <<-EOF &&
+ $a
+ $b
+ EOF
+ bc=$(git pack-objects objects/pack/test-BC <<-EOF
+ $b
+ $c
+ EOF
+ ) &&
+
+ git multi-pack-index --object-dir=objects \
+ write --preferred-pack=test-BC-$bc.idx 2>err &&
+ test_must_be_empty err &&
+
+ test-tool read-midx --show-objects objects >out &&
+
+ ofs=$(git show-index <objects/pack/test-BC-$bc.idx | grep $b |
+ cut -d" " -f1) &&
+ printf "%s %s\tobjects/pack/test-BC-%s.pack\n" \
+ "$b" "$ofs" "$bc" >expect &&
+ grep ^$b out >actual &&
+
+ test_cmp expect actual
+ )
+'
test_expect_success 'verify multi-pack-index success' '
git multi-pack-index verify --object-dir=$objdir
diff --git a/t/t5601-clone.sh b/t/t5601-clone.sh
index e7e6c08..329ae59 100755
--- a/t/t5601-clone.sh
+++ b/t/t5601-clone.sh
@@ -759,6 +759,15 @@ test_expect_success 'partial clone using HTTP' '
partial_clone "$HTTPD_DOCUMENT_ROOT_PATH/server" "$HTTPD_URL/smart/server"
'
+test_expect_success 'reject cloning shallow repository using HTTP' '
+ test_when_finished "rm -rf repo" &&
+ git clone --bare --no-local --depth=1 src "$HTTPD_DOCUMENT_ROOT_PATH/repo.git" &&
+ test_must_fail git clone --reject-shallow $HTTPD_URL/smart/repo.git repo 2>err &&
+ test_i18ngrep -e "source repository is shallow, reject to clone." err &&
+
+ git clone --no-reject-shallow $HTTPD_URL/smart/repo.git repo
+'
+
# DO NOT add non-httpd-specific tests here, because the last part of this
# test script is only executed when httpd is available and enabled.
diff --git a/t/t5606-clone-options.sh b/t/t5606-clone-options.sh
index 428b0aa..3a595c0 100755
--- a/t/t5606-clone-options.sh
+++ b/t/t5606-clone-options.sh
@@ -11,7 +11,8 @@ test_expect_success 'setup' '
mkdir parent &&
(cd parent && git init &&
echo one >file && git add file &&
- git commit -m one)
+ git commit -m one) &&
+ git clone --depth=1 --no-local parent shallow-repo
'
@@ -45,6 +46,30 @@ test_expect_success 'disallows --bare with --separate-git-dir' '
'
+test_expect_success 'reject cloning shallow repository' '
+ test_when_finished "rm -rf repo" &&
+ test_must_fail git clone --reject-shallow shallow-repo out 2>err &&
+ test_i18ngrep -e "source repository is shallow, reject to clone." err &&
+
+ git clone --no-reject-shallow shallow-repo repo
+'
+
+test_expect_success 'reject cloning non-local shallow repository' '
+ test_when_finished "rm -rf repo" &&
+ test_must_fail git clone --reject-shallow --no-local shallow-repo out 2>err &&
+ test_i18ngrep -e "source repository is shallow, reject to clone." err &&
+
+ git clone --no-reject-shallow --no-local shallow-repo repo
+'
+
+test_expect_success 'succeed cloning normal repository' '
+ test_when_finished "rm -rf chilad1 child2 child3 child4 " &&
+ git clone --reject-shallow parent child1 &&
+ git clone --reject-shallow --no-local parent child2 &&
+ git clone --no-reject-shallow parent child3 &&
+ git clone --no-reject-shallow --no-local parent child4
+'
+
test_expect_success 'uses "origin" for default remote name' '
git clone parent clone-default-origin &&
diff --git a/t/t5611-clone-config.sh b/t/t5611-clone-config.sh
index 9f555b8..f8625f9 100755
--- a/t/t5611-clone-config.sh
+++ b/t/t5611-clone-config.sh
@@ -95,6 +95,31 @@ test_expect_success 'clone -c remote.<remote>.fetch=<refspec> --origin=<name>' '
test_cmp expect actual
'
+test_expect_success 'set up shallow repository' '
+ git clone --depth=1 --no-local . shallow-repo
+'
+
+test_expect_success 'clone.rejectshallow=true should reject cloning shallow repo' '
+ test_when_finished "rm -rf out" &&
+ test_must_fail git -c clone.rejectshallow=true clone --no-local shallow-repo out 2>err &&
+ test_i18ngrep -e "source repository is shallow, reject to clone." err &&
+
+ git -c clone.rejectshallow=false clone --no-local shallow-repo out
+'
+
+test_expect_success 'option --[no-]reject-shallow overrides clone.rejectshallow config' '
+ test_when_finished "rm -rf out" &&
+ test_must_fail git -c clone.rejectshallow=false clone --reject-shallow --no-local shallow-repo out 2>err &&
+ test_i18ngrep -e "source repository is shallow, reject to clone." err &&
+
+ git -c clone.rejectshallow=true clone --no-reject-shallow --no-local shallow-repo out
+'
+
+test_expect_success 'clone.rejectshallow=true should succeed cloning normal repo' '
+ test_when_finished "rm -rf out" &&
+ git -c clone.rejectshallow=true clone --no-local . out
+'
+
test_expect_success MINGW 'clone -c core.hideDotFiles' '
test_commit attributes .gitattributes "" &&
rm -rf child &&
diff --git a/t/t6300-for-each-ref.sh b/t/t6300-for-each-ref.sh
index cac7f44..2e7c32d 100755
--- a/t/t6300-for-each-ref.sh
+++ b/t/t6300-for-each-ref.sh
@@ -1134,4 +1134,14 @@ test_expect_success 'for-each-ref --ignore-case works on multiple sort keys' '
test_cmp expect actual
'
+test_expect_success 'for-each-ref reports broken tags' '
+ git tag -m "good tag" broken-tag-good HEAD &&
+ git cat-file tag broken-tag-good >good &&
+ sed s/commit/blob/ <good >bad &&
+ bad=$(git hash-object -w -t tag bad) &&
+ git update-ref refs/tags/broken-tag-bad $bad &&
+ test_must_fail git for-each-ref --format="%(*objectname)" \
+ refs/tags/broken-tag-*
+'
+
test_done
diff --git a/t/t6423-merge-rename-directories.sh b/t/t6423-merge-rename-directories.sh
index 5d3b711..379aac0 100755
--- a/t/t6423-merge-rename-directories.sh
+++ b/t/t6423-merge-rename-directories.sh
@@ -4895,6 +4895,77 @@ test_expect_merge_algorithm failure success '12f: Trivial directory resolve, cac
)
'
+# Testcase 12g, Testcase with two kinds of "relevant" renames
+# Commit O: somefile_O, subdir/{a_O,b_O}
+# Commit A: somefile_A, subdir/{a_O,b_O,c_A}
+# Commit B: newfile_B, newdir/{a_B,b_B}
+# Expected: newfile_{merged}, newdir/{a_B,b_B,c_A}
+
+test_setup_12g () {
+ test_create_repo 12g &&
+ (
+ cd 12g &&
+
+ mkdir -p subdir &&
+ test_write_lines upon a time there was a >somefile &&
+ test_write_lines 1 2 3 4 5 6 7 8 9 10 >subdir/a &&
+ test_write_lines one two three four five six >subdir/b &&
+ git add . &&
+ test_tick &&
+ git commit -m "O" &&
+
+ git branch O &&
+ git branch A &&
+ git branch B &&
+
+ git switch A &&
+ test_write_lines once upon a time there was a >somefile &&
+ > subdir/c &&
+ git add somefile subdir/c &&
+ test_tick &&
+ git commit -m "A" &&
+
+ git checkout B &&
+ git mv somefile newfile &&
+ git mv subdir newdir &&
+ echo repo >>newfile &&
+ test_write_lines 1 2 3 4 5 6 7 8 9 10 11 >newdir/a &&
+ test_write_lines one two three four five six seven >newdir/b &&
+ git add newfile newdir &&
+ test_tick &&
+ git commit -m "B"
+ )
+}
+
+test_expect_success '12g: Testcase with two kinds of "relevant" renames' '
+ test_setup_12g &&
+ (
+ cd 12g &&
+
+ git checkout A^0 &&
+
+ git -c merge.directoryRenames=true merge -s recursive B^0 &&
+
+ test_write_lines once upon a time there was a repo >expect &&
+ test_cmp expect newfile &&
+
+ git ls-files -s >out &&
+ test_line_count = 4 out &&
+
+ git rev-parse >actual \
+ HEAD:newdir/a HEAD:newdir/b HEAD:newdir/c &&
+ git rev-parse >expect \
+ B:newdir/a B:newdir/b A:subdir/c &&
+ test_cmp expect actual &&
+
+ test_must_fail git rev-parse HEAD:subdir/a &&
+ test_must_fail git rev-parse HEAD:subdir/b &&
+ test_must_fail git rev-parse HEAD:subdir/c &&
+ test_path_is_missing subdir/ &&
+ test_path_is_file newdir/c
+ )
+'
+
###########################################################################
# SECTION 13: Checking informational and conflict messages
#
diff --git a/t/t7502-commit-porcelain.sh b/t/t7502-commit-porcelain.sh
index 6396897..38a532d 100755
--- a/t/t7502-commit-porcelain.sh
+++ b/t/t7502-commit-porcelain.sh
@@ -38,6 +38,16 @@ check_summary_oneline() {
test_cmp exp act
}
+trailer_commit_base () {
+ echo "fun" >>file &&
+ git add file &&
+ git commit -s --trailer "Signed-off-by=C1 E1 " \
+ --trailer "Helped-by:C2 E2 " \
+ --trailer "Reported-by=C3 E3" \
+ --trailer "Mentored-by:C4 E4" \
+ -m "hello"
+}
+
test_expect_success 'output summary format' '
echo new >file1 &&
@@ -154,6 +164,308 @@ test_expect_success 'sign off' '
'
+test_expect_success 'commit --trailer with "="' '
+ trailer_commit_base &&
+ cat >expected <<-\EOF &&
+ hello
+
+ Signed-off-by: C O Mitter <committer@example.com>
+ Signed-off-by: C1 E1
+ Helped-by: C2 E2
+ Reported-by: C3 E3
+ Mentored-by: C4 E4
+ EOF
+ git cat-file commit HEAD >commit.msg &&
+ sed -e "1,/^\$/d" commit.msg >actual &&
+ test_cmp expected actual
+'
+
+test_expect_success 'commit --trailer with -c and "replace" as ifexists' '
+ trailer_commit_base &&
+ cat >expected <<-\EOF &&
+ hello
+
+ Signed-off-by: C O Mitter <committer@example.com>
+ Signed-off-by: C1 E1
+ Reported-by: C3 E3
+ Mentored-by: C4 E4
+ Helped-by: C3 E3
+ EOF
+ git -c trailer.ifexists="replace" \
+ commit --trailer "Mentored-by: C4 E4" \
+ --trailer "Helped-by: C3 E3" \
+ --amend &&
+ git cat-file commit HEAD >commit.msg &&
+ sed -e "1,/^\$/d" commit.msg >actual &&
+ test_cmp expected actual
+'
+
+test_expect_success 'commit --trailer with -c and "add" as ifexists' '
+ trailer_commit_base &&
+ cat >expected <<-\EOF &&
+ hello
+
+ Signed-off-by: C O Mitter <committer@example.com>
+ Signed-off-by: C1 E1
+ Helped-by: C2 E2
+ Reported-by: C3 E3
+ Mentored-by: C4 E4
+ Reported-by: C3 E3
+ Mentored-by: C4 E4
+ EOF
+ git -c trailer.ifexists="add" \
+ commit --trailer "Reported-by: C3 E3" \
+ --trailer "Mentored-by: C4 E4" \
+ --amend &&
+ git cat-file commit HEAD >commit.msg &&
+ sed -e "1,/^\$/d" commit.msg >actual &&
+ test_cmp expected actual
+'
+
+test_expect_success 'commit --trailer with -c and "donothing" as ifexists' '
+ trailer_commit_base &&
+ cat >expected <<-\EOF &&
+ hello
+
+ Signed-off-by: C O Mitter <committer@example.com>
+ Signed-off-by: C1 E1
+ Helped-by: C2 E2
+ Reported-by: C3 E3
+ Mentored-by: C4 E4
+ Reviewed-by: C6 E6
+ EOF
+ git -c trailer.ifexists="donothing" \
+ commit --trailer "Mentored-by: C5 E5" \
+ --trailer "Reviewed-by: C6 E6" \
+ --amend &&
+ git cat-file commit HEAD >commit.msg &&
+ sed -e "1,/^\$/d" commit.msg >actual &&
+ test_cmp expected actual
+'
+
+test_expect_success 'commit --trailer with -c and "addIfDifferent" as ifexists' '
+ trailer_commit_base &&
+ cat >expected <<-\EOF &&
+ hello
+
+ Signed-off-by: C O Mitter <committer@example.com>
+ Signed-off-by: C1 E1
+ Helped-by: C2 E2
+ Reported-by: C3 E3
+ Mentored-by: C4 E4
+ Mentored-by: C5 E5
+ EOF
+ git -c trailer.ifexists="addIfDifferent" \
+ commit --trailer "Reported-by: C3 E3" \
+ --trailer "Mentored-by: C5 E5" \
+ --amend &&
+ git cat-file commit HEAD >commit.msg &&
+ sed -e "1,/^\$/d" commit.msg >actual &&
+ test_cmp expected actual
+'
+
+test_expect_success 'commit --trailer with -c and "addIfDifferentNeighbor" as ifexists' '
+ trailer_commit_base &&
+ cat >expected <<-\EOF &&
+ hello
+
+ Signed-off-by: C O Mitter <committer@example.com>
+ Signed-off-by: C1 E1
+ Helped-by: C2 E2
+ Reported-by: C3 E3
+ Mentored-by: C4 E4
+ Reported-by: C3 E3
+ EOF
+ git -c trailer.ifexists="addIfDifferentNeighbor" \
+ commit --trailer "Mentored-by: C4 E4" \
+ --trailer "Reported-by: C3 E3" \
+ --amend &&
+ git cat-file commit HEAD >commit.msg &&
+ sed -e "1,/^\$/d" commit.msg >actual &&
+ test_cmp expected actual
+'
+
+test_expect_success 'commit --trailer with -c and "end" as where' '
+ trailer_commit_base &&
+ cat >expected <<-\EOF &&
+ hello
+
+ Signed-off-by: C O Mitter <committer@example.com>
+ Signed-off-by: C1 E1
+ Helped-by: C2 E2
+ Reported-by: C3 E3
+ Mentored-by: C4 E4
+ Reported-by: C3 E3
+ Mentored-by: C4 E4
+ EOF
+ git -c trailer.where="end" \
+ commit --trailer "Reported-by: C3 E3" \
+ --trailer "Mentored-by: C4 E4" \
+ --amend &&
+ git cat-file commit HEAD >commit.msg &&
+ sed -e "1,/^\$/d" commit.msg >actual &&
+ test_cmp expected actual
+'
+
+test_expect_success 'commit --trailer with -c and "start" as where' '
+ trailer_commit_base &&
+ cat >expected <<-\EOF &&
+ hello
+
+ Signed-off-by: C1 E1
+ Signed-off-by: C O Mitter <committer@example.com>
+ Signed-off-by: C1 E1
+ Helped-by: C2 E2
+ Reported-by: C3 E3
+ Mentored-by: C4 E4
+ EOF
+ git -c trailer.where="start" \
+ commit --trailer "Signed-off-by: C O Mitter <committer@example.com>" \
+ --trailer "Signed-off-by: C1 E1" \
+ --amend &&
+ git cat-file commit HEAD >commit.msg &&
+ sed -e "1,/^\$/d" commit.msg >actual &&
+ test_cmp expected actual
+'
+
+test_expect_success 'commit --trailer with -c and "after" as where' '
+ trailer_commit_base &&
+ cat >expected <<-\EOF &&
+ hello
+
+ Signed-off-by: C O Mitter <committer@example.com>
+ Signed-off-by: C1 E1
+ Helped-by: C2 E2
+ Reported-by: C3 E3
+ Mentored-by: C4 E4
+ Mentored-by: C5 E5
+ EOF
+ git -c trailer.where="after" \
+ commit --trailer "Mentored-by: C4 E4" \
+ --trailer "Mentored-by: C5 E5" \
+ --amend &&
+ git cat-file commit HEAD >commit.msg &&
+ sed -e "1,/^\$/d" commit.msg >actual &&
+ test_cmp expected actual
+'
+
+test_expect_success 'commit --trailer with -c and "before" as where' '
+ trailer_commit_base &&
+ cat >expected <<-\EOF &&
+ hello
+
+ Signed-off-by: C O Mitter <committer@example.com>
+ Signed-off-by: C1 E1
+ Helped-by: C2 E2
+ Reported-by: C3 E3
+ Mentored-by: C2 E2
+ Mentored-by: C3 E3
+ Mentored-by: C4 E4
+ EOF
+ git -c trailer.where="before" \
+ commit --trailer "Mentored-by: C3 E3" \
+ --trailer "Mentored-by: C2 E2" \
+ --amend &&
+ git cat-file commit HEAD >commit.msg &&
+ sed -e "1,/^\$/d" commit.msg >actual &&
+ test_cmp expected actual
+'
+
+test_expect_success 'commit --trailer with -c and "donothing" as ifmissing' '
+ trailer_commit_base &&
+ cat >expected <<-\EOF &&
+ hello
+
+ Signed-off-by: C O Mitter <committer@example.com>
+ Signed-off-by: C1 E1
+ Helped-by: C2 E2
+ Reported-by: C3 E3
+ Mentored-by: C4 E4
+ Helped-by: C5 E5
+ EOF
+ git -c trailer.ifmissing="donothing" \
+ commit --trailer "Helped-by: C5 E5" \
+ --trailer "Based-by: C6 E6" \
+ --amend &&
+ git cat-file commit HEAD >commit.msg &&
+ sed -e "1,/^\$/d" commit.msg >actual &&
+ test_cmp expected actual
+'
+
+test_expect_success 'commit --trailer with -c and "add" as ifmissing' '
+ trailer_commit_base &&
+ cat >expected <<-\EOF &&
+ hello
+
+ Signed-off-by: C O Mitter <committer@example.com>
+ Signed-off-by: C1 E1
+ Helped-by: C2 E2
+ Reported-by: C3 E3
+ Mentored-by: C4 E4
+ Helped-by: C5 E5
+ Based-by: C6 E6
+ EOF
+ git -c trailer.ifmissing="add" \
+ commit --trailer "Helped-by: C5 E5" \
+ --trailer "Based-by: C6 E6" \
+ --amend &&
+ git cat-file commit HEAD >commit.msg &&
+ sed -e "1,/^\$/d" commit.msg >actual &&
+ test_cmp expected actual
+'
+
+test_expect_success 'commit --trailer with -c ack.key' '
+ echo "fun" >>file1 &&
+ git add file1 &&
+ cat >expected <<-\EOF &&
+ hello
+
+ Acked-by: Peff
+ EOF
+ git -c trailer.ack.key="Acked-by" \
+ commit --trailer "ack = Peff" -m "hello" &&
+ git cat-file commit HEAD >commit.msg &&
+ sed -e "1,/^\$/d" commit.msg >actual &&
+ test_cmp expected actual
+'
+
+test_expect_success 'commit --trailer with -c and ":=#" as separators' '
+ echo "fun" >>file1 &&
+ git add file1 &&
+ cat >expected <<-\EOF &&
+ I hate bug
+
+ Bug #42
+ EOF
+ git -c trailer.separators=":=#" \
+ -c trailer.bug.key="Bug #" \
+ commit --trailer "bug = 42" -m "I hate bug" &&
+ git cat-file commit HEAD >commit.msg &&
+ sed -e "1,/^\$/d" commit.msg >actual &&
+ test_cmp expected actual
+'
+
+test_expect_success 'commit --trailer with -c and command' '
+ trailer_commit_base &&
+ cat >expected <<-\EOF &&
+ hello
+
+ Signed-off-by: C O Mitter <committer@example.com>
+ Signed-off-by: C1 E1
+ Helped-by: C2 E2
+ Mentored-by: C4 E4
+ Reported-by: A U Thor <author@example.com>
+ EOF
+ git -c trailer.report.key="Reported-by: " \
+ -c trailer.report.ifexists="replace" \
+ -c trailer.report.command="NAME=\"\$ARG\"; test -n \"\$NAME\" && \
+ git log --author=\"\$NAME\" -1 --format=\"format:%aN <%aE>\" || true" \
+ commit --trailer "report = author" --amend &&
+ git cat-file commit HEAD >commit.msg &&
+ sed -e "1,/^\$/d" commit.msg >actual &&
+ test_cmp expected actual
+'
+
test_expect_success 'multiple -m' '
>negative &&
diff --git a/transport.c b/transport.c
index 1c4ab67..ef66e73 100644
--- a/transport.c
+++ b/transport.c
@@ -236,6 +236,9 @@ static int set_git_option(struct git_transport_options *opts,
list_objects_filter_die_if_populated(&opts->filter_options);
parse_list_objects_filter(&opts->filter_options, value);
return 0;
+ } else if (!strcmp(name, TRANS_OPT_REJECT_SHALLOW)) {
+ opts->reject_shallow = !!value;
+ return 0;
}
return 1;
}
@@ -370,6 +373,7 @@ static int fetch_refs_via_pack(struct transport *transport,
args.stateless_rpc = transport->stateless_rpc;
args.server_options = transport->server_options;
args.negotiation_tips = data->options.negotiation_tips;
+ args.reject_shallow_remote = transport->smart_options->reject_shallow;
if (!data->got_remote_heads) {
int i;
@@ -1452,6 +1456,8 @@ int transport_disconnect(struct transport *transport)
int ret = 0;
if (transport->vtable->disconnect)
ret = transport->vtable->disconnect(transport);
+ if (transport->got_remote_refs)
+ free_refs((void *)transport->remote_refs);
free(transport);
return ret;
}
diff --git a/transport.h b/transport.h
index 24e1579..4d5db0a 100644
--- a/transport.h
+++ b/transport.h
@@ -14,6 +14,7 @@ struct git_transport_options {
unsigned check_self_contained_and_connected : 1;
unsigned self_contained_and_connected : 1;
unsigned update_shallow : 1;
+ unsigned reject_shallow : 1;
unsigned deepen_relative : 1;
/* see documentation of corresponding flag in fetch-pack.h */
@@ -194,6 +195,9 @@ void transport_check_allowed(const char *type);
/* Aggressively fetch annotated tags if possible */
#define TRANS_OPT_FOLLOWTAGS "followtags"
+/* Reject shallow repo transport */
+#define TRANS_OPT_REJECT_SHALLOW "rejectshallow"
+
/* Accept refs that may update .git/shallow without --depth */
#define TRANS_OPT_UPDATE_SHALLOW "updateshallow"
diff --git a/unix-socket.c b/unix-socket.c
index 19ed48b..e0be1ba 100644
--- a/unix-socket.c
+++ b/unix-socket.c
@@ -1,13 +1,7 @@
#include "cache.h"
#include "unix-socket.h"
-static int unix_stream_socket(void)
-{
- int fd = socket(AF_UNIX, SOCK_STREAM, 0);
- if (fd < 0)
- die_errno("unable to create socket");
- return fd;
-}
+#define DEFAULT_UNIX_STREAM_LISTEN_BACKLOG (5)
static int chdir_len(const char *orig, int len)
{
@@ -36,16 +30,23 @@ static void unix_sockaddr_cleanup(struct unix_sockaddr_context *ctx)
}
static int unix_sockaddr_init(struct sockaddr_un *sa, const char *path,
- struct unix_sockaddr_context *ctx)
+ struct unix_sockaddr_context *ctx,
+ int disallow_chdir)
{
int size = strlen(path) + 1;
ctx->orig_dir = NULL;
if (size > sizeof(sa->sun_path)) {
- const char *slash = find_last_dir_sep(path);
+ const char *slash;
const char *dir;
struct strbuf cwd = STRBUF_INIT;
+ if (disallow_chdir) {
+ errno = ENAMETOOLONG;
+ return -1;
+ }
+
+ slash = find_last_dir_sep(path);
if (!slash) {
errno = ENAMETOOLONG;
return -1;
@@ -71,15 +72,18 @@ static int unix_sockaddr_init(struct sockaddr_un *sa, const char *path,
return 0;
}
-int unix_stream_connect(const char *path)
+int unix_stream_connect(const char *path, int disallow_chdir)
{
- int fd, saved_errno;
+ int fd = -1, saved_errno;
struct sockaddr_un sa;
struct unix_sockaddr_context ctx;
- if (unix_sockaddr_init(&sa, path, &ctx) < 0)
+ if (unix_sockaddr_init(&sa, path, &ctx, disallow_chdir) < 0)
return -1;
- fd = unix_stream_socket();
+ fd = socket(AF_UNIX, SOCK_STREAM, 0);
+ if (fd < 0)
+ goto fail;
+
if (connect(fd, (struct sockaddr *)&sa, sizeof(sa)) < 0)
goto fail;
unix_sockaddr_cleanup(&ctx);
@@ -87,28 +91,36 @@ int unix_stream_connect(const char *path)
fail:
saved_errno = errno;
+ if (fd != -1)
+ close(fd);
unix_sockaddr_cleanup(&ctx);
- close(fd);
errno = saved_errno;
return -1;
}
-int unix_stream_listen(const char *path)
+int unix_stream_listen(const char *path,
+ const struct unix_stream_listen_opts *opts)
{
- int fd, saved_errno;
+ int fd = -1, saved_errno;
+ int backlog;
struct sockaddr_un sa;
struct unix_sockaddr_context ctx;
unlink(path);
- if (unix_sockaddr_init(&sa, path, &ctx) < 0)
+ if (unix_sockaddr_init(&sa, path, &ctx, opts->disallow_chdir) < 0)
return -1;
- fd = unix_stream_socket();
+ fd = socket(AF_UNIX, SOCK_STREAM, 0);
+ if (fd < 0)
+ goto fail;
if (bind(fd, (struct sockaddr *)&sa, sizeof(sa)) < 0)
goto fail;
- if (listen(fd, 5) < 0)
+ backlog = opts->listen_backlog_size;
+ if (backlog <= 0)
+ backlog = DEFAULT_UNIX_STREAM_LISTEN_BACKLOG;
+ if (listen(fd, backlog) < 0)
goto fail;
unix_sockaddr_cleanup(&ctx);
@@ -116,8 +128,9 @@ int unix_stream_listen(const char *path)
fail:
saved_errno = errno;
+ if (fd != -1)
+ close(fd);
unix_sockaddr_cleanup(&ctx);
- close(fd);
errno = saved_errno;
return -1;
}
diff --git a/unix-socket.h b/unix-socket.h
index e271aee..8542cdd 100644
--- a/unix-socket.h
+++ b/unix-socket.h
@@ -1,7 +1,15 @@
#ifndef UNIX_SOCKET_H
#define UNIX_SOCKET_H
-int unix_stream_connect(const char *path);
-int unix_stream_listen(const char *path);
+struct unix_stream_listen_opts {
+ int listen_backlog_size;
+ unsigned int disallow_chdir:1;
+};
+
+#define UNIX_STREAM_LISTEN_OPTS_INIT { 0 }
+
+int unix_stream_connect(const char *path, int disallow_chdir);
+int unix_stream_listen(const char *path,
+ const struct unix_stream_listen_opts *opts);
#endif /* UNIX_SOCKET_H */
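
The listen call now takes an options struct instead of hard-coding its behaviour. A minimal usage sketch, assuming an arbitrary path and backlog (both illustrative):

#include "unix-socket.h"

/* Sketch: open a listening unix-domain socket with the new API.
 * A backlog of 0 or less falls back to the built-in default (5);
 * disallow_chdir = 0 keeps the chdir workaround for long paths. */
static int open_listener(const char *path)
{
	struct unix_stream_listen_opts opts = UNIX_STREAM_LISTEN_OPTS_INIT;

	opts.listen_backlog_size = 64;
	opts.disallow_chdir = 0;

	return unix_stream_listen(path, &opts); /* fd on success, -1 on error */
}
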
diff --git a/unix-stream-server.c b/unix-stream-server.c
new file mode 100644
index 0000000..efa2a20
--- /dev/null
+++ b/unix-stream-server.c
@@ -0,0 +1,125 @@
+#include "cache.h"
+#include "lockfile.h"
+#include "unix-socket.h"
+#include "unix-stream-server.h"
+
+#define DEFAULT_LOCK_TIMEOUT (100)
+
+/*
+ * Try to connect to a unix domain socket at `path` (if it exists) and
+ * see if there is a server listening.
+ *
+ * We don't know if the socket exists, whether a server died and
+ * failed to clean up, or whether we have a live server listening, so
+ * we "poke" it.
+ *
+ * We immediately hang up without sending/receiving any data because we
+ * don't know anything about the protocol spoken and don't want to
+ * block while writing/reading data. It is sufficient to just know
+ * that someone is listening.
+ */
+static int is_another_server_alive(const char *path,
+ const struct unix_stream_listen_opts *opts)
+{
+ int fd = unix_stream_connect(path, opts->disallow_chdir);
+ if (fd >= 0) {
+ close(fd);
+ return 1;
+ }
+
+ return 0;
+}
+
+int unix_ss_create(const char *path,
+ const struct unix_stream_listen_opts *opts,
+ long timeout_ms,
+ struct unix_ss_socket **new_server_socket)
+{
+ struct lock_file lock = LOCK_INIT;
+ int fd_socket;
+ struct unix_ss_socket *server_socket;
+
+ *new_server_socket = NULL;
+
+ if (timeout_ms < 0)
+ timeout_ms = DEFAULT_LOCK_TIMEOUT;
+
+ /*
+ * Create a lock at "<path>.lock" if we can.
+ */
+ if (hold_lock_file_for_update_timeout(&lock, path, 0, timeout_ms) < 0)
+ return -1;
+
+ /*
+ * If another server is listening on "<path>", give up. We do not
+ * want to create a socket and steal future connections from them.
+ */
+ if (is_another_server_alive(path, opts)) {
+ rollback_lock_file(&lock);
+ errno = EADDRINUSE;
+ return -2;
+ }
+
+ /*
+ * Create and bind to a Unix domain socket at "<path>".
+ */
+ fd_socket = unix_stream_listen(path, opts);
+ if (fd_socket < 0) {
+ int saved_errno = errno;
+ rollback_lock_file(&lock);
+ errno = saved_errno;
+ return -1;
+ }
+
+ server_socket = xcalloc(1, sizeof(*server_socket));
+ server_socket->path_socket = strdup(path);
+ server_socket->fd_socket = fd_socket;
+ lstat(path, &server_socket->st_socket);
+
+ *new_server_socket = server_socket;
+
+ /*
+ * Always roll back (just delete) "<path>.lock" because we already created
+ * "<path>" as a socket and do not want commit_lock to do the atomic
+ * rename trick.
+ */
+ rollback_lock_file(&lock);
+
+ return 0;
+}
+
+void unix_ss_free(struct unix_ss_socket *server_socket)
+{
+ if (!server_socket)
+ return;
+
+ if (server_socket->fd_socket >= 0) {
+ if (!unix_ss_was_stolen(server_socket))
+ unlink(server_socket->path_socket);
+ close(server_socket->fd_socket);
+ }
+
+ free(server_socket->path_socket);
+ free(server_socket);
+}
+
+int unix_ss_was_stolen(struct unix_ss_socket *server_socket)
+{
+ struct stat st_now;
+
+ if (!server_socket)
+ return 0;
+
+ if (lstat(server_socket->path_socket, &st_now) == -1)
+ return 1;
+
+ if (st_now.st_ino != server_socket->st_socket.st_ino)
+ return 1;
+ if (st_now.st_dev != server_socket->st_socket.st_dev)
+ return 1;
+
+ if (!S_ISSOCK(st_now.st_mode))
+ return 1;
+
+ return 0;
+}
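
The comments above describe the protocol: take "<path>.lock", "poke" any existing socket, and only bind if nobody answers. A caller would typically treat -2 (another live server owns the path) differently from a hard error; a sketch of that distinction (the helper name and error handling are illustrative):

#include "cache.h"
#include "unix-socket.h"
#include "unix-stream-server.h"

/* Sketch: create the server socket, treating EADDRINUSE (-2) as
 * "another server won the race", which is not a failure for us. */
static struct unix_ss_socket *create_or_defer(const char *path)
{
	struct unix_stream_listen_opts opts = UNIX_STREAM_LISTEN_OPTS_INIT;
	struct unix_ss_socket *server_socket = NULL;
	int ret;

	ret = unix_ss_create(path, &opts, -1 /* use the default lock timeout */,
			     &server_socket);
	if (ret == -2)
		return NULL;	/* a live server already listens on <path> */
	if (ret < 0)
		die_errno("could not set up socket at '%s'", path);

	return server_socket;	/* caller eventually calls unix_ss_free() */
}
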
diff --git a/unix-stream-server.h b/unix-stream-server.h
new file mode 100644
index 0000000..ae2712b
--- /dev/null
+++ b/unix-stream-server.h
@@ -0,0 +1,33 @@
+#ifndef UNIX_STREAM_SERVER_H
+#define UNIX_STREAM_SERVER_H
+
+#include "unix-socket.h"
+
+struct unix_ss_socket {
+ char *path_socket;
+ struct stat st_socket;
+ int fd_socket;
+};
+
+/*
+ * Create a Unix Domain Socket at the given path under the protection
+ * of a '.lock' lockfile.
+ *
+ * Returns 0 on success, -1 on error, -2 if socket is in use.
+ */
+int unix_ss_create(const char *path,
+ const struct unix_stream_listen_opts *opts,
+ long timeout_ms,
+ struct unix_ss_socket **server_socket);
+
+/*
+ * Close and delete the socket.
+ */
+void unix_ss_free(struct unix_ss_socket *server_socket);
+
+/*
+ * Return 1 if the pathname no longer refers to the socket inode we created.
+ */
+int unix_ss_was_stolen(struct unix_ss_socket *server_socket);
+
+#endif /* UNIX_STREAM_SERVER_H */
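
Because unix_ss_was_stolen() only compares the on-disk inode against the one recorded at creation time, a long-running server is expected to re-check it periodically; unix_ss_free() then skips the unlink when ownership was lost, so it never deletes the new owner's socket. A minimal sketch of that check (the loop shape and polling interval are made up):

#include "cache.h"
#include "unix-stream-server.h"

/* Sketch: serve until some other process re-creates <path>, then shut
 * down; unix_ss_free() will notice the theft and leave the new socket
 * on disk untouched. */
static void serve_until_stolen(struct unix_ss_socket *server_socket)
{
	while (!unix_ss_was_stolen(server_socket)) {
		/* ... accept() and handle one client here ... */
		sleep(1); /* illustrative polling interval */
	}

	unix_ss_free(server_socket);
}
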
diff --git a/unpack-trees.c b/unpack-trees.c
index 29029f3..8a1afbc 100644
--- a/unpack-trees.c
+++ b/unpack-trees.c
@@ -16,6 +16,7 @@
#include "fsmonitor.h"
#include "object-store.h"
#include "promisor-remote.h"
+#include "entry.h"
/*
* Error messages expected by scripts out of plumbing commands such as