From dc4393cba333c92873671a37e72efaf41bfa4697 Mon Sep 17 00:00:00 2001 From: Junio C Hamano Date: Mon, 19 Sep 2005 19:50:09 -0700 Subject: GIT 0.99.7a Signed-off-by: Junio C Hamano diff --git a/Makefile b/Makefile index f83c495..93ceb6b 100644 --- a/Makefile +++ b/Makefile @@ -44,7 +44,7 @@ # DEFINES += -DUSE_STDEV -GIT_VERSION = 0.99.7 +GIT_VERSION = 0.99.7a CFLAGS = -g -O2 -Wall ALL_CFLAGS = $(CFLAGS) $(PLATFORM_DEFINES) $(DEFINES) diff --git a/debian/changelog b/debian/changelog index 128513a..614494c 100644 --- a/debian/changelog +++ b/debian/changelog @@ -1,3 +1,9 @@ +git-core (0.99.7a-0) unstable; urgency=low + + * GIT 0.99.7a + + -- Junio C Hamano Mon, 19 Sep 2005 19:29:07 -0700 + git-core (0.99.7-0) unstable; urgency=low * GIT 0.99.7 -- cgit v0.10.2-6-g49f6 From 089f20dce19711d34f5383ee289a9b1fbd3f3307 Mon Sep 17 00:00:00 2001 From: Junio C Hamano Date: Wed, 21 Sep 2005 00:58:32 -0700 Subject: Clarify dual license status of subprocess.py file. The author of the file we stole from Python 2.4 distribution, Peter Astrand , OK'ed to add this at the end of the licensing terms section of the file: Use of this file within git is permitted under GPLv2. Signed-off-by: Junio C Hamano diff --git a/compat/subprocess.py b/compat/subprocess.py index d115e87..93323df 100644 --- a/compat/subprocess.py +++ b/compat/subprocess.py @@ -24,6 +24,9 @@ # OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, # NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION # WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. +# +# Use of this file within git is permitted under GPLv2. +# r"""subprocess - Subprocesses with accessible I/O streams -- cgit v0.10.2-6-g49f6 From 990f856a62a24bfd56bac1f5e4581381369e4ede Mon Sep 17 00:00:00 2001 From: Junio C Hamano Date: Thu, 22 Sep 2005 21:53:33 -0700 Subject: GIT 0.99.7b Signed-off-by: Junio C Hamano diff --git a/Makefile b/Makefile index 93ceb6b..ac13ecb 100644 --- a/Makefile +++ b/Makefile @@ -44,7 +44,7 @@ # DEFINES += -DUSE_STDEV -GIT_VERSION = 0.99.7a +GIT_VERSION = 0.99.7b CFLAGS = -g -O2 -Wall ALL_CFLAGS = $(CFLAGS) $(PLATFORM_DEFINES) $(DEFINES) diff --git a/debian/changelog b/debian/changelog index 614494c..e6ea7b9 100644 --- a/debian/changelog +++ b/debian/changelog @@ -1,3 +1,9 @@ +git-core (0.99.7b-0) unstable; urgency=low + + * GIT 0.99.7b + + -- Junio C Hamano Thu, 22 Sep 2005 21:46:44 -0700 + git-core (0.99.7a-0) unstable; urgency=low * GIT 0.99.7a -- cgit v0.10.2-6-g49f6 From 25a67553ea8b5c8c750adf21f39a901009750553 Mon Sep 17 00:00:00 2001 From: Junio C Hamano Date: Sat, 24 Sep 2005 11:19:07 -0700 Subject: Really ignore generated distribution material. Signed-off-by: Junio C Hamano (cherry picked from e558e33b7bdb4a7c633bedd1606f7dd7ef912933 commit) diff --git a/.gitignore b/.gitignore index 92deccb..938669f 100644 --- a/.gitignore +++ b/.gitignore @@ -96,7 +96,8 @@ git-verify-pack git-verify-tag git-whatchanged git-write-tree -#*.tar.gz -#*.dsc -#*.deb -#git-core.spec +git-core-*/?* +*.tar.gz +*.dsc +*.deb +git-core.spec -- cgit v0.10.2-6-g49f6 From 00d8bbd3c4bba72a6dfd48c2c0c9cbaa000f13c2 Mon Sep 17 00:00:00 2001 From: Junio C Hamano Date: Sat, 24 Sep 2005 11:38:43 -0700 Subject: GIT 0.99.7c Contains the following post-0.99.7b fixes: - rsh.c string termination fix by H. Peter Anvin - further fetch fixes by Sergey Vlasov - diff-tree documentation by Robert Watson. - 'git diff --cached' synonymous to 'git diff --cached HEAD'. - subprocess.py licensing status clarification. 
Signed-off-by: Junio C Hamano diff --git a/Makefile b/Makefile index ac13ecb..e90f646 100644 --- a/Makefile +++ b/Makefile @@ -44,7 +44,7 @@ # DEFINES += -DUSE_STDEV -GIT_VERSION = 0.99.7b +GIT_VERSION = 0.99.7c CFLAGS = -g -O2 -Wall ALL_CFLAGS = $(CFLAGS) $(PLATFORM_DEFINES) $(DEFINES) diff --git a/debian/changelog b/debian/changelog index e6ea7b9..ef6156a7 100644 --- a/debian/changelog +++ b/debian/changelog @@ -1,3 +1,9 @@ +git-core (0.99.7c-0) unstable; urgency=low + + * GIT 0.99.7c + + -- Junio C Hamano Sat, 24 Sep 2005 11:33:36 -0700 + git-core (0.99.7b-0) unstable; urgency=low * GIT 0.99.7b -- cgit v0.10.2-6-g49f6 From d5bc7eecbbb0b9f6122708bf5cd62f78ebdaafd8 Mon Sep 17 00:00:00 2001 From: Junio C Hamano Date: Sun, 25 Sep 2005 00:36:26 -0700 Subject: GIT v0.99.7d Signed-off-by: Junio C Hamano diff --git a/Makefile b/Makefile index e90f646..cfde69c 100644 --- a/Makefile +++ b/Makefile @@ -44,7 +44,7 @@ # DEFINES += -DUSE_STDEV -GIT_VERSION = 0.99.7c +GIT_VERSION = 0.99.7d CFLAGS = -g -O2 -Wall ALL_CFLAGS = $(CFLAGS) $(PLATFORM_DEFINES) $(DEFINES) diff --git a/debian/changelog b/debian/changelog index ef6156a7..a48c889 100644 --- a/debian/changelog +++ b/debian/changelog @@ -1,3 +1,9 @@ +git-core (0.99.7d-0) unstable; urgency=low + + * GIT 0.99.7d + + -- Junio C Hamano Sun, 25 Sep 2005 00:40:46 -0700 + git-core (0.99.7c-0) unstable; urgency=low * GIT 0.99.7c -- cgit v0.10.2-6-g49f6 From b52d9f9ba7276b9a9516ba359849f771b354d65b Mon Sep 17 00:00:00 2001 From: Junio C Hamano Date: Sat, 5 Nov 2005 13:08:18 -0800 Subject: test: t4102-apply-rename fails with strict umask. We checked the result of patch application for full permission bits, when the only thing we cared about was to make sure the executable bit was correctly set. Noticed by Peter Baumann. Signed-off-by: Junio C Hamano diff --git a/t/t4102-apply-rename.sh b/t/t4102-apply-rename.sh index 530cc4d..0401d7b 100755 --- a/t/t4102-apply-rename.sh +++ b/t/t4102-apply-rename.sh @@ -32,6 +32,6 @@ test_expect_success apply \ 'git-apply --index --stat --summary --apply test-patch' test_expect_success validate \ - 'test -f bar && ls -l bar | grep "^-..x..x..x"' + 'test -f bar && ls -l bar | grep "^-..x......"' test_done -- cgit v0.10.2-6-g49f6 From b748421aaae3fbb8e0e6466ba532bcec1c71e660 Mon Sep 17 00:00:00 2001 From: Junio C Hamano Date: Sun, 6 Nov 2005 00:21:45 -0800 Subject: git-format-patch: silly typo fix. Signed-off-by: Junio C Hamano diff --git a/git-format-patch.sh b/git-format-patch.sh index 5b93ff8..b43ba39 100755 --- a/git-format-patch.sh +++ b/git-format-patch.sh @@ -101,7 +101,7 @@ case "$#,$1" in ;; 1,?*..) # single "rev1.." should mean "rev1..HEAD" - set x "$1"HEAD" + set x "$1"HEAD shift ;; 1,*) -- cgit v0.10.2-6-g49f6 From 4607166d0735d13e0ee4ffe8df0c2fc899957852 Mon Sep 17 00:00:00 2001 From: Junio C Hamano Date: Sat, 5 Nov 2005 22:26:52 -0800 Subject: Documentation: pull/clone ref mapping clarification. Josef Weidendorfer points out that git-clone documentation does not mention the initial copying of remote branch heads into corresponding local branches. Also clarify the purpose of the ref mappings description in the "remotes" file and recommended workflow. Signed-off-by: Junio C Hamano diff --git a/Documentation/git-clone.txt b/Documentation/git-clone.txt index dd92cde..cbd83f3 100644 --- a/Documentation/git-clone.txt +++ b/Documentation/git-clone.txt @@ -12,7 +12,21 @@ SYNOPSIS DESCRIPTION ----------- -Clones a repository into a newly created directory. +Clones a repository into a newly created directory. 
All remote +branch heads are copied under `$GIT_DIR/refs/heads/`, except +that the remote `master` is also copied to `origin` branch. + +In addition, `$GIT_DIR/remotes/origin` file is set up to have +this line: + + Pull: master:origin + +This is to help the typical workflow of working off of the +remote `master` branch. Every time `git pull` without argument +is run, the progress on the remote `master` branch is tracked by +copying it into the local `origin` branch, and merged into the +branch you are currently working on. + OPTIONS ------- @@ -28,9 +42,10 @@ OPTIONS --shared:: -s:: When the repository to clone is on the local machine, - instead of using hard links automatically setup + instead of using hard links, automatically setup .git/objects/info/alternatives to share the objects - with the source repository + with the source repository. The resulting repository + starts out without any object of its own. --quiet:: -q:: @@ -49,14 +64,13 @@ OPTIONS :: The (possibly remote) repository to clone from. It can - be an "rsync://host/dir" URL, an "http://host/dir" URL, - or [:]/dir notation that is used by 'git-clone-pack'. - Currently http transport is not supported. + be any URL git-fetch supports. :: The name of a new directory to be cloned into. It is an error to specify an existing directory. + Author ------ Written by Linus Torvalds diff --git a/Documentation/pull-fetch-param.txt b/Documentation/pull-fetch-param.txt index 57e9ddf..5c2888e 100644 --- a/Documentation/pull-fetch-param.txt +++ b/Documentation/pull-fetch-param.txt @@ -82,14 +82,19 @@ must know this is the expected usage pattern for a branch. [NOTE] You never do your own development on branches that appear on the right hand side of a colon on `Pull:` lines; -they are to be updated by `git-fetch`. The corollary is that -a local branch should be introduced and named on a -right-hand-side if you intend to do development derived from -that branch. -This leads to the common `Pull: master:origin` mapping of a -remote `master` branch to a local `origin` branch, which -is then merged to a local development branch, again typically -named `master`. +they are to be updated by `git-fetch`. If you intend to do +development derived from a remote branch `B`, have a `Pull:` +line to track it (i.e. `Pull: B:remote-B`), and have a separate +branch `my-B` to do your development on top of it. The latter +is created by `git branch my-B remote-B` (or its equivalent `git +checkout -b my-B remote-B`). Run `git fetch` to keep track of +the progress of the remote side, and when you see something new +on the remote branch, merge it into your development branch with +`git pull . remote-B`, while you are on `my-B` branch. +The common `Pull: master:origin` mapping of a remote `master` +branch to a local `origin` branch, which is then merged to a +ocal development branch, again typically named `master`, is made +when you run `git clone` for you to follow this pattern. + [NOTE] There is a difference between listing multiple -- cgit v0.10.2-6-g49f6 From 9e5d2b40967059dd5f35d02fba323751ef22ac4e Mon Sep 17 00:00:00 2001 From: Junio C Hamano Date: Sun, 6 Nov 2005 00:09:59 -0800 Subject: git-fetch: fail if specified refspec does not match remote. 'git-fetch remote no-such-ref' succeeded without fetching any ref from the remote. Detect such case and report an error. 
Note that this makes 'git-fetch remote master master' to fail, because the remote branch 'master' matches the first refspec, and the second refspec is left unmatched, which is detected by the error checking logic. This is somewhat unintuitive, but giving the same refspec more than once to git-fetch is useless in any case so it should not be much of a problem. I'd accept a patch to change this if somebody cares enough, though. Signed-off-by: Junio C Hamano diff --git a/fetch-pack.c b/fetch-pack.c index cb21715..6565982 100644 --- a/fetch-pack.c +++ b/fetch-pack.c @@ -458,5 +458,19 @@ int main(int argc, char **argv) close(fd[0]); close(fd[1]); finish_connect(pid); + + if (!ret && nr_heads) { + /* If the heads to pull were given, we should have + * consumed all of them by matching the remote. + * Otherwise, 'git-fetch remote no-such-ref' would + * silently succeed without issuing an error. + */ + for (i = 0; i < nr_heads; i++) + if (heads[i] && heads[i][0]) { + error("no such remote ref %s", heads[i]); + ret = 1; + } + } + return ret; } -- cgit v0.10.2-6-g49f6 From 12aa7456c937f758df9aafcc0123395ed7660fca Mon Sep 17 00:00:00 2001 From: Junio C Hamano Date: Fri, 4 Nov 2005 23:50:09 -0800 Subject: Simplify CFLAGS/DEFINES in Makefile I think the original intention was to make CFLAGS overridable from the make command line, but somehow we ended up accumulating conditional makefile sections that wrongly appends values to CFLAGs. These assignments do not work when the user actually override them from the make command line! DEFINES are handled the same way; it was seemingly overridable, but the makefile sections had assignments, which meant overriding it from the command line broke things. This simplifies things by limiting the internal futzing to ALL_CFLAGS, and by removing DEFINES altogether. Overriding CFLAGS from the command line should start working with this change. Signed-off-by: Junio C Hamano diff --git a/Makefile b/Makefile index 6c01dc2..6064672 100644 --- a/Makefile +++ b/Makefile @@ -37,25 +37,21 @@ # 1461501637330902918203684832716283019655932542976 hashes do not give you # sufficient guarantee that no collisions between objects will ever happen. -# DEFINES += -DCOLLISION_CHECK - # Define USE_NSEC below if you want git to care about sub-second file mtimes # and ctimes. Note that you need recent glibc (at least 2.2.4) for this, and # it will BREAK YOUR LOCAL DIFFS! show-diff and anything using it will likely # randomly break unless your underlying filesystem supports those sub-second # times (my ext3 doesn't). -# DEFINES += -DUSE_NSEC - # Define USE_STDEV below if you want git to care about the underlying device # change being considered an inode change from the update-cache perspective. -# DEFINES += -DUSE_STDEV - GIT_VERSION = 0.99.9.GIT +# CFLAGS is for the users to override from the command line. 
+ CFLAGS = -g -O2 -Wall -ALL_CFLAGS = $(CFLAGS) $(PLATFORM_DEFINES) $(DEFINES) +ALL_CFLAGS = $(CFLAGS) prefix = $(HOME) bindir = $(prefix)/bin @@ -194,19 +190,19 @@ ifeq ($(uname_S),SunOS) NO_STRCASESTR = YesPlease INSTALL = ginstall TAR = gtar - PLATFORM_DEFINES += -D__EXTENSIONS__ + ALL_CFLAGS += -D__EXTENSIONS__ endif ifeq ($(uname_O),Cygwin) NO_STRCASESTR = YesPlease NEEDS_LIBICONV = YesPlease NO_IPV6 = YesPlease X = .exe - PLATFORM_DEFINES += -DUSE_SYMLINK_HEAD=0 + ALL_CFLAGS += -DUSE_SYMLINK_HEAD=0 endif ifeq ($(uname_S),OpenBSD) NO_STRCASESTR = YesPlease NEEDS_LIBICONV = YesPlease - PLATFORM_DEFINES += -I/usr/local/include -L/usr/local/lib + ALL_CFLAGS += -I/usr/local/include -L/usr/local/lib endif ifneq (,$(findstring arm,$(uname_M))) ARM_SHA1 = YesPlease @@ -217,7 +213,7 @@ endif ifndef NO_CURL ifdef CURLDIR # This is still problematic -- gcc does not want -R. - CFLAGS += -I$(CURLDIR)/include + ALL_CFLAGS += -I$(CURLDIR)/include CURL_LIBCURL = -L$(CURLDIR)/lib -R$(CURLDIR)/lib -lcurl else CURL_LIBCURL = -lcurl @@ -240,13 +236,13 @@ ifndef NO_OPENSSL OPENSSL_LIBSSL = -lssl ifdef OPENSSLDIR # Again this may be problematic -- gcc does not always want -R. - CFLAGS += -I$(OPENSSLDIR)/include + ALL_CFLAGS += -I$(OPENSSLDIR)/include OPENSSL_LINK = -L$(OPENSSLDIR)/lib -R$(OPENSSLDIR)/lib else OPENSSL_LINK = endif else - DEFINES += -DNO_OPENSSL + ALL_CFLAGS += -DNO_OPENSSL MOZILLA_SHA1 = 1 OPENSSL_LIBSSL = endif @@ -258,7 +254,7 @@ endif ifdef NEEDS_LIBICONV ifdef ICONVDIR # Again this may be problematic -- gcc does not always want -R. - CFLAGS += -I$(ICONVDIR)/include + ALL_CFLAGS += -I$(ICONVDIR)/include ICONV_LINK = -L$(ICONVDIR)/lib -R$(ICONVDIR)/lib else ICONV_LINK = @@ -276,15 +272,15 @@ ifdef NEEDS_NSL SIMPLE_LIB += -lnsl endif ifdef NO_STRCASESTR - DEFINES += -Dstrcasestr=gitstrcasestr -DNO_STRCASESTR=1 + ALL_CFLAGS += -Dstrcasestr=gitstrcasestr -DNO_STRCASESTR=1 LIB_OBJS += compat/strcasestr.o endif ifdef NO_MMAP - DEFINES += -Dmmap=gitfakemmap -Dmunmap=gitfakemunmap -DNO_MMAP + ALL_CFLAGS += -Dmmap=gitfakemmap -Dmunmap=gitfakemunmap -DNO_MMAP LIB_OBJS += compat/mmap.o endif ifdef NO_IPV6 - DEFINES += -DNO_IPV6 -Dsockaddr_storage=sockaddr_in + ALL_CFLAGS += -DNO_IPV6 -Dsockaddr_storage=sockaddr_in endif ifdef PPC_SHA1 @@ -305,7 +301,7 @@ endif endif endif -DEFINES += -DSHA1_HEADER=$(call shellquote,$(SHA1_HEADER)) +ALL_CFLAGS += -DSHA1_HEADER=$(call shellquote,$(SHA1_HEADER)) SCRIPTS = $(patsubst %.sh,%,$(SCRIPT_SH)) \ $(patsubst %.perl,%,$(SCRIPT_PERL)) \ -- cgit v0.10.2-6-g49f6 From d071e8dbb6849d40038db2d98a601395fe7b0b73 Mon Sep 17 00:00:00 2001 From: Junio C Hamano Date: Sat, 5 Nov 2005 02:39:42 -0800 Subject: Package split: Debian. As discussed on the list, split the foreign SCM interoperability packages and documentation from the git-core binary package. 
Signed-off-by: Junio C Hamano diff --git a/Makefile b/Makefile index 6064672..76d33b4 100644 --- a/Makefile +++ b/Makefile @@ -450,8 +450,8 @@ clean: rm -f git-core.spec *.pyc *.pyo rm -rf $(GIT_TARNAME) rm -f $(GIT_TARNAME).tar.gz git-core_$(GIT_VERSION)-*.tar.gz - rm -f git-core_$(GIT_VERSION)-*.deb git-core_$(GIT_VERSION)-*.dsc - rm -f git-tk_$(GIT_VERSION)-*.deb + rm -f git-core_$(GIT_VERSION)-*.dsc + rm -f git-*_$(GIT_VERSION)-*.deb $(MAKE) -C Documentation/ clean $(MAKE) -C templates clean $(MAKE) -C t/ clean diff --git a/debian/changelog b/debian/changelog index 5fd31b7..9ea3139 100644 --- a/debian/changelog +++ b/debian/changelog @@ -1,3 +1,10 @@ +git-core (0.99.9-1) unstable; urgency=low + + * Split the git-core binary package into core, doc, and foreign SCM + interoperability modules. + + -- Junio C Hamano Sat, 5 Nov 2005 11:18:13 -0800 + git-core (0.99.9-0) unstable; urgency=low * GIT 0.99.9 diff --git a/debian/control b/debian/control index 1f45f93..f03ca49 100644 --- a/debian/control +++ b/debian/control @@ -8,7 +8,7 @@ Standards-Version: 3.6.1 Package: git-core Architecture: any Depends: ${shlibs:Depends}, ${perl:Depends}, ${misc:Depends}, rcs -Recommends: rsync, curl, ssh, libmail-sendmail-perl, libemail-valid-perl, libsvn-core-perl (>= 1.2.1), python (>= 2.4.0), less +Recommends: rsync, curl, ssh, python (>= 2.4.0), less Suggests: cogito, patch Conflicts: git, cogito (<< 0.13) Description: The git content addressable filesystem @@ -18,9 +18,46 @@ Description: The git content addressable filesystem enables human beings to work with the database in a manner to a degree similar to other SCM tools. +Package: git-doc +Architecture: all +Depends: ${shlibs:Depends}, ${misc:Depends}, git-core, tk8.4 +Description: The git content addressable filesystem, Documentation + This package contains documentation for GIT. + Package: git-tk Architecture: all Depends: ${shlibs:Depends}, ${misc:Depends}, git-core, tk8.4 Description: The git content addressable filesystem, GUI add-on This package contains 'gitk', the git revision tree visualizer. +Package: git-svn +Architecture: all +Depends: ${shlibs:Depends}, ${misc:Depends}, ${perl:Depends}, git-core, libsvn-core-perl (>= 1.2.1) +Suggests: subversion +Description: The git content addressable filesystem, SVN interoperability + This package contains 'git-svnimport', to import development history from + SVN repositories. + +Package: git-arch +Architecture: all +Depends: ${shlibs:Depends}, ${misc:Depends}, ${perl:Depends}, git-core +Suggests: tla, bazaar +Description: The git content addressable filesystem, GNUArch interoperability + This package contains 'git-archimport', to import development history from + GNUArch repositories. + +Package: git-cvs +Architecture: all +Depends: ${shlibs:Depends}, ${misc:Depends}, ${perl:Depends}, git-core +Suggests: cvs +Description: The git content addressable filesystem, CVS interoperability + This package contains 'git-cvsimport', to import development history from + CVS repositories. + +Package: git-email +Architecture: all +Depends: ${shlibs:Depends}, ${misc:Depends}, git-core, libmail-sendmail-perl, libemail-valid-perl +Description: The git content addressable filesystem, e-mail add-on + This package contains 'git-send-email', to send a series of patch e-mails. 
+ + diff --git a/debian/git-arch.files b/debian/git-arch.files new file mode 100644 index 0000000..1ad4656 --- /dev/null +++ b/debian/git-arch.files @@ -0,0 +1,2 @@ +/usr/bin/git-archimport +/usr/share/doc/git-core/git-archimport.* diff --git a/debian/git-cvs.files b/debian/git-cvs.files new file mode 100644 index 0000000..8bf5090 --- /dev/null +++ b/debian/git-cvs.files @@ -0,0 +1,2 @@ +/usr/bin/git-cvsimport +/usr/share/doc/git-core/git-cvsimport.* diff --git a/debian/git-doc.files b/debian/git-doc.files new file mode 100644 index 0000000..0762f37 --- /dev/null +++ b/debian/git-doc.files @@ -0,0 +1,7 @@ +/usr/share/doc/git-core/git-*.txt +/usr/share/doc/git-core/git-*.html +/usr/share/doc/git-core/*/*.html +/usr/share/doc/git-core/*/*.txt + + + diff --git a/debian/git-email.files b/debian/git-email.files new file mode 100644 index 0000000..236754c --- /dev/null +++ b/debian/git-email.files @@ -0,0 +1,2 @@ +/usr/bin/git-send-email +/usr/share/doc/git-core/git-send-email.* diff --git a/debian/git-svn.files b/debian/git-svn.files new file mode 100644 index 0000000..317b12a --- /dev/null +++ b/debian/git-svn.files @@ -0,0 +1,2 @@ +/usr/bin/git-svnimport +/usr/share/doc/git-core/git-svnimport.* diff --git a/debian/rules b/debian/rules index 568d430..9f95146 100755 --- a/debian/rules +++ b/debian/rules @@ -41,7 +41,7 @@ MAN_DESTDIR := $(DESTDIR)/$(MANDIR) build: debian/build-stamp debian/build-stamp: dh_testdir - $(MAKE) prefix=$(PREFIX) PYTHON_PATH=/usr/bin/python2.4 all doc test + $(MAKE) prefix=$(PREFIX) PYTHON_PATH=/usr/bin/python2.4 all test doc touch debian/build-stamp debian-clean: @@ -65,7 +65,12 @@ install: build mkdir -p $(DOC_DESTDIR) find $(DOC) '(' -name '*.txt' -o -name '*.html' ')' -exec install {} $(DOC_DESTDIR) ';' + dh_movefiles -p git-arch + dh_movefiles -p git-cvs + dh_movefiles -p git-svn dh_movefiles -p git-tk + dh_movefiles -p git-email + dh_movefiles -p git-doc dh_movefiles -p git-core find debian/tmp -type d -o -print | sed -e 's/^/? /' -- cgit v0.10.2-6-g49f6 From 54c2533da0228244bbde7f94abfdb0a7fbb859c1 Mon Sep 17 00:00:00 2001 From: Junio C Hamano Date: Sat, 5 Nov 2005 12:52:56 -0800 Subject: Install asciidoc sources as well. Signed-off-by: Junio C Hamano diff --git a/Documentation/install-webdoc.sh b/Documentation/install-webdoc.sh index d593ab9..50638c7 100755 --- a/Documentation/install-webdoc.sh +++ b/Documentation/install-webdoc.sh @@ -2,7 +2,7 @@ T="$1" -for h in *.html howto/*.txt howto/*.html +for h in *.html *.txt howto/*.txt howto/*.html do diff -u -I'Last updated [0-9][0-9]-[A-Z][a-z][a-z]-' "$T/$h" "$h" || { echo >&2 "# install $h $T/$h" @@ -12,7 +12,7 @@ do } done strip_leading=`echo "$T/" | sed -e 's|.|.|g'` -for th in "$T"/*.html "$T"/howto/*.txt "$T"/howto/*.html +for th in "$T"/*.html "$T"/*.txt "$T"/howto/*.txt "$T"/howto/*.html do h=`expr "$th" : "$strip_leading"'\(.*\)'` case "$h" in -- cgit v0.10.2-6-g49f6 From 9eba845f2088c33de3efe04492b9e33114c5fb52 Mon Sep 17 00:00:00 2001 From: Junio C Hamano Date: Sat, 5 Nov 2005 12:54:25 -0800 Subject: Further Debian split fixes. The doc installation was flattened, breaking links to howto/. Silly cut&paste error made git-doc depend on tk8.4. Doh. Move most of the documentation (except manuals) to git-doc. 
Signed-off-by: Junio C Hamano diff --git a/debian/control b/debian/control index f03ca49..8cc527e 100644 --- a/debian/control +++ b/debian/control @@ -20,7 +20,7 @@ Description: The git content addressable filesystem Package: git-doc Architecture: all -Depends: ${shlibs:Depends}, ${misc:Depends}, git-core, tk8.4 +Depends: ${shlibs:Depends}, ${misc:Depends}, git-core Description: The git content addressable filesystem, Documentation This package contains documentation for GIT. diff --git a/debian/git-doc.files b/debian/git-doc.files index 0762f37..567f5d7 100644 --- a/debian/git-doc.files +++ b/debian/git-doc.files @@ -1,5 +1,5 @@ -/usr/share/doc/git-core/git-*.txt -/usr/share/doc/git-core/git-*.html +/usr/share/doc/git-core/*.txt +/usr/share/doc/git-core/*.html /usr/share/doc/git-core/*/*.html /usr/share/doc/git-core/*/*.txt diff --git a/debian/rules b/debian/rules index 9f95146..4ab221c 100755 --- a/debian/rules +++ b/debian/rules @@ -62,8 +62,8 @@ install: build make DESTDIR=$(DESTDIR) prefix=$(PREFIX) mandir=$(MANDIR) \ install install-doc - mkdir -p $(DOC_DESTDIR) - find $(DOC) '(' -name '*.txt' -o -name '*.html' ')' -exec install {} $(DOC_DESTDIR) ';' + make -C Documentation DESTDIR=$(DESTDIR) prefix=$(PREFIX) \ + WEBDOC_DEST=$(DOC_DESTDIR) install-webdoc dh_movefiles -p git-arch dh_movefiles -p git-cvs -- cgit v0.10.2-6-g49f6 From c0a2ed1b491deaf8160a311d6b39bae7e266248c Mon Sep 17 00:00:00 2001 From: Junio C Hamano Date: Sat, 5 Nov 2005 12:54:40 -0800 Subject: Debian: test build. Update version number in changelog to match the 0.99.9.GIT version number, to allow building private deb from wip. Signed-off-by: Junio C Hamano diff --git a/debian/changelog b/debian/changelog index 9ea3139..0c6e34b 100644 --- a/debian/changelog +++ b/debian/changelog @@ -1,3 +1,9 @@ +git-core (0.99.9.GIT-0) unstable; urgency=low + + * Test Build. + + -- Junio C Hamano Sat, 5 Nov 2005 11:18:13 -0800 + git-core (0.99.9-1) unstable; urgency=low * Split the git-core binary package into core, doc, and foreign SCM -- cgit v0.10.2-6-g49f6 From 58e60dd203362ecb9fdea765dcc2eb573892dbaf Mon Sep 17 00:00:00 2001 From: Nick Hengeveld Date: Wed, 2 Nov 2005 11:19:24 -0800 Subject: Add support for pushing to a remote repository using HTTP/DAV Add support for pushing to a remote repository using HTTP/DAV Signed-off-by: Nick Hengeveld Signed-off-by: Junio C Hamano diff --git a/Documentation/git-http-push.txt b/Documentation/git-http-push.txt new file mode 100644 index 0000000..c7066d6 --- /dev/null +++ b/Documentation/git-http-push.txt @@ -0,0 +1,89 @@ +git-http-push(1) +================ + +NAME +---- +git-http-push - Push missing objects using HTTP/DAV. + + +SYNOPSIS +-------- +'git-http-push' [--complete] [--force] [--verbose] [...] + +DESCRIPTION +----------- +Sends missing objects to remote repository, and updates the +remote branch. + + +OPTIONS +------- +--complete:: + Do not assume that the remote repository is complete in its + current state, and verify all objects in the entire local + ref's history exist in the remote repository. + +--force:: + Usually, the command refuses to update a remote ref that + is not an ancestor of the local ref used to overwrite it. + This flag disables the check. What this means is that + the remote repository can lose commits; use it with + care. + +--verbose:: + Report the list of objects being walked locally and the + list of objects successfully sent to the remote repository. + +...: + The remote refs to update. 
+ + +Specifying the Refs +------------------- + +A '' specification can be either a single pattern, or a pair +of such patterns separated by a colon ":" (this means that a ref name +cannot have a colon in it). A single pattern '' is just a +shorthand for ':'. + +Each pattern pair consists of the source side (before the colon) +and the destination side (after the colon). The ref to be +pushed is determined by finding a match that matches the source +side, and where it is pushed is determined by using the +destination side. + + - It is an error if does not match exactly one of the + local refs. + + - If does not match any remote ref, either + + * it has to start with "refs/"; is used as the + destination literally in this case. + + * == and the ref that matched the must not + exist in the set of remote refs; the ref matched + locally is used as the name of the destination. + +Without '--force', the ref is stored at the remote only if + does not exist, or is a proper subset (i.e. an +ancestor) of . This check, known as "fast forward check", +is performed in order to avoid accidentally overwriting the +remote ref and lose other peoples' commits from there. + +With '--force', the fast forward check is disabled for all refs. + +Optionally, a parameter can be prefixed with a plus '+' sign +to disable the fast-forward check only on that ref. + + +Author +------ +Written by Nick Hengeveld + +Documentation +-------------- +Documentation by Nick Hengeveld + +GIT +--- +Part of the gitlink:git[7] suite diff --git a/Makefile b/Makefile index 6c01dc2..f4fe31e 100644 --- a/Makefile +++ b/Makefile @@ -6,12 +6,16 @@ # Define NO_OPENSSL environment variable if you do not have OpenSSL. You will # miss out git-rev-list --merge-order. This also implies MOZILLA_SHA1. # -# Define NO_CURL if you do not have curl installed. git-http-pull is not -# built, and you cannot use http:// and https:// transports. +# Define NO_CURL if you do not have curl installed. git-http-pull and +# git-http-push are not built, and you cannot use http:// and https:// +# transports. # # Define CURLDIR=/foo/bar if your curl header and library files are in # /foo/bar/include and /foo/bar/lib directories. # +# Define NO_EXPAT if you do not have expat installed. git-http-push is +# not built, and you cannot push using http:// and https:// transports. +# # Define NO_STRCASESTR if you don't have strcasestr. 
# # Define PPC_SHA1 environment variable when running make to make use of @@ -223,6 +227,10 @@ ifndef NO_CURL CURL_LIBCURL = -lcurl endif PROGRAMS += git-http-fetch$X + ifndef NO_EXPAT + EXPAT_LIBEXPAT = -lexpat + PROGRAMS += git-http-push$X + endif endif ifndef SHELL_PATH @@ -375,6 +383,7 @@ git-ssh-pull$X: rsh.o fetch.o git-ssh-push$X: rsh.o git-http-fetch$X: LIBS += $(CURL_LIBCURL) +git-http-push$X: LIBS += $(CURL_LIBCURL) $(EXPAT_LIBEXPAT) git-rev-list$X: LIBS += $(OPENSSL_LIBSSL) init-db.o: init-db.c diff --git a/http-push.c b/http-push.c new file mode 100644 index 0000000..e85f1c1 --- /dev/null +++ b/http-push.c @@ -0,0 +1,1625 @@ +#include "cache.h" +#include "commit.h" +#include "pack.h" +#include "fetch.h" +#include "tag.h" +#include "blob.h" + +#include +#include +#include "expat.h" + +static const char http_push_usage[] = +"git-http-push [--complete] [--force] [--verbose] [...]\n"; + +#if LIBCURL_VERSION_NUM >= 0x070908 +#define USE_CURL_MULTI +#define DEFAULT_MAX_REQUESTS 5 +#endif + +#if LIBCURL_VERSION_NUM < 0x070704 +#define curl_global_cleanup() do { /* nothing */ } while(0) +#endif +#if LIBCURL_VERSION_NUM < 0x070800 +#define curl_global_init(a) do { /* nothing */ } while(0) +#endif + +#if LIBCURL_VERSION_NUM < 0x070c04 +#define NO_CURL_EASY_DUPHANDLE +#endif + +#define RANGE_HEADER_SIZE 30 + +/* DAV method names and request body templates */ +#define DAV_LOCK "LOCK" +#define DAV_MKCOL "MKCOL" +#define DAV_MOVE "MOVE" +#define DAV_PROPFIND "PROPFIND" +#define DAV_PUT "PUT" +#define DAV_UNLOCK "UNLOCK" +#define PROPFIND_REQUEST "\n\n\n\n\n" +#define LOCK_REQUEST "\n\n\n\n\nmailto:%s\n\n" + +static int active_requests = 0; +static int data_received; +static int pushing = 0; +static int aborted = 0; + +#ifdef USE_CURL_MULTI +static int max_requests = -1; +static CURLM *curlm; +#endif +#ifndef NO_CURL_EASY_DUPHANDLE +static CURL *curl_default; +#endif +static struct curl_slist *no_pragma_header; +static struct curl_slist *default_headers; +static char curl_errorstr[CURL_ERROR_SIZE]; +static char *lock_token = NULL; + +static int push_verbosely = 0; +static int push_all = 0; +static int force_all = 0; + +struct buffer +{ + size_t posn; + size_t size; + void *buffer; +}; + +struct repo +{ + char *url; + struct packed_git *packs; +}; + +static struct repo *remote = NULL; + +enum transfer_state { + NEED_CHECK, + RUN_HEAD, + NEED_PUSH, + RUN_MKCOL, + RUN_PUT, + RUN_MOVE, + ABORTED, + COMPLETE, +}; + +struct transfer_request +{ + unsigned char sha1[20]; + char *url; + char *dest; + char *lock_token; + struct curl_slist *headers; + struct buffer buffer; + char filename[PATH_MAX]; + char tmpfile[PATH_MAX]; + enum transfer_state state; + CURLcode curl_result; + char errorstr[CURL_ERROR_SIZE]; + long http_code; + unsigned char real_sha1[20]; + SHA_CTX c; + z_stream stream; + int zret; + int rename; + struct active_request_slot *slot; + struct transfer_request *next; +}; + +struct active_request_slot +{ + CURL *curl; + FILE *local; + int in_use; + int done; + CURLcode curl_result; + long http_code; + struct active_request_slot *next; +}; + +static struct transfer_request *request_queue_head = NULL; +static struct active_request_slot *active_queue_head = NULL; + +static int curl_ssl_verify = -1; +static char *ssl_cert = NULL; +#if LIBCURL_VERSION_NUM >= 0x070902 +static char *ssl_key = NULL; +#endif +#if LIBCURL_VERSION_NUM >= 0x070908 +static char *ssl_capath = NULL; +#endif +static char *ssl_cainfo = NULL; +static long curl_low_speed_limit = -1; +static long curl_low_speed_time = -1; + 
+struct lockprop +{ + int supported_lock; + int lock_entry; + int lock_scope; + int lock_type; + int lock_exclusive; + int lock_exclusive_write; +}; + +static int http_options(const char *var, const char *value) +{ + if (!strcmp("http.sslverify", var)) { + if (curl_ssl_verify == -1) { + curl_ssl_verify = git_config_bool(var, value); + } + return 0; + } + + if (!strcmp("http.sslcert", var)) { + if (ssl_cert == NULL) { + ssl_cert = xmalloc(strlen(value)+1); + strcpy(ssl_cert, value); + } + return 0; + } +#if LIBCURL_VERSION_NUM >= 0x070902 + if (!strcmp("http.sslkey", var)) { + if (ssl_key == NULL) { + ssl_key = xmalloc(strlen(value)+1); + strcpy(ssl_key, value); + } + return 0; + } +#endif +#if LIBCURL_VERSION_NUM >= 0x070908 + if (!strcmp("http.sslcapath", var)) { + if (ssl_capath == NULL) { + ssl_capath = xmalloc(strlen(value)+1); + strcpy(ssl_capath, value); + } + return 0; + } +#endif + if (!strcmp("http.sslcainfo", var)) { + if (ssl_cainfo == NULL) { + ssl_cainfo = xmalloc(strlen(value)+1); + strcpy(ssl_cainfo, value); + } + return 0; + } + +#ifdef USE_CURL_MULTI + if (!strcmp("http.maxrequests", var)) { + if (max_requests == -1) + max_requests = git_config_int(var, value); + return 0; + } +#endif + + if (!strcmp("http.lowspeedlimit", var)) { + if (curl_low_speed_limit == -1) + curl_low_speed_limit = (long)git_config_int(var, value); + return 0; + } + if (!strcmp("http.lowspeedtime", var)) { + if (curl_low_speed_time == -1) + curl_low_speed_time = (long)git_config_int(var, value); + return 0; + } + + /* Fall back on the default ones */ + return git_default_config(var, value); +} + +static size_t fread_buffer(void *ptr, size_t eltsize, size_t nmemb, + struct buffer *buffer) +{ + size_t size = eltsize * nmemb; + if (size > buffer->size - buffer->posn) + size = buffer->size - buffer->posn; + memcpy(ptr, buffer->buffer + buffer->posn, size); + buffer->posn += size; + return size; +} + +static size_t fwrite_buffer_dynamic(const void *ptr, size_t eltsize, + size_t nmemb, struct buffer *buffer) +{ + size_t size = eltsize * nmemb; + if (size > buffer->size - buffer->posn) { + buffer->size = buffer->size * 3 / 2; + if (buffer->size < buffer->posn + size) + buffer->size = buffer->posn + size; + buffer->buffer = xrealloc(buffer->buffer, buffer->size); + } + memcpy(buffer->buffer + buffer->posn, ptr, size); + buffer->posn += size; + data_received++; + return size; +} + +static size_t fwrite_null(const void *ptr, size_t eltsize, + size_t nmemb, struct buffer *buffer) +{ + data_received++; + return eltsize * nmemb; +} + +#ifdef USE_CURL_MULTI +static void process_curl_messages(void); +static void process_request_queue(void); +#endif + +static CURL* get_curl_handle(void) +{ + CURL* result = curl_easy_init(); + + curl_easy_setopt(result, CURLOPT_SSL_VERIFYPEER, curl_ssl_verify); +#if LIBCURL_VERSION_NUM >= 0x070907 + curl_easy_setopt(result, CURLOPT_NETRC, CURL_NETRC_OPTIONAL); +#endif + + if (ssl_cert != NULL) + curl_easy_setopt(result, CURLOPT_SSLCERT, ssl_cert); +#if LIBCURL_VERSION_NUM >= 0x070902 + if (ssl_key != NULL) + curl_easy_setopt(result, CURLOPT_SSLKEY, ssl_key); +#endif +#if LIBCURL_VERSION_NUM >= 0x070908 + if (ssl_capath != NULL) + curl_easy_setopt(result, CURLOPT_CAPATH, ssl_capath); +#endif + if (ssl_cainfo != NULL) + curl_easy_setopt(result, CURLOPT_CAINFO, ssl_cainfo); + curl_easy_setopt(result, CURLOPT_FAILONERROR, 1); + + if (curl_low_speed_limit > 0 && curl_low_speed_time > 0) { + curl_easy_setopt(result, CURLOPT_LOW_SPEED_LIMIT, + curl_low_speed_limit); + 
curl_easy_setopt(result, CURLOPT_LOW_SPEED_TIME, + curl_low_speed_time); + } + + return result; +} + +static struct active_request_slot *get_active_slot(void) +{ + struct active_request_slot *slot = active_queue_head; + struct active_request_slot *newslot; + +#ifdef USE_CURL_MULTI + int num_transfers; + + /* Wait for a slot to open up if the queue is full */ + while (active_requests >= max_requests) { + curl_multi_perform(curlm, &num_transfers); + if (num_transfers < active_requests) { + process_curl_messages(); + } + } +#endif + + while (slot != NULL && slot->in_use) { + slot = slot->next; + } + if (slot == NULL) { + newslot = xmalloc(sizeof(*newslot)); + newslot->curl = NULL; + newslot->in_use = 0; + newslot->next = NULL; + + slot = active_queue_head; + if (slot == NULL) { + active_queue_head = newslot; + } else { + while (slot->next != NULL) { + slot = slot->next; + } + slot->next = newslot; + } + slot = newslot; + } + + if (slot->curl == NULL) { +#ifdef NO_CURL_EASY_DUPHANDLE + slot->curl = get_curl_handle(); +#else + slot->curl = curl_easy_duphandle(curl_default); +#endif + } + + active_requests++; + slot->in_use = 1; + slot->done = 0; + slot->local = NULL; + curl_easy_setopt(slot->curl, CURLOPT_HTTPHEADER, default_headers); + curl_easy_setopt(slot->curl, CURLOPT_ERRORBUFFER, curl_errorstr); + + return slot; +} + +static int start_active_slot(struct active_request_slot *slot) +{ +#ifdef USE_CURL_MULTI + CURLMcode curlm_result = curl_multi_add_handle(curlm, slot->curl); + + if (curlm_result != CURLM_OK && + curlm_result != CURLM_CALL_MULTI_PERFORM) { + active_requests--; + slot->in_use = 0; + return 0; + } +#endif + return 1; +} + +static void run_active_slot(struct active_request_slot *slot) +{ +#ifdef USE_CURL_MULTI + int num_transfers; + long last_pos = 0; + long current_pos; + fd_set readfds; + fd_set writefds; + fd_set excfds; + int max_fd; + struct timeval select_timeout; + CURLMcode curlm_result; + + while (!slot->done) { + data_received = 0; + do { + curlm_result = curl_multi_perform(curlm, + &num_transfers); + } while (curlm_result == CURLM_CALL_MULTI_PERFORM); + if (num_transfers < active_requests) { + process_curl_messages(); + process_request_queue(); + } + + if (!data_received && slot->local != NULL) { + current_pos = ftell(slot->local); + if (current_pos > last_pos) + data_received++; + last_pos = current_pos; + } + + if (!slot->done && !data_received) { + max_fd = 0; + FD_ZERO(&readfds); + FD_ZERO(&writefds); + FD_ZERO(&excfds); + select_timeout.tv_sec = 0; + select_timeout.tv_usec = 50000; + select(max_fd, &readfds, &writefds, + &excfds, &select_timeout); + } + } +#else + slot->curl_result = curl_easy_perform(slot->curl); + active_requests--; +#endif +} + +static void start_check(struct transfer_request *request) +{ + char *hex = sha1_to_hex(request->sha1); + struct active_request_slot *slot; + char *posn; + + request->url = xmalloc(strlen(remote->url) + 55); + strcpy(request->url, remote->url); + posn = request->url + strlen(remote->url); + strcpy(posn, "objects/"); + posn += 8; + memcpy(posn, hex, 2); + posn += 2; + *(posn++) = '/'; + strcpy(posn, hex + 2); + + slot = get_active_slot(); + curl_easy_setopt(slot->curl, CURLOPT_ERRORBUFFER, request->errorstr); + curl_easy_setopt(slot->curl, CURLOPT_URL, request->url); + curl_easy_setopt(slot->curl, CURLOPT_NOBODY, 1); + + if (start_active_slot(slot)) { + request->slot = slot; + request->state = RUN_HEAD; + } else { + request->state = ABORTED; + free(request->url); + } +} + +static void start_mkcol(struct transfer_request 
*request) +{ + char *hex = sha1_to_hex(request->sha1); + struct active_request_slot *slot; + char *posn; + + request->url = xmalloc(strlen(remote->url) + 13); + strcpy(request->url, remote->url); + posn = request->url + strlen(remote->url); + strcpy(posn, "objects/"); + posn += 8; + memcpy(posn, hex, 2); + posn += 2; + strcpy(posn, "/"); + + slot = get_active_slot(); + curl_easy_setopt(slot->curl, CURLOPT_HTTPGET, 1); /* undo PUT setup */ + curl_easy_setopt(slot->curl, CURLOPT_URL, request->url); + curl_easy_setopt(slot->curl, CURLOPT_ERRORBUFFER, request->errorstr); + curl_easy_setopt(slot->curl, CURLOPT_CUSTOMREQUEST, DAV_MKCOL); + curl_easy_setopt(slot->curl, CURLOPT_WRITEFUNCTION, fwrite_null); + + if (start_active_slot(slot)) { + request->slot = slot; + request->state = RUN_MKCOL; + } else { + request->state = ABORTED; + free(request->url); + } +} + +static void start_put(struct transfer_request *request) +{ + char *hex = sha1_to_hex(request->sha1); + struct active_request_slot *slot; + char *posn; + char type[20]; + char hdr[50]; + void *unpacked; + unsigned long len; + int hdrlen; + ssize_t size; + z_stream stream; + + unpacked = read_sha1_file(request->sha1, type, &len); + hdrlen = sprintf(hdr, "%s %lu", type, len) + 1; + + /* Set it up */ + memset(&stream, 0, sizeof(stream)); + deflateInit(&stream, Z_BEST_COMPRESSION); + size = deflateBound(&stream, len + hdrlen); + request->buffer.buffer = xmalloc(size); + + /* Compress it */ + stream.next_out = request->buffer.buffer; + stream.avail_out = size; + + /* First header.. */ + stream.next_in = (void *)hdr; + stream.avail_in = hdrlen; + while (deflate(&stream, 0) == Z_OK) + /* nothing */; + + /* Then the data itself.. */ + stream.next_in = unpacked; + stream.avail_in = len; + while (deflate(&stream, Z_FINISH) == Z_OK) + /* nothing */; + deflateEnd(&stream); + free(unpacked); + + request->buffer.size = stream.total_out; + request->buffer.posn = 0; + + if (request->url != NULL) + free(request->url); + request->url = xmalloc(strlen(remote->url) + + strlen(request->lock_token) + 51); + strcpy(request->url, remote->url); + posn = request->url + strlen(remote->url); + strcpy(posn, "objects/"); + posn += 8; + memcpy(posn, hex, 2); + posn += 2; + *(posn++) = '/'; + strcpy(posn, hex + 2); + request->dest = xmalloc(strlen(request->url) + 14); + sprintf(request->dest, "Destination: %s", request->url); + posn += 38; + *(posn++) = '.'; + strcpy(posn, request->lock_token); + + slot = get_active_slot(); + curl_easy_setopt(slot->curl, CURLOPT_INFILE, &request->buffer); + curl_easy_setopt(slot->curl, CURLOPT_INFILESIZE, request->buffer.size); + curl_easy_setopt(slot->curl, CURLOPT_READFUNCTION, fread_buffer); + curl_easy_setopt(slot->curl, CURLOPT_WRITEFUNCTION, fwrite_null); + curl_easy_setopt(slot->curl, CURLOPT_CUSTOMREQUEST, DAV_PUT); + curl_easy_setopt(slot->curl, CURLOPT_UPLOAD, 1); + curl_easy_setopt(slot->curl, CURLOPT_PUT, 1); + curl_easy_setopt(slot->curl, CURLOPT_NOBODY, 0); + curl_easy_setopt(slot->curl, CURLOPT_URL, request->url); + + if (start_active_slot(slot)) { + request->slot = slot; + request->state = RUN_PUT; + } else { + request->state = ABORTED; + free(request->url); + } +} + +static void start_move(struct transfer_request *request) +{ + struct active_request_slot *slot; + struct curl_slist *dav_headers = NULL; + + slot = get_active_slot(); + curl_easy_setopt(slot->curl, CURLOPT_HTTPGET, 1); /* undo PUT setup */ + curl_easy_setopt(slot->curl, CURLOPT_CUSTOMREQUEST, DAV_MOVE); + dav_headers = curl_slist_append(dav_headers, 
request->dest); + dav_headers = curl_slist_append(dav_headers, "Overwrite: T"); + curl_easy_setopt(slot->curl, CURLOPT_HTTPHEADER, dav_headers); + curl_easy_setopt(slot->curl, CURLOPT_WRITEFUNCTION, fwrite_null); + curl_easy_setopt(slot->curl, CURLOPT_URL, request->url); + + if (start_active_slot(slot)) { + request->slot = slot; + request->state = RUN_MOVE; + } else { + request->state = ABORTED; + free(request->url); + } +} + +static void finish_request(struct transfer_request *request) +{ + request->curl_result = request->slot->curl_result; + request->http_code = request->slot->http_code; + request->slot = NULL; + if (request->headers != NULL) + curl_slist_free_all(request->headers); + if (request->state == RUN_HEAD) { + if (request->http_code == 404) { + request->state = NEED_PUSH; + } else if (request->curl_result == CURLE_OK) { + request->state = COMPLETE; + } else { + fprintf(stderr, "HEAD %s failed, aborting (%d/%ld)\n", + sha1_to_hex(request->sha1), + request->curl_result, request->http_code); + request->state = ABORTED; + aborted = 1; + } + } else if (request->state == RUN_MKCOL) { + if (request->curl_result == CURLE_OK || + request->http_code == 405) { + start_put(request); + } else { + fprintf(stderr, "MKCOL %s failed, aborting (%d/%ld)\n", + sha1_to_hex(request->sha1), + request->curl_result, request->http_code); + request->state = ABORTED; + aborted = 1; + } + } else if (request->state == RUN_PUT) { + if (request->curl_result == CURLE_OK) { + start_move(request); + } else { + fprintf(stderr, "PUT %s failed, aborting (%d/%ld)\n", + sha1_to_hex(request->sha1), + request->curl_result, request->http_code); + request->state = ABORTED; + aborted = 1; + } + } else if (request->state == RUN_MOVE) { + if (request->curl_result == CURLE_OK) { + if (push_verbosely) + fprintf(stderr, + "sent %s\n", + sha1_to_hex(request->sha1)); + request->state = COMPLETE; + } else { + fprintf(stderr, "MOVE %s failed, aborting (%d/%ld)\n", + sha1_to_hex(request->sha1), + request->curl_result, request->http_code); + request->state = ABORTED; + aborted = 1; + } + } +} + +static void release_request(struct transfer_request *request) +{ + struct transfer_request *entry = request_queue_head; + + if (request == request_queue_head) { + request_queue_head = request->next; + } else { + while (entry->next != NULL && entry->next != request) + entry = entry->next; + if (entry->next == request) + entry->next = entry->next->next; + } + + free(request->url); + free(request); +} + +#ifdef USE_CURL_MULTI +void process_curl_messages(void) +{ + int num_messages; + struct active_request_slot *slot; + struct transfer_request *request = NULL; + CURLMsg *curl_message = curl_multi_info_read(curlm, &num_messages); + + while (curl_message != NULL) { + if (curl_message->msg == CURLMSG_DONE) { + slot = active_queue_head; + while (slot != NULL && + slot->curl != curl_message->easy_handle) + slot = slot->next; + if (slot != NULL) { + curl_multi_remove_handle(curlm, slot->curl); + active_requests--; + slot->done = 1; + slot->in_use = 0; + slot->curl_result = curl_message->data.result; + curl_easy_getinfo(slot->curl, + CURLINFO_HTTP_CODE, + &slot->http_code); + request = request_queue_head; + while (request != NULL && + request->slot != slot) + request = request->next; + if (request != NULL) + finish_request(request); + } else { + fprintf(stderr, "Received DONE message for unknown request!\n"); + } + } else { + fprintf(stderr, "Unknown CURL message received: %d\n", + (int)curl_message->msg); + } + curl_message = 
curl_multi_info_read(curlm, &num_messages); + } +} + +void process_request_queue(void) +{ + struct transfer_request *request = request_queue_head; + struct active_request_slot *slot = active_queue_head; + int num_transfers; + + if (aborted) + return; + + while (active_requests < max_requests && request != NULL) { + if (!pushing && request->state == NEED_CHECK) { + start_check(request); + curl_multi_perform(curlm, &num_transfers); + } else if (pushing && request->state == NEED_PUSH) { + start_mkcol(request); + curl_multi_perform(curlm, &num_transfers); + } + request = request->next; + } + + while (slot != NULL) { + if (!slot->in_use && slot->curl != NULL) { + curl_easy_cleanup(slot->curl); + slot->curl = NULL; + } + slot = slot->next; + } +} +#endif + +void process_waiting_requests(void) +{ + struct active_request_slot *slot = active_queue_head; + + while (slot != NULL) + if (slot->in_use) { + run_active_slot(slot); + slot = active_queue_head; + } else { + slot = slot->next; + } +} + +void add_request(unsigned char *sha1, char *lock_token) +{ + struct transfer_request *request = request_queue_head; + struct transfer_request *tail; + struct packed_git *target; + + while (request != NULL && memcmp(request->sha1, sha1, 20)) + request = request->next; + if (request != NULL) + return; + + target = find_sha1_pack(sha1, remote->packs); + if (target) + return; + + request = xmalloc(sizeof(*request)); + memcpy(request->sha1, sha1, 20); + request->url = NULL; + request->lock_token = lock_token; + request->headers = NULL; + request->state = NEED_CHECK; + request->next = NULL; + + if (request_queue_head == NULL) { + request_queue_head = request; + } else { + tail = request_queue_head; + while (tail->next != NULL) { + tail = tail->next; + } + tail->next = request; + } +#ifdef USE_CURL_MULTI + process_request_queue(); + process_curl_messages(); +#endif +} + +static int fetch_index(unsigned char *sha1) +{ + char *hex = sha1_to_hex(sha1); + char *filename; + char *url; + char tmpfile[PATH_MAX]; + long prev_posn = 0; + char range[RANGE_HEADER_SIZE]; + struct curl_slist *range_header = NULL; + + FILE *indexfile; + struct active_request_slot *slot; + + if (has_pack_index(sha1)) + return 0; + + if (push_verbosely) + fprintf(stderr, "Getting index for pack %s\n", hex); + + url = xmalloc(strlen(remote->url) + 64); + sprintf(url, "%s/objects/pack/pack-%s.idx", remote->url, hex); + + filename = sha1_pack_index_name(sha1); + snprintf(tmpfile, sizeof(tmpfile), "%s.temp", filename); + indexfile = fopen(tmpfile, "a"); + if (!indexfile) + return error("Unable to open local file %s for pack index", + filename); + + slot = get_active_slot(); + curl_easy_setopt(slot->curl, CURLOPT_FILE, indexfile); + curl_easy_setopt(slot->curl, CURLOPT_WRITEFUNCTION, fwrite); + curl_easy_setopt(slot->curl, CURLOPT_URL, url); + curl_easy_setopt(slot->curl, CURLOPT_HTTPHEADER, no_pragma_header); + slot->local = indexfile; + + /* If there is data present from a previous transfer attempt, + resume where it left off */ + prev_posn = ftell(indexfile); + if (prev_posn>0) { + if (push_verbosely) + fprintf(stderr, + "Resuming fetch of index for pack %s at byte %ld\n", + hex, prev_posn); + sprintf(range, "Range: bytes=%ld-", prev_posn); + range_header = curl_slist_append(range_header, range); + curl_easy_setopt(slot->curl, CURLOPT_HTTPHEADER, range_header); + } + + if (start_active_slot(slot)) { + run_active_slot(slot); + if (slot->curl_result != CURLE_OK) { + free(url); + fclose(indexfile); + return error("Unable to get pack index %s\n%s", url, + 
curl_errorstr); + } + } else { + free(url); + return error("Unable to start request"); + } + + free(url); + fclose(indexfile); + + return move_temp_to_file(tmpfile, filename); +} + +static int setup_index(unsigned char *sha1) +{ + struct packed_git *new_pack; + if (has_pack_file(sha1)) + return 0; // don't list this as something we can get + + if (fetch_index(sha1)) + return -1; + + new_pack = parse_pack_index(sha1); + new_pack->next = remote->packs; + remote->packs = new_pack; + return 0; +} + +static int fetch_indices() +{ + unsigned char sha1[20]; + char *url; + struct buffer buffer; + char *data; + int i = 0; + + struct active_request_slot *slot; + + data = xmalloc(4096); + memset(data, 0, 4096); + buffer.size = 4096; + buffer.posn = 0; + buffer.buffer = data; + + if (push_verbosely) + fprintf(stderr, "Getting pack list\n"); + + url = xmalloc(strlen(remote->url) + 21); + sprintf(url, "%s/objects/info/packs", remote->url); + + slot = get_active_slot(); + curl_easy_setopt(slot->curl, CURLOPT_FILE, &buffer); + curl_easy_setopt(slot->curl, CURLOPT_WRITEFUNCTION, + fwrite_buffer_dynamic); + curl_easy_setopt(slot->curl, CURLOPT_URL, url); + curl_easy_setopt(slot->curl, CURLOPT_HTTPHEADER, NULL); + if (start_active_slot(slot)) { + run_active_slot(slot); + if (slot->curl_result != CURLE_OK) { + free(buffer.buffer); + free(url); + if (slot->http_code == 404) + return 0; + else + return error("%s", curl_errorstr); + } + } else { + free(buffer.buffer); + free(url); + return error("Unable to start request"); + } + free(url); + + data = buffer.buffer; + while (i < buffer.posn) { + switch (data[i]) { + case 'P': + i++; + if (i + 52 < buffer.posn && + !strncmp(data + i, " pack-", 6) && + !strncmp(data + i + 46, ".pack\n", 6)) { + get_sha1_hex(data + i + 6, sha1); + setup_index(sha1); + i += 51; + break; + } + default: + while (data[i] != '\n') + i++; + } + i++; + } + + free(buffer.buffer); + return 0; +} + +static inline int needs_quote(int ch) +{ + switch (ch) { + case '/': case '-': case '.': + case 'A'...'Z': case 'a'...'z': case '0'...'9': + return 0; + default: + return 1; + } +} + +static inline int hex(int v) +{ + if (v < 10) return '0' + v; + else return 'A' + v - 10; +} + +static char *quote_ref_url(const char *base, const char *ref) +{ + const char *cp; + char *dp, *qref; + int len, baselen, ch; + + baselen = strlen(base); + len = baselen + 12; /* "refs/heads/" + NUL */ + for (cp = ref; (ch = *cp) != 0; cp++, len++) + if (needs_quote(ch)) + len += 2; /* extra two hex plus replacement % */ + qref = xmalloc(len); + memcpy(qref, base, baselen); + memcpy(qref + baselen, "refs/heads/", 11); + for (cp = ref, dp = qref + baselen + 11; (ch = *cp) != 0; cp++) { + if (needs_quote(ch)) { + *dp++ = '%'; + *dp++ = hex((ch >> 4) & 0xF); + *dp++ = hex(ch & 0xF); + } + else + *dp++ = ch; + } + *dp = 0; + + return qref; +} + +int fetch_ref(char *ref, unsigned char *sha1) +{ + char *url; + char hex[42]; + struct buffer buffer; + char *base = remote->url; + struct active_request_slot *slot; + buffer.size = 41; + buffer.posn = 0; + buffer.buffer = hex; + hex[41] = '\0'; + + url = quote_ref_url(base, ref); + slot = get_active_slot(); + curl_easy_setopt(slot->curl, CURLOPT_FILE, &buffer); + curl_easy_setopt(slot->curl, CURLOPT_WRITEFUNCTION, + fwrite_buffer_dynamic); + curl_easy_setopt(slot->curl, CURLOPT_HTTPHEADER, NULL); + curl_easy_setopt(slot->curl, CURLOPT_URL, url); + if (start_active_slot(slot)) { + run_active_slot(slot); + if (slot->curl_result != CURLE_OK) + return error("Couldn't get %s for %s\n%s", + 
url, ref, curl_errorstr); + } else { + return error("Unable to start request"); + } + + hex[40] = '\0'; + get_sha1_hex(hex, sha1); + return 0; +} + +static void +start_lockprop_element(void *userData, const char *name, const char **atts) +{ + struct lockprop *prop = (struct lockprop *)userData; + + if (prop->lock_type && !strcmp(name, "D:write")) { + if (prop->lock_exclusive) { + prop->lock_exclusive_write = 1; + } + } else if (prop->lock_scope && !strcmp(name, "D:exclusive")) { + prop->lock_exclusive = 1; + } else if (prop->lock_entry) { + if (!strcmp(name, "D:lockscope")) { + prop->lock_scope = 1; + } else if (!strcmp(name, "D:locktype")) { + prop->lock_type = 1; + } + } else if (prop->supported_lock) { + if (!strcmp(name, "D:lockentry")) { + prop->lock_entry = 1; + } + } else if (!strcmp(name, "D:supportedlock")) { + prop->supported_lock = 1; + } +} + +static void +end_lockprop_element(void *userData, const char *name) +{ + struct lockprop *prop = (struct lockprop *)userData; + + if (!strcmp(name, "D:lockentry")) { + prop->lock_entry = 0; + prop->lock_scope = 0; + prop->lock_type = 0; + prop->lock_exclusive = 0; + } else if (!strcmp(name, "D:supportedlock")) { + prop->supported_lock = 0; + } +} + +size_t process_lock_header( void *ptr, size_t size, size_t nmemb, void *stream) +{ + size_t header_size = size*nmemb; + char *start; + char *end; + + if (!strncmp(ptr, "Lock-Token: '; + end--) {} + if (end > start) { + lock_token = xmalloc(end - start + 1); + memcpy(lock_token, start, end - start); + lock_token[end - start] = 0; + } + } + + return header_size; +} + +char *lock_remote(char *file, int timeout) +{ + struct active_request_slot *slot; + struct buffer out_buffer; + char *out_data; + char *url; + char timeout_header[25]; + struct curl_slist *dav_headers = NULL; + + if (lock_token != NULL) + free(lock_token); + + out_buffer.size = strlen(LOCK_REQUEST) + strlen(git_default_email) - 2; + out_data = xmalloc(out_buffer.size + 1); + snprintf(out_data, out_buffer.size + 1, LOCK_REQUEST, git_default_email); + out_buffer.posn = 0; + out_buffer.buffer = out_data; + + sprintf(timeout_header, "Timeout: Second-%d", timeout); + url = xmalloc(strlen(remote->url) + strlen(file) + 1); + sprintf(url, "%s%s", remote->url, file); + dav_headers = curl_slist_append(dav_headers, timeout_header); + dav_headers = curl_slist_append(dav_headers, "Content-Type: text/xml"); + + slot = get_active_slot(); + curl_easy_setopt(slot->curl, CURLOPT_INFILE, &out_buffer); + curl_easy_setopt(slot->curl, CURLOPT_INFILESIZE, out_buffer.size); + curl_easy_setopt(slot->curl, CURLOPT_READFUNCTION, fread_buffer); + curl_easy_setopt(slot->curl, CURLOPT_WRITEFUNCTION, fwrite_null); + curl_easy_setopt(slot->curl, CURLOPT_HEADERFUNCTION, + process_lock_header); + curl_easy_setopt(slot->curl, CURLOPT_URL, url); + curl_easy_setopt(slot->curl, CURLOPT_UPLOAD, 1); + curl_easy_setopt(slot->curl, CURLOPT_CUSTOMREQUEST, DAV_LOCK); + curl_easy_setopt(slot->curl, CURLOPT_HTTPHEADER, dav_headers); + + if (start_active_slot(slot)) { + run_active_slot(slot); + free(out_data); + if (slot->curl_result != CURLE_OK) { + fprintf(stderr, "Got HTTP error %ld\n", slot->http_code); + return NULL; + } + } else { + free(out_data); + fprintf(stderr, "Unable to start request\n"); + } + + return strdup(lock_token); +} + +int unlock_remote(char *file, char *lock_token) +{ + struct active_request_slot *slot; + char *url; + char *lock_token_header; + struct curl_slist *dav_headers = NULL; + int rc = 0; + + if (lock_token == NULL) { + fprintf(stderr, "Unable to 
unlock, no lock token"); + return 0; + } + + lock_token_header = xmalloc(strlen(lock_token) + 31); + sprintf(lock_token_header, "Lock-Token: ", + lock_token); + url = xmalloc(strlen(remote->url) + strlen(file) + 1); + sprintf(url, "%s%s", remote->url, file); + dav_headers = curl_slist_append(dav_headers, lock_token_header); + + slot = get_active_slot(); + curl_easy_setopt(slot->curl, CURLOPT_WRITEFUNCTION, fwrite_null); + curl_easy_setopt(slot->curl, CURLOPT_URL, url); + curl_easy_setopt(slot->curl, CURLOPT_CUSTOMREQUEST, DAV_UNLOCK); + curl_easy_setopt(slot->curl, CURLOPT_HTTPHEADER, dav_headers); + + if (start_active_slot(slot)) { + run_active_slot(slot); + if (slot->curl_result == CURLE_OK) + rc = 1; + else + fprintf(stderr, "Got HTTP error %ld\n", + slot->http_code); + } else { + fprintf(stderr, "Unable to start request\n"); + } + + curl_slist_free_all(dav_headers); + free(lock_token_header); + free(url); + + return rc; +} + +int check_locking() +{ + struct active_request_slot *slot; + struct buffer in_buffer; + struct buffer out_buffer; + char *in_data; + char *out_data; + XML_Parser parser = XML_ParserCreate(NULL); + enum XML_Status result; + struct lockprop supported_lock; + struct curl_slist *dav_headers = NULL; + + out_buffer.size = strlen(PROPFIND_REQUEST) + strlen(remote->url) - 2; + out_data = xmalloc(out_buffer.size + 1); + snprintf(out_data, out_buffer.size + 1, PROPFIND_REQUEST, remote->url); + out_buffer.posn = 0; + out_buffer.buffer = out_data; + + in_buffer.size = 4096; + in_data = xmalloc(in_buffer.size); + in_buffer.posn = 0; + in_buffer.buffer = in_data; + + dav_headers = curl_slist_append(dav_headers, "Depth: 0"); + dav_headers = curl_slist_append(dav_headers, "Content-Type: text/xml"); + + slot = get_active_slot(); + curl_easy_setopt(slot->curl, CURLOPT_INFILE, &out_buffer); + curl_easy_setopt(slot->curl, CURLOPT_INFILESIZE, out_buffer.size); + curl_easy_setopt(slot->curl, CURLOPT_READFUNCTION, fread_buffer); + curl_easy_setopt(slot->curl, CURLOPT_FILE, &in_buffer); + curl_easy_setopt(slot->curl, CURLOPT_WRITEFUNCTION, + fwrite_buffer_dynamic); + curl_easy_setopt(slot->curl, CURLOPT_URL, remote->url); + curl_easy_setopt(slot->curl, CURLOPT_UPLOAD, 1); + curl_easy_setopt(slot->curl, CURLOPT_CUSTOMREQUEST, DAV_PROPFIND); + curl_easy_setopt(slot->curl, CURLOPT_HTTPHEADER, dav_headers); + + if (start_active_slot(slot)) { + run_active_slot(slot); + free(out_data); + if (slot->curl_result != CURLE_OK) { + free(in_buffer.buffer); + return -1; + } + + XML_SetUserData(parser, &supported_lock); + XML_SetElementHandler(parser, start_lockprop_element, + end_lockprop_element); + result = XML_Parse(parser, in_buffer.buffer, in_buffer.posn, 1); + free(in_buffer.buffer); + if (result != XML_STATUS_OK) + return error("%s", XML_ErrorString( + XML_GetErrorCode(parser))); + } else { + free(out_data); + free(in_buffer.buffer); + return error("Unable to start request"); + } + + if (supported_lock.lock_exclusive_write) + return 0; + else + return 1; +} + +int is_ancestor(unsigned char *sha1, struct commit *commit) +{ + struct commit_list *parents; + + if (parse_commit(commit)) + return 0; + parents = commit->parents; + for (; parents; parents = parents->next) { + if (!memcmp(sha1, parents->item->object.sha1, 20)) { + return 1; + } else if (parents->item->object.type == commit_type) { + if (is_ancestor( + sha1, + (struct commit *)&parents->item->object + )) + return 1; + } + } + return 0; +} + +void get_delta(unsigned char *sha1, struct object *obj, char *lock_token) +{ + struct commit 
*commit; + struct commit_list *parents; + struct tree *tree; + struct tree_entry_list *entry; + + if (sha1 && !memcmp(sha1, obj->sha1, 20)) + return; + + if (aborted) + return; + + if (obj->type == commit_type) { + if (push_verbosely) + fprintf(stderr, "walk %s\n", sha1_to_hex(obj->sha1)); + add_request(obj->sha1, lock_token); + commit = (struct commit *)obj; + if (parse_commit(commit)) { + fprintf(stderr, "Error parsing commit %s\n", + sha1_to_hex(obj->sha1)); + aborted = 1; + return; + } + parents = commit->parents; + for (; parents; parents = parents->next) + if (sha1 == NULL || + memcmp(sha1, parents->item->object.sha1, 20)) + get_delta(sha1, &parents->item->object, + lock_token); + get_delta(sha1, &commit->tree->object, lock_token); + } else if (obj->type == tree_type) { + if (push_verbosely) + fprintf(stderr, "walk %s\n", sha1_to_hex(obj->sha1)); + add_request(obj->sha1, lock_token); + tree = (struct tree *)obj; + if (parse_tree(tree)) { + fprintf(stderr, "Error parsing tree %s\n", + sha1_to_hex(obj->sha1)); + aborted = 1; + return; + } + entry = tree->entries; + tree->entries = NULL; + while (entry) { + struct tree_entry_list *next = entry->next; + get_delta(sha1, entry->item.any, lock_token); + free(entry->name); + free(entry); + entry = next; + } + } else if (obj->type == blob_type || obj->type == tag_type) { + add_request(obj->sha1, lock_token); + } +} + +int update_remote(char *remote_path, unsigned char *sha1, char *lock_token) +{ + struct active_request_slot *slot; + char *url; + char *out_data; + char *if_header; + struct buffer out_buffer; + struct curl_slist *dav_headers = NULL; + int i; + + url = xmalloc(strlen(remote->url) + strlen(remote_path) + 1); + sprintf(url, "%s%s", remote->url, remote_path); + + if_header = xmalloc(strlen(lock_token) + 25); + sprintf(if_header, "If: ()", lock_token); + dav_headers = curl_slist_append(dav_headers, if_header); + + out_buffer.size = 41; + out_data = xmalloc(out_buffer.size + 1); + i = snprintf(out_data, out_buffer.size + 1, "%s\n", sha1_to_hex(sha1)); + if (i != out_buffer.size) { + fprintf(stderr, "Unable to initialize PUT request body\n"); + return 0; + } + out_buffer.posn = 0; + out_buffer.buffer = out_data; + + slot = get_active_slot(); + curl_easy_setopt(slot->curl, CURLOPT_INFILE, &out_buffer); + curl_easy_setopt(slot->curl, CURLOPT_INFILESIZE, out_buffer.size); + curl_easy_setopt(slot->curl, CURLOPT_READFUNCTION, fread_buffer); + curl_easy_setopt(slot->curl, CURLOPT_WRITEFUNCTION, fwrite_null); + curl_easy_setopt(slot->curl, CURLOPT_CUSTOMREQUEST, DAV_PUT); + curl_easy_setopt(slot->curl, CURLOPT_HTTPHEADER, dav_headers); + curl_easy_setopt(slot->curl, CURLOPT_UPLOAD, 1); + curl_easy_setopt(slot->curl, CURLOPT_PUT, 1); + curl_easy_setopt(slot->curl, CURLOPT_URL, url); + + if (start_active_slot(slot)) { + run_active_slot(slot); + free(out_data); + free(if_header); + free(url); + if (slot->curl_result != CURLE_OK) { + fprintf(stderr, + "PUT error: curl result=%d, HTTP code=%ld\n", + slot->curl_result, slot->http_code); + /* We should attempt recovery? 
*/ + return 0; + } + } else { + free(out_data); + free(if_header); + free(url); + fprintf(stderr, "Unable to start PUT request\n"); + return 0; + } + + return 1; +} + +int main(int argc, char **argv) +{ + struct active_request_slot *slot; + struct active_request_slot *next_slot; + struct transfer_request *request; + struct transfer_request *next_request; + int nr_refspec = 0; + char **refspec = NULL; + int do_remote_update; + int new_branch; + int force_this; + char *local_ref; + unsigned char local_sha1[20]; + struct object *local_object = NULL; + char *remote_ref = NULL; + unsigned char remote_sha1[20]; + char *remote_lock = NULL; + char *remote_path = NULL; + char *low_speed_limit; + char *low_speed_time; + int rc = 0; + int i; + + setup_ident(); + + remote = xmalloc(sizeof(*remote)); + remote->url = NULL; + remote->packs = NULL; + + argv++; + for (i = 1; i < argc; i++, argv++) { + char *arg = *argv; + + if (*arg == '-') { + if (!strcmp(arg, "--complete")) { + push_all = 1; + continue; + } + if (!strcmp(arg, "--force")) { + force_all = 1; + continue; + } + if (!strcmp(arg, "--verbose")) { + push_verbosely = 1; + continue; + } + usage(http_push_usage); + } + if (!remote->url) { + remote->url = arg; + continue; + } + refspec = argv; + nr_refspec = argc - i; + break; + } + + curl_global_init(CURL_GLOBAL_ALL); + +#ifdef USE_CURL_MULTI + { + char *http_max_requests = getenv("GIT_HTTP_MAX_REQUESTS"); + if (http_max_requests != NULL) + max_requests = atoi(http_max_requests); + } + + curlm = curl_multi_init(); + if (curlm == NULL) { + fprintf(stderr, "Error creating curl multi handle.\n"); + return 1; + } +#endif + + if (getenv("GIT_SSL_NO_VERIFY")) + curl_ssl_verify = 0; + + ssl_cert = getenv("GIT_SSL_CERT"); +#if LIBCURL_VERSION_NUM >= 0x070902 + ssl_key = getenv("GIT_SSL_KEY"); +#endif +#if LIBCURL_VERSION_NUM >= 0x070908 + ssl_capath = getenv("GIT_SSL_CAPATH"); +#endif + ssl_cainfo = getenv("GIT_SSL_CAINFO"); + + low_speed_limit = getenv("GIT_HTTP_LOW_SPEED_LIMIT"); + if (low_speed_limit != NULL) + curl_low_speed_limit = strtol(low_speed_limit, NULL, 10); + low_speed_time = getenv("GIT_HTTP_LOW_SPEED_TIME"); + if (low_speed_time != NULL) + curl_low_speed_time = strtol(low_speed_time, NULL, 10); + + git_config(http_options); + + if (curl_ssl_verify == -1) + curl_ssl_verify = 1; + +#ifdef USE_CURL_MULTI + if (max_requests < 1) + max_requests = DEFAULT_MAX_REQUESTS; +#endif + + no_pragma_header = curl_slist_append(no_pragma_header, "Pragma:"); + default_headers = curl_slist_append(default_headers, "Range:"); + default_headers = curl_slist_append(default_headers, "Destination:"); + default_headers = curl_slist_append(default_headers, "If:"); + default_headers = curl_slist_append(default_headers, + "Pragma: no-cache"); + +#ifndef NO_CURL_EASY_DUPHANDLE + curl_default = get_curl_handle(); +#endif + + /* Verify DAV compliance/lock support */ + if (check_locking() != 0) { + fprintf(stderr, "Error: no DAV locking support on remote repo %s\n", remote->url); + rc = 1; + goto cleanup; + } + + /* Process each refspec */ + for (i = 0; i < nr_refspec; i++) { + char *ep; + force_this = 0; + do_remote_update = 0; + new_branch = 0; + local_ref = refspec[i]; + if (*local_ref == '+') { + force_this = 1; + local_ref++; + } + ep = strchr(local_ref, ':'); + if (ep) { + remote_ref = ep + 1; + *ep = 0; + } + else + remote_ref = local_ref; + + /* Lock remote branch ref */ + if (remote_path) + free(remote_path); + remote_path = xmalloc(strlen(remote_ref) + 12); + sprintf(remote_path, "refs/heads/%s", remote_ref); + 
remote_lock = lock_remote(remote_path, 3600); + if (remote_lock == NULL) { + fprintf(stderr, "Unable to lock remote branch %s\n", + remote_ref); + rc = 1; + continue; + } + + /* Resolve local and remote refs */ + if (fetch_ref(remote_ref, remote_sha1) != 0) { + fprintf(stderr, + "Remote branch %s does not exist on %s\n", + remote_ref, remote->url); + new_branch = 1; + } + if (get_sha1(local_ref, local_sha1) != 0) { + fprintf(stderr, "Error resolving local branch %s\n", + local_ref); + rc = 1; + goto unlock; + } + + /* Find relationship between local and remote */ + local_object = parse_object(local_sha1); + if (!local_object) { + fprintf(stderr, "Unable to parse local object %s\n", + sha1_to_hex(local_sha1)); + rc = 1; + goto unlock; + } else if (new_branch) { + do_remote_update = 1; + } else { + if (!memcmp(local_sha1, remote_sha1, 20)) { + fprintf(stderr, + "* %s: same as branch '%s' of %s\n", + local_ref, remote_ref, remote->url); + } else if (is_ancestor(remote_sha1, + (struct commit *)local_object)) { + fprintf(stderr, + "Remote %s will fast-forward to local %s\n", + remote_ref, local_ref); + do_remote_update = 1; + } else if (force_all || force_this) { + fprintf(stderr, + "* %s on %s does not fast forward to local branch '%s', overwriting\n", + remote_ref, remote->url, local_ref); + do_remote_update = 1; + } else { + fprintf(stderr, + "* %s on %s does not fast forward to local branch '%s'\n", + remote_ref, remote->url, local_ref); + rc = 1; + goto unlock; + } + } + + /* Generate and check list of required objects */ + pushing = 0; + if (do_remote_update || push_all) + fetch_indices(); + get_delta(push_all ? NULL : remote_sha1, + local_object, remote_lock); + process_waiting_requests(); + + /* Push missing objects to remote, this would be a + convenient time to pack them first if appropriate. */ + pushing = 1; + process_request_queue(); + process_waiting_requests(); + + /* Update the remote branch if all went well */ + if (do_remote_update) { + if (!aborted && update_remote(remote_path, + local_sha1, + remote_lock)) { + fprintf(stderr, "%s remote branch %s\n", + new_branch ? "Created" : "Updated", + remote_ref); + } else { + fprintf(stderr, + "Unable to %s remote branch %s\n", + new_branch ? "create" : "update", + remote_ref); + rc = 1; + goto unlock; + } + } + + unlock: + unlock_remote(remote_path, remote_lock); + free(remote_path); + free(remote_lock); + } + + cleanup: + free(remote); + + curl_slist_free_all(no_pragma_header); + curl_slist_free_all(default_headers); + + slot = active_queue_head; + while (slot != NULL) { + next_slot = slot->next; + if (slot->curl != NULL) + curl_easy_cleanup(slot->curl); + free(slot); + slot = next_slot; + } + + request = request_queue_head; + while (request != NULL) { + next_request = request->next; + release_request(request); + free(request); + request = next_request; + } + +#ifndef NO_CURL_EASY_DUPHANDLE + curl_easy_cleanup(curl_default); +#endif +#ifdef USE_CURL_MULTI + curl_multi_cleanup(curlm); +#endif + curl_global_cleanup(); + return rc; +} -- cgit v0.10.2-6-g49f6 From c17fb6ee074cb0c9d71e5de42e2ad00786fedc18 Mon Sep 17 00:00:00 2001 From: Nick Hengeveld Date: Fri, 4 Nov 2005 14:22:18 -0800 Subject: Verify remote packs, speed up pending request queue Verify that remote packs exist before using the pack index, add requests to the beginning of the queue to locate pending requests faster. 
Signed-off-by: Nick Hengeveld Signed-off-by: Junio C Hamano diff --git a/http-push.c b/http-push.c index e85f1c1..134d405 100644 --- a/http-push.c +++ b/http-push.c @@ -727,7 +727,6 @@ void process_waiting_requests(void) void add_request(unsigned char *sha1, char *lock_token) { struct transfer_request *request = request_queue_head; - struct transfer_request *tail; struct packed_git *target; while (request != NULL && memcmp(request->sha1, sha1, 20)) @@ -745,17 +744,8 @@ void add_request(unsigned char *sha1, char *lock_token) request->lock_token = lock_token; request->headers = NULL; request->state = NEED_CHECK; - request->next = NULL; - - if (request_queue_head == NULL) { - request_queue_head = request; - } else { - tail = request_queue_head; - while (tail->next != NULL) { - tail = tail->next; - } - tail->next = request; - } + request->next = request_queue_head; + request_queue_head = request; #ifdef USE_CURL_MULTI process_request_queue(); process_curl_messages(); @@ -775,13 +765,29 @@ static int fetch_index(unsigned char *sha1) FILE *indexfile; struct active_request_slot *slot; + /* Don't use the index if the pack isn't there */ + url = xmalloc(strlen(remote->url) + 65); + sprintf(url, "%s/objects/pack/pack-%s.pack", remote->url, hex); + slot = get_active_slot(); + curl_easy_setopt(slot->curl, CURLOPT_URL, url); + curl_easy_setopt(slot->curl, CURLOPT_NOBODY, 1); + if (start_active_slot(slot)) { + run_active_slot(slot); + if (slot->curl_result != CURLE_OK) { + free(url); + return error("Unable to verify pack %s is available", + hex); + } + } else { + return error("Unable to start request"); + } + if (has_pack_index(sha1)) return 0; if (push_verbosely) fprintf(stderr, "Getting index for pack %s\n", hex); - url = xmalloc(strlen(remote->url) + 64); sprintf(url, "%s/objects/pack/pack-%s.idx", remote->url, hex); filename = sha1_pack_index_name(sha1); @@ -792,6 +798,8 @@ static int fetch_index(unsigned char *sha1) filename); slot = get_active_slot(); + curl_easy_setopt(slot->curl, CURLOPT_NOBODY, 0); + curl_easy_setopt(slot->curl, CURLOPT_HTTPGET, 1); curl_easy_setopt(slot->curl, CURLOPT_FILE, indexfile); curl_easy_setopt(slot->curl, CURLOPT_WRITEFUNCTION, fwrite); curl_easy_setopt(slot->curl, CURLOPT_URL, url); @@ -833,8 +841,6 @@ static int fetch_index(unsigned char *sha1) static int setup_index(unsigned char *sha1) { struct packed_git *new_pack; - if (has_pack_file(sha1)) - return 0; // don't list this as something we can get if (fetch_index(sha1)) return -1; @@ -1610,7 +1616,6 @@ int main(int argc, char **argv) while (request != NULL) { next_request = request->next; release_request(request); - free(request); request = next_request; } -- cgit v0.10.2-6-g49f6 From 0772b9a6331357913417722eab672f8b5aa69e50 Mon Sep 17 00:00:00 2001 From: Nick Hengeveld Date: Fri, 4 Nov 2005 14:22:25 -0800 Subject: Support remote references with slashes in their names Support remote references with slashes in their names Signed-off-by: Nick Hengeveld Signed-off-by: Junio C Hamano diff --git a/http-push.c b/http-push.c index 134d405..6a241aa 100644 --- a/http-push.c +++ b/http-push.c @@ -1066,12 +1066,44 @@ char *lock_remote(char *file, int timeout) struct buffer out_buffer; char *out_data; char *url; + char *ep; char timeout_header[25]; struct curl_slist *dav_headers = NULL; if (lock_token != NULL) free(lock_token); + url = xmalloc(strlen(remote->url) + strlen(file) + 1); + sprintf(url, "%s%s", remote->url, file); + + /* Make sure leading directories exist for the remote ref */ + ep = strchr(url + 
strlen(remote->url) + 11, '/'); + while (ep) { + *ep = 0; + slot = get_active_slot(); + curl_easy_setopt(slot->curl, CURLOPT_HTTPGET, 1); + curl_easy_setopt(slot->curl, CURLOPT_URL, url); + curl_easy_setopt(slot->curl, CURLOPT_CUSTOMREQUEST, DAV_MKCOL); + curl_easy_setopt(slot->curl, CURLOPT_WRITEFUNCTION, fwrite_null); + if (start_active_slot(slot)) { + run_active_slot(slot); + if (slot->curl_result != CURLE_OK && + slot->http_code != 405) { + fprintf(stderr, + "Unable to create branch path %s\n", + url); + free(url); + return NULL; + } + } else { + fprintf(stderr, "Unable to start request\n"); + free(url); + return NULL; + } + *ep = '/'; + ep = strchr(ep + 1, '/'); + } + out_buffer.size = strlen(LOCK_REQUEST) + strlen(git_default_email) - 2; out_data = xmalloc(out_buffer.size + 1); snprintf(out_data, out_buffer.size + 1, LOCK_REQUEST, git_default_email); @@ -1079,8 +1111,6 @@ char *lock_remote(char *file, int timeout) out_buffer.buffer = out_data; sprintf(timeout_header, "Timeout: Second-%d", timeout); - url = xmalloc(strlen(remote->url) + strlen(file) + 1); - sprintf(url, "%s%s", remote->url, file); dav_headers = curl_slist_append(dav_headers, timeout_header); dav_headers = curl_slist_append(dav_headers, "Content-Type: text/xml"); @@ -1098,16 +1128,22 @@ char *lock_remote(char *file, int timeout) if (start_active_slot(slot)) { run_active_slot(slot); - free(out_data); if (slot->curl_result != CURLE_OK) { fprintf(stderr, "Got HTTP error %ld\n", slot->http_code); + free(url); + free(out_data); return NULL; } } else { + free(url); free(out_data); fprintf(stderr, "Unable to start request\n"); + return NULL; } + free(url); + free(out_data); + return strdup(lock_token); } -- cgit v0.10.2-6-g49f6 From 26349b2e5ee2311cbb12db1cb92fa48fb0f26ef8 Mon Sep 17 00:00:00 2001 From: Nick Hengeveld Date: Fri, 4 Nov 2005 14:22:31 -0800 Subject: Improve lock handling Improve lock handling: parse the server response for the timeout, owner, and lock token Signed-off-by: Nick Hengeveld Signed-off-by: Junio C Hamano diff --git a/http-push.c b/http-push.c index 6a241aa..85cd595 100644 --- a/http-push.c +++ b/http-push.c @@ -55,7 +55,6 @@ static CURL *curl_default; static struct curl_slist *no_pragma_header; static struct curl_slist *default_headers; static char curl_errorstr[CURL_ERROR_SIZE]; -static char *lock_token = NULL; static int push_verbosely = 0; static int push_all = 0; @@ -92,7 +91,7 @@ struct transfer_request unsigned char sha1[20]; char *url; char *dest; - char *lock_token; + struct active_lock *lock; struct curl_slist *headers; struct buffer buffer; char filename[PATH_MAX]; @@ -136,6 +135,20 @@ static char *ssl_cainfo = NULL; static long curl_low_speed_limit = -1; static long curl_low_speed_time = -1; +struct active_lock +{ + int ctx_activelock; + int ctx_owner; + int ctx_owner_href; + int ctx_timeout; + int ctx_locktoken; + int ctx_locktoken_href; + char *owner; + time_t start_time; + long timeout; + char *token; +}; + struct lockprop { int supported_lock; @@ -509,7 +522,7 @@ static void start_put(struct transfer_request *request) if (request->url != NULL) free(request->url); request->url = xmalloc(strlen(remote->url) + - strlen(request->lock_token) + 51); + strlen(request->lock->token) + 51); strcpy(request->url, remote->url); posn = request->url + strlen(remote->url); strcpy(posn, "objects/"); @@ -522,7 +535,7 @@ static void start_put(struct transfer_request *request) sprintf(request->dest, "Destination: %s", request->url); posn += 38; *(posn++) = '.'; - strcpy(posn, request->lock_token); + 
strcpy(posn, request->lock->token); slot = get_active_slot(); curl_easy_setopt(slot->curl, CURLOPT_INFILE, &request->buffer); @@ -724,7 +737,7 @@ void process_waiting_requests(void) } } -void add_request(unsigned char *sha1, char *lock_token) +void add_request(unsigned char *sha1, struct active_lock *lock) { struct transfer_request *request = request_queue_head; struct packed_git *target; @@ -741,7 +754,7 @@ void add_request(unsigned char *sha1, char *lock_token) request = xmalloc(sizeof(*request)); memcpy(request->sha1, sha1, 20); request->url = NULL; - request->lock_token = lock_token; + request->lock = lock; request->headers = NULL; request->state = NEED_CHECK; request->next = request_queue_head; @@ -999,6 +1012,68 @@ int fetch_ref(char *ref, unsigned char *sha1) } static void +start_activelock_element(void *userData, const char *name, const char **atts) +{ + struct active_lock *lock = (struct active_lock *)userData; + + if (lock->ctx_activelock && !strcmp(name, "D:timeout")) + lock->ctx_timeout = 1; + else if (lock->ctx_owner && strstr(name, "href")) + lock->ctx_owner_href = 1; + else if (lock->ctx_activelock && strstr(name, "owner")) + lock->ctx_owner = 1; + else if (lock->ctx_locktoken && !strcmp(name, "D:href")) + lock->ctx_locktoken_href = 1; + else if (lock->ctx_activelock && !strcmp(name, "D:locktoken")) + lock->ctx_locktoken = 1; + else if (!strcmp(name, "D:activelock")) + lock->ctx_activelock = 1; +} + +static void +end_activelock_element(void *userData, const char *name) +{ + struct active_lock *lock = (struct active_lock *)userData; + + if (lock->ctx_timeout && !strcmp(name, "D:timeout")) { + lock->ctx_timeout = 0; + } else if (lock->ctx_owner_href && strstr(name, "href")) { + lock->ctx_owner_href = 0; + } else if (lock->ctx_owner && strstr(name, "owner")) { + lock->ctx_owner = 0; + } else if (lock->ctx_locktoken_href && !strcmp(name, "D:href")) { + lock->ctx_locktoken_href = 0; + } else if (lock->ctx_locktoken && !strcmp(name, "D:locktoken")) { + lock->ctx_locktoken = 0; + } else if (lock->ctx_activelock && !strcmp(name, "D:activelock")) { + lock->ctx_activelock = 0; + } +} + +static void +activelock_cdata(void *userData, const XML_Char *s, int len) +{ + struct active_lock *lock = (struct active_lock *)userData; + char *this = malloc(len+1); + strncpy(this, s, len); + + if (lock->ctx_owner_href) { + lock->owner = malloc(len+1); + strcpy(lock->owner, this); + } else if (lock->ctx_locktoken_href) { + if (!strncmp(this, "opaquelocktoken:", 16)) { + lock->token = malloc(len-15); + strcpy(lock->token, this+16); + } + } else if (lock->ctx_timeout) { + if (!strncmp(this, "Second-", 7)) + lock->timeout = strtol(this+7, NULL, 10); + } + + free(this); +} + +static void start_lockprop_element(void *userData, const char *name, const char **atts) { struct lockprop *prop = (struct lockprop *)userData; @@ -1039,40 +1114,21 @@ end_lockprop_element(void *userData, const char *name) } } -size_t process_lock_header( void *ptr, size_t size, size_t nmemb, void *stream) -{ - size_t header_size = size*nmemb; - char *start; - char *end; - - if (!strncmp(ptr, "Lock-Token: '; - end--) {} - if (end > start) { - lock_token = xmalloc(end - start + 1); - memcpy(lock_token, start, end - start); - lock_token[end - start] = 0; - } - } - - return header_size; -} - -char *lock_remote(char *file, int timeout) +struct active_lock *lock_remote(char *file, int timeout) { struct active_request_slot *slot; struct buffer out_buffer; + struct buffer in_buffer; char *out_data; + char *in_data; char *url; char *ep; 
char timeout_header[25]; + struct active_lock *new_lock; + XML_Parser parser = XML_ParserCreate(NULL); + enum XML_Status result; struct curl_slist *dav_headers = NULL; - if (lock_token != NULL) - free(lock_token); - url = xmalloc(strlen(remote->url) + strlen(file) + 1); sprintf(url, "%s%s", remote->url, file); @@ -1110,6 +1166,16 @@ char *lock_remote(char *file, int timeout) out_buffer.posn = 0; out_buffer.buffer = out_data; + in_buffer.size = 4096; + in_data = xmalloc(in_buffer.size); + in_buffer.posn = 0; + in_buffer.buffer = in_data; + + new_lock = xmalloc(sizeof(*new_lock)); + new_lock->owner = NULL; + new_lock->token = NULL; + new_lock->timeout = -1; + sprintf(timeout_header, "Timeout: Second-%d", timeout); dav_headers = curl_slist_append(dav_headers, timeout_header); dav_headers = curl_slist_append(dav_headers, "Content-Type: text/xml"); @@ -1118,9 +1184,9 @@ char *lock_remote(char *file, int timeout) curl_easy_setopt(slot->curl, CURLOPT_INFILE, &out_buffer); curl_easy_setopt(slot->curl, CURLOPT_INFILESIZE, out_buffer.size); curl_easy_setopt(slot->curl, CURLOPT_READFUNCTION, fread_buffer); - curl_easy_setopt(slot->curl, CURLOPT_WRITEFUNCTION, fwrite_null); - curl_easy_setopt(slot->curl, CURLOPT_HEADERFUNCTION, - process_lock_header); + curl_easy_setopt(slot->curl, CURLOPT_FILE, &in_buffer); + curl_easy_setopt(slot->curl, CURLOPT_WRITEFUNCTION, + fwrite_buffer_dynamic); curl_easy_setopt(slot->curl, CURLOPT_URL, url); curl_easy_setopt(slot->curl, CURLOPT_UPLOAD, 1); curl_easy_setopt(slot->curl, CURLOPT_CUSTOMREQUEST, DAV_LOCK); @@ -1130,13 +1196,17 @@ char *lock_remote(char *file, int timeout) run_active_slot(slot); if (slot->curl_result != CURLE_OK) { fprintf(stderr, "Got HTTP error %ld\n", slot->http_code); + free(new_lock); free(url); free(out_data); + free(in_data); return NULL; } } else { + free(new_lock); free(url); free(out_data); + free(in_data); fprintf(stderr, "Unable to start request\n"); return NULL; } @@ -1144,10 +1214,33 @@ char *lock_remote(char *file, int timeout) free(url); free(out_data); - return strdup(lock_token); + XML_SetUserData(parser, new_lock); + XML_SetElementHandler(parser, start_activelock_element, + end_activelock_element); + XML_SetCharacterDataHandler(parser, activelock_cdata); + result = XML_Parse(parser, in_buffer.buffer, in_buffer.posn, 1); + free(in_data); + if (result != XML_STATUS_OK) { + fprintf(stderr, "%s", XML_ErrorString( + XML_GetErrorCode(parser))); + free(new_lock); + return NULL; + } + + if (new_lock->token == NULL || new_lock->timeout <= 0) { + if (new_lock->token != NULL) + free(new_lock->token); + if (new_lock->owner != NULL) + free(new_lock->owner); + free(new_lock); + return NULL; + } + + new_lock->start_time = time(NULL); + return new_lock; } -int unlock_remote(char *file, char *lock_token) +int unlock_remote(char *file, struct active_lock *lock) { struct active_request_slot *slot; char *url; @@ -1155,14 +1248,9 @@ int unlock_remote(char *file, char *lock_token) struct curl_slist *dav_headers = NULL; int rc = 0; - if (lock_token == NULL) { - fprintf(stderr, "Unable to unlock, no lock token"); - return 0; - } - - lock_token_header = xmalloc(strlen(lock_token) + 31); + lock_token_header = xmalloc(strlen(lock->token) + 31); sprintf(lock_token_header, "Lock-Token: ", - lock_token); + lock->token); url = xmalloc(strlen(remote->url) + strlen(file) + 1); sprintf(url, "%s%s", remote->url, file); dav_headers = curl_slist_append(dav_headers, lock_token_header); @@ -1278,7 +1366,8 @@ int is_ancestor(unsigned char *sha1, struct commit *commit) 
return 0; } -void get_delta(unsigned char *sha1, struct object *obj, char *lock_token) +void get_delta(unsigned char *sha1, struct object *obj, + struct active_lock *lock) { struct commit *commit; struct commit_list *parents; @@ -1294,7 +1383,7 @@ void get_delta(unsigned char *sha1, struct object *obj, char *lock_token) if (obj->type == commit_type) { if (push_verbosely) fprintf(stderr, "walk %s\n", sha1_to_hex(obj->sha1)); - add_request(obj->sha1, lock_token); + add_request(obj->sha1, lock); commit = (struct commit *)obj; if (parse_commit(commit)) { fprintf(stderr, "Error parsing commit %s\n", @@ -1307,12 +1396,12 @@ void get_delta(unsigned char *sha1, struct object *obj, char *lock_token) if (sha1 == NULL || memcmp(sha1, parents->item->object.sha1, 20)) get_delta(sha1, &parents->item->object, - lock_token); - get_delta(sha1, &commit->tree->object, lock_token); + lock); + get_delta(sha1, &commit->tree->object, lock); } else if (obj->type == tree_type) { if (push_verbosely) fprintf(stderr, "walk %s\n", sha1_to_hex(obj->sha1)); - add_request(obj->sha1, lock_token); + add_request(obj->sha1, lock); tree = (struct tree *)obj; if (parse_tree(tree)) { fprintf(stderr, "Error parsing tree %s\n", @@ -1324,17 +1413,18 @@ void get_delta(unsigned char *sha1, struct object *obj, char *lock_token) tree->entries = NULL; while (entry) { struct tree_entry_list *next = entry->next; - get_delta(sha1, entry->item.any, lock_token); + get_delta(sha1, entry->item.any, lock); free(entry->name); free(entry); entry = next; } } else if (obj->type == blob_type || obj->type == tag_type) { - add_request(obj->sha1, lock_token); + add_request(obj->sha1, lock); } } -int update_remote(char *remote_path, unsigned char *sha1, char *lock_token) +int update_remote(char *remote_path, unsigned char *sha1, + struct active_lock *lock) { struct active_request_slot *slot; char *url; @@ -1347,8 +1437,8 @@ int update_remote(char *remote_path, unsigned char *sha1, char *lock_token) url = xmalloc(strlen(remote->url) + strlen(remote_path) + 1); sprintf(url, "%s%s", remote->url, remote_path); - if_header = xmalloc(strlen(lock_token) + 25); - sprintf(if_header, "If: ()", lock_token); + if_header = xmalloc(strlen(lock->token) + 25); + sprintf(if_header, "If: ()", lock->token); dav_headers = curl_slist_append(dav_headers, if_header); out_buffer.size = 41; @@ -1411,7 +1501,7 @@ int main(int argc, char **argv) struct object *local_object = NULL; char *remote_ref = NULL; unsigned char remote_sha1[20]; - char *remote_lock = NULL; + struct active_lock *remote_lock; char *remote_path = NULL; char *low_speed_limit; char *low_speed_time; @@ -1630,6 +1720,9 @@ int main(int argc, char **argv) unlock: unlock_remote(remote_path, remote_lock); free(remote_path); + if (remote_lock->owner != NULL) + free(remote_lock->owner); + free(remote_lock->token); free(remote_lock); } -- cgit v0.10.2-6-g49f6 From 75187c9debdc57d47e0842f97e30cb912d364ff2 Mon Sep 17 00:00:00 2001 From: Nick Hengeveld Date: Fri, 4 Nov 2005 14:22:35 -0800 Subject: Refresh the remote lock if it is about to expire Refresh the remote lock if it is about to expire Signed-off-by: Nick Hengeveld Signed-off-by: Junio C Hamano diff --git a/http-push.c b/http-push.c index 85cd595..c10067c 100644 --- a/http-push.c +++ b/http-push.c @@ -40,6 +40,9 @@ static const char http_push_usage[] = #define PROPFIND_REQUEST "\n\n\n\n\n" #define LOCK_REQUEST "\n\n\n\n\nmailto:%s\n\n" +#define LOCK_TIME 600 +#define LOCK_REFRESH 30 + static int active_requests = 0; static int data_received; static int pushing = 
0; @@ -143,10 +146,12 @@ struct active_lock int ctx_timeout; int ctx_locktoken; int ctx_locktoken_href; + char *url; char *owner; + char *token; time_t start_time; long timeout; - char *token; + int refreshing; }; struct lockprop @@ -580,11 +585,65 @@ static void start_move(struct transfer_request *request) } } +int refresh_lock(struct active_lock *lock) +{ + struct active_request_slot *slot; + char *if_header; + char timeout_header[25]; + struct curl_slist *dav_headers = NULL; + int rc = 0; + + lock->refreshing = 1; + + if_header = xmalloc(strlen(lock->token) + 25); + sprintf(if_header, "If: ()", lock->token); + sprintf(timeout_header, "Timeout: Second-%ld", lock->timeout); + dav_headers = curl_slist_append(dav_headers, if_header); + dav_headers = curl_slist_append(dav_headers, timeout_header); + + slot = get_active_slot(); + curl_easy_setopt(slot->curl, CURLOPT_HTTPGET, 1); + curl_easy_setopt(slot->curl, CURLOPT_WRITEFUNCTION, fwrite_null); + curl_easy_setopt(slot->curl, CURLOPT_URL, lock->url); + curl_easy_setopt(slot->curl, CURLOPT_CUSTOMREQUEST, DAV_LOCK); + curl_easy_setopt(slot->curl, CURLOPT_HTTPHEADER, dav_headers); + + if (start_active_slot(slot)) { + run_active_slot(slot); + if (slot->curl_result != CURLE_OK) { + fprintf(stderr, "Got HTTP error %ld\n", slot->http_code); + } else { + lock->start_time = time(NULL); + rc = 1; + } + } + + lock->refreshing = 0; + curl_slist_free_all(dav_headers); + free(if_header); + + return rc; +} + static void finish_request(struct transfer_request *request) { + time_t current_time = time(NULL); + int time_remaining; + request->curl_result = request->slot->curl_result; request->http_code = request->slot->http_code; request->slot = NULL; + + /* Refresh the lock if it is close to timing out */ + time_remaining = request->lock->start_time + request->lock->timeout + - current_time; + if (time_remaining < LOCK_REFRESH && !request->lock->refreshing) { + if (!refresh_lock(request->lock)) { + fprintf(stderr, "Unable to refresh remote lock\n"); + aborted = 1; + } + } + if (request->headers != NULL) curl_slist_free_all(request->headers); if (request->state == RUN_HEAD) { @@ -1114,7 +1173,7 @@ end_lockprop_element(void *userData, const char *name) } } -struct active_lock *lock_remote(char *file, int timeout) +struct active_lock *lock_remote(char *file, long timeout) { struct active_request_slot *slot; struct buffer out_buffer; @@ -1175,8 +1234,9 @@ struct active_lock *lock_remote(char *file, int timeout) new_lock->owner = NULL; new_lock->token = NULL; new_lock->timeout = -1; + new_lock->refreshing = 0; - sprintf(timeout_header, "Timeout: Second-%d", timeout); + sprintf(timeout_header, "Timeout: Second-%ld", timeout); dav_headers = curl_slist_append(dav_headers, timeout_header); dav_headers = curl_slist_append(dav_headers, "Content-Type: text/xml"); @@ -1211,7 +1271,6 @@ struct active_lock *lock_remote(char *file, int timeout) return NULL; } - free(url); free(out_data); XML_SetUserData(parser, new_lock); @@ -1223,6 +1282,7 @@ struct active_lock *lock_remote(char *file, int timeout) if (result != XML_STATUS_OK) { fprintf(stderr, "%s", XML_ErrorString( XML_GetErrorCode(parser))); + free(url); free(new_lock); return NULL; } @@ -1232,18 +1292,19 @@ struct active_lock *lock_remote(char *file, int timeout) free(new_lock->token); if (new_lock->owner != NULL) free(new_lock->owner); + free(url); free(new_lock); return NULL; } + new_lock->url = url; new_lock->start_time = time(NULL); return new_lock; } -int unlock_remote(char *file, struct active_lock *lock) +int 
unlock_remote(struct active_lock *lock) { struct active_request_slot *slot; - char *url; char *lock_token_header; struct curl_slist *dav_headers = NULL; int rc = 0; @@ -1251,13 +1312,11 @@ int unlock_remote(char *file, struct active_lock *lock) lock_token_header = xmalloc(strlen(lock->token) + 31); sprintf(lock_token_header, "Lock-Token: ", lock->token); - url = xmalloc(strlen(remote->url) + strlen(file) + 1); - sprintf(url, "%s%s", remote->url, file); dav_headers = curl_slist_append(dav_headers, lock_token_header); slot = get_active_slot(); curl_easy_setopt(slot->curl, CURLOPT_WRITEFUNCTION, fwrite_null); - curl_easy_setopt(slot->curl, CURLOPT_URL, url); + curl_easy_setopt(slot->curl, CURLOPT_URL, lock->url); curl_easy_setopt(slot->curl, CURLOPT_CUSTOMREQUEST, DAV_UNLOCK); curl_easy_setopt(slot->curl, CURLOPT_HTTPHEADER, dav_headers); @@ -1274,7 +1333,12 @@ int unlock_remote(char *file, struct active_lock *lock) curl_slist_free_all(dav_headers); free(lock_token_header); - free(url); + + if (lock->owner != NULL) + free(lock->owner); + free(lock->url); + free(lock->token); + free(lock); return rc; } @@ -1423,20 +1487,15 @@ void get_delta(unsigned char *sha1, struct object *obj, } } -int update_remote(char *remote_path, unsigned char *sha1, - struct active_lock *lock) +int update_remote(unsigned char *sha1, struct active_lock *lock) { struct active_request_slot *slot; - char *url; char *out_data; char *if_header; struct buffer out_buffer; struct curl_slist *dav_headers = NULL; int i; - url = xmalloc(strlen(remote->url) + strlen(remote_path) + 1); - sprintf(url, "%s%s", remote->url, remote_path); - if_header = xmalloc(strlen(lock->token) + 25); sprintf(if_header, "If: ()", lock->token); dav_headers = curl_slist_append(dav_headers, if_header); @@ -1460,13 +1519,12 @@ int update_remote(char *remote_path, unsigned char *sha1, curl_easy_setopt(slot->curl, CURLOPT_HTTPHEADER, dav_headers); curl_easy_setopt(slot->curl, CURLOPT_UPLOAD, 1); curl_easy_setopt(slot->curl, CURLOPT_PUT, 1); - curl_easy_setopt(slot->curl, CURLOPT_URL, url); + curl_easy_setopt(slot->curl, CURLOPT_URL, lock->url); if (start_active_slot(slot)) { run_active_slot(slot); free(out_data); free(if_header); - free(url); if (slot->curl_result != CURLE_OK) { fprintf(stderr, "PUT error: curl result=%d, HTTP code=%ld\n", @@ -1477,7 +1535,6 @@ int update_remote(char *remote_path, unsigned char *sha1, } else { free(out_data); free(if_header); - free(url); fprintf(stderr, "Unable to start PUT request\n"); return 0; } @@ -1629,7 +1686,7 @@ int main(int argc, char **argv) free(remote_path); remote_path = xmalloc(strlen(remote_ref) + 12); sprintf(remote_path, "refs/heads/%s", remote_ref); - remote_lock = lock_remote(remote_path, 3600); + remote_lock = lock_remote(remote_path, LOCK_TIME); if (remote_lock == NULL) { fprintf(stderr, "Unable to lock remote branch %s\n", remote_ref); @@ -1701,8 +1758,7 @@ int main(int argc, char **argv) /* Update the remote branch if all went well */ if (do_remote_update) { - if (!aborted && update_remote(remote_path, - local_sha1, + if (!aborted && update_remote(local_sha1, remote_lock)) { fprintf(stderr, "%s remote branch %s\n", new_branch ? 
"Created" : "Updated", @@ -1718,12 +1774,8 @@ int main(int argc, char **argv) } unlock: - unlock_remote(remote_path, remote_lock); + unlock_remote(remote_lock); free(remote_path); - if (remote_lock->owner != NULL) - free(remote_lock->owner); - free(remote_lock->token); - free(remote_lock); } cleanup: -- cgit v0.10.2-6-g49f6 From 3402f1d6a3d8f4205fe59286e3a1223a9d28aea6 Mon Sep 17 00:00:00 2001 From: Junio C Hamano Date: Sat, 5 Nov 2005 11:12:05 -0800 Subject: Document expat dependency when using http-push. Signed-off-by: Junio C Hamano diff --git a/INSTALL b/INSTALL index 6e336d7..bbb13f3 100644 --- a/INSTALL +++ b/INSTALL @@ -46,6 +46,9 @@ Issues of note: transfer, you are probabaly OK if you do not have them. + - expat library; git-http-push uses it for remote lock + management over DAV. Similar to "curl" above, this is optional. + - "GNU diff" to generate patches. Of course, you don't _have_ to generate patches if you don't want to, but let's face it, you'll be wanting to. Or why did you get git in the first place? -- cgit v0.10.2-6-g49f6 From bb73d73c0885fce357e0d70aa51c2215a8e38a4e Mon Sep 17 00:00:00 2001 From: Jon Loeliger Date: Sun, 6 Nov 2005 10:26:07 -0600 Subject: Refactor merge strategies into separate includable file. Signed-off-by: Jon Loeliger Signed-off-by: Junio C Hamano diff --git a/Documentation/git-merge.txt b/Documentation/git-merge.txt index 3e058db..b3ef19b 100644 --- a/Documentation/git-merge.txt +++ b/Documentation/git-merge.txt @@ -34,6 +34,8 @@ include::merge-pull-opts.txt[] least one . Specifying more than one obviously means you are trying an Octopus. +include::merge-strategies.txt[] + SEE ALSO -------- diff --git a/Documentation/git-pull.txt b/Documentation/git-pull.txt index ec10a2f..7ebb08d 100644 --- a/Documentation/git-pull.txt +++ b/Documentation/git-pull.txt @@ -31,42 +31,8 @@ include::pull-fetch-param.txt[] include::merge-pull-opts.txt[] +include::merge-strategies.txt[] -MERGE STRATEGIES ----------------- - -resolve:: - This can only resolve two heads (i.e. the current branch - and another branch you pulled from) using 3-way merge - algorithm. It tries to carefully detect criss-cross - merge ambiguities and is considered generally safe and - fast. This is the default merge strategy when pulling - one branch. - -recursive:: - This can only resolve two heads using 3-way merge - algorithm. When there are more than one common - ancestors that can be used for 3-way merge, it creates a - merged tree of the common ancestores and uses that as - the reference tree for the 3-way merge. This has been - reported to result in fewer merge conflicts without - causing mis-merges by tests done on actual merge commits - taken from Linux 2.6 kernel development history. - Additionally this can detect and handle merges involving - renames. - -octopus:: - This resolves more than two-head case, but refuses to do - complex merge that needs manual resolution. It is - primarily meant to be used for bundling topic branch - heads together. This is the default merge strategy when - pulling more than one branch. - -ours:: - This resolves any number of heads, but the result of the - merge is always the current branch head. It is meant to - be used to supersede old development history of side - branches. 
EXAMPLES diff --git a/Documentation/merge-strategies.txt b/Documentation/merge-strategies.txt new file mode 100644 index 0000000..3ec56d2 --- /dev/null +++ b/Documentation/merge-strategies.txt @@ -0,0 +1,35 @@ +MERGE STRATEGIES +---------------- + +resolve:: + This can only resolve two heads (i.e. the current branch + and another branch you pulled from) using 3-way merge + algorithm. It tries to carefully detect criss-cross + merge ambiguities and is considered generally safe and + fast. This is the default merge strategy when pulling + one branch. + +recursive:: + This can only resolve two heads using 3-way merge + algorithm. When there are more than one common + ancestors that can be used for 3-way merge, it creates a + merged tree of the common ancestores and uses that as + the reference tree for the 3-way merge. This has been + reported to result in fewer merge conflicts without + causing mis-merges by tests done on actual merge commits + taken from Linux 2.6 kernel development history. + Additionally this can detect and handle merges involving + renames. + +octopus:: + This resolves more than two-head case, but refuses to do + complex merge that needs manual resolution. It is + primarily meant to be used for bundling topic branch + heads together. This is the default merge strategy when + pulling more than one branch. + +ours:: + This resolves any number of heads, but the result of the + merge is always the current branch head. It is meant to + be used to supersede old development history of side + branches. -- cgit v0.10.2-6-g49f6 From 0cfddacdcca229deca9a1dcdf8628bf168171908 Mon Sep 17 00:00:00 2001 From: "Randal L. Schwartz" Date: Sun, 6 Nov 2005 04:33:07 -0800 Subject: Use fink/darwinport paths for OSX There's no standard libexpat for OSX, so if you install it after-market, it can end up in various directories. Give paths used by fink and darwinports by default to CFLAGS. Signed-off-by: Junio C Hamano diff --git a/Makefile b/Makefile index 6f9b0d1..f89e0bd 100644 --- a/Makefile +++ b/Makefile @@ -185,6 +185,10 @@ uname_O := $(shell sh -c 'uname -o 2>/dev/null || echo not') ifeq ($(uname_S),Darwin) NEEDS_SSL_WITH_CRYPTO = YesPlease NEEDS_LIBICONV = YesPlease + ## fink + ALL_CFLAGS += -I/sw/include -L/sw/lib + ## darwinports + ALL_CFLAGS += -I/opt/local/include -L/opt/local/lib endif ifeq ($(uname_S),SunOS) NEEDS_SOCKET = YesPlease -- cgit v0.10.2-6-g49f6 From fcbc3083e37f3c025d85d7b2c8a1c53d07a81fac Mon Sep 17 00:00:00 2001 From: Junio C Hamano Date: Sun, 6 Nov 2005 17:26:31 -0800 Subject: ls-files: --others should not say unmerged paths are unknown. Jon Loeliger noticed that an unmerged path appears as "Untracked" in git-status output, even though we show the same path as updated/changed. Since --others means "we have not told git about that path", we should not show unmerged paths -- obviously, git knows about them; it just does not know what we want to do about them yet. Signed-off-by: Junio C Hamano diff --git a/ls-files.c b/ls-files.c index d9c8b21..f7653e7 100644 --- a/ls-files.c +++ b/ls-files.c @@ -348,6 +348,29 @@ static void show_dir_entry(const char *tag, struct nond_on_fs *ent) putchar(line_terminator); } +static void show_other_files(void) +{ + int i; + for (i = 0; i < nr_dir; i++) { + /* We should not have a matching entry, but we + * may have an unmerged entry for this path. 
+ */ + struct nond_on_fs *ent = dir[i]; + int pos = cache_name_pos(ent->name, ent->len); + struct cache_entry *ce; + if (0 <= pos) + die("bug in show-other-files"); + pos = -pos - 1; + if (pos < active_nr) { + ce = active_cache[pos]; + if (ce_namelen(ce) == ent->len && + !memcmp(ce->name, ent->name, ent->len)) + continue; /* Yup, this one exists unmerged */ + } + show_dir_entry(tag_other, ent); + } +} + static void show_killed_files(void) { int i; @@ -438,8 +461,7 @@ static void show_files(void) read_directory(path, base, baselen); qsort(dir, nr_dir, sizeof(struct nond_on_fs *), cmp_name); if (show_others) - for (i = 0; i < nr_dir; i++) - show_dir_entry(tag_other, dir[i]); + show_other_files(); if (show_killed) show_killed_files(); } -- cgit v0.10.2-6-g49f6 From f1790448628262e46496861bb6da76be63a2a247 Mon Sep 17 00:00:00 2001 From: Junio C Hamano Date: Sun, 6 Nov 2005 17:37:40 -0800 Subject: git-status: do not mark unmerged paths as committable. An unmerged path appears as both "Updated but not checked in" list, and "Changed but not updated" list. We are not going to commit that path until it is resolved, so remove it from the former list. Signed-off-by: Junio C Hamano diff --git a/git-status.sh b/git-status.sh index 62a24a9..837f334 100755 --- a/git-status.sh +++ b/git-status.sh @@ -41,7 +41,7 @@ git-update-index -q --unmerged --refresh || exit if GIT_DIR="$GIT_DIR" git-rev-parse --verify HEAD >/dev/null 2>&1 then - git-diff-index -M --cached --name-status HEAD | + git-diff-index -M --cached --name-status --diff-filter=MDTCRA HEAD | sed -e ' s/\\/\\\\/g s/ /\\ /g -- cgit v0.10.2-6-g49f6 From 95d117b6051c01726b67d3151f83e47dd41c0743 Mon Sep 17 00:00:00 2001 From: Junio C Hamano Date: Sun, 6 Nov 2005 00:52:57 -0800 Subject: Set up remotes/origin to track all remote branches. This implements the idea Daniel Barkalow came up with, to match the remotes/origin created by clone by default to the workflow I use myself in my guinea pig repository, to have me eat my own dog food. We probably would want to use either .git/refs/local/heads/* (idea by Linus) or .git/refs/heads/origin/* instead to reduce the local ref namespace pollution. Signed-off-by: Junio C Hamano diff --git a/Documentation/git-clone.txt b/Documentation/git-clone.txt index cbd83f3..fefd298 100644 --- a/Documentation/git-clone.txt +++ b/Documentation/git-clone.txt @@ -25,7 +25,8 @@ This is to help the typical workflow of working off of the remote `master` branch. Every time `git pull` without argument is run, the progress on the remote `master` branch is tracked by copying it into the local `origin` branch, and merged into the -branch you are currently working on. +branch you are currently working on. Remote branches other than +`master` are also added there to be tracked. 
OPTIONS diff --git a/git-clone.sh b/git-clone.sh index 1adf604..4fdd652 100755 --- a/git-clone.sh +++ b/git-clone.sh @@ -202,8 +202,16 @@ then mkdir -p .git/remotes && echo >.git/remotes/origin \ "URL: $repo -Pull: $head_points_at:origin" - cp ".git/refs/heads/$head_points_at" .git/refs/heads/origin +Pull: $head_points_at:origin" && + cp ".git/refs/heads/$head_points_at" .git/refs/heads/origin && + find .git/refs/heads -type f -print | + while read ref + do + head=`expr "$ref" : '.git/refs/heads/\(.*\)'` && + test "$head_points_at" = "$head" || + test "origin" = "$head" || + echo "Pull: ${head}:${head}" + done >>.git/remotes/origin esac case "$no_checkout" in -- cgit v0.10.2-6-g49f6 From bee8e79da08f3e50fd7e3b3b203aea284a933fe1 Mon Sep 17 00:00:00 2001 From: Paul Collins Date: Sun, 6 Nov 2005 21:07:45 +0000 Subject: http-push.c: include with angle bracket, not dq. Do not search the current directory when including expat.h, since it is not supplied by git. Signed-off-by: Paul Collins Signed-off-by: Junio C Hamano diff --git a/http-push.c b/http-push.c index c10067c..89fda42 100644 --- a/http-push.c +++ b/http-push.c @@ -7,7 +7,7 @@ #include #include -#include "expat.h" +#include static const char http_push_usage[] = "git-http-push [--complete] [--force] [--verbose] [...]\n"; -- cgit v0.10.2-6-g49f6 From 67ffdf4c0af47679b8a4ac2b70e722e2f65fc4f9 Mon Sep 17 00:00:00 2001 From: Johannes Schindelin Date: Mon, 7 Nov 2005 00:36:15 +0100 Subject: Allow GIT_DIR to be an absolute path This fixes a problem in safe_create_leading_directories() when the argument starts with a '/' (i.e. the path is absolute). Signed-off-by: Johannes Schindelin Signed-off-by: Junio C Hamano diff --git a/sha1_file.c b/sha1_file.c index 642f00d..946a353 100644 --- a/sha1_file.c +++ b/sha1_file.c @@ -51,6 +51,8 @@ int get_sha1_hex(const char *hex, unsigned char *sha1) int safe_create_leading_directories(char *path) { char *pos = path; + if (*pos == '/') + pos++; while (pos) { pos = strchr(pos, '/'); -- cgit v0.10.2-6-g49f6 From 90279074ca5cc336a8bfffd47d19d089b291b432 Mon Sep 17 00:00:00 2001 From: Johannes Schindelin Date: Mon, 7 Nov 2005 00:59:39 +0100 Subject: http-fetch: do not use curl_message after releasing it When curl_message is released using curl_multi_remove_handle(), it's contents are undefined. Therefore, get the information before releasing it. Signed-off-by: Johannes Schindelin Signed-off-by: Junio C Hamano diff --git a/http-fetch.c b/http-fetch.c index b12779d..ea8af1b 100644 --- a/http-fetch.c +++ b/http-fetch.c @@ -578,6 +578,7 @@ void process_curl_messages(void) while (curl_message != NULL) { if (curl_message->msg == CURLMSG_DONE) { + int curl_result = curl_message->data.result; slot = active_queue_head; while (slot != NULL && slot->curl != curl_message->easy_handle) @@ -587,7 +588,7 @@ void process_curl_messages(void) active_requests--; slot->done = 1; slot->in_use = 0; - slot->curl_result = curl_message->data.result; + slot->curl_result = curl_result; curl_easy_getinfo(slot->curl, CURLINFO_HTTP_CODE, &slot->http_code); @@ -599,8 +600,7 @@ void process_curl_messages(void) fprintf(stderr, "Received DONE message for unknown request!\n"); } if (request != NULL) { - request->curl_result = - curl_message->data.result; + request->curl_result = curl_result; request->http_code = slot->http_code; request->slot = NULL; request->state = COMPLETE; -- cgit v0.10.2-6-g49f6
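[Editor's note, not part of any patch above.] The last commit in this series fixes process_curl_messages() in http-fetch.c so that the completion code is copied out of the CURLMsg before the easy handle is removed from the multi stack, since the message contents are undefined after that point. A minimal stand-alone sketch of that pattern follows, using only the libcurl multi-interface calls the patches already rely on; the function name drain_done_messages() is illustrative and does not appear in git.

	/*
	 * Sketch only: copy everything we need out of the CURLMsg while it
	 * is still valid, then remove and clean up the easy handle.
	 */
	#include <curl/curl.h>
	#include <stdio.h>

	static void drain_done_messages(CURLM *multi)
	{
		CURLMsg *msg;
		int pending;

		while ((msg = curl_multi_info_read(multi, &pending)) != NULL) {
			CURL *easy;
			CURLcode result;
			long http_code = 0;

			if (msg->msg != CURLMSG_DONE)
				continue;

			/* Save the result before the message can be recycled. */
			easy = msg->easy_handle;
			result = msg->data.result;
			curl_easy_getinfo(easy, CURLINFO_HTTP_CODE, &http_code);

			/* From here on, msg must not be dereferenced again. */
			curl_multi_remove_handle(multi, easy);

			if (result != CURLE_OK)
				fprintf(stderr, "transfer failed: %s (HTTP %ld)\n",
					curl_easy_strerror(result), http_code);
			curl_easy_cleanup(easy);
		}
	}

Copying the result first means the bookkeeping no longer depends on whether it runs before or after curl_multi_remove_handle(), which is exactly the ordering bug the patch removes.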