author    Junio C Hamano <gitster@pobox.com>  2014-06-03 19:06:41 (GMT)
committer Junio C Hamano <gitster@pobox.com>  2014-06-03 19:06:41 (GMT)
commit    9af098c29b2faa89225556258ae0e3676741c981 (patch)
tree      ba6752e3109e5206465c08fdf2538deb9ee4704b
parent    2cc70cefdd4a249fab895943890d21071e03f8c7 (diff)
parent    426ddeead6112955dfb50ccf9bb4af05d1ca9082 (diff)
Merge branch 'ym/fix-opportunistic-index-update-race'
Read-only operations such as "git status" that internally refresh the index
write out the refreshed index to the disk to optimize future accesses to the
working tree, but this could race with a "read-write" operation that modifies
the index while it is running. Detect such a race and avoid overwriting the
index.

Duy raised a good point that we may need to do the same for the normal
writeout codepath, not just the "opportunistic" update codepath. While that
is true, nobody sane would be running two simultaneous operations that are
clearly write-oriented competing with each other against the same index
file. So in that sense that can be done as a less urgent follow-up for this
topic.

* ym/fix-opportunistic-index-update-race:
  read-cache.c: verify index file before we opportunistically update it
  wrapper.c: add xpread() similar to xread()
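To make the race concrete before the diff, here is a minimal, self-contained
sketch of the guard this merge introduces. The names index_unchanged(),
remembered and CHECKSUM_LEN are illustrative only; the real implementation is
verify_index_from() in the read-cache.c hunk below, and it uses the new
xpread()/pread_in_full() helpers rather than a bare pread() so that
EINTR/EAGAIN and short reads are handled.

/*
 * Simplified sketch (not the actual patch): remember the SHA-1 that
 * trails the index file when it was read, and before an opportunistic
 * write-back compare it with what is on disk now.  If the checksums
 * differ, another process has rewritten the index, so skip the write
 * instead of clobbering it.
 */
#define _XOPEN_SOURCE 700	/* for pread() */
#include <fcntl.h>
#include <string.h>
#include <sys/stat.h>
#include <unistd.h>

#define CHECKSUM_LEN 20		/* SHA-1 trailer at the end of the index */

/* Return 1 if the file at "path" still ends with "remembered". */
static int index_unchanged(const char *path,
			   const unsigned char remembered[CHECKSUM_LEN])
{
	unsigned char now[CHECKSUM_LEN];
	struct stat st;
	int ok = 0;
	int fd = open(path, O_RDONLY);

	if (fd < 0)
		return 0;
	if (!fstat(fd, &st) &&
	    st.st_size >= CHECKSUM_LEN &&
	    pread(fd, now, CHECKSUM_LEN, st.st_size - CHECKSUM_LEN) == CHECKSUM_LEN &&
	    !memcmp(remembered, now, CHECKSUM_LEN))
		ok = 1;
	close(fd);
	return ok;
}

In the patch itself, the caller update_index_if_able() only commits the
locked index file when this kind of check (verify_index()) passes, and rolls
back the lock file otherwise.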
-rw-r--r--  builtin/index-pack.c  |  2
-rw-r--r--  cache.h               |  3
-rw-r--r--  compat/mmap.c         |  4
-rw-r--r--  git-compat-util.h     |  1
-rw-r--r--  read-cache.c          | 47
-rw-r--r--  wrapper.c             | 38
6 files changed, 90 insertions(+), 5 deletions(-)
diff --git a/builtin/index-pack.c b/builtin/index-pack.c
index b9f6e12..1bac0f5 100644
--- a/builtin/index-pack.c
+++ b/builtin/index-pack.c
@@ -542,7 +542,7 @@ static void *unpack_data(struct object_entry *obj,
do {
ssize_t n = (len < 64*1024) ? len : 64*1024;
- n = pread(pack_fd, inbuf, n, from);
+ n = xpread(pack_fd, inbuf, n, from);
if (n < 0)
die_errno(_("cannot pread pack file"));
if (!n)
diff --git a/cache.h b/cache.h
index ef4412d..557bd6a 100644
--- a/cache.h
+++ b/cache.h
@@ -294,6 +294,7 @@ struct index_state {
initialized : 1;
struct hashmap name_hash;
struct hashmap dir_hash;
+ unsigned char sha1[20];
};
extern struct index_state the_index;
@@ -1337,6 +1338,8 @@ extern void fsync_or_die(int fd, const char *);
extern ssize_t read_in_full(int fd, void *buf, size_t count);
extern ssize_t write_in_full(int fd, const void *buf, size_t count);
+extern ssize_t pread_in_full(int fd, void *buf, size_t count, off_t offset);
+
static inline ssize_t write_str_in_full(int fd, const char *str)
{
return write_in_full(fd, str, strlen(str));
diff --git a/compat/mmap.c b/compat/mmap.c
index c9d46d1..7f662fe 100644
--- a/compat/mmap.c
+++ b/compat/mmap.c
@@ -14,7 +14,7 @@ void *git_mmap(void *start, size_t length, int prot, int flags, int fd, off_t of
}
while (n < length) {
- ssize_t count = pread(fd, (char *)start + n, length - n, offset + n);
+ ssize_t count = xpread(fd, (char *)start + n, length - n, offset + n);
if (count == 0) {
memset((char *)start+n, 0, length-n);
@@ -22,8 +22,6 @@ void *git_mmap(void *start, size_t length, int prot, int flags, int fd, off_t of
}
if (count < 0) {
- if (errno == EAGAIN || errno == EINTR)
- continue;
free(start);
errno = EACCES;
return MAP_FAILED;
diff --git a/git-compat-util.h b/git-compat-util.h
index 7fe1ffda0..76910e6 100644
--- a/git-compat-util.h
+++ b/git-compat-util.h
@@ -539,6 +539,7 @@ extern void *xcalloc(size_t nmemb, size_t size);
extern void *xmmap(void *start, size_t length, int prot, int flags, int fd, off_t offset);
extern ssize_t xread(int fd, void *buf, size_t len);
extern ssize_t xwrite(int fd, const void *buf, size_t len);
+extern ssize_t xpread(int fd, void *buf, size_t len, off_t offset);
extern int xdup(int fd);
extern FILE *xfdopen(int fd, const char *mode);
extern int xmkstemp(char *template);
diff --git a/read-cache.c b/read-cache.c
index ba13353..7f5645e 100644
--- a/read-cache.c
+++ b/read-cache.c
@@ -1477,6 +1477,7 @@ int read_index_from(struct index_state *istate, const char *path)
if (verify_hdr(hdr, mmap_size) < 0)
goto unmap;
+ hashcpy(istate->sha1, (unsigned char *)hdr + mmap_size - 20);
istate->version = ntohl(hdr->hdr_version);
istate->cache_nr = ntohl(hdr->hdr_entries);
istate->cache_alloc = alloc_nr(istate->cache_nr);
@@ -1760,6 +1761,50 @@ static int ce_write_entry(git_SHA_CTX *c, int fd, struct cache_entry *ce,
return result;
}
+/*
+ * This function verifies if index_state has the correct sha1 of the
+ * index file. Don't die if we have any other failure, just return 0.
+ */
+static int verify_index_from(const struct index_state *istate, const char *path)
+{
+ int fd;
+ ssize_t n;
+ struct stat st;
+ unsigned char sha1[20];
+
+ if (!istate->initialized)
+ return 0;
+
+ fd = open(path, O_RDONLY);
+ if (fd < 0)
+ return 0;
+
+ if (fstat(fd, &st))
+ goto out;
+
+ if (st.st_size < sizeof(struct cache_header) + 20)
+ goto out;
+
+ n = pread_in_full(fd, sha1, 20, st.st_size - 20);
+ if (n != 20)
+ goto out;
+
+ if (hashcmp(istate->sha1, sha1))
+ goto out;
+
+ close(fd);
+ return 1;
+
+out:
+ close(fd);
+ return 0;
+}
+
+static int verify_index(const struct index_state *istate)
+{
+ return verify_index_from(istate, get_index_file());
+}
+
static int has_racy_timestamp(struct index_state *istate)
{
int entries = istate->cache_nr;
@@ -1779,7 +1824,7 @@ static int has_racy_timestamp(struct index_state *istate)
void update_index_if_able(struct index_state *istate, struct lock_file *lockfile)
{
if ((istate->cache_changed || has_racy_timestamp(istate)) &&
- !write_index(istate, lockfile->fd))
+ verify_index(istate) && !write_index(istate, lockfile->fd))
commit_locked_index(lockfile);
else
rollback_lock_file(lockfile);
diff --git a/wrapper.c b/wrapper.c
index 0cc5636..bc1bfb8 100644
--- a/wrapper.c
+++ b/wrapper.c
@@ -174,6 +174,24 @@ ssize_t xwrite(int fd, const void *buf, size_t len)
}
}
+/*
+ * xpread() is the same as pread(), but it automatically restarts pread()
+ * operations with a recoverable error (EAGAIN and EINTR). xpread() DOES
+ * NOT GUARANTEE that "len" bytes is read even if the data is available.
+ */
+ssize_t xpread(int fd, void *buf, size_t len, off_t offset)
+{
+ ssize_t nr;
+ if (len > MAX_IO_SIZE)
+ len = MAX_IO_SIZE;
+ while (1) {
+ nr = pread(fd, buf, len, offset);
+ if ((nr < 0) && (errno == EAGAIN || errno == EINTR))
+ continue;
+ return nr;
+ }
+}
+
ssize_t read_in_full(int fd, void *buf, size_t count)
{
char *p = buf;
@@ -214,6 +232,26 @@ ssize_t write_in_full(int fd, const void *buf, size_t count)
return total;
}
+ssize_t pread_in_full(int fd, void *buf, size_t count, off_t offset)
+{
+ char *p = buf;
+ ssize_t total = 0;
+
+ while (count > 0) {
+ ssize_t loaded = xpread(fd, p, count, offset);
+ if (loaded < 0)
+ return -1;
+ if (loaded == 0)
+ return total;
+ count -= loaded;
+ p += loaded;
+ total += loaded;
+ offset += loaded;
+ }
+
+ return total;
+}
+
int xdup(int fd)
{
int ret = dup(fd);