Diffstat (limited to 'read-cache.c')
 -rw-r--r--  read-cache.c | 892
 1 file changed, 613 insertions(+), 279 deletions(-)
diff --git a/read-cache.c b/read-cache.c
index 77961a3..e1723ad 100644
--- a/read-cache.c
+++ b/read-cache.c
@@ -3,30 +3,46 @@
*
* Copyright (C) Linus Torvalds, 2005
*/
-#include "cache.h"
+#include "git-compat-util.h"
+#include "bulk-checkin.h"
#include "config.h"
+#include "date.h"
#include "diff.h"
#include "diffcore.h"
+#include "hex.h"
#include "tempfile.h"
#include "lockfile.h"
#include "cache-tree.h"
#include "refs.h"
#include "dir.h"
-#include "object-store.h"
+#include "object-file.h"
+#include "object-store-ll.h"
+#include "oid-array.h"
#include "tree.h"
#include "commit.h"
-#include "blob.h"
+#include "environment.h"
+#include "gettext.h"
+#include "mem-pool.h"
+#include "name-hash.h"
+#include "object-name.h"
+#include "path.h"
+#include "preload-index.h"
+#include "read-cache.h"
#include "resolve-undo.h"
-#include "run-command.h"
+#include "revision.h"
#include "strbuf.h"
+#include "trace2.h"
#include "varint.h"
#include "split-index.h"
+#include "symlinks.h"
#include "utf8.h"
#include "fsmonitor.h"
#include "thread-utils.h"
#include "progress.h"
#include "sparse-index.h"
#include "csum-file.h"
+#include "promisor-remote.h"
+#include "hook.h"
/* Mask for the name length in ce_flags in the on-disk index */
@@ -67,6 +83,11 @@
*/
#define CACHE_ENTRY_PATH_LENGTH 80
+enum index_search_mode {
+ NO_EXPAND_SPARSE = 0,
+ EXPAND_SPARSE = 1
+};
+
static inline struct cache_entry *mem_pool__ce_alloc(struct mem_pool *mem_pool, size_t len)
{
struct cache_entry *ce;
@@ -105,7 +126,7 @@ static const char *alternate_index_output;
static void set_index_entry(struct index_state *istate, int nr, struct cache_entry *ce)
{
if (S_ISSPARSEDIR(ce->ce_mode))
- istate->sparse_index = 1;
+ istate->sparse_index = INDEX_COLLAPSED;
istate->cache[nr] = ce;
add_name_hash(istate, ce);
@@ -127,7 +148,7 @@ static void replace_index_entry(struct index_state *istate, int nr, struct cache
void rename_index_entry_at(struct index_state *istate, int nr, const char *new_name)
{
- struct cache_entry *old_entry = istate->cache[nr], *new_entry;
+ struct cache_entry *old_entry = istate->cache[nr], *new_entry, *refreshed;
int namelen = strlen(new_name);
new_entry = make_empty_cache_entry(istate, namelen);
@@ -140,62 +161,20 @@ void rename_index_entry_at(struct index_state *istate, int nr, const char *new_n
cache_tree_invalidate_path(istate, old_entry->name);
untracked_cache_remove_from_index(istate, old_entry->name);
remove_index_entry_at(istate, nr);
- add_index_entry(istate, new_entry, ADD_CACHE_OK_TO_ADD|ADD_CACHE_OK_TO_REPLACE);
-}
-
-void fill_stat_data(struct stat_data *sd, struct stat *st)
-{
- sd->sd_ctime.sec = (unsigned int)st->st_ctime;
- sd->sd_mtime.sec = (unsigned int)st->st_mtime;
- sd->sd_ctime.nsec = ST_CTIME_NSEC(*st);
- sd->sd_mtime.nsec = ST_MTIME_NSEC(*st);
- sd->sd_dev = st->st_dev;
- sd->sd_ino = st->st_ino;
- sd->sd_uid = st->st_uid;
- sd->sd_gid = st->st_gid;
- sd->sd_size = st->st_size;
-}
-
-int match_stat_data(const struct stat_data *sd, struct stat *st)
-{
- int changed = 0;
- if (sd->sd_mtime.sec != (unsigned int)st->st_mtime)
- changed |= MTIME_CHANGED;
- if (trust_ctime && check_stat &&
- sd->sd_ctime.sec != (unsigned int)st->st_ctime)
- changed |= CTIME_CHANGED;
-
-#ifdef USE_NSEC
- if (check_stat && sd->sd_mtime.nsec != ST_MTIME_NSEC(*st))
- changed |= MTIME_CHANGED;
- if (trust_ctime && check_stat &&
- sd->sd_ctime.nsec != ST_CTIME_NSEC(*st))
- changed |= CTIME_CHANGED;
-#endif
-
- if (check_stat) {
- if (sd->sd_uid != (unsigned int) st->st_uid ||
- sd->sd_gid != (unsigned int) st->st_gid)
- changed |= OWNER_CHANGED;
- if (sd->sd_ino != (unsigned int) st->st_ino)
- changed |= INODE_CHANGED;
- }
-
-#ifdef USE_STDEV
/*
- * st_dev breaks on network filesystems where different
- * clients will have different views of what "device"
- * the filesystem is on
+ * Refresh the new index entry. Using 'refresh_cache_entry' ensures
+ * we only update stat info if the entry is otherwise up-to-date (i.e.,
+ * the contents/mode haven't changed). This ensures that we reflect the
+ * 'ctime' of the rename in the index without (incorrectly) updating
+ * the cached stat info to reflect unstaged changes on disk.
*/
- if (check_stat && sd->sd_dev != (unsigned int) st->st_dev)
- changed |= INODE_CHANGED;
-#endif
-
- if (sd->sd_size != (unsigned int) st->st_size)
- changed |= DATA_CHANGED;
-
- return changed;
+ refreshed = refresh_cache_entry(istate, new_entry, CE_MATCH_REFRESH);
+ if (refreshed && refreshed != new_entry) {
+ add_index_entry(istate, refreshed, ADD_CACHE_OK_TO_ADD|ADD_CACHE_OK_TO_REPLACE);
+ discard_cache_entry(new_entry);
+ } else
+ add_index_entry(istate, new_entry, ADD_CACHE_OK_TO_ADD|ADD_CACHE_OK_TO_REPLACE);
}
/*
@@ -216,6 +195,33 @@ void fill_stat_cache_info(struct index_state *istate, struct cache_entry *ce, st
}
}
+static unsigned int st_mode_from_ce(const struct cache_entry *ce)
+{
+ extern int trust_executable_bit, has_symlinks;
+
+ switch (ce->ce_mode & S_IFMT) {
+ case S_IFLNK:
+ return has_symlinks ? S_IFLNK : (S_IFREG | 0644);
+ case S_IFREG:
+ return (ce->ce_mode & (trust_executable_bit ? 0755 : 0644)) | S_IFREG;
+ case S_IFGITLINK:
+ return S_IFDIR | 0755;
+ case S_IFDIR:
+ return ce->ce_mode;
+ default:
+ BUG("unsupported ce_mode: %o", ce->ce_mode);
+ }
+}
+
+int fake_lstat(const struct cache_entry *ce, struct stat *st)
+{
+ fake_lstat_data(&ce->ce_stat_data, st);
+ st->st_mode = st_mode_from_ce(ce);
+
+ /* always succeed as lstat() replacement */
+ return 0;
+}
+
static int ce_compare_data(struct index_state *istate,
const struct cache_entry *ce,
struct stat *st)
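
The fake_lstat() helper added above synthesizes a struct stat from the cached stat data, plus a file mode derived from ce_mode, for callers that want lstat()-like information even when the path may be absent from the worktree. A minimal usage sketch against the API shown in this hunk (the report_entry() wrapper is hypothetical, not part of the patch):

static void report_entry(const struct cache_entry *ce)
{
	struct stat st;

	/*
	 * Prefer the real file; fall back to the cached data if it is
	 * gone. fake_lstat() always succeeds, so no second error check.
	 */
	if (lstat(ce->name, &st) < 0)
		fake_lstat(ce, &st);

	printf("%s: mode %06o, size %ju\n",
	       ce->name, (unsigned int)st.st_mode, (uintmax_t)st.st_size);
}
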
@@ -243,7 +249,7 @@ static int ce_compare_link(const struct cache_entry *ce, size_t expected_size)
if (strbuf_readlink(&sb, ce->name, expected_size))
return -1;
- buffer = read_object_file(&ce->oid, &type, &size);
+ buffer = repo_read_object_file(the_repository, &ce->oid, &type, &size);
if (buffer) {
if (size == sb.len)
match = memcmp(buffer, sb.buf, size);
@@ -468,89 +474,36 @@ int ie_modified(struct index_state *istate,
return 0;
}
-int base_name_compare(const char *name1, int len1, int mode1,
- const char *name2, int len2, int mode2)
+static int cache_name_stage_compare(const char *name1, int len1, int stage1,
+ const char *name2, int len2, int stage2)
{
- unsigned char c1, c2;
- int len = len1 < len2 ? len1 : len2;
int cmp;
- cmp = memcmp(name1, name2, len);
- if (cmp)
- return cmp;
- c1 = name1[len];
- c2 = name2[len];
- if (!c1 && S_ISDIR(mode1))
- c1 = '/';
- if (!c2 && S_ISDIR(mode2))
- c2 = '/';
- return (c1 < c2) ? -1 : (c1 > c2) ? 1 : 0;
-}
-
-/*
- * df_name_compare() is identical to base_name_compare(), except it
- * compares conflicting directory/file entries as equal. Note that
- * while a directory name compares as equal to a regular file, they
- * then individually compare _differently_ to a filename that has
- * a dot after the basename (because '\0' < '.' < '/').
- *
- * This is used by routines that want to traverse the git namespace
- * but then handle conflicting entries together when possible.
- */
-int df_name_compare(const char *name1, int len1, int mode1,
- const char *name2, int len2, int mode2)
-{
- int len = len1 < len2 ? len1 : len2, cmp;
- unsigned char c1, c2;
-
- cmp = memcmp(name1, name2, len);
+ cmp = name_compare(name1, len1, name2, len2);
if (cmp)
return cmp;
- /* Directories and files compare equal (same length, same name) */
- if (len1 == len2)
- return 0;
- c1 = name1[len];
- if (!c1 && S_ISDIR(mode1))
- c1 = '/';
- c2 = name2[len];
- if (!c2 && S_ISDIR(mode2))
- c2 = '/';
- if (c1 == '/' && !c2)
- return 0;
- if (c2 == '/' && !c1)
- return 0;
- return c1 - c2;
-}
-int name_compare(const char *name1, size_t len1, const char *name2, size_t len2)
-{
- size_t min_len = (len1 < len2) ? len1 : len2;
- int cmp = memcmp(name1, name2, min_len);
- if (cmp)
- return cmp;
- if (len1 < len2)
+ if (stage1 < stage2)
return -1;
- if (len1 > len2)
+ if (stage1 > stage2)
return 1;
return 0;
}
-int cache_name_stage_compare(const char *name1, int len1, int stage1, const char *name2, int len2, int stage2)
+int cmp_cache_name_compare(const void *a_, const void *b_)
{
- int cmp;
+ const struct cache_entry *ce1, *ce2;
- cmp = name_compare(name1, len1, name2, len2);
- if (cmp)
- return cmp;
-
- if (stage1 < stage2)
- return -1;
- if (stage1 > stage2)
- return 1;
- return 0;
+ ce1 = *((const struct cache_entry **)a_);
+ ce2 = *((const struct cache_entry **)b_);
+ return cache_name_stage_compare(ce1->name, ce1->ce_namelen, ce_stage(ce1),
+ ce2->name, ce2->ce_namelen, ce_stage(ce2));
}
-static int index_name_stage_pos(struct index_state *istate, const char *name, int namelen, int stage)
+static int index_name_stage_pos(struct index_state *istate,
+ const char *name, int namelen,
+ int stage,
+ enum index_search_mode search_mode)
{
int first, last;
@@ -569,7 +522,7 @@ static int index_name_stage_pos(struct index_state *istate, const char *name, in
first = next+1;
}
- if (istate->sparse_index &&
+ if (search_mode == EXPAND_SPARSE && istate->sparse_index &&
first > 0) {
/* Note: first <= istate->cache_nr */
struct cache_entry *ce = istate->cache[first - 1];
@@ -585,7 +538,7 @@ static int index_name_stage_pos(struct index_state *istate, const char *name, in
ce_namelen(ce) < namelen &&
!strncmp(name, ce->name, ce_namelen(ce))) {
ensure_full_index(istate);
- return index_name_stage_pos(istate, name, namelen, stage);
+ return index_name_stage_pos(istate, name, namelen, stage, search_mode);
}
}
@@ -594,7 +547,17 @@ static int index_name_stage_pos(struct index_state *istate, const char *name, in
int index_name_pos(struct index_state *istate, const char *name, int namelen)
{
- return index_name_stage_pos(istate, name, namelen, 0);
+ return index_name_stage_pos(istate, name, namelen, 0, EXPAND_SPARSE);
+}
+
+int index_name_pos_sparse(struct index_state *istate, const char *name, int namelen)
+{
+ return index_name_stage_pos(istate, name, namelen, 0, NO_EXPAND_SPARSE);
+}
+
+int index_entry_exists(struct index_state *istate, const char *name, int namelen)
+{
+ return index_name_stage_pos(istate, name, namelen, 0, NO_EXPAND_SPARSE) >= 0;
}
int remove_index_entry_at(struct index_state *istate, int pos)
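
As a reading aid for the lookup API touched above: index_name_pos() (and the new index_name_pos_sparse()) return the entry's position when the stage-0 name is present, and -pos-1 when it is not, where pos is the slot at which the entry would be inserted; index_entry_exists() only reports presence and never expands a sparse index. A small sketch of decoding that convention (the locate() helper is illustrative only):

static void locate(struct index_state *istate, const char *name)
{
	int pos = index_name_pos(istate, name, strlen(name));

	if (pos >= 0)
		printf("'%s' is cache entry %d\n", name, pos);
	else
		printf("'%s' not found; insertion point is %d\n",
		       name, -pos - 1);
}
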
@@ -721,7 +684,7 @@ static struct cache_entry *create_alias_ce(struct index_state *istate,
void set_object_name_for_intent_to_add_entry(struct cache_entry *ce)
{
struct object_id oid;
- if (write_object_file("", 0, blob_type, &oid))
+ if (write_object_file("", 0, OBJ_BLOB, &oid))
die(_("cannot create an empty blob in the object database"));
oidcpy(&ce->oid, &oid);
}
@@ -737,7 +700,7 @@ int add_to_index(struct index_state *istate, const char *path, struct stat *st,
int intent_only = flags & ADD_CACHE_INTENT;
int add_option = (ADD_CACHE_OK_TO_ADD|ADD_CACHE_OK_TO_REPLACE|
(intent_only ? ADD_CACHE_NEW_ONLY : 0));
- int hash_flags = HASH_WRITE_OBJECT;
+ unsigned hash_flags = pretend ? 0 : HASH_WRITE_OBJECT;
struct object_id oid;
if (flags & ADD_CACHE_RENORMALIZE)
@@ -848,6 +811,19 @@ struct cache_entry *make_empty_transient_cache_entry(size_t len,
return xcalloc(1, cache_entry_size(len));
}
+enum verify_path_result {
+ PATH_OK,
+ PATH_INVALID,
+ PATH_DIR_WITH_SEP,
+};
+
+static enum verify_path_result verify_path_internal(const char *, unsigned);
+
+int verify_path(const char *path, unsigned mode)
+{
+ return verify_path_internal(path, mode) == PATH_OK;
+}
+
struct cache_entry *make_cache_entry(struct index_state *istate,
unsigned int mode,
const struct object_id *oid,
@@ -858,7 +834,7 @@ struct cache_entry *make_cache_entry(struct index_state *istate,
struct cache_entry *ce, *ret;
int len;
- if (!verify_path(path, mode)) {
+ if (verify_path_internal(path, mode) == PATH_INVALID) {
error(_("invalid path '%s'"), path);
return NULL;
}
@@ -992,60 +968,62 @@ static int verify_dotfile(const char *rest, unsigned mode)
return 1;
}
-int verify_path(const char *path, unsigned mode)
+static enum verify_path_result verify_path_internal(const char *path,
+ unsigned mode)
{
char c = 0;
if (has_dos_drive_prefix(path))
- return 0;
+ return PATH_INVALID;
if (!is_valid_path(path))
- return 0;
+ return PATH_INVALID;
goto inside;
for (;;) {
if (!c)
- return 1;
+ return PATH_OK;
if (is_dir_sep(c)) {
inside:
if (protect_hfs) {
if (is_hfs_dotgit(path))
- return 0;
+ return PATH_INVALID;
if (S_ISLNK(mode)) {
if (is_hfs_dotgitmodules(path))
- return 0;
+ return PATH_INVALID;
}
}
if (protect_ntfs) {
#if defined GIT_WINDOWS_NATIVE || defined __CYGWIN__
if (c == '\\')
- return 0;
+ return PATH_INVALID;
#endif
if (is_ntfs_dotgit(path))
- return 0;
+ return PATH_INVALID;
if (S_ISLNK(mode)) {
if (is_ntfs_dotgitmodules(path))
- return 0;
+ return PATH_INVALID;
}
}
c = *path++;
if ((c == '.' && !verify_dotfile(path, mode)) ||
is_dir_sep(c))
- return 0;
+ return PATH_INVALID;
/*
* allow terminating directory separators for
* sparse directory entries.
*/
if (c == '\0')
- return S_ISDIR(mode);
+ return S_ISDIR(mode) ? PATH_DIR_WITH_SEP :
+ PATH_INVALID;
} else if (c == '\\' && protect_ntfs) {
if (is_ntfs_dotgit(path))
- return 0;
+ return PATH_INVALID;
if (S_ISLNK(mode)) {
if (is_ntfs_dotgitmodules(path))
- return 0;
+ return PATH_INVALID;
}
}
@@ -1221,7 +1199,7 @@ static int has_dir_name(struct index_state *istate,
*/
}
- pos = index_name_stage_pos(istate, name, len, stage);
+ pos = index_name_stage_pos(istate, name, len, stage, EXPAND_SPARSE);
if (pos >= 0) {
/*
* Found one, but not so fast. This could
@@ -1310,9 +1288,6 @@ static int add_index_entry_with_check(struct index_state *istate, struct cache_e
int skip_df_check = option & ADD_CACHE_SKIP_DFCHECK;
int new_only = option & ADD_CACHE_NEW_ONLY;
- if (!(option & ADD_CACHE_KEEP_CACHE_TREE))
- cache_tree_invalidate_path(istate, ce->name);
-
/*
* If this entry's path sorts after the last entry in the index,
* we can avoid searching for it.
@@ -1321,7 +1296,14 @@ static int add_index_entry_with_check(struct index_state *istate, struct cache_e
strcmp(ce->name, istate->cache[istate->cache_nr - 1]->name) > 0)
pos = index_pos_to_insert_pos(istate->cache_nr);
else
- pos = index_name_stage_pos(istate, ce->name, ce_namelen(ce), ce_stage(ce));
+ pos = index_name_stage_pos(istate, ce->name, ce_namelen(ce), ce_stage(ce), EXPAND_SPARSE);
+
+ /*
+ * Cache tree path should be invalidated only after index_name_stage_pos,
+ * in case it expands a sparse index.
+ */
+ if (!(option & ADD_CACHE_KEEP_CACHE_TREE))
+ cache_tree_invalidate_path(istate, ce->name);
/* existing match? Just replace it. */
if (pos >= 0) {
@@ -1348,7 +1330,7 @@ static int add_index_entry_with_check(struct index_state *istate, struct cache_e
if (!ok_to_add)
return -1;
- if (!verify_path(ce->name, ce->ce_mode))
+ if (verify_path_internal(ce->name, ce->ce_mode) == PATH_INVALID)
return error(_("invalid path '%s'"), ce->name);
if (!skip_df_check &&
@@ -1356,7 +1338,7 @@ static int add_index_entry_with_check(struct index_state *istate, struct cache_e
if (!ok_to_replace)
return error(_("'%s' appears as both a file and as a directory"),
ce->name);
- pos = index_name_stage_pos(istate, ce->name, ce_namelen(ce), ce_stage(ce));
+ pos = index_name_stage_pos(istate, ce->name, ce_namelen(ce), ce_stage(ce), EXPAND_SPARSE);
pos = -pos-1;
}
return pos + 1;
@@ -1585,8 +1567,7 @@ int refresh_index(struct index_state *istate, unsigned int flags,
*/
preload_index(istate, pathspec, 0);
trace2_region_enter("index", "refresh", NULL);
- /* TODO: audit for interaction with sparse-index. */
- ensure_full_index(istate);
+
for (i = 0; i < istate->cache_nr; i++) {
struct cache_entry *ce, *new_entry;
int cache_errno = 0;
@@ -1601,6 +1582,13 @@ int refresh_index(struct index_state *istate, unsigned int flags,
if (ignore_skip_worktree && ce_skip_worktree(ce))
continue;
+ /*
+ * If this entry is a sparse directory, then there isn't
+ * any stat() information to update. Ignore the entry.
+ */
+ if (S_ISSPARSEDIR(ce->ce_mode))
+ continue;
+
if (pathspec && !ce_path_match(istate, ce, pathspec, seen))
filtered = 1;
@@ -1628,8 +1616,7 @@ int refresh_index(struct index_state *istate, unsigned int flags,
t2_sum_scan += t2_did_scan;
if (new_entry == ce)
continue;
- if (progress)
- display_progress(progress, i);
+ display_progress(progress, i);
if (!new_entry) {
const char *fmt;
@@ -1664,10 +1651,8 @@ int refresh_index(struct index_state *istate, unsigned int flags,
trace2_data_intmax("index", NULL, "refresh/sum_lstat", t2_sum_lstat);
trace2_data_intmax("index", NULL, "refresh/sum_scan", t2_sum_scan);
trace2_region_leave("index", "refresh", NULL);
- if (progress) {
- display_progress(progress, istate->cache_nr);
- stop_progress(&progress);
- }
+ display_progress(progress, istate->cache_nr);
+ stop_progress(&progress);
trace_performance_leave("refresh index");
return has_errors;
}
@@ -1762,6 +1747,8 @@ static int verify_hdr(const struct cache_header *hdr, unsigned long size)
git_hash_ctx c;
unsigned char hash[GIT_MAX_RAWSZ];
int hdr_version;
+ unsigned char *start, *end;
+ struct object_id oid;
if (hdr->hdr_signature != htonl(CACHE_SIGNATURE))
return error(_("bad signature 0x%08x"), hdr->hdr_signature);
@@ -1772,10 +1759,16 @@ static int verify_hdr(const struct cache_header *hdr, unsigned long size)
if (!verify_index_checksum)
return 0;
+ end = (unsigned char *)hdr + size;
+ start = end - the_hash_algo->rawsz;
+ oidread(&oid, start);
+ if (oideq(&oid, null_oid()))
+ return 0;
+
the_hash_algo->init_fn(&c);
the_hash_algo->update_fn(&c, hdr, size - the_hash_algo->rawsz);
the_hash_algo->final_fn(hash, &c);
- if (!hasheq(hash, (unsigned char *)hdr + size - the_hash_algo->rawsz))
+ if (!hasheq(hash, start))
return error(_("bad index file sha1 signature"));
return 0;
}
@@ -1806,7 +1799,7 @@ static int read_index_extension(struct index_state *istate,
break;
case CACHE_EXT_SPARSE_DIRECTORIES:
/* no content, only an indicator */
- istate->sparse_index = 1;
+ istate->sparse_index = INDEX_COLLAPSED;
break;
default:
if (*ext < 'A' || 'Z' < *ext)
@@ -1818,9 +1811,20 @@ static int read_index_extension(struct index_state *istate,
return 0;
}
+/*
+ * Parses the contents of the cache entry contained within the 'ondisk' buffer
+ * into a new incore 'cache_entry'.
+ *
+ * Note that 'char *ondisk' may not be aligned to a 4-byte address interval in
+ * index v4, so we cannot cast it to 'struct ondisk_cache_entry *' and access
+ * its members. Instead, we use the byte offsets of members within the struct to
+ * identify where 'get_be16()', 'get_be32()', and 'oidread()' (which can all
+ * read from an unaligned memory buffer) should read from the 'ondisk' buffer
+ * into the corresponding incore 'cache_entry' members.
+ */
static struct cache_entry *create_from_disk(struct mem_pool *ce_mem_pool,
unsigned int version,
- struct ondisk_cache_entry *ondisk,
+ const char *ondisk,
unsigned long *ent_size,
const struct cache_entry *previous_ce)
{
@@ -1828,7 +1832,7 @@ static struct cache_entry *create_from_disk(struct mem_pool *ce_mem_pool,
size_t len;
const char *name;
const unsigned hashsz = the_hash_algo->rawsz;
- const uint16_t *flagsp = (const uint16_t *)(ondisk->data + hashsz);
+ const char *flagsp = ondisk + offsetof(struct ondisk_cache_entry, data) + hashsz;
unsigned int flags;
size_t copy_len = 0;
/*
@@ -1846,15 +1850,15 @@ static struct cache_entry *create_from_disk(struct mem_pool *ce_mem_pool,
if (flags & CE_EXTENDED) {
int extended_flags;
- extended_flags = get_be16(flagsp + 1) << 16;
+ extended_flags = get_be16(flagsp + sizeof(uint16_t)) << 16;
/* We do not yet understand any bit out of CE_EXTENDED_FLAGS */
if (extended_flags & ~CE_EXTENDED_FLAGS)
die(_("unknown index entry format 0x%08x"), extended_flags);
flags |= extended_flags;
- name = (const char *)(flagsp + 2);
+ name = (const char *)(flagsp + 2 * sizeof(uint16_t));
}
else
- name = (const char *)(flagsp + 1);
+ name = (const char *)(flagsp + sizeof(uint16_t));
if (expand_name_field) {
const unsigned char *cp = (const unsigned char *)name;
@@ -1880,22 +1884,32 @@ static struct cache_entry *create_from_disk(struct mem_pool *ce_mem_pool,
ce = mem_pool__ce_alloc(ce_mem_pool, len);
- ce->ce_stat_data.sd_ctime.sec = get_be32(&ondisk->ctime.sec);
- ce->ce_stat_data.sd_mtime.sec = get_be32(&ondisk->mtime.sec);
- ce->ce_stat_data.sd_ctime.nsec = get_be32(&ondisk->ctime.nsec);
- ce->ce_stat_data.sd_mtime.nsec = get_be32(&ondisk->mtime.nsec);
- ce->ce_stat_data.sd_dev = get_be32(&ondisk->dev);
- ce->ce_stat_data.sd_ino = get_be32(&ondisk->ino);
- ce->ce_mode = get_be32(&ondisk->mode);
- ce->ce_stat_data.sd_uid = get_be32(&ondisk->uid);
- ce->ce_stat_data.sd_gid = get_be32(&ondisk->gid);
- ce->ce_stat_data.sd_size = get_be32(&ondisk->size);
+ /*
+ * NEEDSWORK: using 'offsetof()' is cumbersome and should be replaced
+ * with something more akin to 'load_bitmap_entries_v1()'s use of
+ * 'read_be16'/'read_be32'. For consistency with the corresponding
+ * ondisk entry write function ('copy_cache_entry_to_ondisk()'), this
+ * should be done at the same time as removing references to
+ * 'ondisk_cache_entry' there.
+ */
+ ce->ce_stat_data.sd_ctime.sec = get_be32(ondisk + offsetof(struct ondisk_cache_entry, ctime)
+ + offsetof(struct cache_time, sec));
+ ce->ce_stat_data.sd_mtime.sec = get_be32(ondisk + offsetof(struct ondisk_cache_entry, mtime)
+ + offsetof(struct cache_time, sec));
+ ce->ce_stat_data.sd_ctime.nsec = get_be32(ondisk + offsetof(struct ondisk_cache_entry, ctime)
+ + offsetof(struct cache_time, nsec));
+ ce->ce_stat_data.sd_mtime.nsec = get_be32(ondisk + offsetof(struct ondisk_cache_entry, mtime)
+ + offsetof(struct cache_time, nsec));
+ ce->ce_stat_data.sd_dev = get_be32(ondisk + offsetof(struct ondisk_cache_entry, dev));
+ ce->ce_stat_data.sd_ino = get_be32(ondisk + offsetof(struct ondisk_cache_entry, ino));
+ ce->ce_mode = get_be32(ondisk + offsetof(struct ondisk_cache_entry, mode));
+ ce->ce_stat_data.sd_uid = get_be32(ondisk + offsetof(struct ondisk_cache_entry, uid));
+ ce->ce_stat_data.sd_gid = get_be32(ondisk + offsetof(struct ondisk_cache_entry, gid));
+ ce->ce_stat_data.sd_size = get_be32(ondisk + offsetof(struct ondisk_cache_entry, size));
ce->ce_flags = flags & ~CE_NAMEMASK;
ce->ce_namelen = len;
ce->index = 0;
- oidread(&ce->oid, ondisk->data);
- memcpy(ce->name, name, len);
- ce->name[len] = '\0';
+ oidread(&ce->oid, (const unsigned char *)ondisk + offsetof(struct ondisk_cache_entry, data));
if (expand_name_field) {
if (copy_len)
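
The offsetof()-based reads above exist because an index v4 entry is not padded, so 'ondisk' may sit at any byte offset and must not be dereferenced through a cast to struct ondisk_cache_entry. The same pattern in isolation, as a generic sketch (struct example_rec and read_second_field() are made up for illustration):

struct example_rec {
	uint32_t first;
	uint32_t second;
};

static uint32_t read_second_field(const char *unaligned_buf)
{
	/*
	 * Casting unaligned_buf to 'struct example_rec *' and reading
	 * ->second could fault on strict-alignment hosts; get_be32()
	 * reads byte-by-byte and also converts from big-endian.
	 */
	return get_be32(unaligned_buf + offsetof(struct example_rec, second));
}
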
@@ -1940,13 +1954,22 @@ static void tweak_untracked_cache(struct index_state *istate)
prepare_repo_settings(r);
- if (r->settings.core_untracked_cache == UNTRACKED_CACHE_REMOVE) {
+ switch (r->settings.core_untracked_cache) {
+ case UNTRACKED_CACHE_REMOVE:
remove_untracked_cache(istate);
- return;
- }
-
- if (r->settings.core_untracked_cache == UNTRACKED_CACHE_WRITE)
+ break;
+ case UNTRACKED_CACHE_WRITE:
add_untracked_cache(istate);
+ break;
+ case UNTRACKED_CACHE_KEEP:
+ /*
+ * Either an explicit "core.untrackedCache=keep", the
+ * default if "core.untrackedCache" isn't configured,
+ * or a fallback on an unknown "core.untrackedCache"
+ * value.
+ */
+ break;
+ }
}
static void tweak_split_index(struct index_state *istate)
@@ -2055,12 +2078,12 @@ static unsigned long load_cache_entry_block(struct index_state *istate,
unsigned long src_offset = start_offset;
for (i = offset; i < offset + nr; i++) {
- struct ondisk_cache_entry *disk_ce;
struct cache_entry *ce;
unsigned long consumed;
- disk_ce = (struct ondisk_cache_entry *)(mmap + src_offset);
- ce = create_from_disk(ce_mem_pool, istate->version, disk_ce, &consumed, previous_ce);
+ ce = create_from_disk(ce_mem_pool, istate->version,
+ mmap + src_offset,
+ &consumed, previous_ce);
set_index_entry(istate, i, ce);
src_offset += consumed;
@@ -2201,6 +2224,18 @@ static unsigned long load_cache_entries_threaded(struct index_state *istate, con
return consumed;
}
+static void set_new_index_sparsity(struct index_state *istate)
+{
+ /*
+ * If the index's repo exists, mark it sparse according to
+ * repo settings.
+ */
+ prepare_repo_settings(istate->repo);
+ if (!istate->repo->settings.command_requires_full_index &&
+ is_sparse_index_allowed(istate, 0))
+ istate->sparse_index = 1;
+}
+
/* remember to discard_cache() before reading a different cache! */
int do_read_index(struct index_state *istate, const char *path, int must_exist)
{
@@ -2222,8 +2257,11 @@ int do_read_index(struct index_state *istate, const char *path, int must_exist)
istate->timestamp.nsec = 0;
fd = open(path, O_RDONLY);
if (fd < 0) {
- if (!must_exist && errno == ENOENT)
+ if (!must_exist && errno == ENOENT) {
+ set_new_index_sparsity(istate);
+ istate->initialized = 1;
return 0;
+ }
die_errno(_("%s: index file open failed"), path);
}
@@ -2236,7 +2274,8 @@ int do_read_index(struct index_state *istate, const char *path, int must_exist)
mmap = xmmap_gently(NULL, mmap_size, PROT_READ, MAP_PRIVATE, fd, 0);
if (mmap == MAP_FAILED)
- die_errno(_("%s: unable to map index file"), path);
+ die_errno(_("%s: unable to map index file%s"), path,
+ mmap_os_err());
close(fd);
hdr = (const struct cache_header *)mmap;
@@ -2321,11 +2360,16 @@ int do_read_index(struct index_state *istate, const char *path, int must_exist)
trace2_data_intmax("index", the_repository, "read/cache_nr",
istate->cache_nr);
- if (!istate->repo)
- istate->repo = the_repository;
+ /*
+ * If the command explicitly requires a full index, force it
+ * to be full. Otherwise, correct the sparsity based on repository
+ * settings and other properties of the index (if necessary).
+ */
prepare_repo_settings(istate->repo);
if (istate->repo->settings.command_requires_full_index)
ensure_full_index(istate);
+ else
+ ensure_correct_sparsity(istate);
return istate->cache_nr;
@@ -2378,17 +2422,32 @@ int read_index_from(struct index_state *istate, const char *path,
trace_performance_enter();
if (split_index->base)
- discard_index(split_index->base);
+ release_index(split_index->base);
else
- CALLOC_ARRAY(split_index->base, 1);
+ ALLOC_ARRAY(split_index->base, 1);
+ index_state_init(split_index->base, istate->repo);
base_oid_hex = oid_to_hex(&split_index->base_oid);
base_path = xstrfmt("%s/sharedindex.%s", gitdir, base_oid_hex);
- trace2_region_enter_printf("index", "shared/do_read_index",
- the_repository, "%s", base_path);
- ret = do_read_index(split_index->base, base_path, 1);
- trace2_region_leave_printf("index", "shared/do_read_index",
- the_repository, "%s", base_path);
+ if (file_exists(base_path)) {
+ trace2_region_enter_printf("index", "shared/do_read_index",
+ the_repository, "%s", base_path);
+
+ ret = do_read_index(split_index->base, base_path, 0);
+ trace2_region_leave_printf("index", "shared/do_read_index",
+ the_repository, "%s", base_path);
+ } else {
+ char *path_copy = xstrdup(path);
+ char *base_path2 = xstrfmt("%s/sharedindex.%s",
+ dirname(path_copy), base_oid_hex);
+ free(path_copy);
+ trace2_region_enter_printf("index", "shared/do_read_index",
+ the_repository, "%s", base_path2);
+ ret = do_read_index(split_index->base, base_path2, 1);
+ trace2_region_leave_printf("index", "shared/do_read_index",
+ the_repository, "%s", base_path2);
+ free(base_path2);
+ }
if (!oideq(&split_index->base_oid, &split_index->base->oid))
die(_("broken index, expect %s in %s, got %s"),
base_oid_hex, base_path,
@@ -2407,7 +2466,13 @@ int is_index_unborn(struct index_state *istate)
return (!istate->cache_nr && !istate->timestamp.sec);
}
-int discard_index(struct index_state *istate)
+void index_state_init(struct index_state *istate, struct repository *r)
+{
+ struct index_state blank = INDEX_STATE_INIT(r);
+ memcpy(istate, &blank, sizeof(*istate));
+}
+
+void release_index(struct index_state *istate)
{
/*
* Cache entries in istate->cache[] should have been allocated
@@ -2419,27 +2484,28 @@ int discard_index(struct index_state *istate)
validate_cache_entries(istate);
resolve_undo_clear_index(istate);
- istate->cache_nr = 0;
- istate->cache_changed = 0;
- istate->timestamp.sec = 0;
- istate->timestamp.nsec = 0;
free_name_hash(istate);
cache_tree_free(&(istate->cache_tree));
- istate->initialized = 0;
- istate->fsmonitor_has_run_once = 0;
- FREE_AND_NULL(istate->fsmonitor_last_update);
- FREE_AND_NULL(istate->cache);
- istate->cache_alloc = 0;
+ free(istate->fsmonitor_last_update);
+ free(istate->cache);
discard_split_index(istate);
free_untracked_cache(istate->untracked);
- istate->untracked = NULL;
+
+ if (istate->sparse_checkout_patterns) {
+ clear_pattern_list(istate->sparse_checkout_patterns);
+ FREE_AND_NULL(istate->sparse_checkout_patterns);
+ }
if (istate->ce_mem_pool) {
mem_pool_discard(istate->ce_mem_pool, should_validate_cache_entries());
FREE_AND_NULL(istate->ce_mem_pool);
}
+}
- return 0;
+void discard_index(struct index_state *istate)
+{
+ release_index(istate);
+ index_state_init(istate, istate->repo);
}
/*
@@ -2494,13 +2560,14 @@ int repo_index_has_changes(struct repository *repo,
if (tree)
cmp = tree->object.oid;
- if (tree || !get_oid_tree("HEAD", &cmp)) {
+ if (tree || !repo_get_oid_tree(repo, "HEAD", &cmp)) {
struct diff_options opt;
repo_diff_setup(repo, &opt);
opt.flags.exit_with_status = 1;
if (!sb)
opt.flags.quick = 1;
+ diff_setup_done(&opt);
do_diff_cache(&cmp, &opt);
diffcore_std(&opt);
for (i = 0; sb && i < diff_queued_diff.nr; i++) {
@@ -2712,7 +2779,7 @@ static int repo_verify_index(struct repository *repo)
return verify_index_from(repo->index, repo->index_file);
}
-static int has_racy_timestamp(struct index_state *istate)
+int has_racy_timestamp(struct index_state *istate)
{
int entries = istate->cache_nr;
int i;
@@ -2766,6 +2833,16 @@ static int record_ieot(void)
return !git_config_get_index_threads(&val) && val != 1;
}
+enum write_extensions {
+ WRITE_NO_EXTENSION = 0,
+ WRITE_SPLIT_INDEX_EXTENSION = 1<<0,
+ WRITE_CACHE_TREE_EXTENSION = 1<<1,
+ WRITE_RESOLVE_UNDO_EXTENSION = 1<<2,
+ WRITE_UNTRACKED_CACHE_EXTENSION = 1<<3,
+ WRITE_FSMONITOR_EXTENSION = 1<<4,
+};
+#define WRITE_ALL_EXTENSIONS ((enum write_extensions)-1)
+
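
The write_extensions bits replace the old all-or-nothing 'strip_extensions' flag, so a caller can suppress a single extension; WRITE_ALL_EXTENSIONS casts -1 to turn every bit on, which is why later hunks can pass ~WRITE_SPLIT_INDEX_EXTENSION to write everything except the link extension. Roughly, as a sketch of the intended combination:

static enum write_extensions all_but_split_index(void)
{
	/* Every bit set, minus the split-index (link) extension. */
	return WRITE_ALL_EXTENSIONS & ~WRITE_SPLIT_INDEX_EXTENSION;
}

do_write_index() then gates each extension on its bit, e.g. 'write_extensions & WRITE_CACHE_TREE_EXTENSION', as the hunks below show.
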
/*
* On success, `tempfile` is closed. If it is the temporary file
* of a `struct lock_file`, we will therefore effectively perform
@@ -2774,7 +2851,7 @@ static int record_ieot(void)
* rely on it.
*/
static int do_write_index(struct index_state *istate, struct tempfile *tempfile,
- int strip_extensions)
+ enum write_extensions write_extensions, unsigned flags)
{
uint64_t start = getnanotime();
struct hashfile *f;
@@ -2788,12 +2865,17 @@ static int do_write_index(struct index_state *istate, struct tempfile *tempfile,
struct strbuf previous_name_buf = STRBUF_INIT, *previous_name;
int drop_cache_tree = istate->drop_cache_tree;
off_t offset;
+ int csum_fsync_flag;
int ieot_entries = 1;
struct index_entry_offset_table *ieot = NULL;
int nr, nr_threads;
+ struct repository *r = istate->repo;
f = hashfd(tempfile->fd, tempfile->filename.buf);
+ prepare_repo_settings(r);
+ f->skip_hash = r->settings.index_skip_hash;
+
for (i = removed = extended = 0; i < entries; i++) {
if (cache[i]->ce_flags & CE_REMOVE)
removed++;
@@ -2806,11 +2888,8 @@ static int do_write_index(struct index_state *istate, struct tempfile *tempfile,
}
}
- if (!istate->version) {
- istate->version = get_index_format_default(the_repository);
- if (git_env_bool("GIT_TEST_SPLIT_INDEX", 0))
- init_split_index(istate);
- }
+ if (!istate->version)
+ istate->version = get_index_format_default(r);
/* demote version 3 to version 2 when the latter suffices */
if (istate->version == 3 || istate->version == 2)
@@ -2945,10 +3024,13 @@ static int do_write_index(struct index_state *istate, struct tempfile *tempfile,
return -1;
}
- if (!strip_extensions && istate->split_index &&
- !is_null_oid(&istate->split_index->base_oid)) {
+ if (write_extensions & WRITE_SPLIT_INDEX_EXTENSION &&
+ istate->split_index) {
struct strbuf sb = STRBUF_INIT;
+ if (istate->sparse_index)
+ die(_("cannot write split index for a sparse index"));
+
err = write_link_extension(&sb, istate) < 0 ||
write_index_ext_header(f, eoie_c, CACHE_EXT_LINK,
sb.len) < 0;
@@ -2957,7 +3039,8 @@ static int do_write_index(struct index_state *istate, struct tempfile *tempfile,
if (err)
return -1;
}
- if (!strip_extensions && !drop_cache_tree && istate->cache_tree) {
+ if (write_extensions & WRITE_CACHE_TREE_EXTENSION &&
+ !drop_cache_tree && istate->cache_tree) {
struct strbuf sb = STRBUF_INIT;
cache_tree_write(&sb, istate->cache_tree);
@@ -2967,7 +3050,8 @@ static int do_write_index(struct index_state *istate, struct tempfile *tempfile,
if (err)
return -1;
}
- if (!strip_extensions && istate->resolve_undo) {
+ if (write_extensions & WRITE_RESOLVE_UNDO_EXTENSION &&
+ istate->resolve_undo) {
struct strbuf sb = STRBUF_INIT;
resolve_undo_write(&sb, istate->resolve_undo);
@@ -2978,7 +3062,8 @@ static int do_write_index(struct index_state *istate, struct tempfile *tempfile,
if (err)
return -1;
}
- if (!strip_extensions && istate->untracked) {
+ if (write_extensions & WRITE_UNTRACKED_CACHE_EXTENSION &&
+ istate->untracked) {
struct strbuf sb = STRBUF_INIT;
write_untracked_extension(&sb, istate->untracked);
@@ -2989,7 +3074,8 @@ static int do_write_index(struct index_state *istate, struct tempfile *tempfile,
if (err)
return -1;
}
- if (!strip_extensions && istate->fsmonitor_last_update) {
+ if (write_extensions & WRITE_FSMONITOR_EXTENSION &&
+ istate->fsmonitor_last_update) {
struct strbuf sb = STRBUF_INIT;
write_fsmonitor_extension(&sb, istate);
@@ -3021,7 +3107,13 @@ static int do_write_index(struct index_state *istate, struct tempfile *tempfile,
return -1;
}
- finalize_hashfile(f, istate->oid.hash, CSUM_HASH_IN_STREAM);
+ csum_fsync_flag = 0;
+ if (!alternate_index_output && (flags & COMMIT_LOCK))
+ csum_fsync_flag = CSUM_FSYNC;
+
+ finalize_hashfile(f, istate->oid.hash, FSYNC_COMPONENT_INDEX,
+ CSUM_HASH_IN_STREAM | csum_fsync_flag);
+
if (close_tempfile_gently(tempfile)) {
error(_("could not close '%s'"), get_tempfile_path(tempfile));
return -1;
@@ -3057,13 +3149,15 @@ static int commit_locked_index(struct lock_file *lk)
return commit_lock_file(lk);
}
-static int do_write_locked_index(struct index_state *istate, struct lock_file *lock,
- unsigned flags)
+static int do_write_locked_index(struct index_state *istate,
+ struct lock_file *lock,
+ unsigned flags,
+ enum write_extensions write_extensions)
{
int ret;
- int was_full = !istate->sparse_index;
+ int was_full = istate->sparse_index == INDEX_EXPANDED;
- ret = convert_to_sparse(istate);
+ ret = convert_to_sparse(istate, 0);
if (ret) {
warning(_("failed to convert to a sparse-index"));
@@ -3076,7 +3170,7 @@ static int do_write_locked_index(struct index_state *istate, struct lock_file *l
*/
trace2_region_enter_printf("index", "do_write_index", the_repository,
"%s", get_lock_file_path(lock));
- ret = do_write_index(istate, lock->tempfile, 0);
+ ret = do_write_index(istate, lock->tempfile, write_extensions, flags);
trace2_region_leave_printf("index", "do_write_index", the_repository,
"%s", get_lock_file_path(lock));
@@ -3090,7 +3184,7 @@ static int do_write_locked_index(struct index_state *istate, struct lock_file *l
else
ret = close_lock_file_gently(lock);
- run_hook_le(NULL, "post-index-change",
+ run_hooks_l("post-index-change",
istate->updated_workdir ? "1" : "0",
istate->updated_skipworktree ? "1" : "0", NULL);
istate->updated_workdir = 0;
@@ -3105,7 +3199,7 @@ static int write_split_index(struct index_state *istate,
{
int ret;
prepare_to_write_split_index(istate);
- ret = do_write_locked_index(istate, lock, flags);
+ ret = do_write_locked_index(istate, lock, flags, WRITE_ALL_EXTENSIONS);
finish_writing_split_index(istate);
return ret;
}
@@ -3170,17 +3264,17 @@ static int clean_shared_index_files(const char *current_hex)
}
static int write_shared_index(struct index_state *istate,
- struct tempfile **temp)
+ struct tempfile **temp, unsigned flags)
{
struct split_index *si = istate->split_index;
int ret, was_full = !istate->sparse_index;
move_cache_to_base_index(istate);
- convert_to_sparse(istate);
+ convert_to_sparse(istate, 0);
trace2_region_enter_printf("index", "shared/do_write_index",
the_repository, "%s", get_tempfile_path(*temp));
- ret = do_write_index(si->base, *temp, 1);
+ ret = do_write_index(si->base, *temp, WRITE_NO_EXTENSION, flags);
trace2_region_leave_printf("index", "shared/do_write_index",
the_repository, "%s", get_tempfile_path(*temp));
@@ -3237,7 +3331,7 @@ static int too_many_not_shared_entries(struct index_state *istate)
int write_locked_index(struct index_state *istate, struct lock_file *lock,
unsigned flags)
{
- int new_shared_index, ret;
+ int new_shared_index, ret, test_split_index_env;
struct split_index *si = istate->split_index;
if (git_env_bool("GIT_TEST_CHECK_CACHE_TREE", 0))
@@ -3252,18 +3346,25 @@ int write_locked_index(struct index_state *istate, struct lock_file *lock,
if (istate->fsmonitor_last_update)
fill_fsmonitor_bitmap(istate);
- if (!si || alternate_index_output ||
+ test_split_index_env = git_env_bool("GIT_TEST_SPLIT_INDEX", 0);
+
+ if ((!si && !test_split_index_env) ||
+ alternate_index_output ||
(istate->cache_changed & ~EXTMASK)) {
- if (si)
- oidclr(&si->base_oid);
- ret = do_write_locked_index(istate, lock, flags);
+ ret = do_write_locked_index(istate, lock, flags,
+ ~WRITE_SPLIT_INDEX_EXTENSION);
goto out;
}
- if (git_env_bool("GIT_TEST_SPLIT_INDEX", 0)) {
- int v = si->base_oid.hash[0];
- if ((v & 15) < 6)
+ if (test_split_index_env) {
+ if (!si) {
+ si = init_split_index(istate);
istate->cache_changed |= SPLIT_INDEX_ORDERED;
+ } else {
+ int v = si->base_oid.hash[0];
+ if ((v & 15) < 6)
+ istate->cache_changed |= SPLIT_INDEX_ORDERED;
+ }
}
if (too_many_not_shared_entries(istate))
istate->cache_changed |= SPLIT_INDEX_ORDERED;
@@ -3277,11 +3378,11 @@ int write_locked_index(struct index_state *istate, struct lock_file *lock,
/* Same initial permissions as the main .git/index file */
temp = mks_tempfile_sm(git_path("sharedindex_XXXXXX"), 0, 0666);
if (!temp) {
- oidclr(&si->base_oid);
- ret = do_write_locked_index(istate, lock, flags);
+ ret = do_write_locked_index(istate, lock, flags,
+ ~WRITE_SPLIT_INDEX_EXTENSION);
goto out;
}
- ret = write_shared_index(istate, &temp);
+ ret = write_shared_index(istate, &temp, flags);
saved_errno = errno;
if (is_tempfile_active(temp))
@@ -3399,7 +3500,8 @@ void *read_blob_data_from_index(struct index_state *istate,
}
if (pos < 0)
return NULL;
- data = read_object_file(&istate->cache[pos]->oid, &type, &sz);
+ data = repo_read_object_file(the_repository, &istate->cache[pos]->oid,
+ &type, &sz);
if (!data || type != OBJ_BLOB) {
free(data);
return NULL;
@@ -3409,35 +3511,6 @@ void *read_blob_data_from_index(struct index_state *istate,
return data;
}
-void stat_validity_clear(struct stat_validity *sv)
-{
- FREE_AND_NULL(sv->sd);
-}
-
-int stat_validity_check(struct stat_validity *sv, const char *path)
-{
- struct stat st;
-
- if (stat(path, &st) < 0)
- return sv->sd == NULL;
- if (!sv->sd)
- return 0;
- return S_ISREG(st.st_mode) && !match_stat_data(sv->sd, &st);
-}
-
-void stat_validity_update(struct stat_validity *sv, int fd)
-{
- struct stat st;
-
- if (fstat(fd, &st) < 0 || !S_ISREG(st.st_mode))
- stat_validity_clear(sv);
- else {
- if (!sv->sd)
- CALLOC_ARRAY(sv->sd, 1);
- fill_stat_data(sv->sd, &st);
- }
-}
-
void move_index_extensions(struct index_state *dst, struct index_state *src)
{
dst->untracked = src->untracked;
@@ -3659,3 +3732,264 @@ static void write_ieot_extension(struct strbuf *sb, struct index_entry_offset_ta
strbuf_add(sb, &buffer, sizeof(uint32_t));
}
}
+
+void prefetch_cache_entries(const struct index_state *istate,
+ must_prefetch_predicate must_prefetch)
+{
+ int i;
+ struct oid_array to_fetch = OID_ARRAY_INIT;
+
+ for (i = 0; i < istate->cache_nr; i++) {
+ struct cache_entry *ce = istate->cache[i];
+
+ if (S_ISGITLINK(ce->ce_mode) || !must_prefetch(ce))
+ continue;
+ if (!oid_object_info_extended(the_repository, &ce->oid,
+ NULL,
+ OBJECT_INFO_FOR_PREFETCH))
+ continue;
+ oid_array_append(&to_fetch, &ce->oid);
+ }
+ promisor_remote_get_direct(the_repository,
+ to_fetch.oid, to_fetch.nr);
+ oid_array_clear(&to_fetch);
+}
+
+static int read_one_entry_opt(struct index_state *istate,
+ const struct object_id *oid,
+ struct strbuf *base,
+ const char *pathname,
+ unsigned mode, int opt)
+{
+ int len;
+ struct cache_entry *ce;
+
+ if (S_ISDIR(mode))
+ return READ_TREE_RECURSIVE;
+
+ len = strlen(pathname);
+ ce = make_empty_cache_entry(istate, base->len + len);
+
+ ce->ce_mode = create_ce_mode(mode);
+ ce->ce_flags = create_ce_flags(1);
+ ce->ce_namelen = base->len + len;
+ memcpy(ce->name, base->buf, base->len);
+ memcpy(ce->name + base->len, pathname, len+1);
+ oidcpy(&ce->oid, oid);
+ return add_index_entry(istate, ce, opt);
+}
+
+static int read_one_entry(const struct object_id *oid, struct strbuf *base,
+ const char *pathname, unsigned mode,
+ void *context)
+{
+ struct index_state *istate = context;
+ return read_one_entry_opt(istate, oid, base, pathname,
+ mode,
+ ADD_CACHE_OK_TO_ADD|ADD_CACHE_SKIP_DFCHECK);
+}
+
+/*
+ * This is used when the caller knows there is no existing entries at
+ * the stage that will conflict with the entry being added.
+ */
+static int read_one_entry_quick(const struct object_id *oid, struct strbuf *base,
+ const char *pathname, unsigned mode,
+ void *context)
+{
+ struct index_state *istate = context;
+ return read_one_entry_opt(istate, oid, base, pathname,
+ mode, ADD_CACHE_JUST_APPEND);
+}
+
+/*
+ * Read the tree specified with --with-tree option
+ * (typically, HEAD) into stage #1 and then
+ * squash them down to stage #0. This is used for
+ * --error-unmatch to list and check the path patterns
+ * that were given from the command line. We are not
+ * going to write this index out.
+ */
+void overlay_tree_on_index(struct index_state *istate,
+ const char *tree_name, const char *prefix)
+{
+ struct tree *tree;
+ struct object_id oid;
+ struct pathspec pathspec;
+ struct cache_entry *last_stage0 = NULL;
+ int i;
+ read_tree_fn_t fn = NULL;
+ int err;
+
+ if (repo_get_oid(the_repository, tree_name, &oid))
+ die("tree-ish %s not found.", tree_name);
+ tree = parse_tree_indirect(&oid);
+ if (!tree)
+ die("bad tree-ish %s", tree_name);
+
+ /* Hoist the unmerged entries up to stage #3 to make room */
+ /* TODO: audit for interaction with sparse-index. */
+ ensure_full_index(istate);
+ for (i = 0; i < istate->cache_nr; i++) {
+ struct cache_entry *ce = istate->cache[i];
+ if (!ce_stage(ce))
+ continue;
+ ce->ce_flags |= CE_STAGEMASK;
+ }
+
+ if (prefix) {
+ static const char *(matchbuf[1]);
+ matchbuf[0] = NULL;
+ parse_pathspec(&pathspec, PATHSPEC_ALL_MAGIC,
+ PATHSPEC_PREFER_CWD, prefix, matchbuf);
+ } else
+ memset(&pathspec, 0, sizeof(pathspec));
+
+ /*
+ * See if we have cache entry at the stage. If so,
+ * do it the original slow way, otherwise, append and then
+ * sort at the end.
+ */
+ for (i = 0; !fn && i < istate->cache_nr; i++) {
+ const struct cache_entry *ce = istate->cache[i];
+ if (ce_stage(ce) == 1)
+ fn = read_one_entry;
+ }
+
+ if (!fn)
+ fn = read_one_entry_quick;
+ err = read_tree(the_repository, tree, &pathspec, fn, istate);
+ clear_pathspec(&pathspec);
+ if (err)
+ die("unable to read tree entries %s", tree_name);
+
+ /*
+ * Sort the cache entry -- we need to nuke the cache tree, though.
+ */
+ if (fn == read_one_entry_quick) {
+ cache_tree_free(&istate->cache_tree);
+ QSORT(istate->cache, istate->cache_nr, cmp_cache_name_compare);
+ }
+
+ for (i = 0; i < istate->cache_nr; i++) {
+ struct cache_entry *ce = istate->cache[i];
+ switch (ce_stage(ce)) {
+ case 0:
+ last_stage0 = ce;
+ /* fallthru */
+ default:
+ continue;
+ case 1:
+ /*
+ * If there is stage #0 entry for this, we do not
+ * need to show it. We use CE_UPDATE bit to mark
+ * such an entry.
+ */
+ if (last_stage0 &&
+ !strcmp(last_stage0->name, ce->name))
+ ce->ce_flags |= CE_UPDATE;
+ }
+ }
+}
+
+struct update_callback_data {
+ struct index_state *index;
+ int include_sparse;
+ int flags;
+ int add_errors;
+};
+
+static int fix_unmerged_status(struct diff_filepair *p,
+ struct update_callback_data *data)
+{
+ if (p->status != DIFF_STATUS_UNMERGED)
+ return p->status;
+ if (!(data->flags & ADD_CACHE_IGNORE_REMOVAL) && !p->two->mode)
+ /*
+ * This is not an explicit add request, and the
+ * path is missing from the working tree (deleted)
+ */
+ return DIFF_STATUS_DELETED;
+ else
+ /*
+ * Either an explicit add request, or path exists
+ * in the working tree. An attempt to explicitly
+ * add a path that does not exist in the working tree
+ * will be caught as an error by the caller immediately.
+ */
+ return DIFF_STATUS_MODIFIED;
+}
+
+static void update_callback(struct diff_queue_struct *q,
+ struct diff_options *opt UNUSED, void *cbdata)
+{
+ int i;
+ struct update_callback_data *data = cbdata;
+
+ for (i = 0; i < q->nr; i++) {
+ struct diff_filepair *p = q->queue[i];
+ const char *path = p->one->path;
+
+ if (!data->include_sparse &&
+ !path_in_sparse_checkout(path, data->index))
+ continue;
+
+ switch (fix_unmerged_status(p, data)) {
+ default:
+ die(_("unexpected diff status %c"), p->status);
+ case DIFF_STATUS_MODIFIED:
+ case DIFF_STATUS_TYPE_CHANGED:
+ if (add_file_to_index(data->index, path, data->flags)) {
+ if (!(data->flags & ADD_CACHE_IGNORE_ERRORS))
+ die(_("updating files failed"));
+ data->add_errors++;
+ }
+ break;
+ case DIFF_STATUS_DELETED:
+ if (data->flags & ADD_CACHE_IGNORE_REMOVAL)
+ break;
+ if (!(data->flags & ADD_CACHE_PRETEND))
+ remove_file_from_index(data->index, path);
+ if (data->flags & (ADD_CACHE_PRETEND|ADD_CACHE_VERBOSE))
+ printf(_("remove '%s'\n"), path);
+ break;
+ }
+ }
+}
+
+int add_files_to_cache(struct repository *repo, const char *prefix,
+ const struct pathspec *pathspec, char *ps_matched,
+ int include_sparse, int flags)
+{
+ struct update_callback_data data;
+ struct rev_info rev;
+
+ memset(&data, 0, sizeof(data));
+ data.index = repo->index;
+ data.include_sparse = include_sparse;
+ data.flags = flags;
+
+ repo_init_revisions(repo, &rev, prefix);
+ setup_revisions(0, NULL, &rev, NULL);
+ if (pathspec) {
+ copy_pathspec(&rev.prune_data, pathspec);
+ rev.ps_matched = ps_matched;
+ }
+ rev.diffopt.output_format = DIFF_FORMAT_CALLBACK;
+ rev.diffopt.format_callback = update_callback;
+ rev.diffopt.format_callback_data = &data;
+ rev.diffopt.flags.override_submodule_config = 1;
+ rev.max_count = 0; /* do not compare unmerged paths with stage #2 */
+
+ /*
+ * Use an ODB transaction to optimize adding multiple objects.
+ * This function is invoked from commands other than 'add', which
+ * may not have their own transaction active.
+ */
+ begin_odb_transaction();
+ run_diff_files(&rev, DIFF_RACY_IS_MODIFIED);
+ end_odb_transaction();
+
+ release_revisions(&rev);
+ return !!data.add_errors;
+}
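
For context on the transaction comment above: add_files_to_cache() is meant to be driven from a command that holds the index lock and has read the index, and the begin/end_odb_transaction() pair lets the blob writes triggered by add_file_to_index() be flushed together. A hedged sketch of a caller, loosely modeled on 'git add -u' style updates (update_tracked() and its locking/reading order are illustrative, not part of this patch):

static int update_tracked(struct repository *repo, const char *prefix,
			  const struct pathspec *pathspec)
{
	struct lock_file lock = LOCK_INIT;
	int err;

	repo_hold_locked_index(repo, &lock, LOCK_DIE_ON_ERROR);
	if (repo_read_index(repo) < 0)
		die(_("could not read the index"));

	err = add_files_to_cache(repo, prefix, pathspec,
				 NULL /* ps_matched */,
				 0 /* include_sparse */, 0 /* flags */);

	if (write_locked_index(repo->index, &lock, COMMIT_LOCK))
		die(_("unable to write new index file"));
	return err;
}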