Diffstat (limited to 'cache-tree.c')
-rw-r--r--    cache-tree.c    145
1 file changed, 106 insertions, 39 deletions
diff --git a/cache-tree.c b/cache-tree.c
index 90919f9..387c0a3 100644
--- a/cache-tree.c
+++ b/cache-tree.c
@@ -1,12 +1,18 @@
-#include "cache.h"
+#include "git-compat-util.h"
+#include "environment.h"
+#include "hex.h"
#include "lockfile.h"
#include "tree.h"
#include "tree-walk.h"
#include "cache-tree.h"
-#include "object-store.h"
+#include "bulk-checkin.h"
+#include "object-file.h"
+#include "object-store-ll.h"
+#include "read-cache-ll.h"
#include "replace-object.h"
#include "promisor-remote.h"
-#include "sparse-index.h"
+#include "trace.h"
+#include "trace2.h"
#ifndef DEBUG_CACHE_TREE
#define DEBUG_CACHE_TREE 0
@@ -228,7 +234,7 @@ int cache_tree_fully_valid(struct cache_tree *it)
int i;
if (!it)
return 0;
- if (it->entry_count < 0 || !has_object_file(&it->oid))
+ if (it->entry_count < 0 || !repo_has_object_file(the_repository, &it->oid))
return 0;
for (i = 0; i < it->subtree_nr; i++) {
if (!cache_tree_fully_valid(it->down[i]->cache_tree))
@@ -239,7 +245,7 @@ int cache_tree_fully_valid(struct cache_tree *it)
static int must_check_existence(const struct cache_entry *ce)
{
- return !(has_promisor_remote() && ce_skip_worktree(ce));
+ return !(repo_has_promisor_remote(the_repository) && ce_skip_worktree(ce));
}
static int update_one(struct cache_tree *it,
@@ -279,7 +285,7 @@ static int update_one(struct cache_tree *it,
}
}
- if (0 <= it->entry_count && has_object_file(&it->oid))
+ if (0 <= it->entry_count && repo_has_object_file(the_repository, &it->oid))
return it->entry_count;
/*
@@ -385,7 +391,7 @@ static int update_one(struct cache_tree *it,
ce_missing_ok = mode == S_IFGITLINK || missing_ok ||
!must_check_existence(ce);
if (is_null_oid(oid) ||
- (!ce_missing_ok && !has_object_file(oid))) {
+ (!ce_missing_ok && !repo_has_object_file(the_repository, oid))) {
strbuf_release(&buffer);
if (expected_missing)
return -1;
@@ -404,7 +410,7 @@ static int update_one(struct cache_tree *it,
}
/*
- * CE_INTENT_TO_ADD entries exist on on-disk index but
+ * CE_INTENT_TO_ADD entries exist in on-disk index but
* they are not part of generated trees. Invalidate up
* to root to force cache-tree users to read elsewhere.
*/
@@ -432,16 +438,17 @@ static int update_one(struct cache_tree *it,
if (repair) {
struct object_id oid;
hash_object_file(the_hash_algo, buffer.buf, buffer.len,
- tree_type, &oid);
- if (has_object_file_with_flags(&oid, OBJECT_INFO_SKIP_FETCH_OBJECT))
+ OBJ_TREE, &oid);
+ if (repo_has_object_file_with_flags(the_repository, &oid, OBJECT_INFO_SKIP_FETCH_OBJECT))
oidcpy(&it->oid, &oid);
else
to_invalidate = 1;
} else if (dryrun) {
hash_object_file(the_hash_algo, buffer.buf, buffer.len,
- tree_type, &it->oid);
- } else if (write_object_file(buffer.buf, buffer.len, tree_type,
- &it->oid)) {
+ OBJ_TREE, &it->oid);
+ } else if (write_object_file_flags(buffer.buf, buffer.len, OBJ_TREE,
+ &it->oid, NULL, flags & WRITE_TREE_SILENT
+ ? HASH_SILENT : 0)) {
strbuf_release(&buffer);
return -1;
}
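
The hunk above distinguishes three write modes; as a reading aid, here is an annotated restatement of the same logic (identifiers as in the hunk, buffer bookkeeping unchanged), not a drop-in replacement:

    struct object_id oid;

    if (repair) {
        /* Compute the tree's name, but adopt it only if the object is
         * already present locally; OBJECT_INFO_SKIP_FETCH_OBJECT avoids
         * triggering a lazy fetch from a promisor remote just to repair. */
        hash_object_file(the_hash_algo, buffer.buf, buffer.len, OBJ_TREE, &oid);
        if (repo_has_object_file_with_flags(the_repository, &oid,
                                            OBJECT_INFO_SKIP_FETCH_OBJECT))
            oidcpy(&it->oid, &oid);
        else
            to_invalidate = 1;               /* leave this cache tree invalid */
    } else if (dryrun) {
        /* Name the tree without writing anything to the object database. */
        hash_object_file(the_hash_algo, buffer.buf, buffer.len, OBJ_TREE, &it->oid);
    } else if (write_object_file_flags(buffer.buf, buffer.len, OBJ_TREE, &it->oid, NULL,
                                       flags & WRITE_TREE_SILENT ? HASH_SILENT : 0)) {
        strbuf_release(&buffer);             /* error path: drop the serialized tree */
        return -1;
    }
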
@@ -468,13 +475,15 @@ int cache_tree_update(struct index_state *istate, int flags)
if (!istate->cache_tree)
istate->cache_tree = cache_tree();
- if (!(flags & WRITE_TREE_MISSING_OK) && has_promisor_remote())
+ if (!(flags & WRITE_TREE_MISSING_OK) && repo_has_promisor_remote(the_repository))
prefetch_cache_entries(istate, must_check_existence);
trace_performance_enter();
trace2_region_enter("cache_tree", "update", the_repository);
+ begin_odb_transaction();
i = update_one(istate->cache_tree, istate->cache, istate->cache_nr,
"", 0, &skip, flags);
+ end_odb_transaction();
trace2_region_leave("cache_tree", "update", the_repository);
trace_performance_leave("cache_tree_update");
if (i < 0)
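
The begin_odb_transaction()/end_odb_transaction() pair added in this hunk comes from bulk-checkin.h: it lets the many small tree objects produced while update_one() rebuilds the cache tree be batched, so any object-database flushing happens once at the end rather than per object. A minimal sketch of the pattern under that assumption (the helper name and the pre-serialized tree buffers are hypothetical; exact header layout varies between Git versions):

    #include "git-compat-util.h"
    #include "strbuf.h"
    #include "hash.h"
    #include "bulk-checkin.h"      /* begin_odb_transaction(), end_odb_transaction() */
    #include "object-file.h"       /* write_object_file_flags(), per this version's headers */

    /* Hypothetical helper: write several already-serialized trees in one batch. */
    static void write_trees_batched(struct strbuf *bufs, struct object_id *oids, size_t nr)
    {
        size_t i;

        begin_odb_transaction();   /* defer/batch the object writes */
        for (i = 0; i < nr; i++)
            write_object_file_flags(bufs[i].buf, bufs[i].len, OBJ_TREE,
                                    &oids[i], NULL, 0);
        end_odb_transaction();     /* flush the whole batch once */
    }
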
@@ -691,14 +700,14 @@ struct tree* write_in_core_index_as_tree(struct repository *repo) {
ret = write_index_as_tree_internal(&o, index_state, was_valid, 0, NULL);
if (ret == WRITE_TREE_UNMERGED_INDEX) {
int i;
- fprintf(stderr, "BUG: There are unmerged index entries:\n");
+ bug("there are unmerged index entries:");
for (i = 0; i < index_state->cache_nr; i++) {
const struct cache_entry *ce = index_state->cache[i];
if (ce_stage(ce))
- fprintf(stderr, "BUG: %d %.*s\n", ce_stage(ce),
- (int)ce_namelen(ce), ce->name);
+ bug("%d %.*s", ce_stage(ce),
+ (int)ce_namelen(ce), ce->name);
}
- BUG("unmerged index entries when writing inmemory index");
+ BUG("unmerged index entries when writing in-core index");
}
return lookup_tree(repo, &index_state->cache_tree->oid);
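
The fprintf(stderr, "BUG: ...") calls above become bug(), the non-fatal reporting form from Git's usage API: each bug() prints a "BUG:" line without aborting, so every offending index entry can be listed before a final BUG() terminates the process. A hedged sketch of that report-then-abort pattern (is_unmerged() and the entries array are hypothetical stand-ins):

    int i, bad = 0;

    for (i = 0; i < nr; i++) {
        if (is_unmerged(&entries[i])) {
            bug("unmerged entry: %s", entries[i].name);   /* report, keep scanning */
            bad++;
        }
    }
    if (bad)
        BUG("%d unmerged entries when writing in-core index", bad);  /* abort after reporting */
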
@@ -740,16 +749,27 @@ out:
return ret;
}
+static void prime_cache_tree_sparse_dir(struct cache_tree *it,
+ struct tree *tree)
+{
+
+ oidcpy(&it->oid, &tree->object.oid);
+ it->entry_count = 1;
+}
+
static void prime_cache_tree_rec(struct repository *r,
struct cache_tree *it,
- struct tree *tree)
+ struct tree *tree,
+ struct strbuf *tree_path)
{
struct tree_desc desc;
struct name_entry entry;
int cnt;
+ size_t base_path_len = tree_path->len;
oidcpy(&it->oid, &tree->object.oid);
- init_tree_desc(&desc, tree->buffer, tree->size);
+
+ init_tree_desc(&desc, &tree->object.oid, tree->buffer, tree->size);
cnt = 0;
while (tree_entry(&desc, &entry)) {
if (!S_ISDIR(entry.mode))
@@ -757,14 +777,39 @@ static void prime_cache_tree_rec(struct repository *r,
else {
struct cache_tree_sub *sub;
struct tree *subtree = lookup_tree(r, &entry.oid);
- if (!subtree->object.parsed)
- parse_tree(subtree);
+
+ if (parse_tree(subtree) < 0)
+ exit(128);
sub = cache_tree_sub(it, entry.path);
sub->cache_tree = cache_tree();
- prime_cache_tree_rec(r, sub->cache_tree, subtree);
+
+ /*
+ * Recursively-constructed subtree path is only needed when working
+ * in a sparse index (where it's used to determine whether the
+ * subtree is a sparse directory in the index).
+ */
+ if (r->index->sparse_index) {
+ strbuf_setlen(tree_path, base_path_len);
+ strbuf_add(tree_path, entry.path, entry.pathlen);
+ strbuf_addch(tree_path, '/');
+ }
+
+ /*
+ * If a sparse index is in use, the directory being processed may be
+ * sparse. To confirm that, we can check whether an entry with that
+ * exact name exists in the index. If it does, the created subtree
+ * should be sparse. Otherwise, cache tree expansion should continue
+ * as normal.
+ */
+ if (r->index->sparse_index &&
+ index_entry_exists(r->index, tree_path->buf, tree_path->len))
+ prime_cache_tree_sparse_dir(sub->cache_tree, subtree);
+ else
+ prime_cache_tree_rec(r, sub->cache_tree, subtree, tree_path);
cnt += sub->cache_tree->entry_count;
}
}
+
it->entry_count = cnt;
}
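
The sparse-index handling added to prime_cache_tree_rec() rests on one check: a subtree corresponds to a sparse directory if the index holds an entry whose name is exactly that path with a trailing '/'. A small sketch of that check in isolation, assuming the internal index_entry_exists() helper used above (the wrapper function itself is hypothetical):

    static int tree_dir_is_sparse(struct index_state *istate,
                                  const char *parent, const char *name)
    {
        struct strbuf buf = STRBUF_INIT;
        int found;

        /* Sparse directory entries are stored in the index with a trailing slash. */
        strbuf_addf(&buf, "%s%s/", parent, name);
        found = istate->sparse_index &&
                index_entry_exists(istate, buf.buf, buf.len);
        strbuf_release(&buf);
        return found;
    }
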
@@ -772,13 +817,16 @@ void prime_cache_tree(struct repository *r,
struct index_state *istate,
struct tree *tree)
{
- trace2_region_enter("cache-tree", "prime_cache_tree", the_repository);
+ struct strbuf tree_path = STRBUF_INIT;
+
+ trace2_region_enter("cache-tree", "prime_cache_tree", r);
cache_tree_free(&istate->cache_tree);
istate->cache_tree = cache_tree();
- prime_cache_tree_rec(r, istate->cache_tree, tree);
+ prime_cache_tree_rec(r, istate->cache_tree, tree, &tree_path);
+ strbuf_release(&tree_path);
istate->cache_changed |= CACHE_TREE_CHANGED;
- trace2_region_leave("cache-tree", "prime_cache_tree", the_repository);
+ trace2_region_leave("cache-tree", "prime_cache_tree", r);
}
/*
@@ -813,9 +861,7 @@ int cache_tree_matches_traversal(struct cache_tree *root,
return 0;
}
-static void verify_one_sparse(struct repository *r,
- struct index_state *istate,
- struct cache_tree *it,
+static void verify_one_sparse(struct index_state *istate,
struct strbuf *path,
int pos)
{
@@ -826,10 +872,17 @@ static void verify_one_sparse(struct repository *r,
path->buf);
}
-static void verify_one(struct repository *r,
- struct index_state *istate,
- struct cache_tree *it,
- struct strbuf *path)
+/*
+ * Returns:
+ * 0 - Verification completed.
+ * 1 - Restart verification - a call to ensure_full_index() freed the cache
+ * tree that is being verified and verification needs to be restarted from
+ * the new toplevel cache tree.
+ */
+static int verify_one(struct repository *r,
+ struct index_state *istate,
+ struct cache_tree *it,
+ struct strbuf *path)
{
int i, pos, len = path->len;
struct strbuf tree_buf = STRBUF_INIT;
@@ -837,21 +890,30 @@ static void verify_one(struct repository *r,
for (i = 0; i < it->subtree_nr; i++) {
strbuf_addf(path, "%s/", it->down[i]->name);
- verify_one(r, istate, it->down[i]->cache_tree, path);
+ if (verify_one(r, istate, it->down[i]->cache_tree, path))
+ return 1;
strbuf_setlen(path, len);
}
if (it->entry_count < 0 ||
/* no verification on tests (t7003) that replace trees */
lookup_replace_object(r, &it->oid) != &it->oid)
- return;
+ return 0;
if (path->len) {
+ /*
+ * If the index is sparse and the cache tree is not,
+ * index_name_pos() may trigger ensure_full_index(), which will
+ * free the tree that is being verified.
+ */
+ int is_sparse = istate->sparse_index;
pos = index_name_pos(istate, path->buf, path->len);
+ if (is_sparse && !istate->sparse_index)
+ return 1;
if (pos >= 0) {
- verify_one_sparse(r, istate, it, path, pos);
- return;
+ verify_one_sparse(istate, path, pos);
+ return 0;
}
pos = -pos - 1;
@@ -891,7 +953,7 @@ static void verify_one(struct repository *r,
strbuf_addf(&tree_buf, "%o %.*s%c", mode, entlen, name, '\0');
strbuf_add(&tree_buf, oid->hash, r->hash_algo->rawsz);
}
- hash_object_file(r->hash_algo, tree_buf.buf, tree_buf.len, tree_type,
+ hash_object_file(r->hash_algo, tree_buf.buf, tree_buf.len, OBJ_TREE,
&new_oid);
if (!oideq(&new_oid, &it->oid))
BUG("cache-tree for path %.*s does not match. "
@@ -899,6 +961,7 @@ static void verify_one(struct repository *r,
oid_to_hex(&new_oid), oid_to_hex(&it->oid));
strbuf_setlen(path, len);
strbuf_release(&tree_buf);
+ return 0;
}
void cache_tree_verify(struct repository *r, struct index_state *istate)
@@ -907,6 +970,10 @@ void cache_tree_verify(struct repository *r, struct index_state *istate)
if (!istate->cache_tree)
return;
- verify_one(r, istate, istate->cache_tree, &path);
+ if (verify_one(r, istate, istate->cache_tree, &path)) {
+ strbuf_reset(&path);
+ if (verify_one(r, istate, istate->cache_tree, &path))
+ BUG("ensure_full_index() called twice while verifying cache tree");
+ }
strbuf_release(&path);
}
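
Read together, the last two hunks implement a retry-once protocol: verify_one() notices that index_name_pos() collapsed istate->sparse_index from 1 to 0 (i.e. ensure_full_index() ran and freed the tree being walked) and returns 1, and cache_tree_verify() restarts once from the new toplevel cache tree, treating a second restart as a bug. Condensed, with the two sides shown next to each other (identifiers as in the hunks above):

    /* In verify_one(): detect that the tree being verified was freed. */
    int is_sparse = istate->sparse_index;
    pos = index_name_pos(istate, path->buf, path->len);
    if (is_sparse && !istate->sparse_index)
        return 1;                    /* caller must restart from the top */

    /* In cache_tree_verify(): retry exactly once. */
    if (verify_one(r, istate, istate->cache_tree, &path)) {
        strbuf_reset(&path);
        if (verify_one(r, istate, istate->cache_tree, &path))
            BUG("ensure_full_index() called twice while verifying cache tree");
    }
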