From 768d7965068189ec5287ac53b4b55bfd443bb456 Mon Sep 17 00:00:00 2001 From: Jameson Miller Date: Mon, 2 Jul 2018 19:49:29 +0000 Subject: read-cache: teach refresh_cache_entry to take istate Refactor refresh_cache_entry() to work on a specific index, instead of implicitly using the_index. This is in preparation for making the make_cache_entry function apply to a specific index. Signed-off-by: Jameson Miller Signed-off-by: Junio C Hamano diff --git a/cache.h b/cache.h index 89a107a..9538511 100644 --- a/cache.h +++ b/cache.h @@ -751,7 +751,7 @@ extern void fill_stat_cache_info(struct cache_entry *ce, struct stat *st); #define REFRESH_IGNORE_SUBMODULES 0x0010 /* ignore submodules */ #define REFRESH_IN_PORCELAIN 0x0020 /* user friendly output, not "needs update" */ extern int refresh_index(struct index_state *, unsigned int flags, const struct pathspec *pathspec, char *seen, const char *header_msg); -extern struct cache_entry *refresh_cache_entry(struct cache_entry *, unsigned int); +extern struct cache_entry *refresh_cache_entry(struct index_state *, struct cache_entry *, unsigned int); /* * Opportunistically update the index but do not complain if we can't. diff --git a/merge-recursive.c b/merge-recursive.c index f110e1c..11a767c 100644 --- a/merge-recursive.c +++ b/merge-recursive.c @@ -323,7 +323,7 @@ static int add_cacheinfo(struct merge_options *o, if (refresh) { struct cache_entry *nce; - nce = refresh_cache_entry(ce, CE_MATCH_REFRESH | CE_MATCH_IGNORE_MISSING); + nce = refresh_cache_entry(&the_index, ce, CE_MATCH_REFRESH | CE_MATCH_IGNORE_MISSING); if (!nce) return err(o, _("add_cacheinfo failed to refresh for path '%s'; merge aborting."), path); if (nce != ce) diff --git a/read-cache.c b/read-cache.c index 3725882..fa8366e 100644 --- a/read-cache.c +++ b/read-cache.c @@ -767,7 +767,7 @@ struct cache_entry *make_cache_entry(unsigned int mode, ce->ce_namelen = len; ce->ce_mode = create_ce_mode(mode); - ret = refresh_cache_entry(ce, refresh_options); + ret = refresh_cache_entry(&the_index, ce, refresh_options); if (ret != ce) free(ce); return ret; @@ -1473,10 +1473,11 @@ int refresh_index(struct index_state *istate, unsigned int flags, return has_errors; } -struct cache_entry *refresh_cache_entry(struct cache_entry *ce, - unsigned int options) +struct cache_entry *refresh_cache_entry(struct index_state *istate, + struct cache_entry *ce, + unsigned int options) { - return refresh_cache_ent(&the_index, ce, options, NULL, NULL); + return refresh_cache_ent(istate, ce, options, NULL, NULL); } -- cgit v0.10.2-6-g49f6 From 825ed4d9a044380ac093563e6bd74311ea4488ef Mon Sep 17 00:00:00 2001 From: Jameson Miller Date: Mon, 2 Jul 2018 19:49:30 +0000 Subject: read-cache: teach make_cache_entry to take object_id Teach make_cache_entry function to take object_id instead of a SHA-1. 
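For illustration only, a minimal sketch of the updated calling convention (the path, mode and error handling here are hypothetical, not part of the patch):

    struct object_id oid;
    struct cache_entry *ce;

    /* resolve some blob to an object_id; "HEAD:Makefile" is an arbitrary example */
    if (get_oid("HEAD:Makefile", &oid))
        die("unable to resolve object");

    /* callers now pass the object_id itself rather than the raw oid.hash bytes */
    ce = make_cache_entry(S_IFREG | 0644, &oid, "Makefile", 0, 0);
    if (!ce)
        die("make_cache_entry failed");

After this change, the hashcpy() of a raw SHA-1 inside make_cache_entry becomes an oidcpy(), as the diff below shows.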
Signed-off-by: Junio C Hamano diff --git a/apply.c b/apply.c index d79e615..5dd634c 100644 --- a/apply.c +++ b/apply.c @@ -4090,7 +4090,7 @@ static int build_fake_ancestor(struct apply_state *state, struct patch *list) return error(_("sha1 information is lacking or useless " "(%s)."), name); - ce = make_cache_entry(patch->old_mode, oid.hash, name, 0, 0); + ce = make_cache_entry(patch->old_mode, &oid, name, 0, 0); if (!ce) return error(_("make_cache_entry failed for path '%s'"), name); diff --git a/builtin/checkout.c b/builtin/checkout.c index 2e1d237..548bf40 100644 --- a/builtin/checkout.c +++ b/builtin/checkout.c @@ -230,7 +230,7 @@ static int checkout_merged(int pos, const struct checkout *state) if (write_object_file(result_buf.ptr, result_buf.size, blob_type, &oid)) die(_("Unable to add merge result for '%s'"), path); free(result_buf.ptr); - ce = make_cache_entry(mode, oid.hash, path, 2, 0); + ce = make_cache_entry(mode, &oid, path, 2, 0); if (!ce) die(_("make_cache_entry failed for path '%s'"), path); status = checkout_entry(ce, state, NULL); diff --git a/builtin/difftool.c b/builtin/difftool.c index bc97d4a..873a06f 100644 --- a/builtin/difftool.c +++ b/builtin/difftool.c @@ -321,7 +321,7 @@ static int checkout_path(unsigned mode, struct object_id *oid, struct cache_entry *ce; int ret; - ce = make_cache_entry(mode, oid->hash, path, 0, 0); + ce = make_cache_entry(mode, oid, path, 0, 0); ret = checkout_entry(ce, state, NULL); free(ce); @@ -488,7 +488,7 @@ static int run_dir_diff(const char *extcmd, int symlinks, const char *prefix, * index. */ struct cache_entry *ce2 = - make_cache_entry(rmode, roid.hash, + make_cache_entry(rmode, &roid, dst_path, 0, 0); add_index_entry(&wtindex, ce2, diff --git a/builtin/reset.c b/builtin/reset.c index a862c70..00109b0 100644 --- a/builtin/reset.c +++ b/builtin/reset.c @@ -134,7 +134,7 @@ static void update_index_from_diff(struct diff_queue_struct *q, continue; } - ce = make_cache_entry(one->mode, one->oid.hash, one->path, + ce = make_cache_entry(one->mode, &one->oid, one->path, 0, 0); if (!ce) die(_("make_cache_entry failed for path '%s'"), diff --git a/cache.h b/cache.h index 9538511..7953d52 100644 --- a/cache.h +++ b/cache.h @@ -698,7 +698,12 @@ extern int remove_file_from_index(struct index_state *, const char *path); extern int add_to_index(struct index_state *, const char *path, struct stat *, int flags); extern int add_file_to_index(struct index_state *, const char *path, int flags); -extern struct cache_entry *make_cache_entry(unsigned int mode, const unsigned char *sha1, const char *path, int stage, unsigned int refresh_options); +extern struct cache_entry *make_cache_entry(unsigned int mode, + const struct object_id *oid, + const char *path, + int stage, + unsigned int refresh_options); + extern int chmod_index_entry(struct index_state *, struct cache_entry *ce, char flip); extern int ce_same_name(const struct cache_entry *a, const struct cache_entry *b); extern void set_object_name_for_intent_to_add_entry(struct cache_entry *ce); diff --git a/merge-recursive.c b/merge-recursive.c index 11a767c..8b30cc7 100644 --- a/merge-recursive.c +++ b/merge-recursive.c @@ -315,7 +315,7 @@ static int add_cacheinfo(struct merge_options *o, struct cache_entry *ce; int ret; - ce = make_cache_entry(mode, oid ? oid->hash : null_sha1, path, stage, 0); + ce = make_cache_entry(mode, oid ? 
oid : &null_oid, path, stage, 0); if (!ce) return err(o, _("add_cacheinfo failed for path '%s'; merge aborting."), path); diff --git a/read-cache.c b/read-cache.c index fa8366e..c12664c 100644 --- a/read-cache.c +++ b/read-cache.c @@ -746,8 +746,10 @@ int add_file_to_index(struct index_state *istate, const char *path, int flags) } struct cache_entry *make_cache_entry(unsigned int mode, - const unsigned char *sha1, const char *path, int stage, - unsigned int refresh_options) + const struct object_id *oid, + const char *path, + int stage, + unsigned int refresh_options) { int size, len; struct cache_entry *ce, *ret; @@ -761,7 +763,7 @@ struct cache_entry *make_cache_entry(unsigned int mode, size = cache_entry_size(len); ce = xcalloc(1, size); - hashcpy(ce->oid.hash, sha1); + oidcpy(&ce->oid, oid); memcpy(ce->name, path, len); ce->ce_flags = create_ce_flags(stage); ce->ce_namelen = len; diff --git a/resolve-undo.c b/resolve-undo.c index fc5b3b8..4d4e5cb 100644 --- a/resolve-undo.c +++ b/resolve-undo.c @@ -146,7 +146,7 @@ int unmerge_index_entry_at(struct index_state *istate, int pos) struct cache_entry *nce; if (!ru->mode[i]) continue; - nce = make_cache_entry(ru->mode[i], ru->oid[i].hash, + nce = make_cache_entry(ru->mode[i], &ru->oid[i], name, i + 1, 0); if (matched) nce->ce_flags |= CE_MATCHED; -- cgit v0.10.2-6-g49f6 From a849735bfbf159b98ead9ef4c843dc8acfd372f0 Mon Sep 17 00:00:00 2001 From: Jameson Miller Date: Mon, 2 Jul 2018 19:49:31 +0000 Subject: block alloc: add lifecycle APIs for cache_entry structs It has been observed that the time spent loading an index with a large number of entries is partly dominated by malloc() calls. This change is in preparation for using memory pools to reduce the number of malloc() calls made to allocate cache entries when loading an index. Add an API to allocate and discard cache entries, abstracting the details of managing the memory backing the cache entries. This commit does not actually change how memory is managed - that will be done in a later commit in the series. This change makes the distinction between cache entries that are associated with an index and cache entries that are not associated with an index. A main use of cache entries is with an index, and we can optimize the memory management around this. We still have other cases where a cache entry is not persisted with an index, and so we need to handle the "transient" use case as well. To keep the cognitive overhead of managing cache entries low, there will only be a single discard function. This means there must be enough information kept with the cache entry so that we know how to discard it. A summary of the main functions in the API: make_cache_entry: create cache entry for use in an index. Uses specified parameters to populate cache_entry fields. make_empty_cache_entry: create an empty cache entry for use in an index. Returns cache entry with empty fields. make_transient_cache_entry: create cache entry that is not used in an index. Uses specified parameters to populate cache_entry fields. make_empty_transient_cache_entry: create cache entry that is not used in an index. Returns cache entry with empty fields. discard_cache_entry: a single function that knows how to discard a cache entry regardless of how it was allocated.
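To make the intended call pattern concrete, here is a sketch of a transient use (mirroring the checkout_path() conversion in builtin/difftool.c later in this series; mode, oid, path and state are assumed to be in scope as in that function):

    struct cache_entry *ce;
    int ret;

    /* transient: the entry is only handed to checkout_entry(), never added to an index */
    ce = make_transient_cache_entry(mode, oid, path, 0);
    ret = checkout_entry(ce, state, NULL);

    /* one discard function, regardless of how the entry was allocated */
    discard_cache_entry(ce);

Having a single discard_cache_entry() is what lets call sites like this stay oblivious to whether the entry came from a memory pool or the heap.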
Signed-off-by: Jameson Miller Signed-off-by: Junio C Hamano diff --git a/apply.c b/apply.c index 5dd634c..ee6c406 100644 --- a/apply.c +++ b/apply.c @@ -4090,12 +4090,12 @@ static int build_fake_ancestor(struct apply_state *state, struct patch *list) return error(_("sha1 information is lacking or useless " "(%s)."), name); - ce = make_cache_entry(patch->old_mode, &oid, name, 0, 0); + ce = make_cache_entry(&result, patch->old_mode, &oid, name, 0, 0); if (!ce) return error(_("make_cache_entry failed for path '%s'"), name); if (add_index_entry(&result, ce, ADD_CACHE_OK_TO_ADD)) { - free(ce); + discard_cache_entry(ce); return error(_("could not add %s to temporary index"), name); } @@ -4263,12 +4263,11 @@ static int add_index_file(struct apply_state *state, struct stat st; struct cache_entry *ce; int namelen = strlen(path); - unsigned ce_size = cache_entry_size(namelen); if (!state->update_index) return 0; - ce = xcalloc(1, ce_size); + ce = make_empty_cache_entry(&the_index, namelen); memcpy(ce->name, path, namelen); ce->ce_mode = create_ce_mode(mode); ce->ce_flags = create_ce_flags(0); @@ -4278,13 +4277,13 @@ static int add_index_file(struct apply_state *state, if (!skip_prefix(buf, "Subproject commit ", &s) || get_oid_hex(s, &ce->oid)) { - free(ce); - return error(_("corrupt patch for submodule %s"), path); + discard_cache_entry(ce); + return error(_("corrupt patch for submodule %s"), path); } } else { if (!state->cached) { if (lstat(path, &st) < 0) { - free(ce); + discard_cache_entry(ce); return error_errno(_("unable to stat newly " "created file '%s'"), path); @@ -4292,13 +4291,13 @@ static int add_index_file(struct apply_state *state, fill_stat_cache_info(ce, &st); } if (write_object_file(buf, size, blob_type, &ce->oid) < 0) { - free(ce); + discard_cache_entry(ce); return error(_("unable to create backing store " "for newly created file %s"), path); } } if (add_cache_entry(ce, ADD_CACHE_OK_TO_ADD) < 0) { - free(ce); + discard_cache_entry(ce); return error(_("unable to add cache entry for %s"), path); } @@ -4422,27 +4421,26 @@ static int add_conflicted_stages_file(struct apply_state *state, struct patch *patch) { int stage, namelen; - unsigned ce_size, mode; + unsigned mode; struct cache_entry *ce; if (!state->update_index) return 0; namelen = strlen(patch->new_name); - ce_size = cache_entry_size(namelen); mode = patch->new_mode ? 
patch->new_mode : (S_IFREG | 0644); remove_file_from_cache(patch->new_name); for (stage = 1; stage < 4; stage++) { if (is_null_oid(&patch->threeway_stage[stage - 1])) continue; - ce = xcalloc(1, ce_size); + ce = make_empty_cache_entry(&the_index, namelen); memcpy(ce->name, patch->new_name, namelen); ce->ce_mode = create_ce_mode(mode); ce->ce_flags = create_ce_flags(stage); ce->ce_namelen = namelen; oidcpy(&ce->oid, &patch->threeway_stage[stage - 1]); if (add_cache_entry(ce, ADD_CACHE_OK_TO_ADD) < 0) { - free(ce); + discard_cache_entry(ce); return error(_("unable to add cache entry for %s"), patch->new_name); } diff --git a/blame.c b/blame.c index 14d0e0b..4c6668d 100644 --- a/blame.c +++ b/blame.c @@ -154,7 +154,7 @@ static struct commit *fake_working_tree_commit(struct diff_options *opt, struct strbuf buf = STRBUF_INIT; const char *ident; time_t now; - int size, len; + int len; struct cache_entry *ce; unsigned mode; struct strbuf msg = STRBUF_INIT; @@ -252,8 +252,7 @@ static struct commit *fake_working_tree_commit(struct diff_options *opt, /* Let's not bother reading from HEAD tree */ mode = S_IFREG | 0644; } - size = cache_entry_size(len); - ce = xcalloc(1, size); + ce = make_empty_cache_entry(&the_index, len); oidcpy(&ce->oid, &origin->blob_oid); memcpy(ce->name, path, len); ce->ce_flags = create_ce_flags(0); diff --git a/builtin/checkout.c b/builtin/checkout.c index 548bf40..56d1e1a 100644 --- a/builtin/checkout.c +++ b/builtin/checkout.c @@ -77,7 +77,7 @@ static int update_some(const struct object_id *oid, struct strbuf *base, return READ_TREE_RECURSIVE; len = base->len + strlen(pathname); - ce = xcalloc(1, cache_entry_size(len)); + ce = make_empty_cache_entry(&the_index, len); oidcpy(&ce->oid, oid); memcpy(ce->name, base->buf, base->len); memcpy(ce->name + base->len, pathname, len - base->len); @@ -96,7 +96,7 @@ static int update_some(const struct object_id *oid, struct strbuf *base, if (ce->ce_mode == old->ce_mode && !oidcmp(&ce->oid, &old->oid)) { old->ce_flags |= CE_UPDATE; - free(ce); + discard_cache_entry(ce); return 0; } } @@ -230,11 +230,11 @@ static int checkout_merged(int pos, const struct checkout *state) if (write_object_file(result_buf.ptr, result_buf.size, blob_type, &oid)) die(_("Unable to add merge result for '%s'"), path); free(result_buf.ptr); - ce = make_cache_entry(mode, &oid, path, 2, 0); + ce = make_transient_cache_entry(mode, &oid, path, 2); if (!ce) die(_("make_cache_entry failed for path '%s'"), path); status = checkout_entry(ce, state, NULL); - free(ce); + discard_cache_entry(ce); return status; } diff --git a/builtin/difftool.c b/builtin/difftool.c index 873a06f..4593f0c 100644 --- a/builtin/difftool.c +++ b/builtin/difftool.c @@ -321,10 +321,10 @@ static int checkout_path(unsigned mode, struct object_id *oid, struct cache_entry *ce; int ret; - ce = make_cache_entry(mode, oid, path, 0, 0); + ce = make_transient_cache_entry(mode, oid, path, 0); ret = checkout_entry(ce, state, NULL); - free(ce); + discard_cache_entry(ce); return ret; } @@ -488,7 +488,7 @@ static int run_dir_diff(const char *extcmd, int symlinks, const char *prefix, * index. 
*/ struct cache_entry *ce2 = - make_cache_entry(rmode, &roid, + make_cache_entry(&wtindex, rmode, &roid, dst_path, 0, 0); add_index_entry(&wtindex, ce2, diff --git a/builtin/reset.c b/builtin/reset.c index 00109b0..c3f0cfa 100644 --- a/builtin/reset.c +++ b/builtin/reset.c @@ -134,7 +134,7 @@ static void update_index_from_diff(struct diff_queue_struct *q, continue; } - ce = make_cache_entry(one->mode, &one->oid, one->path, + ce = make_cache_entry(&the_index, one->mode, &one->oid, one->path, 0, 0); if (!ce) die(_("make_cache_entry failed for path '%s'"), diff --git a/builtin/update-index.c b/builtin/update-index.c index a8709a2..ea2f2a4 100644 --- a/builtin/update-index.c +++ b/builtin/update-index.c @@ -268,15 +268,14 @@ static int process_lstat_error(const char *path, int err) static int add_one_path(const struct cache_entry *old, const char *path, int len, struct stat *st) { - int option, size; + int option; struct cache_entry *ce; /* Was the old index entry already up-to-date? */ if (old && !ce_stage(old) && !ce_match_stat(old, st, 0)) return 0; - size = cache_entry_size(len); - ce = xcalloc(1, size); + ce = make_empty_cache_entry(&the_index, len); memcpy(ce->name, path, len); ce->ce_flags = create_ce_flags(0); ce->ce_namelen = len; @@ -285,13 +284,13 @@ static int add_one_path(const struct cache_entry *old, const char *path, int len if (index_path(&ce->oid, path, st, info_only ? 0 : HASH_WRITE_OBJECT)) { - free(ce); + discard_cache_entry(ce); return -1; } option = allow_add ? ADD_CACHE_OK_TO_ADD : 0; option |= allow_replace ? ADD_CACHE_OK_TO_REPLACE : 0; if (add_cache_entry(ce, option)) { - free(ce); + discard_cache_entry(ce); return error("%s: cannot add to the index - missing --add option?", path); } return 0; @@ -402,15 +401,14 @@ static int process_path(const char *path, struct stat *st, int stat_errno) static int add_cacheinfo(unsigned int mode, const struct object_id *oid, const char *path, int stage) { - int size, len, option; + int len, option; struct cache_entry *ce; if (!verify_path(path, mode)) return error("Invalid path '%s'", path); len = strlen(path); - size = cache_entry_size(len); - ce = xcalloc(1, size); + ce = make_empty_cache_entry(&the_index, len); oidcpy(&ce->oid, oid); memcpy(ce->name, path, len); @@ -599,7 +597,6 @@ static struct cache_entry *read_one_ent(const char *which, { unsigned mode; struct object_id oid; - int size; struct cache_entry *ce; if (get_tree_entry(ent, path, &oid, &mode)) { @@ -612,8 +609,7 @@ static struct cache_entry *read_one_ent(const char *which, error("%s: not a blob in %s branch.", path, which); return NULL; } - size = cache_entry_size(namelen); - ce = xcalloc(1, size); + ce = make_empty_cache_entry(&the_index, namelen); oidcpy(&ce->oid, &oid); memcpy(ce->name, path, namelen); @@ -690,8 +686,8 @@ static int unresolve_one(const char *path) error("%s: cannot add their version to the index.", path); ret = -1; free_return: - free(ce_2); - free(ce_3); + discard_cache_entry(ce_2); + discard_cache_entry(ce_3); return ret; } @@ -758,7 +754,7 @@ static int do_reupdate(int ac, const char **av, ce->name, ce_namelen(ce), 0); if (old && ce->ce_mode == old->ce_mode && !oidcmp(&ce->oid, &old->oid)) { - free(old); + discard_cache_entry(old); continue; /* unchanged */ } /* Be careful. 
The working tree may not have the @@ -769,7 +765,7 @@ static int do_reupdate(int ac, const char **av, path = xstrdup(ce->name); update_one(path); free(path); - free(old); + discard_cache_entry(old); if (save_nr != active_nr) goto redo; } diff --git a/cache.h b/cache.h index 7953d52..5aadaeb 100644 --- a/cache.h +++ b/cache.h @@ -339,6 +339,40 @@ extern void remove_name_hash(struct index_state *istate, struct cache_entry *ce) extern void free_name_hash(struct index_state *istate); +/* Cache entry creation and cleanup */ + +/* + * Create cache_entry intended for use in the specified index. Caller + * is responsible for discarding the cache_entry with + * `discard_cache_entry`. + */ +struct cache_entry *make_cache_entry(struct index_state *istate, + unsigned int mode, + const struct object_id *oid, + const char *path, + int stage, + unsigned int refresh_options); + +struct cache_entry *make_empty_cache_entry(struct index_state *istate, + size_t name_len); + +/* + * Create a cache_entry that is not intended to be added to an index. + * Caller is responsible for discarding the cache_entry + * with `discard_cache_entry`. + */ +struct cache_entry *make_transient_cache_entry(unsigned int mode, + const struct object_id *oid, + const char *path, + int stage); + +struct cache_entry *make_empty_transient_cache_entry(size_t name_len); + +/* + * Discard cache entry. + */ +void discard_cache_entry(struct cache_entry *ce); + #ifndef NO_THE_INDEX_COMPATIBILITY_MACROS #define active_cache (the_index.cache) #define active_nr (the_index.cache_nr) @@ -698,12 +732,6 @@ extern int remove_file_from_index(struct index_state *, const char *path); extern int add_to_index(struct index_state *, const char *path, struct stat *, int flags); extern int add_file_to_index(struct index_state *, const char *path, int flags); -extern struct cache_entry *make_cache_entry(unsigned int mode, - const struct object_id *oid, - const char *path, - int stage, - unsigned int refresh_options); - extern int chmod_index_entry(struct index_state *, struct cache_entry *ce, char flip); extern int ce_same_name(const struct cache_entry *a, const struct cache_entry *b); extern void set_object_name_for_intent_to_add_entry(struct cache_entry *ce); diff --git a/merge-recursive.c b/merge-recursive.c index 8b30cc7..a580cff 100644 --- a/merge-recursive.c +++ b/merge-recursive.c @@ -315,7 +315,7 @@ static int add_cacheinfo(struct merge_options *o, struct cache_entry *ce; int ret; - ce = make_cache_entry(mode, oid ? oid : &null_oid, path, stage, 0); + ce = make_cache_entry(&the_index, mode, oid ? 
oid : &null_oid, path, stage, 0); if (!ce) return err(o, _("add_cacheinfo failed for path '%s'; merge aborting."), path); diff --git a/read-cache.c b/read-cache.c index c12664c..41e4d0e 100644 --- a/read-cache.c +++ b/read-cache.c @@ -61,7 +61,7 @@ static void replace_index_entry(struct index_state *istate, int nr, struct cache replace_index_entry_in_base(istate, old, ce); remove_name_hash(istate, old); - free(old); + discard_cache_entry(old); ce->ce_flags &= ~CE_HASHED; set_index_entry(istate, nr, ce); ce->ce_flags |= CE_UPDATE_IN_BASE; @@ -74,7 +74,7 @@ void rename_index_entry_at(struct index_state *istate, int nr, const char *new_n struct cache_entry *old_entry = istate->cache[nr], *new_entry; int namelen = strlen(new_name); - new_entry = xmalloc(cache_entry_size(namelen)); + new_entry = make_empty_cache_entry(istate, namelen); copy_cache_entry(new_entry, old_entry); new_entry->ce_flags &= ~CE_HASHED; new_entry->ce_namelen = namelen; @@ -623,7 +623,7 @@ static struct cache_entry *create_alias_ce(struct index_state *istate, /* Ok, create the new entry using the name of the existing alias */ len = ce_namelen(alias); - new_entry = xcalloc(1, cache_entry_size(len)); + new_entry = make_empty_cache_entry(istate, len); memcpy(new_entry->name, alias->name, len); copy_cache_entry(new_entry, ce); save_or_free_index_entry(istate, ce); @@ -640,7 +640,7 @@ void set_object_name_for_intent_to_add_entry(struct cache_entry *ce) int add_to_index(struct index_state *istate, const char *path, struct stat *st, int flags) { - int size, namelen, was_same; + int namelen, was_same; mode_t st_mode = st->st_mode; struct cache_entry *ce, *alias = NULL; unsigned ce_option = CE_MATCH_IGNORE_VALID|CE_MATCH_IGNORE_SKIP_WORKTREE|CE_MATCH_RACY_IS_DIRTY; @@ -662,8 +662,7 @@ int add_to_index(struct index_state *istate, const char *path, struct stat *st, while (namelen && path[namelen-1] == '/') namelen--; } - size = cache_entry_size(namelen); - ce = xcalloc(1, size); + ce = make_empty_cache_entry(istate, namelen); memcpy(ce->name, path, namelen); ce->ce_namelen = namelen; if (!intent_only) @@ -704,13 +703,13 @@ int add_to_index(struct index_state *istate, const char *path, struct stat *st, ce_mark_uptodate(alias); alias->ce_flags |= CE_ADDED; - free(ce); + discard_cache_entry(ce); return 0; } } if (!intent_only) { if (index_path(&ce->oid, path, st, newflags)) { - free(ce); + discard_cache_entry(ce); return error("unable to index file %s", path); } } else @@ -727,9 +726,9 @@ int add_to_index(struct index_state *istate, const char *path, struct stat *st, ce->ce_mode == alias->ce_mode); if (pretend) - free(ce); + discard_cache_entry(ce); else if (add_index_entry(istate, ce, add_option)) { - free(ce); + discard_cache_entry(ce); return error("unable to add %s to index", path); } if (verbose && !was_same) @@ -745,14 +744,25 @@ int add_file_to_index(struct index_state *istate, const char *path, int flags) return add_to_index(istate, path, &st, flags); } -struct cache_entry *make_cache_entry(unsigned int mode, +struct cache_entry *make_empty_cache_entry(struct index_state *istate, size_t len) +{ + return xcalloc(1, cache_entry_size(len)); +} + +struct cache_entry *make_empty_transient_cache_entry(size_t len) +{ + return xcalloc(1, cache_entry_size(len)); +} + +struct cache_entry *make_cache_entry(struct index_state *istate, + unsigned int mode, const struct object_id *oid, const char *path, int stage, unsigned int refresh_options) { - int size, len; struct cache_entry *ce, *ret; + int len; if (!verify_path(path, mode)) { 
error("Invalid path '%s'", path); @@ -760,8 +770,7 @@ struct cache_entry *make_cache_entry(unsigned int mode, } len = strlen(path); - size = cache_entry_size(len); - ce = xcalloc(1, size); + ce = make_empty_cache_entry(istate, len); oidcpy(&ce->oid, oid); memcpy(ce->name, path, len); @@ -771,10 +780,33 @@ struct cache_entry *make_cache_entry(unsigned int mode, ret = refresh_cache_entry(&the_index, ce, refresh_options); if (ret != ce) - free(ce); + discard_cache_entry(ce); return ret; } +struct cache_entry *make_transient_cache_entry(unsigned int mode, const struct object_id *oid, + const char *path, int stage) +{ + struct cache_entry *ce; + int len; + + if (!verify_path(path, mode)) { + error("Invalid path '%s'", path); + return NULL; + } + + len = strlen(path); + ce = make_empty_transient_cache_entry(len); + + oidcpy(&ce->oid, oid); + memcpy(ce->name, path, len); + ce->ce_flags = create_ce_flags(stage); + ce->ce_namelen = len; + ce->ce_mode = create_ce_mode(mode); + + return ce; +} + /* * Chmod an index entry with either +x or -x. * @@ -1270,7 +1302,7 @@ static struct cache_entry *refresh_cache_ent(struct index_state *istate, { struct stat st; struct cache_entry *updated; - int changed, size; + int changed; int refresh = options & CE_MATCH_REFRESH; int ignore_valid = options & CE_MATCH_IGNORE_VALID; int ignore_skip_worktree = options & CE_MATCH_IGNORE_SKIP_WORKTREE; @@ -1350,8 +1382,7 @@ static struct cache_entry *refresh_cache_ent(struct index_state *istate, return NULL; } - size = ce_size(ce); - updated = xmalloc(size); + updated = make_empty_cache_entry(istate, ce_namelen(ce)); copy_cache_entry(updated, ce); memcpy(updated->name, ce->name, ce->ce_namelen + 1); fill_stat_cache_info(updated, &st); @@ -1637,12 +1668,13 @@ int read_index(struct index_state *istate) return read_index_from(istate, get_index_file(), get_git_dir()); } -static struct cache_entry *cache_entry_from_ondisk(struct ondisk_cache_entry *ondisk, +static struct cache_entry *cache_entry_from_ondisk(struct index_state *istate, + struct ondisk_cache_entry *ondisk, unsigned int flags, const char *name, size_t len) { - struct cache_entry *ce = xmalloc(cache_entry_size(len)); + struct cache_entry *ce = make_empty_cache_entry(istate, len); ce->ce_stat_data.sd_ctime.sec = get_be32(&ondisk->ctime.sec); ce->ce_stat_data.sd_mtime.sec = get_be32(&ondisk->mtime.sec); @@ -1684,7 +1716,8 @@ static unsigned long expand_name_field(struct strbuf *name, const char *cp_) return (const char *)ep + 1 - cp_; } -static struct cache_entry *create_from_disk(struct ondisk_cache_entry *ondisk, +static struct cache_entry *create_from_disk(struct index_state *istate, + struct ondisk_cache_entry *ondisk, unsigned long *ent_size, struct strbuf *previous_name) { @@ -1715,13 +1748,13 @@ static struct cache_entry *create_from_disk(struct ondisk_cache_entry *ondisk, /* v3 and earlier */ if (len == CE_NAMEMASK) len = strlen(name); - ce = cache_entry_from_ondisk(ondisk, flags, name, len); + ce = cache_entry_from_ondisk(istate, ondisk, flags, name, len); *ent_size = ondisk_ce_size(ce); } else { unsigned long consumed; consumed = expand_name_field(previous_name, name); - ce = cache_entry_from_ondisk(ondisk, flags, + ce = cache_entry_from_ondisk(istate, ondisk, flags, previous_name->buf, previous_name->len); @@ -1853,7 +1886,7 @@ int do_read_index(struct index_state *istate, const char *path, int must_exist) unsigned long consumed; disk_ce = (struct ondisk_cache_entry *)((char *)mmap + src_offset); - ce = create_from_disk(disk_ce, &consumed, previous_name); + 
ce = create_from_disk(istate, disk_ce, &consumed, previous_name); set_index_entry(istate, i, ce); src_offset += consumed; @@ -1959,7 +1992,7 @@ int discard_index(struct index_state *istate) istate->cache[i]->index <= istate->split_index->base->cache_nr && istate->cache[i] == istate->split_index->base->cache[istate->cache[i]->index - 1]) continue; - free(istate->cache[i]); + discard_cache_entry(istate->cache[i]); } resolve_undo_clear_index(istate); istate->cache_nr = 0; @@ -2649,14 +2682,13 @@ int read_index_unmerged(struct index_state *istate) for (i = 0; i < istate->cache_nr; i++) { struct cache_entry *ce = istate->cache[i]; struct cache_entry *new_ce; - int size, len; + int len; if (!ce_stage(ce)) continue; unmerged = 1; len = ce_namelen(ce); - size = cache_entry_size(len); - new_ce = xcalloc(1, size); + new_ce = make_empty_cache_entry(istate, len); memcpy(new_ce->name, ce->name, len); new_ce->ce_flags = create_ce_flags(0) | CE_CONFLICTED; new_ce->ce_namelen = len; @@ -2765,3 +2797,8 @@ void move_index_extensions(struct index_state *dst, struct index_state *src) dst->untracked = src->untracked; src->untracked = NULL; } + +void discard_cache_entry(struct cache_entry *ce) +{ + free(ce); +} diff --git a/resolve-undo.c b/resolve-undo.c index 4d4e5cb..c30ae5c 100644 --- a/resolve-undo.c +++ b/resolve-undo.c @@ -146,7 +146,9 @@ int unmerge_index_entry_at(struct index_state *istate, int pos) struct cache_entry *nce; if (!ru->mode[i]) continue; - nce = make_cache_entry(ru->mode[i], &ru->oid[i], + nce = make_cache_entry(istate, + ru->mode[i], + &ru->oid[i], name, i + 1, 0); if (matched) nce->ce_flags |= CE_MATCHED; diff --git a/split-index.c b/split-index.c index 660c75f..317900d 100644 --- a/split-index.c +++ b/split-index.c @@ -123,7 +123,7 @@ static void replace_entry(size_t pos, void *data) src->ce_flags |= CE_UPDATE_IN_BASE; src->ce_namelen = dst->ce_namelen; copy_cache_entry(dst, src); - free(src); + discard_cache_entry(src); si->nr_replacements++; } @@ -224,7 +224,7 @@ void prepare_to_write_split_index(struct index_state *istate) base->ce_flags = base_flags; if (ret) ce->ce_flags |= CE_UPDATE_IN_BASE; - free(base); + discard_cache_entry(base); si->base->cache[ce->index - 1] = ce; } for (i = 0; i < si->base->cache_nr; i++) { @@ -301,7 +301,7 @@ void save_or_free_index_entry(struct index_state *istate, struct cache_entry *ce ce == istate->split_index->base->cache[ce->index - 1]) ce->ce_flags |= CE_REMOVE; else - free(ce); + discard_cache_entry(ce); } void replace_index_entry_in_base(struct index_state *istate, @@ -314,7 +314,7 @@ void replace_index_entry_in_base(struct index_state *istate, old_entry->index <= istate->split_index->base->cache_nr) { new_entry->index = old_entry->index; if (old_entry != istate->split_index->base->cache[new_entry->index - 1]) - free(istate->split_index->base->cache[new_entry->index - 1]); + discard_cache_entry(istate->split_index->base->cache[new_entry->index - 1]); istate->split_index->base->cache[new_entry->index - 1] = new_entry; } } diff --git a/tree.c b/tree.c index 244eb5e..5111ce8 100644 --- a/tree.c +++ b/tree.c @@ -16,15 +16,13 @@ static int read_one_entry_opt(struct index_state *istate, unsigned mode, int stage, int opt) { int len; - unsigned int size; struct cache_entry *ce; if (S_ISDIR(mode)) return READ_TREE_RECURSIVE; len = strlen(pathname); - size = cache_entry_size(baselen + len); - ce = xcalloc(1, size); + ce = make_empty_cache_entry(istate, baselen + len); ce->ce_mode = create_ce_mode(mode); ce->ce_flags = create_ce_flags(stage); diff --git 
a/unpack-trees.c b/unpack-trees.c index 3a85a02..33cba55 100644 --- a/unpack-trees.c +++ b/unpack-trees.c @@ -203,10 +203,10 @@ static int do_add_entry(struct unpack_trees_options *o, struct cache_entry *ce, ADD_CACHE_OK_TO_ADD | ADD_CACHE_OK_TO_REPLACE); } -static struct cache_entry *dup_entry(const struct cache_entry *ce) +static struct cache_entry *dup_entry(const struct cache_entry *ce, struct index_state *istate) { unsigned int size = ce_size(ce); - struct cache_entry *new_entry = xmalloc(size); + struct cache_entry *new_entry = make_empty_cache_entry(istate, ce_namelen(ce)); memcpy(new_entry, ce, size); return new_entry; @@ -216,7 +216,7 @@ static void add_entry(struct unpack_trees_options *o, const struct cache_entry *ce, unsigned int set, unsigned int clear) { - do_add_entry(o, dup_entry(ce), set, clear); + do_add_entry(o, dup_entry(ce, &o->result), set, clear); } /* @@ -797,10 +797,17 @@ static int ce_in_traverse_path(const struct cache_entry *ce, return (info->pathlen < ce_namelen(ce)); } -static struct cache_entry *create_ce_entry(const struct traverse_info *info, const struct name_entry *n, int stage) +static struct cache_entry *create_ce_entry(const struct traverse_info *info, + const struct name_entry *n, + int stage, + struct index_state *istate, + int is_transient) { int len = traverse_path_len(info, n); - struct cache_entry *ce = xcalloc(1, cache_entry_size(len)); + struct cache_entry *ce = + is_transient ? + make_empty_transient_cache_entry(len) : + make_empty_cache_entry(istate, len); ce->ce_mode = create_ce_mode(n->mode); ce->ce_flags = create_ce_flags(stage); @@ -846,7 +853,15 @@ static int unpack_nondirectories(int n, unsigned long mask, stage = 3; else stage = 2; - src[i + o->merge] = create_ce_entry(info, names + i, stage); + + /* + * If the merge bit is set, then the cache entries are + * discarded in the following block. In this case, + * construct "transient" cache_entries, as they are + * not stored in the index. otherwise construct the + * cache entry from the index aware logic. + */ + src[i + o->merge] = create_ce_entry(info, names + i, stage, &o->result, o->merge); } if (o->merge) { @@ -855,7 +870,7 @@ static int unpack_nondirectories(int n, unsigned long mask, for (i = 0; i < n; i++) { struct cache_entry *ce = src[i + o->merge]; if (ce != o->df_conflict_entry) - free(ce); + discard_cache_entry(ce); } return rc; } @@ -1787,7 +1802,7 @@ static int merged_entry(const struct cache_entry *ce, struct unpack_trees_options *o) { int update = CE_UPDATE; - struct cache_entry *merge = dup_entry(ce); + struct cache_entry *merge = dup_entry(ce, &o->result); if (!old) { /* @@ -1807,7 +1822,7 @@ static int merged_entry(const struct cache_entry *ce, if (verify_absent(merge, ERROR_WOULD_LOSE_UNTRACKED_OVERWRITTEN, o)) { - free(merge); + discard_cache_entry(merge); return -1; } invalidate_ce_path(merge, o); @@ -1833,7 +1848,7 @@ static int merged_entry(const struct cache_entry *ce, update = 0; } else { if (verify_uptodate(old, o)) { - free(merge); + discard_cache_entry(merge); return -1; } /* Migrate old flags over */ -- cgit v0.10.2-6-g49f6 From 8fb8e3f63654df20926f665486d2edea2fff0243 Mon Sep 17 00:00:00 2001 From: Jameson Miller Date: Mon, 2 Jul 2018 19:49:33 +0000 Subject: mem-pool: only search head block for available space Instead of searching all memory blocks for available space to fulfill a memory request, only search the head block. If the head block does not have space, assume that previous blocks would most likely not be able to fulfill the request either.
This could potentially lead to more memory fragmentation, but also avoids searching memory blocks that probably will not be able to fulfill the request. This pattern will benefit consumers that are able to generate a good estimate for how much memory will be needed, or if they are performing fixed sized allocations, so that once a block is exhausted it will never be able to fulfill a future request. Signed-off-by: Jameson Miller Signed-off-by: Junio C Hamano diff --git a/mem-pool.c b/mem-pool.c index 389d7af..c80124f 100644 --- a/mem-pool.c +++ b/mem-pool.c @@ -21,16 +21,16 @@ static struct mp_block *mem_pool_alloc_block(struct mem_pool *mem_pool, size_t b void *mem_pool_alloc(struct mem_pool *mem_pool, size_t len) { - struct mp_block *p; + struct mp_block *p = NULL; void *r; /* round up to a 'uintmax_t' alignment */ if (len & (sizeof(uintmax_t) - 1)) len += sizeof(uintmax_t) - (len & (sizeof(uintmax_t) - 1)); - for (p = mem_pool->mp_block; p; p = p->next_block) - if (p->end - p->next_free >= len) - break; + if (mem_pool->mp_block && + mem_pool->mp_block->end - mem_pool->mp_block->next_free >= len) + p = mem_pool->mp_block; if (!p) { if (len >= (mem_pool->block_alloc / 2)) { -- cgit v0.10.2-6-g49f6 From 158dfeff3dc1d155b84e68b265a9b6c265717e1e Mon Sep 17 00:00:00 2001 From: Jameson Miller Date: Mon, 2 Jul 2018 19:49:34 +0000 Subject: mem-pool: add life cycle management functions Add initialization and discard functions to the mem_pool type. As the memory allocated by a mem_pool can now be freed, we also track the large allocations. If there are existing mp_blocks in the mem_pool's linked list of mp_blocks, then the mp_block for a large allocation is inserted behind the head block. This is because only the head mp_block is considered when searching for available space. This results in the following desirable properties: 1) The mp_block allocated for the large request will not be included in the search for available space in future requests; the large mp_block is sized for the specific request and does not contain any spare space. 2) The head mp_block will not be bumped from consideration for future memory requests just because a request for a large chunk of memory came in. These changes are in preparation for a future commit that will utilize creating and discarding memory pools. Signed-off-by: Jameson Miller Signed-off-by: Junio C Hamano diff --git a/mem-pool.c b/mem-pool.c index c80124f..1769400 100644 --- a/mem-pool.c +++ b/mem-pool.c @@ -5,20 +5,65 @@ #include "cache.h" #include "mem-pool.h" -static struct mp_block *mem_pool_alloc_block(struct mem_pool *mem_pool, size_t block_alloc) +#define BLOCK_GROWTH_SIZE 1024*1024 - sizeof(struct mp_block); + +/* + * Allocate a new mp_block and insert it after the block specified in + * `insert_after`. If `insert_after` is NULL, then insert block at the + * head of the linked list.
+ */ +static struct mp_block *mem_pool_alloc_block(struct mem_pool *mem_pool, size_t block_alloc, struct mp_block *insert_after) { struct mp_block *p; mem_pool->pool_alloc += sizeof(struct mp_block) + block_alloc; p = xmalloc(st_add(sizeof(struct mp_block), block_alloc)); - p->next_block = mem_pool->mp_block; + p->next_free = (char *)p->space; p->end = p->next_free + block_alloc; - mem_pool->mp_block = p; + + if (insert_after) { + p->next_block = insert_after->next_block; + insert_after->next_block = p; + } else { + p->next_block = mem_pool->mp_block; + mem_pool->mp_block = p; + } return p; } +void mem_pool_init(struct mem_pool **mem_pool, size_t initial_size) +{ + struct mem_pool *pool; + + if (*mem_pool) + return; + + pool = xcalloc(1, sizeof(*pool)); + + pool->block_alloc = BLOCK_GROWTH_SIZE; + + if (initial_size > 0) + mem_pool_alloc_block(pool, initial_size, NULL); + + *mem_pool = pool; +} + +void mem_pool_discard(struct mem_pool *mem_pool) +{ + struct mp_block *block, *block_to_free; + + while ((block = mem_pool->mp_block)) + { + block_to_free = block; + block = block->next_block; + free(block_to_free); + } + + free(mem_pool); +} + void *mem_pool_alloc(struct mem_pool *mem_pool, size_t len) { struct mp_block *p = NULL; @@ -33,12 +78,10 @@ void *mem_pool_alloc(struct mem_pool *mem_pool, size_t len) p = mem_pool->mp_block; if (!p) { - if (len >= (mem_pool->block_alloc / 2)) { - mem_pool->pool_alloc += len; - return xmalloc(len); - } + if (len >= (mem_pool->block_alloc / 2)) + return mem_pool_alloc_block(mem_pool, len, mem_pool->mp_block); - p = mem_pool_alloc_block(mem_pool, mem_pool->block_alloc); + p = mem_pool_alloc_block(mem_pool, mem_pool->block_alloc, NULL); } r = p->next_free; diff --git a/mem-pool.h b/mem-pool.h index 829ad58..f75b336 100644 --- a/mem-pool.h +++ b/mem-pool.h @@ -22,6 +22,16 @@ struct mem_pool { }; /* + * Initialize mem_pool with specified initial size. + */ +void mem_pool_init(struct mem_pool **mem_pool, size_t initial_size); + +/* + * Discard a memory pool and free all the memory it is responsible for. + */ +void mem_pool_discard(struct mem_pool *mem_pool); + +/* * Alloc memory from the mem_pool. */ void *mem_pool_alloc(struct mem_pool *pool, size_t len); -- cgit v0.10.2-6-g49f6 From 0e58301d8199208d1e48b9f64c4ad1089a355905 Mon Sep 17 00:00:00 2001 From: Jameson Miller Date: Mon, 2 Jul 2018 19:49:35 +0000 Subject: mem-pool: fill out functionality Add functions for: - combining two memory pools - determining if a memory address is within the range managed by a memory pool These functions will be used by future commits. Signed-off-by: Jameson Miller Signed-off-by: Junio C Hamano diff --git a/mem-pool.c b/mem-pool.c index 1769400..b250a5f 100644 --- a/mem-pool.c +++ b/mem-pool.c @@ -96,3 +96,45 @@ void *mem_pool_calloc(struct mem_pool *mem_pool, size_t count, size_t size) memset(r, 0, len); return r; } + +int mem_pool_contains(struct mem_pool *mem_pool, void *mem) +{ + struct mp_block *p; + + /* Check if memory is allocated in a block */ + for (p = mem_pool->mp_block; p; p = p->next_block) + if ((mem >= ((void *)p->space)) && + (mem < ((void *)p->end))) + return 1; + + return 0; +} + +void mem_pool_combine(struct mem_pool *dst, struct mem_pool *src) +{ + struct mp_block *p; + + /* Append the blocks from src to dst */ + if (dst->mp_block && src->mp_block) { + /* + * src and dst have blocks, append + * blocks from src to dst. 
+ */ + p = dst->mp_block; + while (p->next_block) + p = p->next_block; + + p->next_block = src->mp_block; + } else if (src->mp_block) { + /* + * src has blocks, dst is empty. + */ + dst->mp_block = src->mp_block; + } else { + /* src is empty, nothing to do. */ + } + + dst->pool_alloc += src->pool_alloc; + src->pool_alloc = 0; + src->mp_block = NULL; +} diff --git a/mem-pool.h b/mem-pool.h index f75b336..adeefdc 100644 --- a/mem-pool.h +++ b/mem-pool.h @@ -41,4 +41,17 @@ void *mem_pool_alloc(struct mem_pool *pool, size_t len); */ void *mem_pool_calloc(struct mem_pool *pool, size_t count, size_t size); +/* + * Move the memory associated with the 'src' pool to the 'dst' pool. The 'src' + * pool will be empty and not contain any memory. It still needs to be free'd + * with a call to `mem_pool_discard`. + */ +void mem_pool_combine(struct mem_pool *dst, struct mem_pool *src); +/* + * Check if a memory pointed at by 'mem' is part of the range of + * memory managed by the specified mem_pool. + */ +int mem_pool_contains(struct mem_pool *mem_pool, void *mem); + #endif -- cgit v0.10.2-6-g49f6 From 8e72d67529ba0adfb6f7d24a21e909c2a1e1d069 Mon Sep 17 00:00:00 2001 From: Jameson Miller Date: Mon, 2 Jul 2018 19:49:37 +0000 Subject: block alloc: allocate cache entries from mem_pool When reading large indexes from disk, a portion of the time is dominated by malloc() calls. This can be mitigated by allocating a large block of memory and managing it ourselves via memory pools. This change moves the cache entry allocation to be on top of memory pools. Design: The index_state struct will gain a notion of an associated memory_pool from which cache_entries will be allocated. When reading in the index from disk, we have information on the number of entries and their size, which can guide us in deciding how large our initial memory allocation should be. When an index is discarded, the associated memory_pool will be discarded as well - so the lifetime of a cache_entry is tied to the lifetime of the index_state that it was allocated for. In the case of a split index, the following rules are followed. First, some terminology: - 'the_index': represents the logical view of the index - 'split_index': represents the "base" cache entries. Read from the split index file. 'the_index' can reference a single split_index, as well as cache_entries from the split_index. `the_index` will be discarded before the `split_index` is. This means that when we are allocating cache_entries in the presence of a split index, we need to allocate the entries from the `split_index`'s memory pool. This allows us to follow the pattern that `the_index` can reference cache_entries from the `split_index`, and that the cache_entries will not be freed while they are still being referenced. Managing transient cache_entry structs: Cache entries are usually allocated for an index, but this is not always the case. Cache entries are sometimes allocated because this is the type that the existing checkout_entry function works with. Because of this, the existing code needs to handle cache entries associated with an index / memory pool, and those that only exist transiently. Several strategies were contemplated around how to handle this: Chosen approach: An extra field was added to the cache_entry type to track whether the cache_entry was allocated from a memory pool or not. This is currently an int field, as there are no more available bits in the existing ce_flags bit field.
If / when more bits are needed, this new field can be turned into a proper bit field. Alternatives: 1) Do not include any information about how the cache_entry was allocated. Calling code would be responsible for tracking whether the cache_entry needed to be freed or not. Pro: No extra memory overhead to track this state Con: Extra complexity in callers to handle this correctly. The extra complexity and burden to not regress this behavior in the future was more than we wanted. 2) cache_entry would gain knowledge about which mem_pool allocated it Pro: Could (potentially) do extra logic to know when a mem_pool no longer had references to any cache_entry Con: cache_entry would grow heavier by a pointer instead of an int We didn't see a tangible benefit to this approach. 3) Do not add any extra information to a cache_entry, but when freeing a cache entry, check if the memory exists in a region managed by existing mem_pools. Pro: No extra memory overhead to track state Con: Extra computation is performed when freeing cache entries We decided tracking and iterating over known memory pool regions was less desirable than adding an extra field to track this state. Signed-off-by: Jameson Miller Signed-off-by: Junio C Hamano diff --git a/cache.h b/cache.h index 5aadaeb..c1c7e9e 100644 --- a/cache.h +++ b/cache.h @@ -15,6 +15,7 @@ #include "path.h" #include "sha1-array.h" #include "repository.h" +#include "mem-pool.h" #include typedef struct git_zstream { @@ -156,6 +157,7 @@ struct cache_entry { struct stat_data ce_stat_data; unsigned int ce_mode; unsigned int ce_flags; + unsigned int mem_pool_allocated; unsigned int ce_namelen; unsigned int index; /* for link extension */ struct object_id oid; @@ -227,6 +229,7 @@ static inline void copy_cache_entry(struct cache_entry *dst, const struct cache_entry *src) { unsigned int state = dst->ce_flags & CE_HASHED; + int mem_pool_allocated = dst->mem_pool_allocated; /* Don't copy hash chain and name */ memcpy(&dst->ce_stat_data, &src->ce_stat_data, @@ -235,6 +238,9 @@ static inline void copy_cache_entry(struct cache_entry *dst, /* Restore the hash state */ dst->ce_flags = (dst->ce_flags & ~CE_HASHED) | state; + + /* Restore the mem_pool_allocated flag */ + dst->mem_pool_allocated = mem_pool_allocated; } static inline unsigned create_ce_flags(unsigned stage) @@ -328,6 +334,7 @@ struct index_state { struct untracked_cache *untracked; uint64_t fsmonitor_last_update; struct ewah_bitmap *fsmonitor_dirty; + struct mem_pool *ce_mem_pool; }; extern struct index_state the_index; @@ -373,6 +380,20 @@ struct cache_entry *make_empty_transient_cache_entry(size_t name_len); */ void discard_cache_entry(struct cache_entry *ce); +/* + * Duplicate a cache_entry. Allocate memory for the new entry from a + * memory_pool. Takes into account cache_entry fields that are meant + * for managing the underlying memory allocation of the cache_entry.
+ */ +void validate_cache_entries(const struct index_state *istate); + #ifndef NO_THE_INDEX_COMPATIBILITY_MACROS #define active_cache (the_index.cache) #define active_nr (the_index.cache_nr) diff --git a/mem-pool.c b/mem-pool.c index b250a5f..139617c 100644 --- a/mem-pool.c +++ b/mem-pool.c @@ -54,7 +54,8 @@ void mem_pool_discard(struct mem_pool *mem_pool) { struct mp_block *block, *block_to_free; - while ((block = mem_pool->mp_block)) + block = mem_pool->mp_block; + while (block) { block_to_free = block; block = block->next_block; diff --git a/read-cache.c b/read-cache.c index 41e4d0e..b073696 100644 --- a/read-cache.c +++ b/read-cache.c @@ -46,6 +46,48 @@ CE_ENTRY_ADDED | CE_ENTRY_REMOVED | CE_ENTRY_CHANGED | \ SPLIT_INDEX_ORDERED | UNTRACKED_CHANGED | FSMONITOR_CHANGED) + +/* + * This is an estimate of the pathname length in the index. We use + * this for V4 index files to guess the un-deltafied size of the index + * in memory because of pathname deltafication. This is not required + * for V2/V3 index formats because their pathnames are not compressed. + * If the initial amount of memory set aside is not sufficient, the + * mem pool will allocate extra memory. + */ +#define CACHE_ENTRY_PATH_LENGTH 80 + +static inline struct cache_entry *mem_pool__ce_alloc(struct mem_pool *mem_pool, size_t len) +{ + struct cache_entry *ce; + ce = mem_pool_alloc(mem_pool, cache_entry_size(len)); + ce->mem_pool_allocated = 1; + return ce; +} + +static inline struct cache_entry *mem_pool__ce_calloc(struct mem_pool *mem_pool, size_t len) +{ + struct cache_entry * ce; + ce = mem_pool_calloc(mem_pool, 1, cache_entry_size(len)); + ce->mem_pool_allocated = 1; + return ce; +} + +static struct mem_pool *find_mem_pool(struct index_state *istate) +{ + struct mem_pool **pool_ptr; + + if (istate->split_index && istate->split_index->base) + pool_ptr = &istate->split_index->base->ce_mem_pool; + else + pool_ptr = &istate->ce_mem_pool; + + if (!*pool_ptr) + mem_pool_init(pool_ptr, 0); + + return *pool_ptr; +} + struct index_state the_index; static const char *alternate_index_output; @@ -746,7 +788,7 @@ int add_file_to_index(struct index_state *istate, const char *path, int flags) struct cache_entry *make_empty_cache_entry(struct index_state *istate, size_t len) { - return xcalloc(1, cache_entry_size(len)); + return mem_pool__ce_calloc(find_mem_pool(istate), len); } struct cache_entry *make_empty_transient_cache_entry(size_t len) @@ -1668,13 +1710,13 @@ int read_index(struct index_state *istate) return read_index_from(istate, get_index_file(), get_git_dir()); } -static struct cache_entry *cache_entry_from_ondisk(struct index_state *istate, +static struct cache_entry *cache_entry_from_ondisk(struct mem_pool *mem_pool, struct ondisk_cache_entry *ondisk, unsigned int flags, const char *name, size_t len) { - struct cache_entry *ce = make_empty_cache_entry(istate, len); + struct cache_entry *ce = mem_pool__ce_alloc(mem_pool, len); ce->ce_stat_data.sd_ctime.sec = get_be32(&ondisk->ctime.sec); ce->ce_stat_data.sd_mtime.sec = get_be32(&ondisk->mtime.sec); @@ -1716,7 +1758,7 @@ static unsigned long expand_name_field(struct strbuf *name, const char *cp_) return (const char *)ep + 1 - cp_; } -static struct cache_entry *create_from_disk(struct index_state *istate, +static struct cache_entry *create_from_disk(struct mem_pool *mem_pool, struct ondisk_cache_entry *ondisk, unsigned long *ent_size, struct strbuf *previous_name) @@ -1748,13 +1790,13 @@ static struct cache_entry *create_from_disk(struct index_state *istate, /* v3 and earlier 
*/ if (len == CE_NAMEMASK) len = strlen(name); - ce = cache_entry_from_ondisk(istate, ondisk, flags, name, len); + ce = cache_entry_from_ondisk(mem_pool, ondisk, flags, name, len); *ent_size = ondisk_ce_size(ce); } else { unsigned long consumed; consumed = expand_name_field(previous_name, name); - ce = cache_entry_from_ondisk(istate, ondisk, flags, + ce = cache_entry_from_ondisk(mem_pool, ondisk, flags, previous_name->buf, previous_name->len); @@ -1828,6 +1870,22 @@ static void post_read_index_from(struct index_state *istate) tweak_fsmonitor(istate); } +static size_t estimate_cache_size_from_compressed(unsigned int entries) +{ + return entries * (sizeof(struct cache_entry) + CACHE_ENTRY_PATH_LENGTH); +} + +static size_t estimate_cache_size(size_t ondisk_size, unsigned int entries) +{ + long per_entry = sizeof(struct cache_entry) - sizeof(struct ondisk_cache_entry); + + /* + * Account for potential alignment differences. + */ + per_entry += align_padding_size(sizeof(struct cache_entry), -sizeof(struct ondisk_cache_entry)); + return ondisk_size + entries * per_entry; +} + /* remember to discard_cache() before reading a different cache! */ int do_read_index(struct index_state *istate, const char *path, int must_exist) { @@ -1874,10 +1932,15 @@ int do_read_index(struct index_state *istate, const char *path, int must_exist) istate->cache = xcalloc(istate->cache_alloc, sizeof(*istate->cache)); istate->initialized = 1; - if (istate->version == 4) + if (istate->version == 4) { previous_name = &previous_name_buf; - else + mem_pool_init(&istate->ce_mem_pool, + estimate_cache_size_from_compressed(istate->cache_nr)); + } else { previous_name = NULL; + mem_pool_init(&istate->ce_mem_pool, + estimate_cache_size(mmap_size, istate->cache_nr)); + } src_offset = sizeof(*hdr); for (i = 0; i < istate->cache_nr; i++) { @@ -1886,7 +1949,7 @@ int do_read_index(struct index_state *istate, const char *path, int must_exist) unsigned long consumed; disk_ce = (struct ondisk_cache_entry *)((char *)mmap + src_offset); - ce = create_from_disk(istate, disk_ce, &consumed, previous_name); + ce = create_from_disk(istate->ce_mem_pool, disk_ce, &consumed, previous_name); set_index_entry(istate, i, ce); src_offset += consumed; @@ -1983,17 +2046,13 @@ int is_index_unborn(struct index_state *istate) int discard_index(struct index_state *istate) { - int i; + /* + * Cache entries in istate->cache[] should have been allocated + * from the memory pool associated with this index, or from an + * associated split_index. There is no need to free individual + * cache entries. 
+ */ - for (i = 0; i < istate->cache_nr; i++) { - if (istate->cache[i]->index && - istate->split_index && - istate->split_index->base && - istate->cache[i]->index <= istate->split_index->base->cache_nr && - istate->cache[i] == istate->split_index->base->cache[istate->cache[i]->index - 1]) - continue; - discard_cache_entry(istate->cache[i]); - } resolve_undo_clear_index(istate); istate->cache_nr = 0; istate->cache_changed = 0; @@ -2007,6 +2066,12 @@ int discard_index(struct index_state *istate) discard_split_index(istate); free_untracked_cache(istate->untracked); istate->untracked = NULL; + + if (istate->ce_mem_pool) { + mem_pool_discard(istate->ce_mem_pool); + istate->ce_mem_pool = NULL; + } + return 0; } @@ -2798,7 +2863,23 @@ void move_index_extensions(struct index_state *dst, struct index_state *src) src->untracked = NULL; } +struct cache_entry *dup_cache_entry(const struct cache_entry *ce, + struct index_state *istate) +{ + unsigned int size = ce_size(ce); + int mem_pool_allocated; + struct cache_entry *new_entry = make_empty_cache_entry(istate, ce_namelen(ce)); + mem_pool_allocated = new_entry->mem_pool_allocated; + + memcpy(new_entry, ce, size); + new_entry->mem_pool_allocated = mem_pool_allocated; + return new_entry; +} + void discard_cache_entry(struct cache_entry *ce) { + if (ce && ce->mem_pool_allocated) + return; + free(ce); } diff --git a/split-index.c b/split-index.c index 317900d..84f067e 100644 --- a/split-index.c +++ b/split-index.c @@ -73,16 +73,31 @@ void move_cache_to_base_index(struct index_state *istate) int i; /* - * do not delete old si->base, its index entries may be shared - * with istate->cache[]. Accept a bit of leaking here because - * this code is only used by short-lived update-index. + * If there was a previous base index, then transfer ownership of allocated + * entries to the parent index. */ + if (si->base && + si->base->ce_mem_pool) { + + if (!istate->ce_mem_pool) + mem_pool_init(&istate->ce_mem_pool, 0); + + mem_pool_combine(istate->ce_mem_pool, istate->split_index->base->ce_mem_pool); + } + si->base = xcalloc(1, sizeof(*si->base)); si->base->version = istate->version; /* zero timestamp disables racy test in ce_write_index() */ si->base->timestamp = istate->timestamp; ALLOC_GROW(si->base->cache, istate->cache_nr, si->base->cache_alloc); si->base->cache_nr = istate->cache_nr; + + /* + * The mem_pool needs to move with the allocated entries. + */ + si->base->ce_mem_pool = istate->ce_mem_pool; + istate->ce_mem_pool = NULL; + COPY_ARRAY(si->base->cache, istate->cache, istate->cache_nr); mark_base_index_entries(si->base); for (i = 0; i < si->base->cache_nr; i++) @@ -331,12 +346,31 @@ void remove_split_index(struct index_state *istate) { if (istate->split_index) { /* - * can't discard_split_index(&the_index); because that - * will destroy split_index->base->cache[], which may - * be shared with the_index.cache[]. So yeah we're - * leaking a bit here. + * When removing the split index, we need to move + * ownership of the mem_pool associated with the + * base index to the main index. There may be cache entries + * allocated from the base's memory pool that are shared with + * the_index.cache[]. */ - istate->split_index = NULL; + mem_pool_combine(istate->ce_mem_pool, istate->split_index->base->ce_mem_pool); + + /* + * The split index no longer owns the mem_pool backing + * its cache array. As we are discarding this index, + * mark the index as having no cache entries, so it + * will not attempt to clean up the cache entries or + * validate them. 
+		 */
+		if (istate->split_index->base)
+			istate->split_index->base->cache_nr = 0;
+
+		/*
+		 * We can discard the split index because its
+		 * memory pool has been incorporated into the
+		 * memory pool associated with the_index.
+		 */
+		discard_split_index(istate);
+
 		istate->cache_changed |= SOMETHING_CHANGED;
 	}
 }

diff --git a/unpack-trees.c b/unpack-trees.c
index 33cba55..a3b5131 100644
--- a/unpack-trees.c
+++ b/unpack-trees.c
@@ -203,20 +203,11 @@ static int do_add_entry(struct unpack_trees_options *o, struct cache_entry *ce,
 			ADD_CACHE_OK_TO_ADD | ADD_CACHE_OK_TO_REPLACE);
 }
 
-static struct cache_entry *dup_entry(const struct cache_entry *ce, struct index_state *istate)
-{
-	unsigned int size = ce_size(ce);
-	struct cache_entry *new_entry = make_empty_cache_entry(istate, ce_namelen(ce));
-
-	memcpy(new_entry, ce, size);
-	return new_entry;
-}
-
 static void add_entry(struct unpack_trees_options *o,
 		      const struct cache_entry *ce,
 		      unsigned int set, unsigned int clear)
 {
-	do_add_entry(o, dup_entry(ce, &o->result), set, clear);
+	do_add_entry(o, dup_cache_entry(ce, &o->result), set, clear);
 }
 
 /*
@@ -1802,7 +1793,7 @@ static int merged_entry(const struct cache_entry *ce,
 			struct unpack_trees_options *o)
 {
 	int update = CE_UPDATE;
-	struct cache_entry *merge = dup_entry(ce, &o->result);
+	struct cache_entry *merge = dup_cache_entry(ce, &o->result);
 
 	if (!old) {
 		/*
--
cgit v0.10.2-6-g49f6

From 8616a2d0cb57865540f1c00ac2e5385a6cc5d84e Mon Sep 17 00:00:00 2001
From: Jameson Miller
Date: Mon, 2 Jul 2018 19:49:39 +0000
Subject: block alloc: add validations around cache_entry lifecycle

Add an option (controlled by an environment variable) to perform extra
validations on mem_pool allocated cache entries. When set:

  1) Invalidate cache_entry memory when discarding cache_entry.

  2) When discarding index_state struct, verify that all cache_entries
     were allocated from expected mem_pool.

  3) When discarding mem_pools, invalidate mem_pool memory.

This should provide extra checks that mem_pools and their allocated
cache_entries are being used as expected.

Signed-off-by: Jameson Miller
Signed-off-by: Junio C Hamano

diff --git a/cache.h b/cache.h
index c1c7e9e..a3334a7 100644
--- a/cache.h
+++ b/cache.h
@@ -381,6 +381,12 @@ struct cache_entry *make_empty_transient_cache_entry(size_t name_len);
 void discard_cache_entry(struct cache_entry *ce);
 
 /*
+ * Check configuration to see whether we should perform extra
+ * validation on cache entries.
+ */
+int should_validate_cache_entries(void);
+
+/*
  * Duplicate a cache_entry. Allocate memory for the new entry from a
  * memory_pool. Takes into account cache_entry fields that are meant
  * for managing the underlying memory allocation of the cache_entry.
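The scheme this commit describes combines two common debugging techniques: poison freed memory with a recognizable sentinel byte, and gate the extra cost behind an environment variable that is read once and cached. The following is a minimal, stand-alone sketch of that pattern, not Git's implementation; the names MYAPP_DEBUG_POISON and pool_free_block are invented for illustration.

	#include <stdlib.h>
	#include <string.h>

	/* Read the debug toggle from the environment once, then cache it. */
	static int debug_poison_enabled(void)
	{
		static int enabled = -1;	/* -1: not yet determined */

		if (enabled < 0)
			enabled = getenv("MYAPP_DEBUG_POISON") ? 1 : 0;
		return enabled;
	}

	/*
	 * Fill a block with a sentinel before releasing it, so that a
	 * use-after-free shows up as 0xDD bytes in a debugger rather
	 * than as whatever the allocator happens to leave behind.
	 */
	static void pool_free_block(void *block, size_t size)
	{
		if (debug_poison_enabled())
			memset(block, 0xDD, size);
		free(block);
	}

	int main(void)
	{
		void *p = malloc(64);
		pool_free_block(p, 64);	/* poisons first when MYAPP_DEBUG_POISON is set */
		return 0;
	}

Using a distinct sentinel per lifecycle stage, as this commit does (0xCD for a discarded cache_entry, 0xDD for a discarded pool block), lets a crash dump reveal which kind of stale reference was followed.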
diff --git a/git.c b/git.c
index c2f48d5..010898b 100644
--- a/git.c
+++ b/git.c
@@ -414,7 +414,10 @@ static int run_builtin(struct cmd_struct *p, int argc, const char **argv)
 
 	trace_argv_printf(argv, "trace: built-in: git");
 
+	validate_cache_entries(&the_index);
 	status = p->fn(argc, argv, prefix);
+	validate_cache_entries(&the_index);
+
 	if (status)
 		return status;
 
diff --git a/mem-pool.c b/mem-pool.c
index 139617c..a2841a4 100644
--- a/mem-pool.c
+++ b/mem-pool.c
@@ -50,7 +50,7 @@ void mem_pool_init(struct mem_pool **mem_pool, size_t initial_size)
 	*mem_pool = pool;
 }
 
-void mem_pool_discard(struct mem_pool *mem_pool)
+void mem_pool_discard(struct mem_pool *mem_pool, int invalidate_memory)
 {
 	struct mp_block *block, *block_to_free;
 
@@ -59,6 +59,10 @@ void mem_pool_discard(struct mem_pool *mem_pool)
 	{
 		block_to_free = block;
 		block = block->next_block;
+
+		if (invalidate_memory)
+			memset(block_to_free->space, 0xDD, ((char *)block_to_free->end) - ((char *)block_to_free->space));
+
 		free(block_to_free);
 	}
 
diff --git a/mem-pool.h b/mem-pool.h
index adeefdc..999d3c3 100644
--- a/mem-pool.h
+++ b/mem-pool.h
@@ -29,7 +29,7 @@ void mem_pool_init(struct mem_pool **mem_pool, size_t initial_size);
 /*
  * Discard a memory pool and free all the memory it is responsible for.
  */
-void mem_pool_discard(struct mem_pool *mem_pool);
+void mem_pool_discard(struct mem_pool *mem_pool, int invalidate_memory);
 
 /*
  * Alloc memory from the mem_pool.
diff --git a/read-cache.c b/read-cache.c
index b073696..fd67e2e 100644
--- a/read-cache.c
+++ b/read-cache.c
@@ -2050,8 +2050,10 @@ int discard_index(struct index_state *istate)
 	 * Cache entries in istate->cache[] should have been allocated
 	 * from the memory pool associated with this index, or from an
 	 * associated split_index. There is no need to free individual
-	 * cache entries.
+	 * cache entries. validate_cache_entries can detect when this
+	 * assertion does not hold.
 	 */
+	validate_cache_entries(istate);
 
 	resolve_undo_clear_index(istate);
 	istate->cache_nr = 0;
@@ -2068,13 +2070,45 @@ int discard_index(struct index_state *istate)
 	istate->untracked = NULL;
 
 	if (istate->ce_mem_pool) {
-		mem_pool_discard(istate->ce_mem_pool);
+		mem_pool_discard(istate->ce_mem_pool, should_validate_cache_entries());
 		istate->ce_mem_pool = NULL;
 	}
 
 	return 0;
 }
 
+/*
+ * Validate the cache entries of this index.
+ * All cache entries associated with this index
+ * should have been allocated by the memory pool
+ * associated with this index, or by a referenced
+ * split index.
+ */
+void validate_cache_entries(const struct index_state *istate)
+{
+	int i;
+
+	if (!should_validate_cache_entries() || !istate || !istate->initialized)
+		return;
+
+	for (i = 0; i < istate->cache_nr; i++) {
+		if (!istate->cache[i]) {
+			die("internal error: cache entry is not allocated from expected memory pool");
+		} else if (!istate->ce_mem_pool ||
+			!mem_pool_contains(istate->ce_mem_pool, istate->cache[i])) {
+			if (!istate->split_index ||
+				!istate->split_index->base ||
+				!istate->split_index->base->ce_mem_pool ||
+				!mem_pool_contains(istate->split_index->base->ce_mem_pool, istate->cache[i])) {
+				die("internal error: cache entry is not allocated from expected memory pool");
+			}
+		}
+	}
+
+	if (istate->split_index)
+		validate_cache_entries(istate->split_index->base);
+}
+
 int unmerged_index(const struct index_state *istate)
 {
 	int i;
@@ -2878,8 +2912,25 @@ struct cache_entry *dup_cache_entry(const struct cache_entry *ce,
 
 void discard_cache_entry(struct cache_entry *ce)
 {
+	if (ce && should_validate_cache_entries())
+		memset(ce, 0xCD, cache_entry_size(ce->ce_namelen));
+
 	if (ce && ce->mem_pool_allocated)
 		return;
 
 	free(ce);
 }
+
+int should_validate_cache_entries(void)
+{
+	static int validate_index_cache_entries = -1;
+
+	if (validate_index_cache_entries < 0) {
+		if (getenv("GIT_TEST_VALIDATE_INDEX_CACHE_ENTRIES"))
+			validate_index_cache_entries = 1;
+		else
+			validate_index_cache_entries = 0;
+	}
+
+	return validate_index_cache_entries;
+}
--
cgit v0.10.2-6-g49f6
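The mem_pool_contains() test that validate_cache_entries() relies on is conceptually a range check over the pool's block list. The sketch below illustrates the idea under simplified, assumed struct layouts; Git's real struct mem_pool and struct mp_block are declared in mem-pool.h and differ in detail.

	#include <stddef.h>

	/*
	 * Simplified, assumed shapes for illustration only; not the
	 * definitions Git actually uses.
	 */
	struct mp_block {
		struct mp_block *next_block;
		char *end;		/* one past the last usable byte */
		char space[];		/* start of the block's memory */
	};

	struct mem_pool {
		struct mp_block *mp_block;	/* head of the block list */
	};

	/*
	 * Report whether mem points into memory owned by the pool by
	 * walking the block list and testing each block's [space, end)
	 * range. This is the kind of ownership check that lets
	 * validate_cache_entries() assert every cache entry came from
	 * the expected pool.
	 */
	static int pool_contains(struct mem_pool *pool, void *mem)
	{
		struct mp_block *block;

		for (block = pool->mp_block; block; block = block->next_block)
			if ((char *)mem >= block->space &&
			    (char *)mem < block->end)
				return 1;
		return 0;
	}

In practice the checks are enabled by running a command, or the whole test suite, with GIT_TEST_VALIDATE_INDEX_CACHE_ENTRIES set in the environment; when the variable is unset, should_validate_cache_entries() returns 0 and the only overhead is the cached flag test.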