Diffstat (limited to 'builtin')
-rw-r--r--  builtin/add.c                 31
-rw-r--r--  builtin/am.c                  3
-rw-r--r--  builtin/apply.c               1
-rw-r--r--  builtin/bisect--helper.c      73
-rw-r--r--  builtin/blame.c               10
-rw-r--r--  builtin/branch.c              11
-rw-r--r--  builtin/cat-file.c            33
-rw-r--r--  builtin/check-ref-format.c    11
-rw-r--r--  builtin/checkout.c            43
-rw-r--r--  builtin/clone.c               88
-rw-r--r--  builtin/commit.c              17
-rw-r--r--  builtin/describe.c            2
-rw-r--r--  builtin/diff-files.c          8
-rw-r--r--  builtin/diff-index.c          5
-rw-r--r--  builtin/diff-tree.c           3
-rw-r--r--  builtin/diff.c                4
-rw-r--r--  builtin/difftool.c            6
-rw-r--r--  builtin/fast-export.c         2
-rw-r--r--  builtin/fast-import.c         2
-rw-r--r--  builtin/fetch-pack.c          8
-rw-r--r--  builtin/fetch.c               100
-rw-r--r--  builtin/fsck.c                39
-rw-r--r--  builtin/fsmonitor--daemon.c   1577
-rw-r--r--  builtin/gc.c                  26
-rw-r--r--  builtin/grep.c                9
-rw-r--r--  builtin/index-pack.c          8
-rw-r--r--  builtin/log.c                 75
-rw-r--r--  builtin/ls-remote.c           2
-rw-r--r--  builtin/ls-tree.c             366
-rw-r--r--  builtin/mailsplit.c           5
-rw-r--r--  builtin/merge-file.c          32
-rw-r--r--  builtin/merge-tree.c          187
-rw-r--r--  builtin/merge.c               6
-rw-r--r--  builtin/mktree.c              11
-rw-r--r--  builtin/multi-pack-index.c    53
-rw-r--r--  builtin/mv.c                  255
-rw-r--r--  builtin/name-rev.c            23
-rw-r--r--  builtin/pack-objects.c        406
-rw-r--r--  builtin/pack-redundant.c      10
-rw-r--r--  builtin/prune.c               1
-rw-r--r--  builtin/pull.c                26
-rw-r--r--  builtin/push.c                64
-rw-r--r--  builtin/rebase.c              87
-rw-r--r--  builtin/receive-pack.c        53
-rw-r--r--  builtin/reflog.c              147
-rw-r--r--  builtin/remote.c              30
-rw-r--r--  builtin/repack.c              236
-rw-r--r--  builtin/replace.c             2
-rw-r--r--  builtin/reset.c               13
-rw-r--r--  builtin/rev-list.c            25
-rw-r--r--  builtin/rev-parse.c           5
-rw-r--r--  builtin/revert.c              10
-rw-r--r--  builtin/shortlog.c            10
-rw-r--r--  builtin/show-branch.c         4
-rw-r--r--  builtin/show-ref.c            17
-rw-r--r--  builtin/sparse-checkout.c     10
-rw-r--r--  builtin/stash.c               169
-rw-r--r--  builtin/submodule--helper.c   989
-rw-r--r--  builtin/tag.c                 4
-rw-r--r--  builtin/unpack-objects.c      109
-rw-r--r--  builtin/update-index.c        43
-rw-r--r--  builtin/worktree.c            49
62 files changed, 4334 insertions, 1320 deletions
diff --git a/builtin/add.c b/builtin/add.c
index 3ffb86a..f843729 100644
--- a/builtin/add.c
+++ b/builtin/add.c
@@ -141,8 +141,17 @@ int add_files_to_cache(const char *prefix,
rev.diffopt.format_callback_data = &data;
rev.diffopt.flags.override_submodule_config = 1;
rev.max_count = 0; /* do not compare unmerged paths with stage #2 */
+
+ /*
+ * Use an ODB transaction to optimize adding multiple objects.
+ * This function is invoked from commands other than 'add', which
+ * may not have their own transaction active.
+ */
+ begin_odb_transaction();
run_diff_files(&rev, DIFF_RACY_IS_MODIFIED);
- clear_pathspec(&rev.prune_data);
+ end_odb_transaction();
+
+ release_revisions(&rev);
return !!data.add_errors;
}
@@ -236,17 +245,12 @@ int run_add_interactive(const char *revision, const char *patch_mode,
int use_builtin_add_i =
git_env_bool("GIT_TEST_ADD_I_USE_BUILTIN", -1);
- if (use_builtin_add_i < 0) {
- int experimental;
- if (!git_config_get_bool("add.interactive.usebuiltin",
- &use_builtin_add_i))
- ; /* ok */
- else if (!git_config_get_bool("feature.experimental", &experimental) &&
- experimental)
- use_builtin_add_i = 1;
- }
+ if (use_builtin_add_i < 0 &&
+ git_config_get_bool("add.interactive.usebuiltin",
+ &use_builtin_add_i))
+ use_builtin_add_i = 1;
- if (use_builtin_add_i == 1) {
+ if (use_builtin_add_i != 0) {
enum add_p_mode mode;
if (!patch_mode)
@@ -340,6 +344,7 @@ static int edit_patch(int argc, const char **argv, const char *prefix)
unlink(file);
free(file);
+ release_revisions(&rev);
return 0;
}
@@ -670,7 +675,7 @@ int cmd_add(int argc, const char **argv, const char *prefix)
string_list_clear(&only_match_skip_worktree, 0);
}
- plug_bulk_checkin();
+ begin_odb_transaction();
if (add_renormalize)
exit_status |= renormalize_tracked_files(&pathspec, flags);
@@ -682,7 +687,7 @@ int cmd_add(int argc, const char **argv, const char *prefix)
if (chmod_arg && pathspec.nr)
exit_status |= chmod_pathspec(&pathspec, chmod_arg[0], show_only);
- unplug_bulk_checkin();
+ end_odb_transaction();
finish:
if (write_locked_index(&the_index, &lock_file,
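The hunks above replace the plug/unplug bulk-checkin calls with an object-database transaction. A minimal sketch of the calling pattern this establishes (illustrative only, not part of the patch; write_one_blob() is a hypothetical stand-in for whatever performs the object writes):

	begin_odb_transaction();
	for (i = 0; i < nr_paths; i++)
		write_one_blob(paths[i]);	/* each call may add an object to the ODB */
	end_odb_transaction();			/* queued writes are flushed as one batch */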
diff --git a/builtin/am.c b/builtin/am.c
index 0f4111b..93bec62 100644
--- a/builtin/am.c
+++ b/builtin/am.c
@@ -1397,6 +1397,7 @@ static void write_commit_patch(const struct am_state *state, struct commit *comm
add_pending_object(&rev_info, &commit->object, "");
diff_setup_done(&rev_info.diffopt);
log_tree_commit(&rev_info, commit);
+ release_revisions(&rev_info);
}
/**
@@ -1429,6 +1430,7 @@ static void write_index_patch(const struct am_state *state)
add_pending_object(&rev_info, &tree->object, "");
diff_setup_done(&rev_info.diffopt);
run_diff_index(&rev_info, 1);
+ release_revisions(&rev_info);
}
/**
@@ -1582,6 +1584,7 @@ static int fall_back_threeway(const struct am_state *state, const char *index_pa
add_pending_oid(&rev_info, "HEAD", &our_tree, 0);
diff_setup_done(&rev_info.diffopt);
run_diff_index(&rev_info, 1);
+ release_revisions(&rev_info);
}
if (run_apply(state, index_path))
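The release_revisions() calls added in am.c, and throughout the rest of this series, all close out the same struct rev_info lifecycle. A minimal sketch of that lifecycle, assuming the usual repo_init_revisions() initializer used elsewhere in git.git:

	struct rev_info rev;

	repo_init_revisions(the_repository, &rev, prefix);
	setup_revisions(argc, argv, &rev, NULL);
	/* ... run_diff_index(), log_tree_commit(), get_revision(), ... */
	release_revisions(&rev);	/* frees pending objects, pathspecs and diff state in one call */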
diff --git a/builtin/apply.c b/builtin/apply.c
index 3f099b9..555219d 100644
--- a/builtin/apply.c
+++ b/builtin/apply.c
@@ -1,7 +1,6 @@
#include "cache.h"
#include "builtin.h"
#include "parse-options.h"
-#include "lockfile.h"
#include "apply.h"
static const char * const apply_usage[] = {
diff --git a/builtin/bisect--helper.c b/builtin/bisect--helper.c
index 8b2b259..8a052c7 100644
--- a/builtin/bisect--helper.c
+++ b/builtin/bisect--helper.c
@@ -329,12 +329,12 @@ static int check_and_set_terms(struct bisect_terms *terms, const char *cmd)
return 0;
}
-static int mark_good(const char *refname, const struct object_id *oid,
- int flag, void *cb_data)
+static int inc_nr(const char *refname, const struct object_id *oid,
+ int flag, void *cb_data)
{
- int *m_good = (int *)cb_data;
- *m_good = 0;
- return 1;
+ unsigned int *nr = (unsigned int *)cb_data;
+ (*nr)++;
+ return 0;
}
static const char need_bad_and_good_revision_warning[] =
@@ -384,23 +384,64 @@ static int decide_next(const struct bisect_terms *terms,
vocab_good, vocab_bad, vocab_good, vocab_bad);
}
-static int bisect_next_check(const struct bisect_terms *terms,
- const char *current_term)
+static void bisect_status(struct bisect_state *state,
+ const struct bisect_terms *terms)
{
- int missing_good = 1, missing_bad = 1;
char *bad_ref = xstrfmt("refs/bisect/%s", terms->term_bad);
char *good_glob = xstrfmt("%s-*", terms->term_good);
if (ref_exists(bad_ref))
- missing_bad = 0;
+ state->nr_bad = 1;
- for_each_glob_ref_in(mark_good, good_glob, "refs/bisect/",
- (void *) &missing_good);
+ for_each_glob_ref_in(inc_nr, good_glob, "refs/bisect/",
+ (void *) &state->nr_good);
free(good_glob);
free(bad_ref);
+}
- return decide_next(terms, current_term, missing_good, missing_bad);
+__attribute__((format (printf, 1, 2)))
+static void bisect_log_printf(const char *fmt, ...)
+{
+ struct strbuf buf = STRBUF_INIT;
+ va_list ap;
+
+ va_start(ap, fmt);
+ strbuf_vaddf(&buf, fmt, ap);
+ va_end(ap);
+
+ printf("%s", buf.buf);
+ append_to_file(git_path_bisect_log(), "# %s", buf.buf);
+
+ strbuf_release(&buf);
+}
+
+static void bisect_print_status(const struct bisect_terms *terms)
+{
+ struct bisect_state state = { 0 };
+
+ bisect_status(&state, terms);
+
+ /* If we had both, we'd already be started, and shouldn't get here. */
+ if (state.nr_good && state.nr_bad)
+ return;
+
+ if (!state.nr_good && !state.nr_bad)
+ bisect_log_printf(_("status: waiting for both good and bad commits\n"));
+ else if (state.nr_good)
+ bisect_log_printf(Q_("status: waiting for bad commit, %d good commit known\n",
+ "status: waiting for bad commit, %d good commits known\n",
+ state.nr_good), state.nr_good);
+ else
+ bisect_log_printf(_("status: waiting for good commit(s), bad commit known\n"));
+}
+
+static int bisect_next_check(const struct bisect_terms *terms,
+ const char *current_term)
+{
+ struct bisect_state state = { 0 };
+ bisect_status(&state, terms);
+ return decide_next(terms, current_term, !state.nr_good, !state.nr_bad);
}
static int get_terms(struct bisect_terms *terms)
@@ -433,7 +474,7 @@ static int bisect_terms(struct bisect_terms *terms, const char *option)
if (get_terms(terms))
return error(_("no terms defined"));
- if (option == NULL) {
+ if (!option) {
printf(_("Your current terms are %s for the old state\n"
"and %s for the new state.\n"),
terms->term_good, terms->term_bad);
@@ -555,6 +596,7 @@ static int bisect_skipped_commits(struct bisect_terms *terms)
reset_revision_walk();
strbuf_release(&commit_name);
+ release_revisions(&revs);
fclose(fp);
return 0;
}
@@ -606,8 +648,10 @@ static enum bisect_error bisect_next(struct bisect_terms *terms, const char *pre
static enum bisect_error bisect_auto_next(struct bisect_terms *terms, const char *prefix)
{
- if (bisect_next_check(terms, NULL))
+ if (bisect_next_check(terms, NULL)) {
+ bisect_print_status(terms);
return BISECT_OK;
+ }
return bisect_next(terms, prefix);
}
@@ -1041,6 +1085,7 @@ static enum bisect_error bisect_skip(struct bisect_terms *terms, const char **ar
oid_to_hex(&commit->object.oid));
reset_revision_walk();
+ release_revisions(&revs);
} else {
strvec_push(&argv_state, argv[i]);
}
diff --git a/builtin/blame.c b/builtin/blame.c
index 8d15b68..02e3942 100644
--- a/builtin/blame.c
+++ b/builtin/blame.c
@@ -898,6 +898,7 @@ int cmd_blame(int argc, const char **argv, const char *prefix)
unsigned int range_i;
long anchor;
const int hexsz = the_hash_algo->hexsz;
+ long num_lines = 0;
setup_default_color_by_age();
git_config(git_blame_config, &output_option);
@@ -1129,7 +1130,10 @@ parse_done:
for (range_i = ranges.nr; range_i > 0; --range_i) {
const struct range *r = &ranges.ranges[range_i - 1];
ent = blame_entry_prepend(ent, r->start, r->end, o);
+ num_lines += (r->end - r->start);
}
+ if (!num_lines)
+ num_lines = sb.num_lines;
o->suspects = ent;
prio_queue_put(&sb.commits, o->commit);
@@ -1158,7 +1162,7 @@ parse_done:
sb.found_guilty_entry = &found_guilty_entry;
sb.found_guilty_entry_data = &pi;
if (show_progress)
- pi.progress = start_delayed_progress(_("Blaming lines"), sb.num_lines);
+ pi.progress = start_delayed_progress(_("Blaming lines"), num_lines);
assign_blame(&sb, opt);
@@ -1167,7 +1171,7 @@ parse_done:
if (!incremental)
setup_pager();
else
- return 0;
+ goto cleanup;
blame_sort_final(&sb);
@@ -1201,6 +1205,8 @@ parse_done:
printf("num commits: %d\n", sb.num_commits);
}
+cleanup:
cleanup_scoreboard(&sb);
+ release_revisions(&revs);
return 0;
}
diff --git a/builtin/branch.c b/builtin/branch.c
index 5d00d0b..55cd9a6 100644
--- a/builtin/branch.c
+++ b/builtin/branch.c
@@ -204,7 +204,6 @@ static void delete_branch_config(const char *branchname)
static int delete_branches(int argc, const char **argv, int force, int kinds,
int quiet)
{
- struct worktree **worktrees;
struct commit *head_rev = NULL;
struct object_id oid;
char *name = NULL;
@@ -242,8 +241,6 @@ static int delete_branches(int argc, const char **argv, int force, int kinds,
die(_("Couldn't look up commit object for HEAD"));
}
- worktrees = get_worktrees();
-
for (i = 0; i < argc; i++, strbuf_reset(&bname)) {
char *target = NULL;
int flags = 0;
@@ -253,12 +250,11 @@ static int delete_branches(int argc, const char **argv, int force, int kinds,
name = mkpathdup(fmt, bname.buf);
if (kinds == FILTER_REFS_BRANCHES) {
- const struct worktree *wt =
- find_shared_symref(worktrees, "HEAD", name);
- if (wt) {
+ const char *path;
+ if ((path = branch_checked_out(name))) {
error(_("Cannot delete branch '%s' "
"checked out at '%s'"),
- bname.buf, wt->path);
+ bname.buf, path);
ret = 1;
continue;
}
@@ -315,7 +311,6 @@ static int delete_branches(int argc, const char **argv, int force, int kinds,
free(name);
strbuf_release(&bname);
- free_worktrees(worktrees);
return ret;
}
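branch_checked_out() lets delete_branches() drop the get_worktrees()/find_shared_symref() bookkeeping removed above. A hedged before/after sketch of the guard, using only the functions visible in this hunk:

	/* before: allocate and walk the full worktree list */
	struct worktree **worktrees = get_worktrees();
	const struct worktree *wt = find_shared_symref(worktrees, "HEAD", name);
	if (wt)
		error(_("Cannot delete branch '%s' checked out at '%s'"), name, wt->path);
	free_worktrees(worktrees);

	/* after: one lookup, nothing for the caller to free */
	const char *path = branch_checked_out(name);	/* NULL if not checked out anywhere */
	if (path)
		error(_("Cannot delete branch '%s' checked out at '%s'"), name, path);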
diff --git a/builtin/cat-file.c b/builtin/cat-file.c
index 50cf389..f42782e 100644
--- a/builtin/cat-file.c
+++ b/builtin/cat-file.c
@@ -71,6 +71,7 @@ static int stream_blob(const struct object_id *oid)
static int cat_one_file(int opt, const char *exp_type, const char *obj_name,
int unknown_type)
{
+ int ret;
struct object_id oid;
enum object_type type;
char *buf;
@@ -106,7 +107,8 @@ static int cat_one_file(int opt, const char *exp_type, const char *obj_name,
if (sb.len) {
printf("%s\n", sb.buf);
strbuf_release(&sb);
- return 0;
+ ret = 0;
+ goto cleanup;
}
break;
@@ -115,7 +117,8 @@ static int cat_one_file(int opt, const char *exp_type, const char *obj_name,
if (oid_object_info_extended(the_repository, &oid, &oi, flags) < 0)
die("git cat-file: could not get object info");
printf("%"PRIuMAX"\n", (uintmax_t)size);
- return 0;
+ ret = 0;
+ goto cleanup;
case 'e':
return !has_object_file(&oid);
@@ -123,8 +126,10 @@ static int cat_one_file(int opt, const char *exp_type, const char *obj_name,
case 'w':
if (filter_object(path, obj_context.mode,
- &oid, &buf, &size))
- return -1;
+ &oid, &buf, &size)) {
+ ret = -1;
+ goto cleanup;
+ }
break;
case 'c':
@@ -143,11 +148,14 @@ static int cat_one_file(int opt, const char *exp_type, const char *obj_name,
const char *ls_args[3] = { NULL };
ls_args[0] = "ls-tree";
ls_args[1] = obj_name;
- return cmd_ls_tree(2, ls_args, NULL);
+ ret = cmd_ls_tree(2, ls_args, NULL);
+ goto cleanup;
}
- if (type == OBJ_BLOB)
- return stream_blob(&oid);
+ if (type == OBJ_BLOB) {
+ ret = stream_blob(&oid);
+ goto cleanup;
+ }
buf = read_object_file(&oid, &type, &size);
if (!buf)
die("Cannot read object %s", obj_name);
@@ -172,8 +180,10 @@ static int cat_one_file(int opt, const char *exp_type, const char *obj_name,
} else
oidcpy(&blob_oid, &oid);
- if (oid_object_info(the_repository, &blob_oid, NULL) == OBJ_BLOB)
- return stream_blob(&blob_oid);
+ if (oid_object_info(the_repository, &blob_oid, NULL) == OBJ_BLOB) {
+ ret = stream_blob(&blob_oid);
+ goto cleanup;
+ }
/*
* we attempted to dereference a tag to a blob
* and failed; there may be new dereference
@@ -193,9 +203,11 @@ static int cat_one_file(int opt, const char *exp_type, const char *obj_name,
die("git cat-file %s: bad file", obj_name);
write_or_die(1, buf, size);
+ ret = 0;
+cleanup:
free(buf);
free(obj_context.path);
- return 0;
+ return ret;
}
struct expand_data {
@@ -655,6 +667,7 @@ static void batch_objects_command(struct batch_options *opt,
free_cmds(queued_cmd, &nr);
}
+ free_cmds(queued_cmd, &nr);
free(queued_cmd);
strbuf_release(&input);
}
diff --git a/builtin/check-ref-format.c b/builtin/check-ref-format.c
index bc67d3f..fd0e5f8 100644
--- a/builtin/check-ref-format.c
+++ b/builtin/check-ref-format.c
@@ -57,6 +57,8 @@ int cmd_check_ref_format(int argc, const char **argv, const char *prefix)
int normalize = 0;
int flags = 0;
const char *refname;
+ char *to_free = NULL;
+ int ret = 1;
if (argc == 2 && !strcmp(argv[1], "-h"))
usage(builtin_check_ref_format_usage);
@@ -81,11 +83,14 @@ int cmd_check_ref_format(int argc, const char **argv, const char *prefix)
refname = argv[i];
if (normalize)
- refname = collapse_slashes(refname);
+ refname = to_free = collapse_slashes(refname);
if (check_refname_format(refname, flags))
- return 1;
+ goto cleanup;
if (normalize)
printf("%s\n", refname);
- return 0;
+ ret = 0;
+cleanup:
+ free(to_free);
+ return ret;
}
diff --git a/builtin/checkout.c b/builtin/checkout.c
index 7976814..29c74f8 100644
--- a/builtin/checkout.c
+++ b/builtin/checkout.c
@@ -417,7 +417,7 @@ static int checkout_worktree(const struct checkout_opts *opts,
mem_pool_discard(&ce_mem_pool, should_validate_cache_entries());
remove_marked_cache_entries(&the_index, 1);
remove_scheduled_dirs();
- errs |= finish_delayed_checkout(&state, &nr_checkouts, opts->show_progress);
+ errs |= finish_delayed_checkout(&state, opts->show_progress);
if (opts->count_checkout_paths) {
if (nr_unmerged)
@@ -629,7 +629,7 @@ static void show_local_changes(struct object *head,
diff_setup_done(&rev.diffopt);
add_pending_object(&rev, head, NULL);
run_diff_index(&rev, 0);
- object_array_clear(&rev.pending);
+ release_revisions(&rev);
}
static void describe_detached_head(const char *msg, struct commit *commit)
@@ -710,6 +710,26 @@ static void setup_branch_path(struct branch_info *branch)
branch->path = strbuf_detach(&buf, NULL);
}
+static void init_topts(struct unpack_trees_options *topts, int merge,
+ int show_progress, int overwrite_ignore,
+ struct commit *old_commit)
+{
+ memset(topts, 0, sizeof(*topts));
+ topts->head_idx = -1;
+ topts->src_index = &the_index;
+ topts->dst_index = &the_index;
+
+ setup_unpack_trees_porcelain(topts, "checkout");
+
+ topts->initial_checkout = is_cache_unborn();
+ topts->update = 1;
+ topts->merge = 1;
+ topts->quiet = merge && old_commit;
+ topts->verbose_update = show_progress;
+ topts->fn = twoway_merge;
+ topts->preserve_ignored = !overwrite_ignore;
+}
+
static int merge_working_tree(const struct checkout_opts *opts,
struct branch_info *old_branch_info,
struct branch_info *new_branch_info,
@@ -740,13 +760,6 @@ static int merge_working_tree(const struct checkout_opts *opts,
struct unpack_trees_options topts;
const struct object_id *old_commit_oid;
- memset(&topts, 0, sizeof(topts));
- topts.head_idx = -1;
- topts.src_index = &the_index;
- topts.dst_index = &the_index;
-
- setup_unpack_trees_porcelain(&topts, "checkout");
-
refresh_cache(REFRESH_QUIET);
if (unmerged_cache()) {
@@ -755,17 +768,12 @@ static int merge_working_tree(const struct checkout_opts *opts,
}
/* 2-way merge to the new branch */
- topts.initial_checkout = is_cache_unborn();
- topts.update = 1;
- topts.merge = 1;
- topts.quiet = opts->merge && old_branch_info->commit;
- topts.verbose_update = opts->show_progress;
- topts.fn = twoway_merge;
+ init_topts(&topts, opts->merge, opts->show_progress,
+ opts->overwrite_ignore, old_branch_info->commit);
init_checkout_metadata(&topts.meta, new_branch_info->refname,
new_branch_info->commit ?
&new_branch_info->commit->object.oid :
&new_branch_info->oid, NULL);
- topts.preserve_ignored = !opts->overwrite_ignore;
old_commit_oid = old_branch_info->commit ?
&old_branch_info->commit->object.oid :
@@ -834,7 +842,7 @@ static int merge_working_tree(const struct checkout_opts *opts,
if (ret)
return ret;
o.ancestor = old_branch_info->name;
- if (old_branch_info->name == NULL) {
+ if (!old_branch_info->name) {
strbuf_add_unique_abbrev(&old_commit_shortname,
&old_branch_info->commit->object.oid,
DEFAULT_ABBREV);
@@ -1082,6 +1090,7 @@ static void orphaned_commit_warning(struct commit *old_commit, struct commit *ne
/* Clean up objects used, as they will be reused. */
repo_clear_commit_marks(the_repository, ALL_REV_FLAGS);
+ release_revisions(&revs);
}
static int switch_branches(const struct checkout_opts *opts,
diff --git a/builtin/clone.c b/builtin/clone.c
index 5231656..c4ff464 100644
--- a/builtin/clone.c
+++ b/builtin/clone.c
@@ -494,6 +494,7 @@ static struct ref *wanted_peer_refs(const struct ref *refs,
/* if --branch=tag, pull the requested tag explicitly */
get_fetch_map(remote_head, tag_refspec, &tail, 0);
}
+ free_refs(remote_head);
} else {
int i;
for (i = 0; i < refspec->nr; i++)
@@ -606,7 +607,7 @@ static void update_remote_refs(const struct ref *refs,
}
static void update_head(const struct ref *our, const struct ref *remote,
- const char *msg)
+ const char *unborn, const char *msg)
{
const char *head;
if (our && skip_prefix(our->name, "refs/heads/", &head)) {
@@ -632,6 +633,15 @@ static void update_head(const struct ref *our, const struct ref *remote,
*/
update_ref(msg, "HEAD", &remote->old_oid, NULL, REF_NO_DEREF,
UPDATE_REFS_DIE_ON_ERR);
+ } else if (unborn && skip_prefix(unborn, "refs/heads/", &head)) {
+ /*
+ * Unborn head from remote; same as "our" case above except
+ * that we have no ref to update.
+ */
+ if (create_symref("HEAD", unborn, NULL) < 0)
+ die(_("unable to update HEAD"));
+ if (!option_bare)
+ install_branch_config(0, head, remote_name, unborn);
}
}
@@ -672,7 +682,7 @@ static int checkout(int submodule_progress, int filter_submodules)
head = resolve_refdup("HEAD", RESOLVE_REF_READING, &oid, NULL);
if (!head) {
warning(_("remote HEAD refers to nonexistent ref, "
- "unable to checkout.\n"));
+ "unable to checkout"));
return 0;
}
if (!strcmp(head, "HEAD")) {
@@ -876,6 +886,7 @@ int cmd_clone(int argc, const char **argv, const char *prefix)
const struct ref *refs, *remote_head;
struct ref *remote_head_points_at = NULL;
const struct ref *our_head_points_at;
+ char *unborn_head = NULL;
struct ref *mapped_refs = NULL;
const struct ref *ref;
struct strbuf key = STRBUF_INIT;
@@ -1106,10 +1117,12 @@ int cmd_clone(int argc, const char **argv, const char *prefix)
* apply the remote name provided by --origin only after this second
* call to git_config, to ensure it overrides all config-based values.
*/
- if (option_origin != NULL)
+ if (option_origin) {
+ free(remote_name);
remote_name = xstrdup(option_origin);
+ }
- if (remote_name == NULL)
+ if (!remote_name)
remote_name = xstrdup("origin");
if (!valid_remote_name(remote_name))
@@ -1264,51 +1277,49 @@ int cmd_clone(int argc, const char **argv, const char *prefix)
if (transport_fetch_refs(transport, mapped_refs))
die(_("remote transport reported error"));
}
-
- remote_head = find_ref_by_name(refs, "HEAD");
- remote_head_points_at =
- guess_remote_head(remote_head, mapped_refs, 0);
-
- if (option_branch) {
- our_head_points_at =
- find_remote_branch(mapped_refs, option_branch);
-
- if (!our_head_points_at)
- die(_("Remote branch %s not found in upstream %s"),
- option_branch, remote_name);
- }
- else
- our_head_points_at = remote_head_points_at;
}
- else {
- const char *branch;
- const char *ref;
- char *ref_free = NULL;
- if (option_branch)
- die(_("Remote branch %s not found in upstream %s"),
- option_branch, remote_name);
+ remote_head = find_ref_by_name(refs, "HEAD");
+ remote_head_points_at = guess_remote_head(remote_head, mapped_refs, 0);
- warning(_("You appear to have cloned an empty repository."));
+ if (option_branch) {
+ our_head_points_at = find_remote_branch(mapped_refs, option_branch);
+ if (!our_head_points_at)
+ die(_("Remote branch %s not found in upstream %s"),
+ option_branch, remote_name);
+ } else if (remote_head_points_at) {
+ our_head_points_at = remote_head_points_at;
+ } else if (remote_head) {
our_head_points_at = NULL;
- remote_head_points_at = NULL;
- remote_head = NULL;
- option_no_checkout = 1;
+ } else {
+ const char *branch;
+
+ if (!mapped_refs) {
+ warning(_("You appear to have cloned an empty repository."));
+ option_no_checkout = 1;
+ }
if (transport_ls_refs_options.unborn_head_target &&
skip_prefix(transport_ls_refs_options.unborn_head_target,
"refs/heads/", &branch)) {
- ref = transport_ls_refs_options.unborn_head_target;
- create_symref("HEAD", ref, reflog_msg.buf);
+ unborn_head = xstrdup(transport_ls_refs_options.unborn_head_target);
} else {
branch = git_default_branch_name(0);
- ref_free = xstrfmt("refs/heads/%s", branch);
- ref = ref_free;
+ unborn_head = xstrfmt("refs/heads/%s", branch);
}
- if (!option_bare)
- install_branch_config(0, branch, remote_name, ref);
- free(ref_free);
+ /*
+ * We may have selected a local default branch name "foo",
+ * and even though the remote's HEAD does not point there,
+ * it may still have a "foo" branch. If so, set it up so
+ * that we can follow the usual checkout code later.
+ *
+ * Note that for an empty repo we'll already have set
+ * option_no_checkout above, which would work against us here.
+ * But for an empty repo, find_remote_branch() can never find
+ * a match.
+ */
+ our_head_points_at = find_remote_branch(mapped_refs, branch);
}
write_refspec_config(src_ref_prefix, our_head_points_at,
@@ -1328,7 +1339,7 @@ int cmd_clone(int argc, const char **argv, const char *prefix)
branch_top.buf, reflog_msg.buf, transport,
!is_local);
- update_head(our_head_points_at, remote_head, reflog_msg.buf);
+ update_head(our_head_points_at, remote_head, unborn_head, reflog_msg.buf);
/*
* We want to show progress for recursive submodule clones iff
@@ -1355,6 +1366,7 @@ int cmd_clone(int argc, const char **argv, const char *prefix)
strbuf_release(&key);
free_refs(mapped_refs);
free_refs(remote_head_points_at);
+ free(unborn_head);
free(dir);
free(path);
UNLEAK(repo);
diff --git a/builtin/commit.c b/builtin/commit.c
index 009a1de..fcf9c85 100644
--- a/builtin/commit.c
+++ b/builtin/commit.c
@@ -861,7 +861,7 @@ static int prepare_to_commit(const char *index_file, const char *prefix,
}
s->fp = fopen_for_writing(git_path_commit_editmsg());
- if (s->fp == NULL)
+ if (!s->fp)
die_errno(_("could not open '%s'"), git_path_commit_editmsg());
/* Ignore status.displayCommentPrefix: we do need comments in COMMIT_EDITMSG. */
@@ -1100,7 +1100,6 @@ static const char *find_author_by_nickname(const char *name)
struct rev_info revs;
struct commit *commit;
struct strbuf buf = STRBUF_INIT;
- struct string_list mailmap = STRING_LIST_INIT_NODUP;
const char *av[20];
int ac = 0;
@@ -1111,7 +1110,8 @@ static const char *find_author_by_nickname(const char *name)
av[++ac] = buf.buf;
av[++ac] = NULL;
setup_revisions(ac, av, &revs, NULL);
- revs.mailmap = &mailmap;
+ revs.mailmap = xmalloc(sizeof(struct string_list));
+ string_list_init_nodup(revs.mailmap);
read_mailmap(revs.mailmap);
if (prepare_revision_walk(&revs))
@@ -1122,7 +1122,7 @@ static const char *find_author_by_nickname(const char *name)
ctx.date_mode.type = DATE_NORMAL;
strbuf_release(&buf);
format_commit_message(commit, "%aN <%aE>", &buf, &ctx);
- clear_mailmap(&mailmap);
+ release_revisions(&revs);
return strbuf_detach(&buf, NULL);
}
die(_("--author '%s' is not 'Name <email>' and matches no existing author"), name);
@@ -1687,6 +1687,7 @@ int cmd_commit(int argc, const char **argv, const char *prefix)
struct commit *current_head = NULL;
struct commit_extra_header *extra = NULL;
struct strbuf err = STRBUF_INIT;
+ int ret = 0;
if (argc == 2 && !strcmp(argv[1], "-h"))
usage_with_options(builtin_commit_usage, builtin_commit_options);
@@ -1721,8 +1722,9 @@ int cmd_commit(int argc, const char **argv, const char *prefix)
running hooks, writing the trees, and interacting with the user. */
if (!prepare_to_commit(index_file, prefix,
current_head, &s, &author_ident)) {
+ ret = 1;
rollback_index_files();
- return 1;
+ goto cleanup;
}
/* Determine parents */
@@ -1820,7 +1822,6 @@ int cmd_commit(int argc, const char **argv, const char *prefix)
rollback_index_files();
die(_("failed to write commit object"));
}
- strbuf_release(&author_ident);
free_commit_extra_headers(extra);
if (update_head_with_reflog(current_head, &oid, reflog_msg, &sb,
@@ -1862,7 +1863,9 @@ int cmd_commit(int argc, const char **argv, const char *prefix)
apply_autostash(git_path_merge_autostash(the_repository));
+cleanup:
+ UNLEAK(author_ident);
UNLEAK(err);
UNLEAK(sb);
- return 0;
+ return ret;
}
diff --git a/builtin/describe.c b/builtin/describe.c
index 42159cd..a76f1a1 100644
--- a/builtin/describe.c
+++ b/builtin/describe.c
@@ -517,6 +517,7 @@ static void describe_blob(struct object_id oid, struct strbuf *dst)
traverse_commit_list(&revs, process_commit, process_object, &pcd);
reset_revision_walk();
+ release_revisions(&revs);
}
static void describe(const char *arg, int last_one)
@@ -667,6 +668,7 @@ int cmd_describe(int argc, const char **argv, const char *prefix)
suffix = NULL;
else
suffix = dirty;
+ release_revisions(&revs);
}
describe("HEAD", 1);
} else if (dirty) {
diff --git a/builtin/diff-files.c b/builtin/diff-files.c
index 70103c4..92cf6e1 100644
--- a/builtin/diff-files.c
+++ b/builtin/diff-files.c
@@ -77,8 +77,12 @@ int cmd_diff_files(int argc, const char **argv, const char *prefix)
if (read_cache_preload(&rev.diffopt.pathspec) < 0) {
perror("read_cache_preload");
- return -1;
+ result = -1;
+ goto cleanup;
}
result = run_diff_files(&rev, options);
- return diff_result_code(&rev.diffopt, result);
+ result = diff_result_code(&rev.diffopt, result);
+cleanup:
+ release_revisions(&rev);
+ return result;
}
diff --git a/builtin/diff-index.c b/builtin/diff-index.c
index 5fd23ab..7d158af 100644
--- a/builtin/diff-index.c
+++ b/builtin/diff-index.c
@@ -70,6 +70,7 @@ int cmd_diff_index(int argc, const char **argv, const char *prefix)
return -1;
}
result = run_diff_index(&rev, option);
- UNLEAK(rev);
- return diff_result_code(&rev.diffopt, result);
+ result = diff_result_code(&rev.diffopt, result);
+ release_revisions(&rev);
+ return result;
}
diff --git a/builtin/diff-tree.c b/builtin/diff-tree.c
index 0e0ac1f..116097a 100644
--- a/builtin/diff-tree.c
+++ b/builtin/diff-tree.c
@@ -195,6 +195,7 @@ int cmd_diff_tree(int argc, const char **argv, const char *prefix)
int saved_dcctc = 0;
opt->diffopt.rotate_to_strict = 0;
+ opt->diffopt.no_free = 1;
if (opt->diffopt.detect_rename) {
if (!the_index.cache)
repo_read_index(the_repository);
@@ -217,6 +218,8 @@ int cmd_diff_tree(int argc, const char **argv, const char *prefix)
}
opt->diffopt.degraded_cc_to_c = saved_dcctc;
opt->diffopt.needed_rename_limit = saved_nrl;
+ opt->diffopt.no_free = 0;
+ diff_free(&opt->diffopt);
}
return diff_result_code(&opt->diffopt, 0);
diff --git a/builtin/diff.c b/builtin/diff.c
index bb7fafc..54bb3de 100644
--- a/builtin/diff.c
+++ b/builtin/diff.c
@@ -352,7 +352,7 @@ static void symdiff_prepare(struct rev_info *rev, struct symdiff *sym)
othercount++;
continue;
}
- if (map == NULL)
+ if (!map)
map = bitmap_new();
bitmap_set(map, i);
}
@@ -594,7 +594,7 @@ int cmd_diff(int argc, const char **argv, const char *prefix)
result = diff_result_code(&rev.diffopt, result);
if (1 < rev.diffopt.skip_stat_unmatch)
refresh_index_quietly();
- UNLEAK(rev);
+ release_revisions(&rev);
UNLEAK(ent);
UNLEAK(blob);
return result;
diff --git a/builtin/difftool.c b/builtin/difftool.c
index faa3507..b3c509b 100644
--- a/builtin/difftool.c
+++ b/builtin/difftool.c
@@ -217,7 +217,7 @@ static void changed_files(struct hashmap *result, const char *index_path,
update_index.use_shell = 0;
update_index.clean_on_exit = 1;
update_index.dir = workdir;
- strvec_pushf(&update_index.env_array, "GIT_INDEX_FILE=%s", index_path);
+ strvec_pushf(&update_index.env, "GIT_INDEX_FILE=%s", index_path);
/* Ignore any errors of update-index */
run_command(&update_index);
@@ -230,7 +230,7 @@ static void changed_files(struct hashmap *result, const char *index_path,
diff_files.clean_on_exit = 1;
diff_files.out = -1;
diff_files.dir = workdir;
- strvec_pushf(&diff_files.env_array, "GIT_INDEX_FILE=%s", index_path);
+ strvec_pushf(&diff_files.env, "GIT_INDEX_FILE=%s", index_path);
if (start_command(&diff_files))
die("could not obtain raw diff");
fp = xfdopen(diff_files.out, "r");
@@ -675,7 +675,7 @@ static int run_file_diff(int prompt, const char *prefix,
child->git_cmd = 1;
child->dir = prefix;
- strvec_pushv(&child->env_array, env);
+ strvec_pushv(&child->env, env);
return run_command(child);
}
diff --git a/builtin/fast-export.c b/builtin/fast-export.c
index a7d7269..e1748fb 100644
--- a/builtin/fast-export.c
+++ b/builtin/fast-export.c
@@ -1261,6 +1261,7 @@ int cmd_fast_export(int argc, const char **argv, const char *prefix)
revs.diffopt.format_callback = show_filemodify;
revs.diffopt.format_callback_data = &paths_of_changed_objects;
revs.diffopt.flags.recursive = 1;
+ revs.diffopt.no_free = 1;
while ((commit = get_revision(&revs)))
handle_commit(commit, &revs, &paths_of_changed_objects);
@@ -1275,6 +1276,7 @@ int cmd_fast_export(int argc, const char **argv, const char *prefix)
printf("done\n");
refspec_clear(&refspecs);
+ release_revisions(&revs);
return 0;
}
diff --git a/builtin/fast-import.c b/builtin/fast-import.c
index 28d3193..14113cf 100644
--- a/builtin/fast-import.c
+++ b/builtin/fast-import.c
@@ -3465,7 +3465,7 @@ static void git_pack_config(void)
pack_idx_opts.version = indexversion_value;
if (pack_idx_opts.version > 2)
git_die_config("pack.indexversion",
- "bad pack.indexversion=%"PRIu32, pack_idx_opts.version);
+ "bad pack.indexVersion=%"PRIu32, pack_idx_opts.version);
}
if (!git_config_get_ulong("pack.packsizelimit", &packsizelimit_value))
max_packsize = packsizelimit_value;
diff --git a/builtin/fetch-pack.c b/builtin/fetch-pack.c
index c2d96f4..f045bbb 100644
--- a/builtin/fetch-pack.c
+++ b/builtin/fetch-pack.c
@@ -153,11 +153,15 @@ int cmd_fetch_pack(int argc, const char **argv, const char *prefix)
args.from_promisor = 1;
continue;
}
- if (skip_prefix(arg, ("--" CL_ARG__FILTER "="), &arg)) {
+ if (!strcmp("--refetch", arg)) {
+ args.refetch = 1;
+ continue;
+ }
+ if (skip_prefix(arg, ("--filter="), &arg)) {
parse_list_objects_filter(&args.filter_options, arg);
continue;
}
- if (!strcmp(arg, ("--no-" CL_ARG__FILTER))) {
+ if (!strcmp(arg, ("--no-filter"))) {
list_objects_filter_set_no_filter(&args.filter_options);
continue;
}
diff --git a/builtin/fetch.c b/builtin/fetch.c
index 9b4018f..fc5cecb 100644
--- a/builtin/fetch.c
+++ b/builtin/fetch.c
@@ -59,7 +59,7 @@ static int prune_tags = -1; /* unspecified */
static int all, append, dry_run, force, keep, multiple, update_head_ok;
static int write_fetch_head = 1;
-static int verbosity, deepen_relative, set_upstream;
+static int verbosity, deepen_relative, set_upstream, refetch;
static int progress = -1;
static int enable_auto_gc = 1;
static int tags = TAGS_DEFAULT, unshallow, update_shallow, deepen;
@@ -190,6 +190,9 @@ static struct option builtin_fetch_options[] = {
OPT_SET_INT_F(0, "unshallow", &unshallow,
N_("convert to a complete repository"),
1, PARSE_OPT_NONEG),
+ OPT_SET_INT_F(0, "refetch", &refetch,
+ N_("re-fetch without negotiating common commits"),
+ 1, PARSE_OPT_NONEG),
{ OPTION_STRING, 0, "submodule-prefix", &submodule_prefix, N_("dir"),
N_("prepend this to submodule path output"), PARSE_OPT_HIDDEN },
OPT_CALLBACK_F(0, "recurse-submodules-default",
@@ -878,11 +881,9 @@ static void format_display(struct strbuf *display, char code,
static int update_local_ref(struct ref *ref,
struct ref_transaction *transaction,
const char *remote, const struct ref *remote_ref,
- struct strbuf *display, int summary_width,
- struct worktree **worktrees)
+ struct strbuf *display, int summary_width)
{
struct commit *current = NULL, *updated;
- const struct worktree *wt;
const char *pretty_ref = prettify_refname(ref->name);
int fast_forward = 0;
@@ -897,16 +898,14 @@ static int update_local_ref(struct ref *ref,
}
if (!update_head_ok &&
- (wt = find_shared_symref(worktrees, "HEAD", ref->name)) &&
- !wt->is_bare && !is_null_oid(&ref->old_oid)) {
+ !is_null_oid(&ref->old_oid) &&
+ branch_checked_out(ref->name)) {
/*
* If this is the head, and it's not okay to update
* the head, and the old value of the head isn't empty...
*/
format_display(display, '!', _("[rejected]"),
- wt->is_current ?
- _("can't fetch in current branch") :
- _("checked out in another worktree"),
+ _("can't fetch into checked-out branch"),
remote, pretty_ref, summary_width);
return 1;
}
@@ -1107,10 +1106,10 @@ N_("it took %.2f seconds to check forced updates; you can use\n"
static int store_updated_refs(const char *raw_url, const char *remote_name,
int connectivity_checked,
struct ref_transaction *transaction, struct ref *ref_map,
- struct fetch_head *fetch_head, struct worktree **worktrees)
+ struct fetch_head *fetch_head)
{
int url_len, i, rc = 0;
- struct strbuf note = STRBUF_INIT, err = STRBUF_INIT;
+ struct strbuf note = STRBUF_INIT;
const char *what, *kind;
struct ref *rm;
char *url;
@@ -1237,8 +1236,7 @@ static int store_updated_refs(const char *raw_url, const char *remote_name,
strbuf_reset(&note);
if (ref) {
rc |= update_local_ref(ref, transaction, what,
- rm, &note, summary_width,
- worktrees);
+ rm, &note, summary_width);
free(ref);
} else if (write_fetch_head || dry_run) {
/*
@@ -1278,7 +1276,6 @@ static int store_updated_refs(const char *raw_url, const char *remote_name,
abort:
strbuf_release(&note);
- strbuf_release(&err);
free(url);
return rc;
}
@@ -1305,6 +1302,14 @@ static int check_exist_and_connected(struct ref *ref_map)
return -1;
/*
+ * Similarly, if we need to refetch, we always want to perform a full
+ * fetch ignoring existing objects.
+ */
+ if (refetch)
+ return -1;
+
+
+ /*
* check_connected() allows objects to merely be promised, but
* we need all direct targets to exist.
*/
@@ -1321,8 +1326,7 @@ static int check_exist_and_connected(struct ref *ref_map)
static int fetch_and_consume_refs(struct transport *transport,
struct ref_transaction *transaction,
struct ref *ref_map,
- struct fetch_head *fetch_head,
- struct worktree **worktrees)
+ struct fetch_head *fetch_head)
{
int connectivity_checked = 1;
int ret;
@@ -1345,7 +1349,7 @@ static int fetch_and_consume_refs(struct transport *transport,
trace2_region_enter("fetch", "consume_refs", the_repository);
ret = store_updated_refs(transport->url, transport->remote->name,
connectivity_checked, transaction, ref_map,
- fetch_head, worktrees);
+ fetch_head);
trace2_region_leave("fetch", "consume_refs", the_repository);
out:
@@ -1423,18 +1427,16 @@ cleanup:
return result;
}
-static void check_not_current_branch(struct ref *ref_map,
- struct worktree **worktrees)
+static void check_not_current_branch(struct ref *ref_map)
{
- const struct worktree *wt;
+ const char *path;
for (; ref_map; ref_map = ref_map->next)
if (ref_map->peer_ref &&
- (wt = find_shared_symref(worktrees, "HEAD",
- ref_map->peer_ref->name)) &&
- !wt->is_bare)
+ starts_with(ref_map->peer_ref->name, "refs/heads/") &&
+ (path = branch_checked_out(ref_map->peer_ref->name)))
die(_("refusing to fetch into branch '%s' "
"checked out at '%s'"),
- ref_map->peer_ref->name, wt->path);
+ ref_map->peer_ref->name, path);
}
static int truncate_fetch_head(void)
@@ -1517,6 +1519,8 @@ static struct transport *prepare_transport(struct remote *remote, int deepen)
set_option(transport, TRANS_OPT_DEEPEN_RELATIVE, "yes");
if (update_shallow)
set_option(transport, TRANS_OPT_UPDATE_SHALLOW, "yes");
+ if (refetch)
+ set_option(transport, TRANS_OPT_REFETCH, "yes");
if (filter_options.choice) {
const char *spec =
expand_list_objects_filter_spec(&filter_options);
@@ -1535,8 +1539,7 @@ static struct transport *prepare_transport(struct remote *remote, int deepen)
static int backfill_tags(struct transport *transport,
struct ref_transaction *transaction,
struct ref *ref_map,
- struct fetch_head *fetch_head,
- struct worktree **worktrees)
+ struct fetch_head *fetch_head)
{
int retcode, cannot_reuse;
@@ -1557,7 +1560,7 @@ static int backfill_tags(struct transport *transport,
transport_set_option(transport, TRANS_OPT_FOLLOWTAGS, NULL);
transport_set_option(transport, TRANS_OPT_DEPTH, "0");
transport_set_option(transport, TRANS_OPT_DEEPEN_RELATIVE, NULL);
- retcode = fetch_and_consume_refs(transport, transaction, ref_map, fetch_head, worktrees);
+ retcode = fetch_and_consume_refs(transport, transaction, ref_map, fetch_head);
if (gsecondary) {
transport_disconnect(gsecondary);
@@ -1578,7 +1581,6 @@ static int do_fetch(struct transport *transport,
struct transport_ls_refs_options transport_ls_refs_options =
TRANSPORT_LS_REFS_OPTIONS_INIT;
int must_list_refs = 1;
- struct worktree **worktrees = get_worktrees();
struct fetch_head fetch_head = { 0 };
struct strbuf err = STRBUF_INIT;
@@ -1636,7 +1638,7 @@ static int do_fetch(struct transport *transport,
ref_map = get_ref_map(transport->remote, remote_refs, rs,
tags, &autotags);
if (!update_head_ok)
- check_not_current_branch(ref_map, worktrees);
+ check_not_current_branch(ref_map);
retcode = open_fetch_head(&fetch_head);
if (retcode)
@@ -1669,7 +1671,7 @@ static int do_fetch(struct transport *transport,
retcode = 1;
}
- if (fetch_and_consume_refs(transport, transaction, ref_map, &fetch_head, worktrees)) {
+ if (fetch_and_consume_refs(transport, transaction, ref_map, &fetch_head)) {
retcode = 1;
goto cleanup;
}
@@ -1692,7 +1694,7 @@ static int do_fetch(struct transport *transport,
* the transaction and don't commit anything.
*/
if (backfill_tags(transport, transaction, tags_ref_map,
- &fetch_head, worktrees))
+ &fetch_head))
retcode = 1;
}
@@ -1777,7 +1779,6 @@ cleanup:
close_fetch_head(&fetch_head);
strbuf_release(&err);
free_refs(ref_map);
- free_worktrees(worktrees);
return retcode;
}
@@ -2174,6 +2175,10 @@ int cmd_fetch(int argc, const char **argv, const char *prefix)
else if (argc > 1)
die(_("fetch --all does not make sense with refspecs"));
(void) for_each_remote(get_one_remote_for_fetch, &list);
+
+ /* do not do fetch_multiple() of one */
+ if (list.nr == 1)
+ remote = remote_get(list.items[0].string);
} else if (argc == 0) {
/* No arguments -- use default remote */
remote = remote_get(NULL);
@@ -2248,7 +2253,17 @@ int cmd_fetch(int argc, const char **argv, const char *prefix)
result = fetch_multiple(&list, max_children);
}
- if (!result && (recurse_submodules != RECURSE_SUBMODULES_OFF)) {
+
+ /*
+ * This is only needed after fetch_one(), which does not fetch
+ * submodules by itself.
+ *
+ * When we fetch from multiple remotes, fetch_multiple() has
+ * already updated submodules to grab commits necessary for
+ * the fetched history from each remote, so there is no need
+ * to fetch submodules from here.
+ */
+ if (!result && remote && (recurse_submodules != RECURSE_SUBMODULES_OFF)) {
struct strvec options = STRVEC_INIT;
int max_children = max_jobs;
@@ -2293,8 +2308,25 @@ int cmd_fetch(int argc, const char **argv, const char *prefix)
NULL);
}
- if (enable_auto_gc)
+ if (enable_auto_gc) {
+ if (refetch) {
+ /*
+ * Hint auto-maintenance strongly to encourage repacking,
+ * but respect config settings disabling it.
+ */
+ int opt_val;
+ if (git_config_get_int("gc.autopacklimit", &opt_val))
+ opt_val = -1;
+ if (opt_val != 0)
+ git_config_push_parameter("gc.autoPackLimit=1");
+
+ if (git_config_get_int("maintenance.incremental-repack.auto", &opt_val))
+ opt_val = -1;
+ if (opt_val != 0)
+ git_config_push_parameter("maintenance.incremental-repack.auto=-1");
+ }
run_auto_maintenance(verbosity < 0);
+ }
cleanup:
string_list_clear(&list, 0);
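The new --refetch path short-circuits check_exist_and_connected() so a full pack is always transferred (typically invoked as git fetch --refetch <remote>), and the hunk above then nudges auto-maintenance toward repacking. A condensed sketch of that config-override idiom, using only calls visible in the hunk:

	int opt_val;

	/* an explicit gc.autoPackLimit=0 stays respected; otherwise push a one-shot override */
	if (git_config_get_int("gc.autopacklimit", &opt_val))
		opt_val = -1;				/* not set by the user */
	if (opt_val != 0)
		git_config_push_parameter("gc.autoPackLimit=1");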
diff --git a/builtin/fsck.c b/builtin/fsck.c
index 9e54892..6c73092 100644
--- a/builtin/fsck.c
+++ b/builtin/fsck.c
@@ -19,6 +19,7 @@
#include "decorate.h"
#include "packfile.h"
#include "object-store.h"
+#include "resolve-undo.h"
#include "run-command.h"
#include "worktree.h"
@@ -757,6 +758,43 @@ static int fsck_cache_tree(struct cache_tree *it)
return err;
}
+static int fsck_resolve_undo(struct index_state *istate)
+{
+ struct string_list_item *item;
+ struct string_list *resolve_undo = istate->resolve_undo;
+
+ if (!resolve_undo)
+ return 0;
+
+ for_each_string_list_item(item, resolve_undo) {
+ const char *path = item->string;
+ struct resolve_undo_info *ru = item->util;
+ int i;
+
+ if (!ru)
+ continue;
+ for (i = 0; i < 3; i++) {
+ struct object *obj;
+
+ if (!ru->mode[i] || !S_ISREG(ru->mode[i]))
+ continue;
+
+ obj = parse_object(the_repository, &ru->oid[i]);
+ if (!obj) {
+ error(_("%s: invalid sha1 pointer in resolve-undo"),
+ oid_to_hex(&ru->oid[i]));
+ errors_found |= ERROR_REFS;
+ continue;
+ }
+ obj->flags |= USED;
+ fsck_put_object_name(&fsck_walk_options, &ru->oid[i],
+ ":(%d):%s", i, path);
+ mark_object_reachable(obj);
+ }
+ }
+ return 0;
+}
+
static void mark_object_for_connectivity(const struct object_id *oid)
{
struct object *obj = lookup_unknown_object(the_repository, oid);
@@ -938,6 +976,7 @@ int cmd_fsck(int argc, const char **argv, const char *prefix)
}
if (active_cache_tree)
fsck_cache_tree(active_cache_tree);
+ fsck_resolve_undo(&the_index);
}
check_connectivity();
diff --git a/builtin/fsmonitor--daemon.c b/builtin/fsmonitor--daemon.c
new file mode 100644
index 0000000..2c109cf
--- /dev/null
+++ b/builtin/fsmonitor--daemon.c
@@ -0,0 +1,1577 @@
+#include "builtin.h"
+#include "config.h"
+#include "parse-options.h"
+#include "fsmonitor.h"
+#include "fsmonitor-ipc.h"
+#include "compat/fsmonitor/fsm-health.h"
+#include "compat/fsmonitor/fsm-listen.h"
+#include "fsmonitor--daemon.h"
+#include "simple-ipc.h"
+#include "khash.h"
+#include "pkt-line.h"
+
+static const char * const builtin_fsmonitor__daemon_usage[] = {
+ N_("git fsmonitor--daemon start [<options>]"),
+ N_("git fsmonitor--daemon run [<options>]"),
+ N_("git fsmonitor--daemon stop"),
+ N_("git fsmonitor--daemon status"),
+ NULL
+};
+
+#ifdef HAVE_FSMONITOR_DAEMON_BACKEND
+/*
+ * Global state loaded from config.
+ */
+#define FSMONITOR__IPC_THREADS "fsmonitor.ipcthreads"
+static int fsmonitor__ipc_threads = 8;
+
+#define FSMONITOR__START_TIMEOUT "fsmonitor.starttimeout"
+static int fsmonitor__start_timeout_sec = 60;
+
+#define FSMONITOR__ANNOUNCE_STARTUP "fsmonitor.announcestartup"
+static int fsmonitor__announce_startup = 0;
+
+static int fsmonitor_config(const char *var, const char *value, void *cb)
+{
+ if (!strcmp(var, FSMONITOR__IPC_THREADS)) {
+ int i = git_config_int(var, value);
+ if (i < 1)
+ return error(_("value of '%s' out of range: %d"),
+ FSMONITOR__IPC_THREADS, i);
+ fsmonitor__ipc_threads = i;
+ return 0;
+ }
+
+ if (!strcmp(var, FSMONITOR__START_TIMEOUT)) {
+ int i = git_config_int(var, value);
+ if (i < 0)
+ return error(_("value of '%s' out of range: %d"),
+ FSMONITOR__START_TIMEOUT, i);
+ fsmonitor__start_timeout_sec = i;
+ return 0;
+ }
+
+ if (!strcmp(var, FSMONITOR__ANNOUNCE_STARTUP)) {
+ int is_bool;
+ int i = git_config_bool_or_int(var, value, &is_bool);
+ if (i < 0)
+ return error(_("value of '%s' not bool or int: %d"),
+ var, i);
+ fsmonitor__announce_startup = i;
+ return 0;
+ }
+
+ return git_default_config(var, value, cb);
+}
+
+/*
+ * Acting as a CLIENT.
+ *
+ * Send a "quit" command to the `git-fsmonitor--daemon` (if running)
+ * and wait for it to shutdown.
+ */
+static int do_as_client__send_stop(void)
+{
+ struct strbuf answer = STRBUF_INIT;
+ int ret;
+
+ ret = fsmonitor_ipc__send_command("quit", &answer);
+
+ /* The quit command does not return any response data. */
+ strbuf_release(&answer);
+
+ if (ret)
+ return ret;
+
+ trace2_region_enter("fsm_client", "polling-for-daemon-exit", NULL);
+ while (fsmonitor_ipc__get_state() == IPC_STATE__LISTENING)
+ sleep_millisec(50);
+ trace2_region_leave("fsm_client", "polling-for-daemon-exit", NULL);
+
+ return 0;
+}
+
+static int do_as_client__status(void)
+{
+ enum ipc_active_state state = fsmonitor_ipc__get_state();
+
+ switch (state) {
+ case IPC_STATE__LISTENING:
+ printf(_("fsmonitor-daemon is watching '%s'\n"),
+ the_repository->worktree);
+ return 0;
+
+ default:
+ printf(_("fsmonitor-daemon is not watching '%s'\n"),
+ the_repository->worktree);
+ return 1;
+ }
+}
+
+enum fsmonitor_cookie_item_result {
+ FCIR_ERROR = -1, /* could not create cookie file ? */
+ FCIR_INIT,
+ FCIR_SEEN,
+ FCIR_ABORT,
+};
+
+struct fsmonitor_cookie_item {
+ struct hashmap_entry entry;
+ char *name;
+ enum fsmonitor_cookie_item_result result;
+};
+
+static int cookies_cmp(const void *data, const struct hashmap_entry *he1,
+ const struct hashmap_entry *he2, const void *keydata)
+{
+ const struct fsmonitor_cookie_item *a =
+ container_of(he1, const struct fsmonitor_cookie_item, entry);
+ const struct fsmonitor_cookie_item *b =
+ container_of(he2, const struct fsmonitor_cookie_item, entry);
+
+ return strcmp(a->name, keydata ? keydata : b->name);
+}
+
+static enum fsmonitor_cookie_item_result with_lock__wait_for_cookie(
+ struct fsmonitor_daemon_state *state)
+{
+ /* assert current thread holding state->main_lock */
+
+ int fd;
+ struct fsmonitor_cookie_item *cookie;
+ struct strbuf cookie_pathname = STRBUF_INIT;
+ struct strbuf cookie_filename = STRBUF_INIT;
+ enum fsmonitor_cookie_item_result result;
+ int my_cookie_seq;
+
+ CALLOC_ARRAY(cookie, 1);
+
+ my_cookie_seq = state->cookie_seq++;
+
+ strbuf_addf(&cookie_filename, "%i-%i", getpid(), my_cookie_seq);
+
+ strbuf_addbuf(&cookie_pathname, &state->path_cookie_prefix);
+ strbuf_addbuf(&cookie_pathname, &cookie_filename);
+
+ cookie->name = strbuf_detach(&cookie_filename, NULL);
+ cookie->result = FCIR_INIT;
+ hashmap_entry_init(&cookie->entry, strhash(cookie->name));
+
+ hashmap_add(&state->cookies, &cookie->entry);
+
+ trace_printf_key(&trace_fsmonitor, "cookie-wait: '%s' '%s'",
+ cookie->name, cookie_pathname.buf);
+
+ /*
+ * Create the cookie file on disk and then wait for a notification
+ * that the listener thread has seen it.
+ */
+ fd = open(cookie_pathname.buf, O_WRONLY | O_CREAT | O_EXCL, 0600);
+ if (fd < 0) {
+ error_errno(_("could not create fsmonitor cookie '%s'"),
+ cookie->name);
+
+ cookie->result = FCIR_ERROR;
+ goto done;
+ }
+
+ /*
+ * Technically, close() and unlink() can fail, but we don't
+ * care here. We only created the file to trigger a watch
+ * event from the FS to know that when we're up to date.
+ */
+ close(fd);
+ unlink(cookie_pathname.buf);
+
+ /*
+ * Technically, this is an infinite wait (well, unless another
+ * thread sends us an abort). I'd like to change this to
+ * use `pthread_cond_timedwait()` and return an error/timeout
+ * and let the caller do the trivial response thing, but we
+ * don't have that routine in our thread-utils.
+ *
+ * After extensive beta testing I'm not really worried about
+ * this. Also note that the above open() and unlink() calls
+ * will cause at least two FS events on that path, so the odds
+ * of getting stuck are pretty slim.
+ */
+ while (cookie->result == FCIR_INIT)
+ pthread_cond_wait(&state->cookies_cond,
+ &state->main_lock);
+
+done:
+ hashmap_remove(&state->cookies, &cookie->entry, NULL);
+
+ result = cookie->result;
+
+ free(cookie->name);
+ free(cookie);
+ strbuf_release(&cookie_pathname);
+
+ return result;
+}
+
+/*
+ * Mark these cookies as _SEEN and wake up the corresponding client threads.
+ */
+static void with_lock__mark_cookies_seen(struct fsmonitor_daemon_state *state,
+ const struct string_list *cookie_names)
+{
+ /* assert current thread holding state->main_lock */
+
+ int k;
+ int nr_seen = 0;
+
+ for (k = 0; k < cookie_names->nr; k++) {
+ struct fsmonitor_cookie_item key;
+ struct fsmonitor_cookie_item *cookie;
+
+ key.name = cookie_names->items[k].string;
+ hashmap_entry_init(&key.entry, strhash(key.name));
+
+ cookie = hashmap_get_entry(&state->cookies, &key, entry, NULL);
+ if (cookie) {
+ trace_printf_key(&trace_fsmonitor, "cookie-seen: '%s'",
+ cookie->name);
+ cookie->result = FCIR_SEEN;
+ nr_seen++;
+ }
+ }
+
+ if (nr_seen)
+ pthread_cond_broadcast(&state->cookies_cond);
+}
+
+/*
+ * Set _ABORT on all pending cookies and wake up all client threads.
+ */
+static void with_lock__abort_all_cookies(struct fsmonitor_daemon_state *state)
+{
+ /* assert current thread holding state->main_lock */
+
+ struct hashmap_iter iter;
+ struct fsmonitor_cookie_item *cookie;
+ int nr_aborted = 0;
+
+ hashmap_for_each_entry(&state->cookies, &iter, cookie, entry) {
+ trace_printf_key(&trace_fsmonitor, "cookie-abort: '%s'",
+ cookie->name);
+ cookie->result = FCIR_ABORT;
+ nr_aborted++;
+ }
+
+ if (nr_aborted)
+ pthread_cond_broadcast(&state->cookies_cond);
+}
+
+/*
+ * Requests to and from a FSMonitor Protocol V2 provider use an opaque
+ * "token" as a virtual timestamp. Clients can request a summary of all
+ * created/deleted/modified files relative to a token. In the response,
+ * clients receive a new token for the next (relative) request.
+ *
+ *
+ * Token Format
+ * ============
+ *
+ * The contents of the token are private and provider-specific.
+ *
+ * For the built-in fsmonitor--daemon, we define a token as follows:
+ *
+ * "builtin" ":" <token_id> ":" <sequence_nr>
+ *
+ * The "builtin" prefix is used as a namespace to avoid conflicts
+ * with other providers (such as Watchman).
+ *
+ * The <token_id> is an arbitrary OPAQUE string, such as a GUID,
+ * UUID, or {timestamp,pid}. It is used to group all filesystem
+ * events that happened while the daemon was monitoring (and in-sync
+ * with the filesystem).
+ *
+ * Unlike FSMonitor Protocol V1, it is not defined as a timestamp
+ * and does not define less-than/greater-than relationships.
+ * (There are too many race conditions to rely on file system
+ * event timestamps.)
+ *
+ * The <sequence_nr> is a simple integer incremented whenever the
+ * daemon needs to make its state public. For example, if 1000 file
+ * system events come in, but no clients have requested the data,
+ * the daemon can continue to accumulate file changes in the same
+ * bin and does not need to advance the sequence number. However,
+ * as soon as a client does arrive, the daemon needs to start a new
+ * bin and increment the sequence number.
+ *
+ * The sequence number serves as the boundary between 2 sets
+ * of bins -- the older ones that the client has already seen
+ * and the newer ones that it hasn't.
+ *
+ * When a new <token_id> is created, the <sequence_nr> is reset to
+ * zero.
+ *
+ *
+ * About Token Ids
+ * ===============
+ *
+ * A new token_id is created:
+ *
+ * [1] each time the daemon is started.
+ *
+ * [2] any time that the daemon must re-sync with the filesystem
+ * (such as when the kernel drops or we miss events on a very
+ * active volume).
+ *
+ * [3] in response to a client "flush" command (for dropped event
+ * testing).
+ *
+ * When a new token_id is created, the daemon is free to discard all
+ * cached filesystem events associated with any previous token_ids.
+ * Events associated with a non-current token_id will never be sent
+ * to a client. A token_id change implicitly means that the daemon
+ * has gaps in its event history.
+ *
+ * Therefore, clients that present a token with a stale (non-current)
+ * token_id will always be given a trivial response.
+ */
+struct fsmonitor_token_data {
+ struct strbuf token_id;
+ struct fsmonitor_batch *batch_head;
+ struct fsmonitor_batch *batch_tail;
+ uint64_t client_ref_count;
+};
+
+struct fsmonitor_batch {
+ struct fsmonitor_batch *next;
+ uint64_t batch_seq_nr;
+ const char **interned_paths;
+ size_t nr, alloc;
+ time_t pinned_time;
+};
+
+static struct fsmonitor_token_data *fsmonitor_new_token_data(void)
+{
+ static int test_env_value = -1;
+ static uint64_t flush_count = 0;
+ struct fsmonitor_token_data *token;
+ struct fsmonitor_batch *batch;
+
+ CALLOC_ARRAY(token, 1);
+ batch = fsmonitor_batch__new();
+
+ strbuf_init(&token->token_id, 0);
+ token->batch_head = batch;
+ token->batch_tail = batch;
+ token->client_ref_count = 0;
+
+ if (test_env_value < 0)
+ test_env_value = git_env_bool("GIT_TEST_FSMONITOR_TOKEN", 0);
+
+ if (!test_env_value) {
+ struct timeval tv;
+ struct tm tm;
+ time_t secs;
+
+ gettimeofday(&tv, NULL);
+ secs = tv.tv_sec;
+ gmtime_r(&secs, &tm);
+
+ strbuf_addf(&token->token_id,
+ "%"PRIu64".%d.%4d%02d%02dT%02d%02d%02d.%06ldZ",
+ flush_count++,
+ getpid(),
+ tm.tm_year + 1900, tm.tm_mon + 1, tm.tm_mday,
+ tm.tm_hour, tm.tm_min, tm.tm_sec,
+ (long)tv.tv_usec);
+ } else {
+ strbuf_addf(&token->token_id, "test_%08x", test_env_value++);
+ }
+
+ /*
+ * We created a new <token_id> and are starting a new series
+ * of tokens with a zero <seq_nr>.
+ *
+ * Since clients cannot guess our new (non test) <token_id>
+ * they will always receive a trivial response (because of the
+ * mismatch on the <token_id>). The trivial response will
+ * tell them our new <token_id> so that subsequent requests
+ * will be relative to our new series. (And when sending that
+ * response, we pin the current head of the batch list.)
+ *
+ * Even if the client correctly guesses the <token_id>, their
+ * request of "builtin:<token_id>:0" asks for all changes MORE
+ * RECENT than batch/bin 0.
+ *
+ * This implies that it is a waste to accumulate paths in the
+ * initial batch/bin (because they will never be transmitted).
+ *
+ * So the daemon could be running for days and watching the
+ * file system, but doesn't need to actually accumulate any
+ * paths UNTIL we need to set a reference point for a later
+ * relative request.
+ *
+ * However, it is very useful for testing to always have a
+ * reference point set. Pin batch 0 to force early file system
+ * events to accumulate.
+ */
+ if (test_env_value)
+ batch->pinned_time = time(NULL);
+
+ return token;
+}
+
+struct fsmonitor_batch *fsmonitor_batch__new(void)
+{
+ struct fsmonitor_batch *batch;
+
+ CALLOC_ARRAY(batch, 1);
+
+ return batch;
+}
+
+void fsmonitor_batch__free_list(struct fsmonitor_batch *batch)
+{
+ while (batch) {
+ struct fsmonitor_batch *next = batch->next;
+
+ /*
+ * The actual strings within the array of this batch
+ * are interned, so we don't own them. We only own
+ * the array.
+ */
+ free(batch->interned_paths);
+ free(batch);
+
+ batch = next;
+ }
+}
+
+void fsmonitor_batch__add_path(struct fsmonitor_batch *batch,
+ const char *path)
+{
+ const char *interned_path = strintern(path);
+
+ trace_printf_key(&trace_fsmonitor, "event: %s", interned_path);
+
+ ALLOC_GROW(batch->interned_paths, batch->nr + 1, batch->alloc);
+ batch->interned_paths[batch->nr++] = interned_path;
+}
+
+static void fsmonitor_batch__combine(struct fsmonitor_batch *batch_dest,
+ const struct fsmonitor_batch *batch_src)
+{
+ size_t k;
+
+ ALLOC_GROW(batch_dest->interned_paths,
+ batch_dest->nr + batch_src->nr + 1,
+ batch_dest->alloc);
+
+ for (k = 0; k < batch_src->nr; k++)
+ batch_dest->interned_paths[batch_dest->nr++] =
+ batch_src->interned_paths[k];
+}
+
+/*
+ * To keep the batch list from growing unbounded in response to filesystem
+ * activity, we try to truncate old batches from the end of the list as
+ * they become irrelevant.
+ *
+ * We assume that the .git/index will be updated with the most recent token
+ * any time the index is updated. And future commands will only ask for
+ * recent changes *since* that new token. So as tokens advance into the
+ * future, older batch items will never be requested/needed. So we can
+ * truncate them without loss of functionality.
+ *
+ * However, multiple commands may be talking to the daemon concurrently,
+ * or one of them may be running slowly, so a little "token skew" is
+ * possible.
+ * Therefore, we want this to be a little bit lazy and have a generous
+ * delay.
+ *
+ * The current reader thread walked backwards in time from `token->batch_head`
+ * back to `batch_marker` somewhere in the middle of the batch list.
+ *
+ * Let's walk backwards in time from that marker an arbitrary delay
+ * and truncate the list there. Note that these timestamps are completely
+ * artificial (based on when we pinned the batch item) and not on any
+ * filesystem activity.
+ *
+ * Return the obsolete portion of the list after we have removed it from
+ * the official list so that the caller can free it after leaving the lock.
+ */
+#define MY_TIME_DELAY_SECONDS (5 * 60) /* seconds */
+
+static struct fsmonitor_batch *with_lock__truncate_old_batches(
+ struct fsmonitor_daemon_state *state,
+ const struct fsmonitor_batch *batch_marker)
+{
+ /* assert current thread holding state->main_lock */
+
+ const struct fsmonitor_batch *batch;
+ struct fsmonitor_batch *remainder;
+
+ if (!batch_marker)
+ return NULL;
+
+ trace_printf_key(&trace_fsmonitor, "Truncate: mark (%"PRIu64",%"PRIu64")",
+ batch_marker->batch_seq_nr,
+ (uint64_t)batch_marker->pinned_time);
+
+ for (batch = batch_marker; batch; batch = batch->next) {
+ time_t t;
+
+ if (!batch->pinned_time) /* an overflow batch */
+ continue;
+
+ t = batch->pinned_time + MY_TIME_DELAY_SECONDS;
+ if (t > batch_marker->pinned_time) /* too close to marker */
+ continue;
+
+ goto truncate_past_here;
+ }
+
+ return NULL;
+
+truncate_past_here:
+ state->current_token_data->batch_tail = (struct fsmonitor_batch *)batch;
+
+ remainder = ((struct fsmonitor_batch *)batch)->next;
+ ((struct fsmonitor_batch *)batch)->next = NULL;
+
+ return remainder;
+}
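A concrete reading of the loop above (illustrative numbers only): with
MY_TIME_DELAY_SECONDS at 300, if the marker batch was pinned at t=1000, every
batch pinned after t=700 is "too close" and kept (unpinned overflow batches are
skipped outright); the first batch pinned at or before t=700 becomes the new
tail, and everything older than it is handed back to the caller to be freed
outside the lock.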
+
+static void fsmonitor_free_token_data(struct fsmonitor_token_data *token)
+{
+ if (!token)
+ return;
+
+ assert(token->client_ref_count == 0);
+
+ strbuf_release(&token->token_id);
+
+ fsmonitor_batch__free_list(token->batch_head);
+
+ free(token);
+}
+
+/*
+ * Flush all of our cached data about the filesystem. Call this if we
+ * lose sync with the filesystem and miss some notification events.
+ *
+ * [1] If we are missing events, then we no longer have a complete
+ * history of the directory (relative to our current start token).
+ * We should create a new token and start fresh (as if we just
+ * booted up).
+ *
+ * [2] Some of those lost events may have been for cookie files. We
+ *     should assume the worst and abort them rather than letting them
+ *     starve.
+ *
+ * If there are no concurrent threads reading the current token data
+ * series, we can free it now. Otherwise, let the last reader free
+ * it.
+ *
+ * Either way, the old token data series is no longer associated with
+ * our state data.
+ */
+static void with_lock__do_force_resync(struct fsmonitor_daemon_state *state)
+{
+ /* assert current thread holding state->main_lock */
+
+ struct fsmonitor_token_data *free_me = NULL;
+ struct fsmonitor_token_data *new_one = NULL;
+
+ new_one = fsmonitor_new_token_data();
+
+ if (state->current_token_data->client_ref_count == 0)
+ free_me = state->current_token_data;
+ state->current_token_data = new_one;
+
+ fsmonitor_free_token_data(free_me);
+
+ with_lock__abort_all_cookies(state);
+}
+
+void fsmonitor_force_resync(struct fsmonitor_daemon_state *state)
+{
+ pthread_mutex_lock(&state->main_lock);
+ with_lock__do_force_resync(state);
+ pthread_mutex_unlock(&state->main_lock);
+}
+
+/*
+ * Format an opaque token string to send to the client.
+ */
+static void with_lock__format_response_token(
+ struct strbuf *response_token,
+ const struct strbuf *response_token_id,
+ const struct fsmonitor_batch *batch)
+{
+ /* assert current thread holding state->main_lock */
+
+ strbuf_reset(response_token);
+ strbuf_addf(response_token, "builtin:%s:%"PRIu64,
+ response_token_id->buf, batch->batch_seq_nr);
+}
+
+/*
+ * Parse an opaque token from the client.
+ * Returns -1 on error.
+ */
+static int fsmonitor_parse_client_token(const char *buf_token,
+ struct strbuf *requested_token_id,
+ uint64_t *seq_nr)
+{
+ const char *p;
+ char *p_end;
+
+ strbuf_reset(requested_token_id);
+ *seq_nr = 0;
+
+ if (!skip_prefix(buf_token, "builtin:", &p))
+ return -1;
+
+ while (*p && *p != ':')
+ strbuf_addch(requested_token_id, *p++);
+ if (!*p++)
+ return -1;
+
+ *seq_nr = (uint64_t)strtoumax(p, &p_end, 10);
+ if (*p_end)
+ return -1;
+
+ return 0;
+}
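Not part of the change, just a usage sketch of the parser above; it assumes
git's strbuf API and <assert.h> from the surrounding file:

	struct strbuf id = STRBUF_INIT;
	uint64_t seq;

	/* "builtin:abc:42" -> id = "abc", seq = 42, returns 0 */
	if (!fsmonitor_parse_client_token("builtin:abc:42", &id, &seq))
		assert(seq == 42);

	/* no ":<seq_nr>" suffix -> returns -1; the caller falls back to
	 * a trivial response */
	assert(fsmonitor_parse_client_token("builtin:abc", &id, &seq) == -1);

	strbuf_release(&id);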
+
+KHASH_INIT(str, const char *, int, 0, kh_str_hash_func, kh_str_hash_equal)
+
+static int do_handle_client(struct fsmonitor_daemon_state *state,
+ const char *command,
+ ipc_server_reply_cb *reply,
+ struct ipc_server_reply_data *reply_data)
+{
+ struct fsmonitor_token_data *token_data = NULL;
+ struct strbuf response_token = STRBUF_INIT;
+ struct strbuf requested_token_id = STRBUF_INIT;
+ struct strbuf payload = STRBUF_INIT;
+ uint64_t requested_oldest_seq_nr = 0;
+ uint64_t total_response_len = 0;
+ const char *p;
+ const struct fsmonitor_batch *batch_head;
+ const struct fsmonitor_batch *batch;
+ struct fsmonitor_batch *remainder = NULL;
+ intmax_t count = 0, duplicates = 0;
+ kh_str_t *shown;
+ int hash_ret;
+ int do_trivial = 0;
+ int do_flush = 0;
+ int do_cookie = 0;
+ enum fsmonitor_cookie_item_result cookie_result;
+
+ /*
+ * We expect `command` to be of the form:
+ *
+ * <command> := quit NUL
+ * | flush NUL
+ * | <V1-time-since-epoch-ns> NUL
+ * | <V2-opaque-fsmonitor-token> NUL
+ */
+
+ if (!strcmp(command, "quit")) {
+ /*
+ * A client has requested over the socket/pipe that the
+		 * daemon shut down.
+		 *
+		 * Tell the IPC thread pool to shut down (which completes
+ * the await in the main thread (which can stop the
+ * fsmonitor listener thread)).
+ *
+ * There is no reply to the client.
+ */
+ return SIMPLE_IPC_QUIT;
+
+ } else if (!strcmp(command, "flush")) {
+ /*
+ * Flush all of our cached data and generate a new token
+ * just like if we lost sync with the filesystem.
+ *
+ * Then send a trivial response using the new token.
+ */
+ do_flush = 1;
+ do_trivial = 1;
+
+ } else if (!skip_prefix(command, "builtin:", &p)) {
+ /* assume V1 timestamp or garbage */
+
+ char *p_end;
+
+ strtoumax(command, &p_end, 10);
+ trace_printf_key(&trace_fsmonitor,
+ ((*p_end) ?
+ "fsmonitor: invalid command line '%s'" :
+ "fsmonitor: unsupported V1 protocol '%s'"),
+ command);
+ do_trivial = 1;
+
+ } else {
+ /* We have "builtin:*" */
+ if (fsmonitor_parse_client_token(command, &requested_token_id,
+ &requested_oldest_seq_nr)) {
+ trace_printf_key(&trace_fsmonitor,
+ "fsmonitor: invalid V2 protocol token '%s'",
+ command);
+ do_trivial = 1;
+
+ } else {
+ /*
+			 * We have a valid V2 token:
+ * "builtin:<token_id>:<seq_nr>"
+ */
+ do_cookie = 1;
+ }
+ }
+
+ pthread_mutex_lock(&state->main_lock);
+
+ if (!state->current_token_data)
+ BUG("fsmonitor state does not have a current token");
+
+ /*
+ * Write a cookie file inside the directory being watched in
+ * an effort to flush out existing filesystem events that we
+ * actually care about. Suspend this client thread until we
+ * see the filesystem events for this cookie file.
+ *
+ * Creating the cookie lets us guarantee that our FS listener
+ * thread has drained the kernel queue and we are caught up
+ * with the kernel.
+ *
+ * If we cannot create the cookie (or otherwise guarantee that
+ * we are caught up), we send a trivial response. We have to
+ * assume that there might be some very, very recent activity
+ * on the FS still in flight.
+ */
+ if (do_cookie) {
+ cookie_result = with_lock__wait_for_cookie(state);
+ if (cookie_result != FCIR_SEEN) {
+ error(_("fsmonitor: cookie_result '%d' != SEEN"),
+ cookie_result);
+ do_trivial = 1;
+ }
+ }
+
+ if (do_flush)
+ with_lock__do_force_resync(state);
+
+ /*
+ * We mark the current head of the batch list as "pinned" so
+ * that the listener thread will treat this item as read-only
+ * (and prevent any more paths from being added to it) from
+ * now on.
+ */
+ token_data = state->current_token_data;
+ batch_head = token_data->batch_head;
+ ((struct fsmonitor_batch *)batch_head)->pinned_time = time(NULL);
+
+ /*
+ * FSMonitor Protocol V2 requires that we send a response header
+ * with a "new current token" and then all of the paths that changed
+ * since the "requested token". We send the seq_nr of the just-pinned
+ * head batch so that future requests from a client will be relative
+ * to it.
+ */
+ with_lock__format_response_token(&response_token,
+ &token_data->token_id, batch_head);
+
+ reply(reply_data, response_token.buf, response_token.len + 1);
+ total_response_len += response_token.len + 1;
+
+ trace2_data_string("fsmonitor", the_repository, "response/token",
+ response_token.buf);
+ trace_printf_key(&trace_fsmonitor, "response token: %s",
+ response_token.buf);
+
+ if (!do_trivial) {
+ if (strcmp(requested_token_id.buf, token_data->token_id.buf)) {
+ /*
+ * The client last spoke to a different daemon
+ * instance -OR- the daemon had to resync with
+ * the filesystem (and lost events), so reject.
+ */
+ trace2_data_string("fsmonitor", the_repository,
+ "response/token", "different");
+ do_trivial = 1;
+
+ } else if (requested_oldest_seq_nr <
+ token_data->batch_tail->batch_seq_nr) {
+ /*
+ * The client wants older events than we have for
+ * this token_id. This means that the end of our
+ * batch list was truncated and we cannot give the
+ * client a complete snapshot relative to their
+ * request.
+ */
+ trace_printf_key(&trace_fsmonitor,
+ "client requested truncated data");
+ do_trivial = 1;
+ }
+ }
+
+ if (do_trivial) {
+ pthread_mutex_unlock(&state->main_lock);
+
+ reply(reply_data, "/", 2);
+
+ trace2_data_intmax("fsmonitor", the_repository,
+ "response/trivial", 1);
+
+ goto cleanup;
+ }
+
+ /*
+ * We're going to hold onto a pointer to the current
+ * token-data while we walk the list of batches of files.
+ * During this time, we will NOT be under the lock.
+ * So we ref-count it.
+ *
+ * This allows the listener thread to continue prepending
+ * new batches of items to the token-data (which we'll ignore).
+ *
+ * AND it allows the listener thread to do a token-reset
+ * (and install a new `current_token_data`).
+ */
+ token_data->client_ref_count++;
+
+ pthread_mutex_unlock(&state->main_lock);
+
+ /*
+ * The client request is relative to the token that they sent,
+ * so walk the batch list backwards from the current head back
+ * to the batch (sequence number) they named.
+ *
+ * We use khash to de-dup the list of pathnames.
+ *
+ * NEEDSWORK: each batch contains a list of interned strings,
+ * so we only need to do pointer comparisons here to build the
+ * hash table. Currently, we're still comparing the string
+ * values.
+ */
+ shown = kh_init_str();
+ for (batch = batch_head;
+ batch && batch->batch_seq_nr > requested_oldest_seq_nr;
+ batch = batch->next) {
+ size_t k;
+
+ for (k = 0; k < batch->nr; k++) {
+ const char *s = batch->interned_paths[k];
+ size_t s_len;
+
+ if (kh_get_str(shown, s) != kh_end(shown))
+ duplicates++;
+ else {
+ kh_put_str(shown, s, &hash_ret);
+
+ trace_printf_key(&trace_fsmonitor,
+ "send[%"PRIuMAX"]: %s",
+ count, s);
+
+ /* Each path gets written with a trailing NUL */
+ s_len = strlen(s) + 1;
+
+ if (payload.len + s_len >=
+ LARGE_PACKET_DATA_MAX) {
+ reply(reply_data, payload.buf,
+ payload.len);
+ total_response_len += payload.len;
+ strbuf_reset(&payload);
+ }
+
+ strbuf_add(&payload, s, s_len);
+ count++;
+ }
+ }
+ }
+
+ if (payload.len) {
+ reply(reply_data, payload.buf, payload.len);
+ total_response_len += payload.len;
+ }
+
+ kh_release_str(shown);
+
+ pthread_mutex_lock(&state->main_lock);
+
+ if (token_data->client_ref_count > 0)
+ token_data->client_ref_count--;
+
+ if (token_data->client_ref_count == 0) {
+ if (token_data != state->current_token_data) {
+ /*
+ * The listener thread did a token-reset while we were
+ * walking the batch list. Therefore, this token is
+ * stale and can be discarded completely. If we are
+ * the last reader thread using this token, we own
+ * that work.
+ */
+ fsmonitor_free_token_data(token_data);
+ } else if (batch) {
+ /*
+ * We are holding the lock and are the only
+ * reader of the ref-counted portion of the
+ * list, so we get the honor of seeing if the
+ * list can be truncated to save memory.
+ *
+ * The main loop did not walk to the end of the
+ * list, so this batch is the first item in the
+ * batch-list that is older than the requested
+ * end-point sequence number. See if the tail
+ * end of the list is obsolete.
+ */
+ remainder = with_lock__truncate_old_batches(state,
+ batch);
+ }
+ }
+
+ pthread_mutex_unlock(&state->main_lock);
+
+ if (remainder)
+ fsmonitor_batch__free_list(remainder);
+
+ trace2_data_intmax("fsmonitor", the_repository, "response/length", total_response_len);
+ trace2_data_intmax("fsmonitor", the_repository, "response/count/files", count);
+ trace2_data_intmax("fsmonitor", the_repository, "response/count/duplicates", duplicates);
+
+cleanup:
+ strbuf_release(&response_token);
+ strbuf_release(&requested_token_id);
+ strbuf_release(&payload);
+
+ return 0;
+}
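For orientation (hypothetical token and paths, not taken from the commit), a V2
exchange handled by the function above looks like:

	client request:  "builtin:0.51234.20240101T120000.000000Z:3"
	daemon response: "builtin:0.51234.20240101T120000.000000Z:9" NUL
	                 "src/foo.c" NUL "Makefile" NUL

that is, a fresh token naming the just-pinned head batch, followed by the
NUL-terminated paths that changed in batches 4 through 9; a token mismatch or
truncated history instead yields the trivial "/" response.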
+
+static ipc_server_application_cb handle_client;
+
+static int handle_client(void *data,
+ const char *command, size_t command_len,
+ ipc_server_reply_cb *reply,
+ struct ipc_server_reply_data *reply_data)
+{
+ struct fsmonitor_daemon_state *state = data;
+ int result;
+
+ /*
+ * The Simple IPC API now supports {char*, len} arguments, but
+ * FSMonitor always uses proper null-terminated strings, so
+ * we can ignore the command_len argument. (Trust, but verify.)
+ */
+ if (command_len != strlen(command))
+ BUG("FSMonitor assumes text messages");
+
+ trace_printf_key(&trace_fsmonitor, "requested token: %s", command);
+
+ trace2_region_enter("fsmonitor", "handle_client", the_repository);
+ trace2_data_string("fsmonitor", the_repository, "request", command);
+
+ result = do_handle_client(state, command, reply, reply_data);
+
+ trace2_region_leave("fsmonitor", "handle_client", the_repository);
+
+ return result;
+}
+
+#define FSMONITOR_DIR "fsmonitor--daemon"
+#define FSMONITOR_COOKIE_DIR "cookies"
+#define FSMONITOR_COOKIE_PREFIX (FSMONITOR_DIR "/" FSMONITOR_COOKIE_DIR "/")
+
+enum fsmonitor_path_type fsmonitor_classify_path_workdir_relative(
+ const char *rel)
+{
+ if (fspathncmp(rel, ".git", 4))
+ return IS_WORKDIR_PATH;
+ rel += 4;
+
+ if (!*rel)
+ return IS_DOT_GIT;
+ if (*rel != '/')
+ return IS_WORKDIR_PATH; /* e.g. .gitignore */
+ rel++;
+
+ if (!fspathncmp(rel, FSMONITOR_COOKIE_PREFIX,
+ strlen(FSMONITOR_COOKIE_PREFIX)))
+ return IS_INSIDE_DOT_GIT_WITH_COOKIE_PREFIX;
+
+ return IS_INSIDE_DOT_GIT;
+}
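An editorial illustration of the classifier above, using hypothetical
worktree-relative paths and assuming <assert.h>; each expectation follows
directly from the prefix checks in the function:

	assert(fsmonitor_classify_path_workdir_relative("Makefile") ==
	       IS_WORKDIR_PATH);
	assert(fsmonitor_classify_path_workdir_relative(".gitignore") ==
	       IS_WORKDIR_PATH);
	assert(fsmonitor_classify_path_workdir_relative(".git") ==
	       IS_DOT_GIT);
	assert(fsmonitor_classify_path_workdir_relative(".git/index") ==
	       IS_INSIDE_DOT_GIT);
	assert(fsmonitor_classify_path_workdir_relative(
		       ".git/fsmonitor--daemon/cookies/51234-7") ==
	       IS_INSIDE_DOT_GIT_WITH_COOKIE_PREFIX);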
+
+enum fsmonitor_path_type fsmonitor_classify_path_gitdir_relative(
+ const char *rel)
+{
+ if (!fspathncmp(rel, FSMONITOR_COOKIE_PREFIX,
+ strlen(FSMONITOR_COOKIE_PREFIX)))
+ return IS_INSIDE_GITDIR_WITH_COOKIE_PREFIX;
+
+ return IS_INSIDE_GITDIR;
+}
+
+static enum fsmonitor_path_type try_classify_workdir_abs_path(
+ struct fsmonitor_daemon_state *state,
+ const char *path)
+{
+ const char *rel;
+
+ if (fspathncmp(path, state->path_worktree_watch.buf,
+ state->path_worktree_watch.len))
+ return IS_OUTSIDE_CONE;
+
+ rel = path + state->path_worktree_watch.len;
+
+ if (!*rel)
+ return IS_WORKDIR_PATH; /* it is the root dir exactly */
+ if (*rel != '/')
+ return IS_OUTSIDE_CONE;
+ rel++;
+
+ return fsmonitor_classify_path_workdir_relative(rel);
+}
+
+enum fsmonitor_path_type fsmonitor_classify_path_absolute(
+ struct fsmonitor_daemon_state *state,
+ const char *path)
+{
+ const char *rel;
+ enum fsmonitor_path_type t;
+
+ t = try_classify_workdir_abs_path(state, path);
+ if (state->nr_paths_watching == 1)
+ return t;
+ if (t != IS_OUTSIDE_CONE)
+ return t;
+
+ if (fspathncmp(path, state->path_gitdir_watch.buf,
+ state->path_gitdir_watch.len))
+ return IS_OUTSIDE_CONE;
+
+ rel = path + state->path_gitdir_watch.len;
+
+ if (!*rel)
+ return IS_GITDIR; /* it is the <gitdir> exactly */
+ if (*rel != '/')
+ return IS_OUTSIDE_CONE;
+ rel++;
+
+ return fsmonitor_classify_path_gitdir_relative(rel);
+}
+
+/*
+ * We try to combine small batches at the front of the batch-list to avoid
+ * having a long list. This hopefully makes it a little easier when we want
+ * to truncate and maintain the list. However, we don't want the paths array
+ * to just keep growing and growing with realloc, so we insert an arbitrary
+ * limit.
+ */
+#define MY_COMBINE_LIMIT (1024)
+
+void fsmonitor_publish(struct fsmonitor_daemon_state *state,
+ struct fsmonitor_batch *batch,
+ const struct string_list *cookie_names)
+{
+ if (!batch && !cookie_names->nr)
+ return;
+
+ pthread_mutex_lock(&state->main_lock);
+
+ if (batch) {
+ struct fsmonitor_batch *head;
+
+ head = state->current_token_data->batch_head;
+ if (!head) {
+ BUG("token does not have batch");
+ } else if (head->pinned_time) {
+ /*
+ * We cannot alter the current batch list
+ * because:
+ *
+ * [a] it is being transmitted to at least one
+ * client and the handle_client() thread has a
+ * ref-count, but not a lock on the batch list
+ * starting with this item.
+ *
+ * [b] it has been transmitted in the past to
+ * at least one client such that future
+ * requests are relative to this head batch.
+ *
+ * So, we can only prepend a new batch onto
+ * the front of the list.
+ */
+ batch->batch_seq_nr = head->batch_seq_nr + 1;
+ batch->next = head;
+ state->current_token_data->batch_head = batch;
+ } else if (!head->batch_seq_nr) {
+ /*
+ * Batch 0 is unpinned. See the note in
+ * `fsmonitor_new_token_data()` about why we
+ * don't need to accumulate these paths.
+ */
+ fsmonitor_batch__free_list(batch);
+ } else if (head->nr + batch->nr > MY_COMBINE_LIMIT) {
+ /*
+ * The head batch in the list has never been
+ * transmitted to a client, but folding the
+ * contents of the new batch onto it would
+ * exceed our arbitrary limit, so just prepend
+ * the new batch onto the list.
+ */
+ batch->batch_seq_nr = head->batch_seq_nr + 1;
+ batch->next = head;
+ state->current_token_data->batch_head = batch;
+ } else {
+ /*
+ * We are free to add the paths in the given
+ * batch onto the end of the current head batch.
+ */
+ fsmonitor_batch__combine(head, batch);
+ fsmonitor_batch__free_list(batch);
+ }
+ }
+
+ if (cookie_names->nr)
+ with_lock__mark_cookies_seen(state, cookie_names);
+
+ pthread_mutex_unlock(&state->main_lock);
+}
+
+static void *fsm_health__thread_proc(void *_state)
+{
+ struct fsmonitor_daemon_state *state = _state;
+
+ trace2_thread_start("fsm-health");
+
+ fsm_health__loop(state);
+
+ trace2_thread_exit();
+ return NULL;
+}
+
+static void *fsm_listen__thread_proc(void *_state)
+{
+ struct fsmonitor_daemon_state *state = _state;
+
+ trace2_thread_start("fsm-listen");
+
+ trace_printf_key(&trace_fsmonitor, "Watching: worktree '%s'",
+ state->path_worktree_watch.buf);
+ if (state->nr_paths_watching > 1)
+ trace_printf_key(&trace_fsmonitor, "Watching: gitdir '%s'",
+ state->path_gitdir_watch.buf);
+
+ fsm_listen__loop(state);
+
+ pthread_mutex_lock(&state->main_lock);
+ if (state->current_token_data &&
+ state->current_token_data->client_ref_count == 0)
+ fsmonitor_free_token_data(state->current_token_data);
+ state->current_token_data = NULL;
+ pthread_mutex_unlock(&state->main_lock);
+
+ trace2_thread_exit();
+ return NULL;
+}
+
+static int fsmonitor_run_daemon_1(struct fsmonitor_daemon_state *state)
+{
+ struct ipc_server_opts ipc_opts = {
+ .nr_threads = fsmonitor__ipc_threads,
+
+ /*
+ * We know that there are no other active threads yet,
+ * so we can let the IPC layer temporarily chdir() if
+ * it needs to when creating the server side of the
+ * Unix domain socket.
+ */
+ .uds_disallow_chdir = 0
+ };
+ int health_started = 0;
+ int listener_started = 0;
+ int err = 0;
+
+ /*
+	 * Start the IPC thread pool before we start the file
+ * system event listener thread so that we have the IPC handle
+ * before we need it.
+ */
+ if (ipc_server_run_async(&state->ipc_server_data,
+ state->path_ipc.buf, &ipc_opts,
+ handle_client, state))
+ return error_errno(
+ _("could not start IPC thread pool on '%s'"),
+ state->path_ipc.buf);
+
+ /*
+ * Start the fsmonitor listener thread to collect filesystem
+ * events.
+ */
+ if (pthread_create(&state->listener_thread, NULL,
+ fsm_listen__thread_proc, state) < 0) {
+ ipc_server_stop_async(state->ipc_server_data);
+ err = error(_("could not start fsmonitor listener thread"));
+ goto cleanup;
+ }
+ listener_started = 1;
+
+ /*
+ * Start the health thread to watch over our process.
+ */
+ if (pthread_create(&state->health_thread, NULL,
+ fsm_health__thread_proc, state) < 0) {
+ ipc_server_stop_async(state->ipc_server_data);
+ err = error(_("could not start fsmonitor health thread"));
+ goto cleanup;
+ }
+ health_started = 1;
+
+ /*
+ * The daemon is now fully functional in background threads.
+ * Our primary thread should now just wait while the threads
+ * do all the work.
+ */
+cleanup:
+ /*
+	 * Wait for the IPC thread pool to shut down (whether by client
+ * request, from filesystem activity, or an error).
+ */
+ ipc_server_await(state->ipc_server_data);
+
+ /*
+ * The fsmonitor listener thread may have received a shutdown
+ * event from the IPC thread pool, but it doesn't hurt to tell
+	 * it again. Then wait for it to shut down.
+ */
+ if (listener_started) {
+ fsm_listen__stop_async(state);
+ pthread_join(state->listener_thread, NULL);
+ }
+
+ if (health_started) {
+ fsm_health__stop_async(state);
+ pthread_join(state->health_thread, NULL);
+ }
+
+ if (err)
+ return err;
+ if (state->listen_error_code)
+ return state->listen_error_code;
+ if (state->health_error_code)
+ return state->health_error_code;
+ return 0;
+}
+
+static int fsmonitor_run_daemon(void)
+{
+ struct fsmonitor_daemon_state state;
+ const char *home;
+ int err;
+
+ memset(&state, 0, sizeof(state));
+
+ hashmap_init(&state.cookies, cookies_cmp, NULL, 0);
+ pthread_mutex_init(&state.main_lock, NULL);
+ pthread_cond_init(&state.cookies_cond, NULL);
+ state.listen_error_code = 0;
+ state.health_error_code = 0;
+ state.current_token_data = fsmonitor_new_token_data();
+
+ /* Prepare to (recursively) watch the <worktree-root> directory. */
+ strbuf_init(&state.path_worktree_watch, 0);
+ strbuf_addstr(&state.path_worktree_watch, absolute_path(get_git_work_tree()));
+ state.nr_paths_watching = 1;
+
+ /*
+ * We create and delete cookie files somewhere inside the .git
+ * directory to help us keep sync with the file system. If
+ * ".git" is not a directory, then <gitdir> is not inside the
+ * cone of <worktree-root>, so set up a second watch to watch
+ * the <gitdir> so that we get events for the cookie files.
+ */
+ strbuf_init(&state.path_gitdir_watch, 0);
+ strbuf_addbuf(&state.path_gitdir_watch, &state.path_worktree_watch);
+ strbuf_addstr(&state.path_gitdir_watch, "/.git");
+ if (!is_directory(state.path_gitdir_watch.buf)) {
+ strbuf_reset(&state.path_gitdir_watch);
+ strbuf_addstr(&state.path_gitdir_watch, absolute_path(get_git_dir()));
+ state.nr_paths_watching = 2;
+ }
+
+ /*
+ * We will write filesystem syncing cookie files into
+ * <gitdir>/<fsmonitor-dir>/<cookie-dir>/<pid>-<seq>.
+ *
+ * The extra layers of subdirectories here keep us from
+ * changing the mtime on ".git/" or ".git/foo/" when we create
+ * or delete cookie files.
+ *
+ * There have been problems with some IDEs that do a
+ * non-recursive watch of the ".git/" directory and run a
+ * series of commands any time something happens.
+ *
+ * For example, if we place our cookie files directly in
+ * ".git/" or ".git/foo/" then a `git status` (or similar
+ * command) from the IDE will cause a cookie file to be
+ * created in one of those dirs. This causes the mtime of
+ * those dirs to change. This triggers the IDE's watch
+ * notification. This triggers the IDE to run those commands
+ * again. And the process repeats and the machine never goes
+ * idle.
+ *
+ * Adding the extra layers of subdirectories prevents the
+ * mtime of ".git/" and ".git/foo" from changing when a
+ * cookie file is created.
+ */
+ strbuf_init(&state.path_cookie_prefix, 0);
+ strbuf_addbuf(&state.path_cookie_prefix, &state.path_gitdir_watch);
+
+ strbuf_addch(&state.path_cookie_prefix, '/');
+ strbuf_addstr(&state.path_cookie_prefix, FSMONITOR_DIR);
+ mkdir(state.path_cookie_prefix.buf, 0777);
+
+ strbuf_addch(&state.path_cookie_prefix, '/');
+ strbuf_addstr(&state.path_cookie_prefix, FSMONITOR_COOKIE_DIR);
+ mkdir(state.path_cookie_prefix.buf, 0777);
+
+ strbuf_addch(&state.path_cookie_prefix, '/');
+
+ /*
+ * We create a named-pipe or unix domain socket inside of the
+ * ".git" directory. (Well, on Windows, we base our named
+ * pipe in the NPFS on the absolute path of the git
+ * directory.)
+ */
+ strbuf_init(&state.path_ipc, 0);
+ strbuf_addstr(&state.path_ipc, absolute_path(fsmonitor_ipc__get_path()));
+
+ /*
+ * Confirm that we can create platform-specific resources for the
+ * filesystem listener before we bother starting all the threads.
+ */
+ if (fsm_listen__ctor(&state)) {
+ err = error(_("could not initialize listener thread"));
+ goto done;
+ }
+
+ if (fsm_health__ctor(&state)) {
+ err = error(_("could not initialize health thread"));
+ goto done;
+ }
+
+ /*
+ * CD out of the worktree root directory.
+ *
+ * The common Git startup mechanism causes our CWD to be the
+ * root of the worktree. On Windows, this causes our process
+ * to hold a locked handle on the CWD. This prevents the
+ * worktree from being moved or deleted while the daemon is
+ * running.
+ *
+ * We assume that our FS and IPC listener threads have either
+ * opened all of the handles that they need or will do
+ * everything using absolute paths.
+ */
+ home = getenv("HOME");
+ if (home && *home && chdir(home))
+ die_errno(_("could not cd home '%s'"), home);
+
+ err = fsmonitor_run_daemon_1(&state);
+
+done:
+ pthread_cond_destroy(&state.cookies_cond);
+ pthread_mutex_destroy(&state.main_lock);
+ fsm_listen__dtor(&state);
+ fsm_health__dtor(&state);
+
+ ipc_server_free(state.ipc_server_data);
+
+ strbuf_release(&state.path_worktree_watch);
+ strbuf_release(&state.path_gitdir_watch);
+ strbuf_release(&state.path_cookie_prefix);
+ strbuf_release(&state.path_ipc);
+
+ return err;
+}
+
+static int try_to_run_foreground_daemon(int detach_console)
+{
+ /*
+ * Technically, we don't need to probe for an existing daemon
+ * process, since we could just call `fsmonitor_run_daemon()`
+ * and let it fail if the pipe/socket is busy.
+ *
+ * However, this method gives us a nicer error message for a
+ * common error case.
+ */
+ if (fsmonitor_ipc__get_state() == IPC_STATE__LISTENING)
+ die(_("fsmonitor--daemon is already running '%s'"),
+ the_repository->worktree);
+
+ if (fsmonitor__announce_startup) {
+ fprintf(stderr, _("running fsmonitor-daemon in '%s'\n"),
+ the_repository->worktree);
+ fflush(stderr);
+ }
+
+#ifdef GIT_WINDOWS_NATIVE
+ if (detach_console)
+ FreeConsole();
+#endif
+
+ return !!fsmonitor_run_daemon();
+}
+
+static start_bg_wait_cb bg_wait_cb;
+
+static int bg_wait_cb(const struct child_process *cp, void *cb_data)
+{
+ enum ipc_active_state s = fsmonitor_ipc__get_state();
+
+ switch (s) {
+ case IPC_STATE__LISTENING:
+ /* child is "ready" */
+ return 0;
+
+ case IPC_STATE__NOT_LISTENING:
+ case IPC_STATE__PATH_NOT_FOUND:
+ /* give child more time */
+ return 1;
+
+ default:
+ case IPC_STATE__INVALID_PATH:
+ case IPC_STATE__OTHER_ERROR:
+		/* all the time in the world won't help */
+ return -1;
+ }
+}
+
+static int try_to_start_background_daemon(void)
+{
+ struct child_process cp = CHILD_PROCESS_INIT;
+ enum start_bg_result sbgr;
+
+ /*
+ * Before we try to create a background daemon process, see
+ * if a daemon process is already listening. This makes it
+ * easier for us to report an already-listening error to the
+	 * console, since our spawn/daemon helper can only report the success
+ * of creating the background process (and not whether it
+ * immediately exited).
+ */
+ if (fsmonitor_ipc__get_state() == IPC_STATE__LISTENING)
+ die(_("fsmonitor--daemon is already running '%s'"),
+ the_repository->worktree);
+
+ if (fsmonitor__announce_startup) {
+ fprintf(stderr, _("starting fsmonitor-daemon in '%s'\n"),
+ the_repository->worktree);
+ fflush(stderr);
+ }
+
+ cp.git_cmd = 1;
+
+ strvec_push(&cp.args, "fsmonitor--daemon");
+ strvec_push(&cp.args, "run");
+ strvec_push(&cp.args, "--detach");
+ strvec_pushf(&cp.args, "--ipc-threads=%d", fsmonitor__ipc_threads);
+
+ cp.no_stdin = 1;
+ cp.no_stdout = 1;
+ cp.no_stderr = 1;
+
+ sbgr = start_bg_command(&cp, bg_wait_cb, NULL,
+ fsmonitor__start_timeout_sec);
+
+ switch (sbgr) {
+ case SBGR_READY:
+ return 0;
+
+ default:
+ case SBGR_ERROR:
+ case SBGR_CB_ERROR:
+ return error(_("daemon failed to start"));
+
+ case SBGR_TIMEOUT:
+ return error(_("daemon not online yet"));
+
+ case SBGR_DIED:
+ return error(_("daemon terminated"));
+ }
+}
+
+int cmd_fsmonitor__daemon(int argc, const char **argv, const char *prefix)
+{
+ const char *subcmd;
+ enum fsmonitor_reason reason;
+ int detach_console = 0;
+
+ struct option options[] = {
+ OPT_BOOL(0, "detach", &detach_console, N_("detach from console")),
+ OPT_INTEGER(0, "ipc-threads",
+ &fsmonitor__ipc_threads,
+ N_("use <n> ipc worker threads")),
+ OPT_INTEGER(0, "start-timeout",
+ &fsmonitor__start_timeout_sec,
+ N_("max seconds to wait for background daemon startup")),
+
+ OPT_END()
+ };
+
+ git_config(fsmonitor_config, NULL);
+
+ argc = parse_options(argc, argv, prefix, options,
+ builtin_fsmonitor__daemon_usage, 0);
+ if (argc != 1)
+ usage_with_options(builtin_fsmonitor__daemon_usage, options);
+ subcmd = argv[0];
+
+ if (fsmonitor__ipc_threads < 1)
+ die(_("invalid 'ipc-threads' value (%d)"),
+ fsmonitor__ipc_threads);
+
+ prepare_repo_settings(the_repository);
+ /*
+ * If the repo is fsmonitor-compatible, explicitly set IPC-mode
+ * (without bothering to load the `core.fsmonitor` config settings).
+ *
+ * If the repo is not compatible, the repo-settings will be set to
+ * incompatible rather than IPC, so we can use one of the __get
+ * routines to detect the discrepancy.
+ */
+ fsm_settings__set_ipc(the_repository);
+
+ reason = fsm_settings__get_reason(the_repository);
+ if (reason > FSMONITOR_REASON_OK)
+ die("%s",
+ fsm_settings__get_incompatible_msg(the_repository,
+ reason));
+
+ if (!strcmp(subcmd, "start"))
+ return !!try_to_start_background_daemon();
+
+ if (!strcmp(subcmd, "run"))
+ return !!try_to_run_foreground_daemon(detach_console);
+
+ if (!strcmp(subcmd, "stop"))
+ return !!do_as_client__send_stop();
+
+ if (!strcmp(subcmd, "status"))
+ return !!do_as_client__status();
+
+ die(_("Unhandled subcommand '%s'"), subcmd);
+}
+
+#else
+int cmd_fsmonitor__daemon(int argc, const char **argv, const char *prefix)
+{
+ struct option options[] = {
+ OPT_END()
+ };
+
+ if (argc == 2 && !strcmp(argv[1], "-h"))
+ usage_with_options(builtin_fsmonitor__daemon_usage, options);
+
+ die(_("fsmonitor--daemon not supported on this platform"));
+}
+#endif
diff --git a/builtin/gc.c b/builtin/gc.c
index b335cff..eeff2b7 100644
--- a/builtin/gc.c
+++ b/builtin/gc.c
@@ -42,6 +42,7 @@ static const char * const builtin_gc_usage[] = {
static int pack_refs = 1;
static int prune_reflogs = 1;
+static int cruft_packs = 0;
static int aggressive_depth = 50;
static int aggressive_window = 250;
static int gc_auto_threshold = 6700;
@@ -152,6 +153,7 @@ static void gc_config(void)
git_config_get_int("gc.auto", &gc_auto_threshold);
git_config_get_int("gc.autopacklimit", &gc_auto_pack_limit);
git_config_get_bool("gc.autodetach", &detach_auto);
+ git_config_get_bool("gc.cruftpacks", &cruft_packs);
git_config_get_expiry("gc.pruneexpire", &prune_expire);
git_config_get_expiry("gc.worktreepruneexpire", &prune_worktrees_expire);
git_config_get_expiry("gc.logexpiry", &gc_log_expire);
@@ -166,9 +168,15 @@ struct maintenance_run_opts;
static int maintenance_task_pack_refs(MAYBE_UNUSED struct maintenance_run_opts *opts)
{
struct strvec pack_refs_cmd = STRVEC_INIT;
+ int ret;
+
strvec_pushl(&pack_refs_cmd, "pack-refs", "--all", "--prune", NULL);
- return run_command_v_opt(pack_refs_cmd.v, RUN_GIT_CMD);
+ ret = run_command_v_opt(pack_refs_cmd.v, RUN_GIT_CMD);
+
+ strvec_clear(&pack_refs_cmd);
+
+ return ret;
}
static int too_many_loose_objects(void)
@@ -331,7 +339,11 @@ static void add_repack_all_option(struct string_list *keep_pack)
{
if (prune_expire && !strcmp(prune_expire, "now"))
strvec_push(&repack, "-a");
- else {
+ else if (cruft_packs) {
+ strvec_push(&repack, "--cruft");
+ if (prune_expire)
+ strvec_pushf(&repack, "--cruft-expiration=%s", prune_expire);
+ } else {
strvec_push(&repack, "-A");
if (prune_expire)
strvec_pushf(&repack, "--unpack-unreachable=%s", prune_expire);
@@ -446,7 +458,7 @@ static const char *lock_repo_for_gc(int force, pid_t* ret_pid)
fscanf(fp, scan_fmt, &pid, locking_host) == 2 &&
/* be gentle to concurrent "gc" on remote hosts */
(strcmp(locking_host, my_host) || !kill(pid, 0) || errno == EPERM);
- if (fp != NULL)
+ if (fp)
fclose(fp);
if (should_exit) {
if (fd >= 0)
@@ -551,6 +563,7 @@ int cmd_gc(int argc, const char **argv, const char *prefix)
{ OPTION_STRING, 0, "prune", &prune_expire, N_("date"),
N_("prune unreferenced objects"),
PARSE_OPT_OPTARG, NULL, (intptr_t)prune_expire },
+ OPT_BOOL(0, "cruft", &cruft_packs, N_("pack unreferenced objects separately")),
OPT_BOOL(0, "aggressive", &aggressive, N_("be more thorough (increased runtime)")),
OPT_BOOL_F(0, "auto", &auto_gc, N_("enable auto-gc mode"),
PARSE_OPT_NOCOMPLETE),
@@ -574,7 +587,7 @@ int cmd_gc(int argc, const char **argv, const char *prefix)
/* default expiry time, overwritten in gc_config */
gc_config();
if (parse_expiry_date(gc_log_expire, &gc_log_expire_time))
- die(_("failed to parse gc.logexpiry value %s"), gc_log_expire);
+ die(_("failed to parse gc.logExpiry value %s"), gc_log_expire);
if (pack_refs < 0)
pack_refs = !is_bare_repository();
@@ -670,6 +683,7 @@ int cmd_gc(int argc, const char **argv, const char *prefix)
die(FAILED_RUN, repack.v[0]);
if (prune_expire) {
+ /* run `git prune` even if using cruft packs */
strvec_push(&prune, prune_expire);
if (quiet)
strvec_push(&prune, "--no-progress");
@@ -2238,7 +2252,7 @@ static int systemd_timer_write_unit_templates(const char *exec_path)
goto error;
}
file = fopen_or_warn(filename, "w");
- if (file == NULL)
+ if (!file)
goto error;
unit = "# This file was created and is maintained by Git.\n"
@@ -2267,7 +2281,7 @@ static int systemd_timer_write_unit_templates(const char *exec_path)
filename = xdg_config_home_systemd("git-maintenance@.service");
file = fopen_or_warn(filename, "w");
- if (file == NULL)
+ if (!file)
goto error;
unit = "# This file was created and is maintained by Git.\n"
diff --git a/builtin/grep.c b/builtin/grep.c
index bcb07ea..e6bcdf8 100644
--- a/builtin/grep.c
+++ b/builtin/grep.c
@@ -961,6 +961,8 @@ int cmd_grep(int argc, const char **argv, const char *prefix)
OPT_BOOL_F(0, "ext-grep", &external_grep_allowed__ignored,
N_("allow calling of grep(1) (ignored by this build)"),
PARSE_OPT_NOCOMPLETE),
+ OPT_INTEGER('m', "max-count", &opt.max_count,
+ N_("maximum number of results per file")),
OPT_END()
};
grep_prefix = prefix;
@@ -1101,6 +1103,13 @@ int cmd_grep(int argc, const char **argv, const char *prefix)
if (recurse_submodules && untracked)
die(_("--untracked not supported with --recurse-submodules"));
+ /*
+	 * Optimize out the case where the number of matches is limited to zero.
+ * We do this to keep results consistent with GNU grep(1).
+ */
+ if (opt.max_count == 0)
+ return 1;
+
if (show_in_pager) {
if (num_threads > 1)
warning(_("invalid option combination, ignoring --threads"));
diff --git a/builtin/index-pack.c b/builtin/index-pack.c
index 680b66b..6648f2d 100644
--- a/builtin/index-pack.c
+++ b/builtin/index-pack.c
@@ -1575,7 +1575,7 @@ static int git_index_pack_config(const char *k, const char *v, void *cb)
if (!strcmp(k, "pack.indexversion")) {
opts->version = git_config_int(k, v);
if (opts->version > 2)
- die(_("bad pack.indexversion=%"PRIu32), opts->version);
+ die(_("bad pack.indexVersion=%"PRIu32), opts->version);
return 0;
}
if (!strcmp(k, "pack.threads")) {
@@ -1942,11 +1942,11 @@ int cmd_index_pack(int argc, const char **argv, const char *prefix)
free(objects);
strbuf_release(&index_name_buf);
strbuf_release(&rev_index_name_buf);
- if (pack_name == NULL)
+ if (!pack_name)
free((void *) curr_pack);
- if (index_name == NULL)
+ if (!index_name)
free((void *) curr_index);
- if (rev_index_name == NULL)
+ if (!rev_index_name)
free((void *) curr_rev_index);
/*
diff --git a/builtin/log.c b/builtin/log.c
index c211d66..88a5e98 100644
--- a/builtin/log.c
+++ b/builtin/log.c
@@ -231,7 +231,8 @@ static void cmd_log_init_finish(int argc, const char **argv, const char *prefix,
}
if (mailmap) {
- rev->mailmap = xcalloc(1, sizeof(struct string_list));
+ rev->mailmap = xmalloc(sizeof(struct string_list));
+ string_list_init_nodup(rev->mailmap);
read_mailmap(rev->mailmap);
}
@@ -294,6 +295,12 @@ static void cmd_log_init(int argc, const char **argv, const char *prefix,
cmd_log_init_finish(argc, argv, prefix, rev, opt);
}
+static int cmd_log_deinit(int ret, struct rev_info *rev)
+{
+ release_revisions(rev);
+ return ret;
+}
+
/*
* This gives a rough estimate for how many commits we
* will print out in the list.
@@ -417,7 +424,7 @@ static void finish_early_output(struct rev_info *rev)
show_early_header(rev, "done", n);
}
-static int cmd_log_walk(struct rev_info *rev)
+static int cmd_log_walk_no_free(struct rev_info *rev)
{
struct commit *commit;
int saved_nrl = 0;
@@ -444,7 +451,6 @@ static int cmd_log_walk(struct rev_info *rev)
* and HAS_CHANGES being accumulated in rev->diffopt, so be careful to
* retain that state information if replacing rev->diffopt in this loop
*/
- rev->diffopt.no_free = 1;
while ((commit = get_revision(rev)) != NULL) {
if (!log_tree_commit(rev, commit) && rev->max_count >= 0)
/*
@@ -469,8 +475,6 @@ static int cmd_log_walk(struct rev_info *rev)
}
rev->diffopt.degraded_cc_to_c = saved_dcctc;
rev->diffopt.needed_rename_limit = saved_nrl;
- rev->diffopt.no_free = 0;
- diff_free(&rev->diffopt);
if (rev->remerge_diff) {
tmp_objdir_destroy(rev->remerge_objdir);
@@ -484,6 +488,17 @@ static int cmd_log_walk(struct rev_info *rev)
return diff_result_code(&rev->diffopt, 0);
}
+static int cmd_log_walk(struct rev_info *rev)
+{
+ int retval;
+
+ rev->diffopt.no_free = 1;
+ retval = cmd_log_walk_no_free(rev);
+ rev->diffopt.no_free = 0;
+ diff_free(&rev->diffopt);
+ return retval;
+}
+
static int git_log_config(const char *var, const char *value, void *cb)
{
const char *slot_name;
@@ -557,7 +572,7 @@ int cmd_whatchanged(int argc, const char **argv, const char *prefix)
cmd_log_init(argc, argv, prefix, &rev, &opt);
if (!rev.diffopt.output_format)
rev.diffopt.output_format = DIFF_FORMAT_RAW;
- return cmd_log_walk(&rev);
+ return cmd_log_deinit(cmd_log_walk(&rev), &rev);
}
static void show_tagger(const char *buf, struct rev_info *rev)
@@ -661,6 +676,11 @@ int cmd_show(int argc, const char **argv, const char *prefix)
init_log_defaults();
git_config(git_log_config, NULL);
+ if (the_repository->gitdir) {
+ prepare_repo_settings(the_repository);
+ the_repository->settings.command_requires_full_index = 0;
+ }
+
memset(&match_all, 0, sizeof(match_all));
repo_init_revisions(the_repository, &rev, prefix);
git_config(grep_config, &rev.grep_filter);
@@ -676,10 +696,11 @@ int cmd_show(int argc, const char **argv, const char *prefix)
cmd_log_init(argc, argv, prefix, &rev, &opt);
if (!rev.no_walk)
- return cmd_log_walk(&rev);
+ return cmd_log_deinit(cmd_log_walk(&rev), &rev);
count = rev.pending.nr;
objects = rev.pending.objects;
+ rev.diffopt.no_free = 1;
for (i = 0; i < count && !ret; i++) {
struct object *o = objects[i].item;
const char *name = objects[i].name;
@@ -725,14 +746,17 @@ int cmd_show(int argc, const char **argv, const char *prefix)
rev.pending.nr = rev.pending.alloc = 0;
rev.pending.objects = NULL;
add_object_array(o, name, &rev.pending);
- ret = cmd_log_walk(&rev);
+ ret = cmd_log_walk_no_free(&rev);
break;
default:
ret = error(_("unknown type: %d"), o->type);
}
}
- free(objects);
- return ret;
+
+ rev.diffopt.no_free = 0;
+ diff_free(&rev.diffopt);
+
+ return cmd_log_deinit(ret, &rev);
}
/*
@@ -760,7 +784,7 @@ int cmd_log_reflog(int argc, const char **argv, const char *prefix)
rev.always_show_header = 1;
cmd_log_init_finish(argc, argv, prefix, &rev, &opt);
- return cmd_log_walk(&rev);
+ return cmd_log_deinit(cmd_log_walk(&rev), &rev);
}
static void log_setup_revisions_tweak(struct rev_info *rev,
@@ -791,7 +815,7 @@ int cmd_log(int argc, const char **argv, const char *prefix)
opt.revarg_opt = REVARG_COMMITTISH;
opt.tweak = log_setup_revisions_tweak;
cmd_log_init(argc, argv, prefix, &rev, &opt);
- return cmd_log_walk(&rev);
+ return cmd_log_deinit(cmd_log_walk(&rev), &rev);
}
/* format-patch */
@@ -1012,7 +1036,7 @@ static int open_next_file(struct commit *commit, const char *subject,
if (!quiet)
printf("%s\n", filename.buf + outdir_offset);
- if ((rev->diffopt.file = fopen(filename.buf, "w")) == NULL) {
+ if (!(rev->diffopt.file = fopen(filename.buf, "w"))) {
error_errno(_("cannot open patch file %s"), filename.buf);
strbuf_release(&filename);
return -1;
@@ -1746,6 +1770,7 @@ int cmd_format_patch(int argc, const char **argv, const char *prefix)
struct commit *commit;
struct commit **list = NULL;
struct rev_info rev;
+ char *to_free = NULL;
struct setup_revision_opt s_r_opt;
int nr = 0, total, i;
int use_stdout = 0;
@@ -1883,6 +1908,7 @@ int cmd_format_patch(int argc, const char **argv, const char *prefix)
rev.diff = 1;
rev.max_parents = 1;
rev.diffopt.flags.recursive = 1;
+ rev.diffopt.no_free = 1;
rev.subject_prefix = fmt_patch_subject_prefix;
memset(&s_r_opt, 0, sizeof(s_r_opt));
s_r_opt.def = "HEAD";
@@ -1946,7 +1972,7 @@ int cmd_format_patch(int argc, const char **argv, const char *prefix)
strbuf_addch(&buf, '\n');
}
- rev.extra_headers = strbuf_detach(&buf, NULL);
+ rev.extra_headers = to_free = strbuf_detach(&buf, NULL);
if (from) {
if (split_ident_line(&rev.from_ident, from, strlen(from)))
@@ -2008,13 +2034,7 @@ int cmd_format_patch(int argc, const char **argv, const char *prefix)
if (use_stdout) {
setup_pager();
- } else if (rev.diffopt.close_file) {
- /*
- * The diff code parsed --output; it has already opened the
- * file, but we must instruct it not to close after each diff.
- */
- rev.diffopt.no_free = 1;
- } else {
+ } else if (!rev.diffopt.close_file) {
int saved;
if (!output_directory)
@@ -2173,8 +2193,10 @@ int cmd_format_patch(int argc, const char **argv, const char *prefix)
prepare_bases(&bases, base, list, nr);
}
- if (in_reply_to || thread || cover_letter)
- rev.ref_message_ids = xcalloc(1, sizeof(struct string_list));
+ if (in_reply_to || thread || cover_letter) {
+ rev.ref_message_ids = xmalloc(sizeof(*rev.ref_message_ids));
+ string_list_init_nodup(rev.ref_message_ids);
+ }
if (in_reply_to) {
const char *msgid = clean_message_id(in_reply_to);
string_list_append(rev.ref_message_ids, msgid);
@@ -2281,8 +2303,11 @@ done:
strbuf_release(&rdiff1);
strbuf_release(&rdiff2);
strbuf_release(&rdiff_title);
- UNLEAK(rev);
- return 0;
+ free(to_free);
+ if (rev.ref_message_ids)
+ string_list_clear(rev.ref_message_ids, 0);
+ free(rev.ref_message_ids);
+ return cmd_log_deinit(0, &rev);
}
static int add_pending_commit(const char *arg, struct rev_info *revs, int flags)
diff --git a/builtin/ls-remote.c b/builtin/ls-remote.c
index d856085..df44e5c 100644
--- a/builtin/ls-remote.c
+++ b/builtin/ls-remote.c
@@ -114,7 +114,7 @@ int cmd_ls_remote(int argc, const char **argv, const char *prefix)
}
transport = transport_get(remote, NULL);
- if (uploadpack != NULL)
+ if (uploadpack)
transport_set_option(transport, TRANS_OPT_UPLOADPACK, uploadpack);
if (server_options.nr)
transport->server_options = &server_options;
diff --git a/builtin/ls-tree.c b/builtin/ls-tree.c
index 6cb554c..e279be8 100644
--- a/builtin/ls-tree.c
+++ b/builtin/ls-tree.c
@@ -16,22 +16,102 @@
static int line_termination = '\n';
#define LS_RECURSIVE 1
-#define LS_TREE_ONLY 2
-#define LS_SHOW_TREES 4
-#define LS_NAME_ONLY 8
-#define LS_SHOW_SIZE 16
+#define LS_TREE_ONLY (1 << 1)
+#define LS_SHOW_TREES (1 << 2)
static int abbrev;
static int ls_options;
static struct pathspec pathspec;
static int chomp_prefix;
static const char *ls_tree_prefix;
+static const char *format;
+struct show_tree_data {
+ unsigned mode;
+ enum object_type type;
+ const struct object_id *oid;
+ const char *pathname;
+ struct strbuf *base;
+};
static const char * const ls_tree_usage[] = {
N_("git ls-tree [<options>] <tree-ish> [<path>...]"),
NULL
};
-static int show_recursive(const char *base, int baselen, const char *pathname)
+static enum ls_tree_cmdmode {
+ MODE_DEFAULT = 0,
+ MODE_LONG,
+ MODE_NAME_ONLY,
+ MODE_NAME_STATUS,
+ MODE_OBJECT_ONLY,
+} cmdmode;
+
+static void expand_objectsize(struct strbuf *line, const struct object_id *oid,
+ const enum object_type type, unsigned int padded)
+{
+ if (type == OBJ_BLOB) {
+ unsigned long size;
+ if (oid_object_info(the_repository, oid, &size) < 0)
+ die(_("could not get object info about '%s'"),
+ oid_to_hex(oid));
+ if (padded)
+ strbuf_addf(line, "%7"PRIuMAX, (uintmax_t)size);
+ else
+ strbuf_addf(line, "%"PRIuMAX, (uintmax_t)size);
+ } else if (padded) {
+ strbuf_addf(line, "%7s", "-");
+ } else {
+ strbuf_addstr(line, "-");
+ }
+}
+
+static size_t expand_show_tree(struct strbuf *sb, const char *start,
+ void *context)
+{
+ struct show_tree_data *data = context;
+ const char *end;
+ const char *p;
+ unsigned int errlen;
+ size_t len = strbuf_expand_literal_cb(sb, start, NULL);
+
+ if (len)
+ return len;
+ if (*start != '(')
+ die(_("bad ls-tree format: element '%s' does not start with '('"), start);
+
+ end = strchr(start + 1, ')');
+ if (!end)
+ die(_("bad ls-tree format: element '%s' does not end in ')'"), start);
+
+ len = end - start + 1;
+ if (skip_prefix(start, "(objectmode)", &p)) {
+ strbuf_addf(sb, "%06o", data->mode);
+ } else if (skip_prefix(start, "(objecttype)", &p)) {
+ strbuf_addstr(sb, type_name(data->type));
+ } else if (skip_prefix(start, "(objectsize:padded)", &p)) {
+ expand_objectsize(sb, data->oid, data->type, 1);
+ } else if (skip_prefix(start, "(objectsize)", &p)) {
+ expand_objectsize(sb, data->oid, data->type, 0);
+ } else if (skip_prefix(start, "(objectname)", &p)) {
+ strbuf_add_unique_abbrev(sb, data->oid, abbrev);
+ } else if (skip_prefix(start, "(path)", &p)) {
+ const char *name = data->base->buf;
+ const char *prefix = chomp_prefix ? ls_tree_prefix : NULL;
+ struct strbuf quoted = STRBUF_INIT;
+ struct strbuf sbuf = STRBUF_INIT;
+ strbuf_addstr(data->base, data->pathname);
+ name = relative_path(data->base->buf, prefix, &sbuf);
+ quote_c_style(name, &quoted, NULL, 0);
+ strbuf_addbuf(sb, &quoted);
+ strbuf_release(&sbuf);
+ strbuf_release(&quoted);
+ } else {
+ errlen = (unsigned long)len;
+ die(_("bad ls-tree format: %%%.*s"), errlen, start);
+ }
+ return len;
+}
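A quick, purely illustrative example of the placeholders handled above (the
object names and paths are made up): running

	git ls-tree --format='%(objectmode) %(objecttype) %(objectname) %(objectsize:padded)%x09%(path)' HEAD

would print lines such as

	100644 blob 9daeafb9864cf43055ae93beb0afd6c7d144bfa4     290	README
	040000 tree d564d0bc3dd917926892c55e3706cc116d5b165e       -	lib

which matches the built-in `--long` layout listed in the cmdmode table further
down.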
+
+static int show_recursive(const char *base, size_t baselen, const char *pathname)
{
int i;
@@ -43,7 +123,7 @@ static int show_recursive(const char *base, int baselen, const char *pathname)
for (i = 0; i < pathspec.nr; i++) {
const char *spec = pathspec.items[i].match;
- int len, speclen;
+ size_t len, speclen;
if (strncmp(base, spec, baselen))
continue;
@@ -61,69 +141,197 @@ static int show_recursive(const char *base, int baselen, const char *pathname)
return 0;
}
-static int show_tree(const struct object_id *oid, struct strbuf *base,
- const char *pathname, unsigned mode, void *context)
+static int show_tree_fmt(const struct object_id *oid, struct strbuf *base,
+ const char *pathname, unsigned mode, void *context)
{
- int retval = 0;
- int baselen;
- const char *type = blob_type;
-
- if (S_ISGITLINK(mode)) {
- /*
- * Maybe we want to have some recursive version here?
- *
- * Something similar to this incomplete example:
- *
- if (show_subprojects(base, baselen, pathname))
- retval = READ_TREE_RECURSIVE;
- *
- */
- type = commit_type;
- } else if (S_ISDIR(mode)) {
- if (show_recursive(base->buf, base->len, pathname)) {
- retval = READ_TREE_RECURSIVE;
- if (!(ls_options & LS_SHOW_TREES))
- return retval;
- }
- type = tree_type;
- }
- else if (ls_options & LS_TREE_ONLY)
+ size_t baselen;
+ int recurse = 0;
+ struct strbuf sb = STRBUF_INIT;
+ enum object_type type = object_type(mode);
+
+ struct show_tree_data data = {
+ .mode = mode,
+ .type = type,
+ .oid = oid,
+ .pathname = pathname,
+ .base = base,
+ };
+
+ if (type == OBJ_TREE && show_recursive(base->buf, base->len, pathname))
+ recurse = READ_TREE_RECURSIVE;
+ if (type == OBJ_TREE && recurse && !(ls_options & LS_SHOW_TREES))
+ return recurse;
+ if (type == OBJ_BLOB && (ls_options & LS_TREE_ONLY))
return 0;
- if (!(ls_options & LS_NAME_ONLY)) {
- if (ls_options & LS_SHOW_SIZE) {
- char size_text[24];
- if (!strcmp(type, blob_type)) {
- unsigned long size;
- if (oid_object_info(the_repository, oid, &size) == OBJ_BAD)
- xsnprintf(size_text, sizeof(size_text),
- "BAD");
- else
- xsnprintf(size_text, sizeof(size_text),
- "%"PRIuMAX, (uintmax_t)size);
- } else
- xsnprintf(size_text, sizeof(size_text), "-");
- printf("%06o %s %s %7s\t", mode, type,
- find_unique_abbrev(oid, abbrev),
- size_text);
- } else
- printf("%06o %s %s\t", mode, type,
- find_unique_abbrev(oid, abbrev));
- }
baselen = base->len;
+ strbuf_expand(&sb, format, expand_show_tree, &data);
+ strbuf_addch(&sb, line_termination);
+ fwrite(sb.buf, sb.len, 1, stdout);
+ strbuf_release(&sb);
+ strbuf_setlen(base, baselen);
+ return recurse;
+}
+
+static int show_tree_common(struct show_tree_data *data, int *recurse,
+ const struct object_id *oid, struct strbuf *base,
+ const char *pathname, unsigned mode)
+{
+ enum object_type type = object_type(mode);
+ int ret = -1;
+
+ *recurse = 0;
+ data->mode = mode;
+ data->type = type;
+ data->oid = oid;
+ data->pathname = pathname;
+ data->base = base;
+
+ if (type == OBJ_BLOB) {
+ if (ls_options & LS_TREE_ONLY)
+ ret = 0;
+ } else if (type == OBJ_TREE &&
+ show_recursive(base->buf, base->len, pathname)) {
+ *recurse = READ_TREE_RECURSIVE;
+ if (!(ls_options & LS_SHOW_TREES))
+ ret = *recurse;
+ }
+
+ return ret;
+}
+
+static void show_tree_common_default_long(struct strbuf *base,
+ const char *pathname,
+ const size_t baselen)
+{
+ strbuf_addstr(base, pathname);
+ write_name_quoted_relative(base->buf,
+ chomp_prefix ? ls_tree_prefix : NULL, stdout,
+ line_termination);
+ strbuf_setlen(base, baselen);
+}
+
+static int show_tree_default(const struct object_id *oid, struct strbuf *base,
+ const char *pathname, unsigned mode,
+ void *context)
+{
+ int early;
+ int recurse;
+ struct show_tree_data data = { 0 };
+
+ early = show_tree_common(&data, &recurse, oid, base, pathname, mode);
+ if (early >= 0)
+ return early;
+
+ printf("%06o %s %s\t", data.mode, type_name(data.type),
+ find_unique_abbrev(data.oid, abbrev));
+ show_tree_common_default_long(base, pathname, data.base->len);
+ return recurse;
+}
+
+static int show_tree_long(const struct object_id *oid, struct strbuf *base,
+ const char *pathname, unsigned mode, void *context)
+{
+ int early;
+ int recurse;
+ struct show_tree_data data = { 0 };
+ char size_text[24];
+
+ early = show_tree_common(&data, &recurse, oid, base, pathname, mode);
+ if (early >= 0)
+ return early;
+
+ if (data.type == OBJ_BLOB) {
+ unsigned long size;
+ if (oid_object_info(the_repository, data.oid, &size) == OBJ_BAD)
+ xsnprintf(size_text, sizeof(size_text), "BAD");
+ else
+ xsnprintf(size_text, sizeof(size_text),
+ "%" PRIuMAX, (uintmax_t)size);
+ } else {
+ xsnprintf(size_text, sizeof(size_text), "-");
+ }
+
+ printf("%06o %s %s %7s\t", data.mode, type_name(data.type),
+ find_unique_abbrev(data.oid, abbrev), size_text);
+ show_tree_common_default_long(base, pathname, data.base->len);
+ return recurse;
+}
+
+static int show_tree_name_only(const struct object_id *oid, struct strbuf *base,
+ const char *pathname, unsigned mode, void *context)
+{
+ int early;
+ int recurse;
+ const size_t baselen = base->len;
+ struct show_tree_data data = { 0 };
+
+ early = show_tree_common(&data, &recurse, oid, base, pathname, mode);
+ if (early >= 0)
+ return early;
+
strbuf_addstr(base, pathname);
write_name_quoted_relative(base->buf,
chomp_prefix ? ls_tree_prefix : NULL,
stdout, line_termination);
strbuf_setlen(base, baselen);
- return retval;
+ return recurse;
+}
+
+static int show_tree_object(const struct object_id *oid, struct strbuf *base,
+ const char *pathname, unsigned mode, void *context)
+{
+ int early;
+ int recurse;
+ struct show_tree_data data = { 0 };
+
+ early = show_tree_common(&data, &recurse, oid, base, pathname, mode);
+ if (early >= 0)
+ return early;
+
+ printf("%s%c", find_unique_abbrev(oid, abbrev), line_termination);
+ return recurse;
}
+struct ls_tree_cmdmode_to_fmt {
+ enum ls_tree_cmdmode mode;
+ const char *const fmt;
+ read_tree_fn_t fn;
+};
+
+static struct ls_tree_cmdmode_to_fmt ls_tree_cmdmode_format[] = {
+ {
+ .mode = MODE_DEFAULT,
+ .fmt = "%(objectmode) %(objecttype) %(objectname)%x09%(path)",
+ .fn = show_tree_default,
+ },
+ {
+ .mode = MODE_LONG,
+ .fmt = "%(objectmode) %(objecttype) %(objectname) %(objectsize:padded)%x09%(path)",
+ .fn = show_tree_long,
+ },
+ {
+ .mode = MODE_NAME_ONLY, /* And MODE_NAME_STATUS */
+ .fmt = "%(path)",
+ .fn = show_tree_name_only,
+ },
+ {
+ .mode = MODE_OBJECT_ONLY,
+ .fmt = "%(objectname)",
+ .fn = show_tree_object
+ },
+ {
+ /* fallback */
+ .fn = show_tree_default,
+ },
+};
+
int cmd_ls_tree(int argc, const char **argv, const char *prefix)
{
struct object_id oid;
struct tree *tree;
int i, full_tree = 0;
+ read_tree_fn_t fn = NULL;
const struct option ls_tree_options[] = {
OPT_BIT('d', NULL, &ls_options, N_("only show trees"),
LS_TREE_ONLY),
@@ -133,20 +341,26 @@ int cmd_ls_tree(int argc, const char **argv, const char *prefix)
LS_SHOW_TREES),
OPT_SET_INT('z', NULL, &line_termination,
N_("terminate entries with NUL byte"), 0),
- OPT_BIT('l', "long", &ls_options, N_("include object size"),
- LS_SHOW_SIZE),
- OPT_BIT(0, "name-only", &ls_options, N_("list only filenames"),
- LS_NAME_ONLY),
- OPT_BIT(0, "name-status", &ls_options, N_("list only filenames"),
- LS_NAME_ONLY),
+ OPT_CMDMODE('l', "long", &cmdmode, N_("include object size"),
+ MODE_LONG),
+ OPT_CMDMODE(0, "name-only", &cmdmode, N_("list only filenames"),
+ MODE_NAME_ONLY),
+ OPT_CMDMODE(0, "name-status", &cmdmode, N_("list only filenames"),
+ MODE_NAME_STATUS),
+ OPT_CMDMODE(0, "object-only", &cmdmode, N_("list only objects"),
+ MODE_OBJECT_ONLY),
OPT_SET_INT(0, "full-name", &chomp_prefix,
N_("use full path names"), 0),
OPT_BOOL(0, "full-tree", &full_tree,
N_("list entire tree; not just current directory "
"(implies --full-name)")),
+ OPT_STRING_F(0, "format", &format, N_("format"),
+ N_("format to use for the output"),
+ PARSE_OPT_NONEG),
OPT__ABBREV(&abbrev),
OPT_END()
};
+ struct ls_tree_cmdmode_to_fmt *m2f = ls_tree_cmdmode_format;
git_config(git_default_config, NULL);
ls_tree_prefix = prefix;
@@ -159,11 +373,23 @@ int cmd_ls_tree(int argc, const char **argv, const char *prefix)
ls_tree_prefix = prefix = NULL;
chomp_prefix = 0;
}
+ /*
+ * We wanted to detect conflicts between --name-only and
+ * --name-status, but once we're done with that subsequent
+ * code should only need to check the primary name.
+ */
+ if (cmdmode == MODE_NAME_STATUS)
+ cmdmode = MODE_NAME_ONLY;
+
/* -d -r should imply -t, but -d by itself should not have to. */
if ( (LS_TREE_ONLY|LS_RECURSIVE) ==
((LS_TREE_ONLY|LS_RECURSIVE) & ls_options))
ls_options |= LS_SHOW_TREES;
+ if (format && cmdmode)
+ usage_msg_opt(
+ _("--format can't be combined with other format-altering options"),
+ ls_tree_usage, ls_tree_options);
if (argc < 1)
usage_with_options(ls_tree_usage, ls_tree_options);
if (get_oid(argv[0], &oid))
@@ -185,6 +411,24 @@ int cmd_ls_tree(int argc, const char **argv, const char *prefix)
tree = parse_tree_indirect(&oid);
if (!tree)
die("not a tree object");
- return !!read_tree(the_repository, tree,
- &pathspec, show_tree, NULL);
+ /*
+ * The generic show_tree_fmt() is slower than show_tree(), so
+ * take the fast path if possible.
+ */
+ while (m2f) {
+ if (!m2f->fmt) {
+ fn = format ? show_tree_fmt : show_tree_default;
+ } else if (format && !strcmp(format, m2f->fmt)) {
+ cmdmode = m2f->mode;
+ fn = m2f->fn;
+ } else if (!format && cmdmode == m2f->mode) {
+ fn = m2f->fn;
+ } else {
+ m2f++;
+ continue;
+ }
+ break;
+ }
+
+ return !!read_tree(the_repository, tree, &pathspec, fn, NULL);
}
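
The table above maps each command mode to its canonical format string and a fast callback, and the lookup loop lets a user-supplied --format string that exactly matches one of those canonical strings reuse the fast path. Below is a minimal standalone sketch of that table-driven dispatch; the types, handlers and modes are illustrative stand-ins, not git's actual API.

/*
 * Standalone sketch of the table-driven dispatch used above; the types,
 * handlers and modes are illustrative and not part of git's API.
 */
#include <stdio.h>
#include <string.h>

typedef void (*show_fn)(const char *path);

static void show_default(const char *path) { printf("default: %s\n", path); }
static void show_name_only(const char *path) { printf("%s\n", path); }
static void show_fmt(const char *path) { printf("formatted: %s\n", path); }

struct mode_to_fmt {
	int mode;		/* 0 doubles as the fallback sentinel */
	const char *fmt;	/* canonical format string for this mode */
	show_fn fn;		/* fast-path callback */
};

static struct mode_to_fmt table[] = {
	{ 1, "%(path)", show_name_only },
	{ 0, NULL, show_default },	/* fallback entry: no fmt */
};

static show_fn resolve(const char *format, int mode)
{
	struct mode_to_fmt *m = table;

	for (;;) {
		if (!m->fmt)
			return format ? show_fmt : show_default;
		if (format && !strcmp(format, m->fmt))
			return m->fn;	/* user format matches a built-in: fast path */
		if (!format && mode == m->mode)
			return m->fn;
		m++;
	}
}

int main(void)
{
	resolve("%(path)", 0)("Makefile");		/* fast path */
	resolve("%(objectname)", 0)("Makefile");	/* generic formatter */
	resolve(NULL, 1)("Makefile");			/* mode lookup */
	return 0;
}
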
diff --git a/builtin/mailsplit.c b/builtin/mailsplit.c
index 7baef30..73509f6 100644
--- a/builtin/mailsplit.c
+++ b/builtin/mailsplit.c
@@ -120,7 +120,7 @@ static int populate_maildir_list(struct string_list *list, const char *path)
for (sub = subs; *sub; ++sub) {
free(name);
name = xstrfmt("%s/%s", path, *sub);
- if ((dir = opendir(name)) == NULL) {
+ if (!(dir = opendir(name))) {
if (errno == ENOENT)
continue;
error_errno("cannot opendir %s", name);
@@ -223,6 +223,9 @@ static int split_mbox(const char *file, const char *dir, int allow_bare,
FILE *f = !strcmp(file, "-") ? stdin : fopen(file, "r");
int file_done = 0;
+	if (f && isatty(fileno(f)))
+ warning(_("reading patches from stdin/tty..."));
+
if (!f) {
error_errno("cannot open mbox %s", file);
goto out;
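
A small sketch of the stream guard added above, assuming plain POSIX stdio: the point is to check the result of fopen() before handing the stream to fileno()/isatty().

/*
 * Minimal sketch of the stdin/tty guard above, assuming plain POSIX stdio:
 * never hand the stream to fileno()/isatty() before checking that fopen()
 * actually succeeded.
 */
#include <stdio.h>
#include <string.h>
#include <unistd.h>

int main(int argc, char **argv)
{
	FILE *f = (argc > 1 && strcmp(argv[1], "-")) ? fopen(argv[1], "r") : stdin;

	if (!f) {
		perror("fopen");
		return 1;
	}
	if (isatty(fileno(f)))
		fprintf(stderr, "warning: reading patches from a terminal\n");

	/* ... split the mbox here ... */

	if (f != stdin)
		fclose(f);
	return 0;
}
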
diff --git a/builtin/merge-file.c b/builtin/merge-file.c
index e695867..c923bbf 100644
--- a/builtin/merge-file.c
+++ b/builtin/merge-file.c
@@ -25,10 +25,10 @@ static int label_cb(const struct option *opt, const char *arg, int unset)
int cmd_merge_file(int argc, const char **argv, const char *prefix)
{
- const char *names[3] = { NULL, NULL, NULL };
- mmfile_t mmfs[3];
- mmbuffer_t result = {NULL, 0};
- xmparam_t xmp = {{0}};
+ const char *names[3] = { 0 };
+ mmfile_t mmfs[3] = { 0 };
+ mmbuffer_t result = { 0 };
+ xmparam_t xmp = { 0 };
int ret = 0, i = 0, to_stdout = 0;
int quiet = 0;
struct option options[] = {
@@ -71,21 +71,24 @@ int cmd_merge_file(int argc, const char **argv, const char *prefix)
for (i = 0; i < 3; i++) {
char *fname;
- int ret;
+ mmfile_t *mmf = mmfs + i;
if (!names[i])
names[i] = argv[i];
fname = prefix_filename(prefix, argv[i]);
- ret = read_mmfile(mmfs + i, fname);
+
+ if (read_mmfile(mmf, fname))
+ ret = -1;
+ else if (mmf->size > MAX_XDIFF_SIZE ||
+ buffer_is_binary(mmf->ptr, mmf->size))
+ ret = error("Cannot merge binary files: %s",
+ argv[i]);
+
free(fname);
if (ret)
- return -1;
+ goto cleanup;
- if (mmfs[i].size > MAX_XDIFF_SIZE ||
- buffer_is_binary(mmfs[i].ptr, mmfs[i].size))
- return error("Cannot merge binary files: %s",
- argv[i]);
}
xmp.ancestor = names[1];
@@ -93,9 +96,6 @@ int cmd_merge_file(int argc, const char **argv, const char *prefix)
xmp.file2 = names[2];
ret = xdl_merge(mmfs + 1, mmfs + 0, mmfs + 2, &xmp, &result);
- for (i = 0; i < 3; i++)
- free(mmfs[i].ptr);
-
if (ret >= 0) {
const char *filename = argv[0];
char *fpath = prefix_filename(prefix, argv[0]);
@@ -116,5 +116,9 @@ int cmd_merge_file(int argc, const char **argv, const char *prefix)
if (ret > 127)
ret = 127;
+cleanup:
+ for (i = 0; i < 3; i++)
+ free(mmfs[i].ptr);
+
return ret;
}
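
The hunks above funnel every early error through one cleanup label so the three input buffers are always freed exactly once. A self-contained sketch of that pattern follows, using stand-in types rather than git's mmfile_t API.

/*
 * Self-contained sketch of the single-exit cleanup pattern adopted above:
 * every early error jumps to one label that frees all three buffers.
 * The struct and helper are stand-ins, not git's mmfile_t API.
 */
#include <stdlib.h>
#include <string.h>

struct buf { char *ptr; size_t size; };

static int read_file(struct buf *b, const char *path)
{
	(void)path;			/* a real version would read the file */
	b->ptr = strdup("contents");
	b->size = b->ptr ? strlen(b->ptr) : 0;
	return b->ptr ? 0 : -1;
}

static int merge_three(const char *a, const char *b, const char *c)
{
	struct buf bufs[3] = { { 0 } };
	const char *names[3] = { a, b, c };
	int i, ret = 0;

	for (i = 0; i < 3; i++) {
		if (read_file(&bufs[i], names[i])) {
			ret = -1;
			goto cleanup;	/* buffers read so far still get freed */
		}
	}

	/* ... run the three-way merge on bufs[0..2] ... */

cleanup:
	for (i = 0; i < 3; i++)
		free(bufs[i].ptr);
	return ret;
}

int main(void)
{
	return merge_three("base", "ours", "theirs") ? 1 : 0;
}
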
diff --git a/builtin/merge-tree.c b/builtin/merge-tree.c
index 5dc94d6..ae57829 100644
--- a/builtin/merge-tree.c
+++ b/builtin/merge-tree.c
@@ -2,13 +2,18 @@
#include "builtin.h"
#include "tree-walk.h"
#include "xdiff-interface.h"
+#include "help.h"
+#include "commit-reach.h"
+#include "merge-ort.h"
#include "object-store.h"
+#include "parse-options.h"
#include "repository.h"
#include "blob.h"
#include "exec-cmd.h"
#include "merge-blobs.h"
+#include "quote.h"
-static const char merge_tree_usage[] = "git merge-tree <base-tree> <branch1> <branch2>";
+static int line_termination = '\n';
struct merge_list {
struct merge_list *next;
@@ -28,7 +33,7 @@ static void add_merge_entry(struct merge_list *entry)
merge_result_end = &entry->next;
}
-static void merge_trees(struct tree_desc t[3], const char *base);
+static void trivial_merge_trees(struct tree_desc t[3], const char *base);
static const char *explanation(struct merge_list *entry)
{
@@ -225,7 +230,7 @@ static void unresolved_directory(const struct traverse_info *info,
buf2 = fill_tree_descriptor(r, t + 2, ENTRY_OID(n + 2));
#undef ENTRY_OID
- merge_trees(t, newbase);
+ trivial_merge_trees(t, newbase);
free(buf0);
free(buf1);
@@ -342,7 +347,7 @@ static int threeway_callback(int n, unsigned long mask, unsigned long dirmask, s
return mask;
}
-static void merge_trees(struct tree_desc t[3], const char *base)
+static void trivial_merge_trees(struct tree_desc t[3], const char *base)
{
struct traverse_info info;
@@ -366,19 +371,18 @@ static void *get_tree_descriptor(struct repository *r,
return buf;
}
-int cmd_merge_tree(int argc, const char **argv, const char *prefix)
+static int trivial_merge(const char *base,
+ const char *branch1,
+ const char *branch2)
{
struct repository *r = the_repository;
struct tree_desc t[3];
void *buf1, *buf2, *buf3;
- if (argc != 4)
- usage(merge_tree_usage);
-
- buf1 = get_tree_descriptor(r, t+0, argv[1]);
- buf2 = get_tree_descriptor(r, t+1, argv[2]);
- buf3 = get_tree_descriptor(r, t+2, argv[3]);
- merge_trees(t, "");
+ buf1 = get_tree_descriptor(r, t+0, base);
+ buf2 = get_tree_descriptor(r, t+1, branch1);
+ buf3 = get_tree_descriptor(r, t+2, branch2);
+ trivial_merge_trees(t, "");
free(buf1);
free(buf2);
free(buf3);
@@ -386,3 +390,162 @@ int cmd_merge_tree(int argc, const char **argv, const char *prefix)
show_result();
return 0;
}
+
+enum mode {
+ MODE_UNKNOWN,
+ MODE_TRIVIAL,
+ MODE_REAL,
+};
+
+struct merge_tree_options {
+ int mode;
+ int allow_unrelated_histories;
+ int show_messages;
+ int name_only;
+};
+
+static int real_merge(struct merge_tree_options *o,
+ const char *branch1, const char *branch2,
+ const char *prefix)
+{
+ struct commit *parent1, *parent2;
+ struct commit_list *merge_bases = NULL;
+ struct merge_options opt;
+ struct merge_result result = { 0 };
+
+ parent1 = get_merge_parent(branch1);
+ if (!parent1)
+ help_unknown_ref(branch1, "merge-tree",
+ _("not something we can merge"));
+
+ parent2 = get_merge_parent(branch2);
+ if (!parent2)
+ help_unknown_ref(branch2, "merge-tree",
+ _("not something we can merge"));
+
+ init_merge_options(&opt, the_repository);
+
+ opt.show_rename_progress = 0;
+
+ opt.branch1 = branch1;
+ opt.branch2 = branch2;
+
+ /*
+ * Get the merge bases, in reverse order; see comment above
+ * merge_incore_recursive in merge-ort.h
+ */
+ merge_bases = get_merge_bases(parent1, parent2);
+ if (!merge_bases && !o->allow_unrelated_histories)
+ die(_("refusing to merge unrelated histories"));
+ merge_bases = reverse_commit_list(merge_bases);
+
+ merge_incore_recursive(&opt, merge_bases, parent1, parent2, &result);
+ if (result.clean < 0)
+ die(_("failure to merge"));
+
+ if (o->show_messages == -1)
+ o->show_messages = !result.clean;
+
+ printf("%s%c", oid_to_hex(&result.tree->object.oid), line_termination);
+ if (!result.clean) {
+ struct string_list conflicted_files = STRING_LIST_INIT_NODUP;
+ const char *last = NULL;
+ int i;
+
+ merge_get_conflicted_files(&result, &conflicted_files);
+ for (i = 0; i < conflicted_files.nr; i++) {
+ const char *name = conflicted_files.items[i].string;
+ struct stage_info *c = conflicted_files.items[i].util;
+ if (!o->name_only)
+ printf("%06o %s %d\t",
+ c->mode, oid_to_hex(&c->oid), c->stage);
+ else if (last && !strcmp(last, name))
+ continue;
+ write_name_quoted_relative(
+ name, prefix, stdout, line_termination);
+ last = name;
+ }
+ string_list_clear(&conflicted_files, 1);
+ }
+ if (o->show_messages) {
+ putchar(line_termination);
+ merge_display_update_messages(&opt, line_termination == '\0',
+ &result);
+ }
+ merge_finalize(&opt, &result);
+ return !result.clean; /* result.clean < 0 handled above */
+}
+
+int cmd_merge_tree(int argc, const char **argv, const char *prefix)
+{
+ struct merge_tree_options o = { .show_messages = -1 };
+ int expected_remaining_argc;
+ int original_argc;
+
+ const char * const merge_tree_usage[] = {
+ N_("git merge-tree [--write-tree] [<options>] <branch1> <branch2>"),
+ N_("git merge-tree [--trivial-merge] <base-tree> <branch1> <branch2>"),
+ NULL
+ };
+ struct option mt_options[] = {
+ OPT_CMDMODE(0, "write-tree", &o.mode,
+ N_("do a real merge instead of a trivial merge"),
+ MODE_REAL),
+ OPT_CMDMODE(0, "trivial-merge", &o.mode,
+ N_("do a trivial merge only"), MODE_TRIVIAL),
+ OPT_BOOL(0, "messages", &o.show_messages,
+ N_("also show informational/conflict messages")),
+ OPT_SET_INT('z', NULL, &line_termination,
+ N_("separate paths with the NUL character"), '\0'),
+ OPT_BOOL_F(0, "name-only",
+ &o.name_only,
+ N_("list filenames without modes/oids/stages"),
+ PARSE_OPT_NONEG),
+ OPT_BOOL_F(0, "allow-unrelated-histories",
+ &o.allow_unrelated_histories,
+ N_("allow merging unrelated histories"),
+ PARSE_OPT_NONEG),
+ OPT_END()
+ };
+
+ /* Parse arguments */
+ original_argc = argc - 1; /* ignoring argv[0] */
+ argc = parse_options(argc, argv, prefix, mt_options,
+ merge_tree_usage, PARSE_OPT_STOP_AT_NON_OPTION);
+ switch (o.mode) {
+ default:
+ BUG("unexpected command mode %d", o.mode);
+ case MODE_UNKNOWN:
+ switch (argc) {
+ default:
+ usage_with_options(merge_tree_usage, mt_options);
+ case 2:
+ o.mode = MODE_REAL;
+ break;
+ case 3:
+ o.mode = MODE_TRIVIAL;
+ break;
+ }
+ expected_remaining_argc = argc;
+ break;
+ case MODE_REAL:
+ expected_remaining_argc = 2;
+ break;
+ case MODE_TRIVIAL:
+ expected_remaining_argc = 3;
+ /* Removal of `--trivial-merge` is expected */
+ original_argc--;
+ break;
+ }
+ if (o.mode == MODE_TRIVIAL && argc < original_argc)
+ die(_("--trivial-merge is incompatible with all other options"));
+
+ if (argc != expected_remaining_argc)
+ usage_with_options(merge_tree_usage, mt_options);
+
+ /* Do the relevant type of merge */
+ if (o.mode == MODE_REAL)
+ return real_merge(&o, argv[0], argv[1], prefix);
+ else
+ return trivial_merge(argv[0], argv[1], argv[2]);
+}
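
When neither --write-tree nor --trivial-merge is given, the code above infers the mode from the number of remaining arguments. A compact sketch of that decision, with illustrative names only:

/*
 * Sketch of the mode detection above: with neither --write-tree nor
 * --trivial-merge given, the remaining argument count decides (two
 * arguments mean a real merge, three mean a trivial merge).
 */
#include <stdio.h>

enum mode { MODE_UNKNOWN, MODE_TRIVIAL, MODE_REAL };

static int expected_args(enum mode *mode, int argc)
{
	switch (*mode) {
	case MODE_UNKNOWN:
		if (argc == 2)
			*mode = MODE_REAL;
		else if (argc == 3)
			*mode = MODE_TRIVIAL;
		else
			return -1;	/* usage error */
		return argc;
	case MODE_REAL:
		return 2;
	case MODE_TRIVIAL:
		return 3;
	}
	return -1;
}

int main(void)
{
	enum mode m = MODE_UNKNOWN;
	int n = expected_args(&m, 2);

	printf("expected=%d mode=%d\n", n, m);	/* 2 args: real merge */
	return 0;
}
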
diff --git a/builtin/merge.c b/builtin/merge.c
index f178f5a..23170f2 100644
--- a/builtin/merge.c
+++ b/builtin/merge.c
@@ -375,7 +375,6 @@ static void reset_hard(const struct object_id *oid, int verbose)
static void restore_state(const struct object_id *head,
const struct object_id *stash)
{
- struct strbuf sb = STRBUF_INIT;
const char *args[] = { "stash", "apply", NULL, NULL };
if (is_null_oid(stash))
@@ -391,7 +390,6 @@ static void restore_state(const struct object_id *head,
*/
run_command_v_opt(args, RUN_GIT_CMD);
- strbuf_release(&sb);
refresh_cache(REFRESH_QUIET);
}
@@ -443,6 +441,7 @@ static void squash_message(struct commit *commit, struct commit_list *remotehead
}
write_file_buf(git_path_squash_msg(the_repository), out.buf, out.len);
strbuf_release(&out);
+ release_revisions(&rev);
}
static void finish(struct commit *head_commit,
@@ -501,7 +500,6 @@ static void merge_name(const char *remote, struct strbuf *msg)
{
struct commit *remote_head;
struct object_id branch_head;
- struct strbuf buf = STRBUF_INIT;
struct strbuf bname = STRBUF_INIT;
struct merge_remote_desc *desc;
const char *ptr;
@@ -589,7 +587,6 @@ static void merge_name(const char *remote, struct strbuf *msg)
oid_to_hex(&remote_head->object.oid), remote);
cleanup:
free(found_ref);
- strbuf_release(&buf);
strbuf_release(&bname);
}
@@ -998,6 +995,7 @@ static int evaluate_result(void)
*/
cnt += count_unmerged_entries();
+ release_revisions(&rev);
return cnt;
}
diff --git a/builtin/mktree.c b/builtin/mktree.c
index 902edba..06d8140 100644
--- a/builtin/mktree.c
+++ b/builtin/mktree.c
@@ -74,6 +74,7 @@ static void mktree_line(char *buf, int nul_term_line, int allow_missing)
unsigned mode;
enum object_type mode_type; /* object type derived from mode */
enum object_type obj_type; /* object type derived from sha */
+ struct object_info oi = OBJECT_INFO_INIT;
char *path, *to_free = NULL;
struct object_id oid;
@@ -116,8 +117,14 @@ static void mktree_line(char *buf, int nul_term_line, int allow_missing)
path, ptr, type_name(mode_type));
}
- /* Check the type of object identified by sha1 */
- obj_type = oid_object_info(the_repository, &oid, NULL);
+ /* Check the type of object identified by oid without fetching objects */
+ oi.typep = &obj_type;
+ if (oid_object_info_extended(the_repository, &oid, &oi,
+ OBJECT_INFO_LOOKUP_REPLACE |
+ OBJECT_INFO_QUICK |
+ OBJECT_INFO_SKIP_FETCH_OBJECT) < 0)
+ obj_type = -1;
+
if (obj_type < 0) {
if (allow_missing) {
; /* no problem - missing objects are presumed to be of the right type */
diff --git a/builtin/multi-pack-index.c b/builtin/multi-pack-index.c
index 4480ba3..8f24d59 100644
--- a/builtin/multi-pack-index.c
+++ b/builtin/multi-pack-index.c
@@ -44,7 +44,7 @@ static char const * const builtin_multi_pack_index_usage[] = {
};
static struct opts_multi_pack_index {
- const char *object_dir;
+ char *object_dir;
const char *preferred_pack;
const char *refs_snapshot;
unsigned long batch_size;
@@ -52,9 +52,23 @@ static struct opts_multi_pack_index {
int stdin_packs;
} opts;
+
+static int parse_object_dir(const struct option *opt, const char *arg,
+ int unset)
+{
+ free(opts.object_dir);
+ if (unset)
+ opts.object_dir = xstrdup(get_object_directory());
+ else
+ opts.object_dir = real_pathdup(arg, 1);
+ return 0;
+}
+
static struct option common_opts[] = {
- OPT_FILENAME(0, "object-dir", &opts.object_dir,
- N_("object directory containing set of packfile and pack-index pairs")),
+ OPT_CALLBACK(0, "object-dir", &opts.object_dir,
+ N_("directory"),
+ N_("object directory containing set of packfile and pack-index pairs"),
+ parse_object_dir),
OPT_END(),
};
@@ -120,7 +134,7 @@ static int cmd_multi_pack_index_write(int argc, const char **argv)
opts.flags |= MIDX_PROGRESS;
argc = parse_options(argc, argv, NULL,
options, builtin_multi_pack_index_write_usage,
- PARSE_OPT_KEEP_UNKNOWN);
+ 0);
if (argc)
usage_with_options(builtin_multi_pack_index_write_usage,
options);
@@ -162,7 +176,7 @@ static int cmd_multi_pack_index_verify(int argc, const char **argv)
opts.flags |= MIDX_PROGRESS;
argc = parse_options(argc, argv, NULL,
options, builtin_multi_pack_index_verify_usage,
- PARSE_OPT_KEEP_UNKNOWN);
+ 0);
if (argc)
usage_with_options(builtin_multi_pack_index_verify_usage,
options);
@@ -188,7 +202,7 @@ static int cmd_multi_pack_index_expire(int argc, const char **argv)
opts.flags |= MIDX_PROGRESS;
argc = parse_options(argc, argv, NULL,
options, builtin_multi_pack_index_expire_usage,
- PARSE_OPT_KEEP_UNKNOWN);
+ 0);
if (argc)
usage_with_options(builtin_multi_pack_index_expire_usage,
options);
@@ -218,7 +232,7 @@ static int cmd_multi_pack_index_repack(int argc, const char **argv)
argc = parse_options(argc, argv, NULL,
options,
builtin_multi_pack_index_repack_usage,
- PARSE_OPT_KEEP_UNKNOWN);
+ 0);
if (argc)
usage_with_options(builtin_multi_pack_index_repack_usage,
options);
@@ -232,31 +246,40 @@ static int cmd_multi_pack_index_repack(int argc, const char **argv)
int cmd_multi_pack_index(int argc, const char **argv,
const char *prefix)
{
+ int res;
struct option *builtin_multi_pack_index_options = common_opts;
git_config(git_default_config, NULL);
+ if (the_repository &&
+ the_repository->objects &&
+ the_repository->objects->odb)
+ opts.object_dir = xstrdup(the_repository->objects->odb->path);
+
argc = parse_options(argc, argv, prefix,
builtin_multi_pack_index_options,
builtin_multi_pack_index_usage,
PARSE_OPT_STOP_AT_NON_OPTION);
- if (!opts.object_dir)
- opts.object_dir = get_object_directory();
-
if (!argc)
goto usage;
if (!strcmp(argv[0], "repack"))
- return cmd_multi_pack_index_repack(argc, argv);
+ res = cmd_multi_pack_index_repack(argc, argv);
else if (!strcmp(argv[0], "write"))
- return cmd_multi_pack_index_write(argc, argv);
+ res = cmd_multi_pack_index_write(argc, argv);
else if (!strcmp(argv[0], "verify"))
- return cmd_multi_pack_index_verify(argc, argv);
+ res = cmd_multi_pack_index_verify(argc, argv);
else if (!strcmp(argv[0], "expire"))
- return cmd_multi_pack_index_expire(argc, argv);
+ res = cmd_multi_pack_index_expire(argc, argv);
+ else {
+ error(_("unrecognized subcommand: %s"), argv[0]);
+ goto usage;
+ }
+
+ free(opts.object_dir);
+ return res;
- error(_("unrecognized subcommand: %s"), argv[0]);
usage:
usage_with_options(builtin_multi_pack_index_usage,
builtin_multi_pack_index_options);
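
The parse_object_dir() callback above always stores a freshly allocated path in opts.object_dir, which is what makes the unconditional free() at the end safe. A standalone sketch of that owned-string option pattern, using plain C stand-ins rather than git's parse-options machinery:

/*
 * Sketch of the owned-string option pattern above: the setter always stores
 * a freshly allocated copy, so the caller can free() it unconditionally on
 * the way out. Plain C stand-ins, not git's parse-options machinery.
 */
#include <stdlib.h>
#include <string.h>

static char *object_dir;

static int set_object_dir(const char *arg)
{
	free(object_dir);			/* drop any earlier value */
	object_dir = strdup(arg ? arg : ".");	/* always heap-allocated */
	return object_dir ? 0 : -1;
}

int main(int argc, char **argv)
{
	set_object_dir(".git/objects");		/* built-in default */
	if (argc > 1)
		set_object_dir(argv[1]);	/* --object-dir=<arg> */

	/* ... run the subcommand ... */

	free(object_dir);			/* single, unconditional cleanup */
	return 0;
}
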
diff --git a/builtin/mv.c b/builtin/mv.c
index 83a465b..4729bb1 100644
--- a/builtin/mv.c
+++ b/builtin/mv.c
@@ -13,12 +13,21 @@
#include "string-list.h"
#include "parse-options.h"
#include "submodule.h"
+#include "entry.h"
static const char * const builtin_mv_usage[] = {
N_("git mv [<options>] <source>... <destination>"),
NULL
};
+enum update_mode {
+ BOTH = 0,
+ WORKING_DIRECTORY = (1 << 1),
+ INDEX = (1 << 2),
+ SPARSE = (1 << 3),
+ SKIP_WORKTREE_DIR = (1 << 4),
+};
+
#define DUP_BASENAME 1
#define KEEP_TRAILING_SLASH 2
@@ -115,6 +124,36 @@ static int index_range_of_same_dir(const char *src, int length,
return last - first;
}
+/*
+ * Check if an out-of-cone directory should be in the index. Imagine a case
+ * where all the files under a directory are marked with the 'CE_SKIP_WORKTREE'
+ * bit and thus the directory is sparsified.
+ *
+ * Return 0 if such a directory exists in the index (i.e. if any of its
+ * contained files were not marked with CE_SKIP_WORKTREE, the directory
+ * would be present in the working tree). Return 1 otherwise.
+ */
+static int check_dir_in_index(const char *name)
+{
+ const char *with_slash = add_slash(name);
+ int length = strlen(with_slash);
+
+ int pos = cache_name_pos(with_slash, length);
+ const struct cache_entry *ce;
+
+ if (pos < 0) {
+ pos = -pos - 1;
+ if (pos >= the_index.cache_nr)
+ return 1;
+ ce = active_cache[pos];
+ if (strncmp(with_slash, ce->name, length))
+ return 1;
+ if (ce_skip_worktree(ce))
+ return 0;
+ }
+ return 1;
+}
+
int cmd_mv(int argc, const char **argv, const char *prefix)
{
int i, flags, gitmodules_modified = 0;
@@ -129,7 +168,7 @@ int cmd_mv(int argc, const char **argv, const char *prefix)
OPT_END(),
};
const char **source, **destination, **dest_path, **submodule_gitfile;
- enum update_mode { BOTH = 0, WORKING_DIRECTORY, INDEX, SPARSE } *modes;
+ enum update_mode *modes;
struct stat st;
struct string_list src_for_dst = STRING_LIST_INIT_NODUP;
struct lock_file lock_file = LOCK_INIT;
@@ -148,7 +187,8 @@ int cmd_mv(int argc, const char **argv, const char *prefix)
die(_("index file corrupt"));
source = internal_prefix_pathspec(prefix, argv, argc, 0);
- modes = xcalloc(argc, sizeof(enum update_mode));
+ CALLOC_ARRAY(modes, argc);
+
/*
* Keep trailing slash, needed to let
* "git mv file no-such-dir/" error out, except in the case
@@ -176,7 +216,7 @@ int cmd_mv(int argc, const char **argv, const char *prefix)
/* Checking */
for (i = 0; i < argc; i++) {
const char *src = source[i], *dst = destination[i];
- int length, src_is_dir;
+ int length;
const char *bad = NULL;
int skip_sparse = 0;
@@ -185,54 +225,103 @@ int cmd_mv(int argc, const char **argv, const char *prefix)
length = strlen(src);
if (lstat(src, &st) < 0) {
- /* only error if existence is expected. */
- if (modes[i] != SPARSE)
+ int pos;
+ const struct cache_entry *ce;
+
+ pos = cache_name_pos(src, length);
+ if (pos < 0) {
+ const char *src_w_slash = add_slash(src);
+ if (!path_in_sparse_checkout(src_w_slash, &the_index) &&
+ !check_dir_in_index(src)) {
+ modes[i] |= SKIP_WORKTREE_DIR;
+ goto dir_check;
+ }
+ /* only error if existence is expected. */
+ if (!(modes[i] & SPARSE))
+ bad = _("bad source");
+ goto act_on_entry;
+ }
+ ce = active_cache[pos];
+ if (!ce_skip_worktree(ce)) {
bad = _("bad source");
- } else if (!strncmp(src, dst, length) &&
- (dst[length] == 0 || dst[length] == '/')) {
+ goto act_on_entry;
+ }
+ if (!ignore_sparse) {
+ string_list_append(&only_match_skip_worktree, src);
+ goto act_on_entry;
+ }
+ /* Check if dst exists in index */
+ if (cache_name_pos(dst, strlen(dst)) < 0) {
+ modes[i] |= SPARSE;
+ goto act_on_entry;
+ }
+ if (!force) {
+ bad = _("destination exists");
+ goto act_on_entry;
+ }
+ modes[i] |= SPARSE;
+ goto act_on_entry;
+ }
+ if (!strncmp(src, dst, length) &&
+ (dst[length] == 0 || dst[length] == '/')) {
bad = _("can not move directory into itself");
- } else if ((src_is_dir = S_ISDIR(st.st_mode))
- && lstat(dst, &st) == 0)
+ goto act_on_entry;
+ }
+ if (S_ISDIR(st.st_mode)
+ && lstat(dst, &st) == 0) {
bad = _("cannot move directory over file");
- else if (src_is_dir) {
+ goto act_on_entry;
+ }
+
+dir_check:
+ if (S_ISDIR(st.st_mode)) {
+ int j, dst_len, n;
int first = cache_name_pos(src, length), last;
- if (first >= 0)
+ if (first >= 0) {
prepare_move_submodule(src, first,
submodule_gitfile + i);
- else if (index_range_of_same_dir(src, length,
- &first, &last) < 1)
+ goto act_on_entry;
+ } else if (index_range_of_same_dir(src, length,
+ &first, &last) < 1) {
bad = _("source directory is empty");
- else { /* last - first >= 1 */
- int j, dst_len, n;
-
- modes[i] = WORKING_DIRECTORY;
- n = argc + last - first;
- REALLOC_ARRAY(source, n);
- REALLOC_ARRAY(destination, n);
- REALLOC_ARRAY(modes, n);
- REALLOC_ARRAY(submodule_gitfile, n);
-
- dst = add_slash(dst);
- dst_len = strlen(dst);
-
- for (j = 0; j < last - first; j++) {
- const struct cache_entry *ce = active_cache[first + j];
- const char *path = ce->name;
- source[argc + j] = path;
- destination[argc + j] =
- prefix_path(dst, dst_len, path + length + 1);
- modes[argc + j] = ce_skip_worktree(ce) ? SPARSE : INDEX;
- submodule_gitfile[argc + j] = NULL;
- }
- argc += last - first;
+ goto act_on_entry;
}
- } else if (!(ce = cache_file_exists(src, length, 0))) {
+
+ /* last - first >= 1 */
+ modes[i] |= WORKING_DIRECTORY;
+ n = argc + last - first;
+ REALLOC_ARRAY(source, n);
+ REALLOC_ARRAY(destination, n);
+ REALLOC_ARRAY(modes, n);
+ REALLOC_ARRAY(submodule_gitfile, n);
+
+ dst = add_slash(dst);
+ dst_len = strlen(dst);
+
+ for (j = 0; j < last - first; j++) {
+ const struct cache_entry *ce = active_cache[first + j];
+ const char *path = ce->name;
+ source[argc + j] = path;
+ destination[argc + j] =
+ prefix_path(dst, dst_len, path + length + 1);
+ memset(modes + argc + j, 0, sizeof(enum update_mode));
+ modes[argc + j] |= ce_skip_worktree(ce) ? SPARSE : INDEX;
+ submodule_gitfile[argc + j] = NULL;
+ }
+ argc += last - first;
+ goto act_on_entry;
+ }
+ if (!(ce = cache_file_exists(src, length, 0))) {
bad = _("not under version control");
- } else if (ce_stage(ce)) {
+ goto act_on_entry;
+ }
+ if (ce_stage(ce)) {
bad = _("conflicted");
- } else if (lstat(dst, &st) == 0 &&
- (!ignore_case || strcasecmp(src, dst))) {
+ goto act_on_entry;
+ }
+ if (lstat(dst, &st) == 0 &&
+ (!ignore_case || strcasecmp(src, dst))) {
bad = _("destination exists");
if (force) {
/*
@@ -246,34 +335,40 @@ int cmd_mv(int argc, const char **argv, const char *prefix)
} else
bad = _("Cannot overwrite");
}
- } else if (string_list_has_string(&src_for_dst, dst))
+ goto act_on_entry;
+ }
+ if (string_list_has_string(&src_for_dst, dst)) {
bad = _("multiple sources for the same target");
- else if (is_dir_sep(dst[strlen(dst) - 1]))
+ goto act_on_entry;
+ }
+ if (is_dir_sep(dst[strlen(dst) - 1])) {
bad = _("destination directory does not exist");
- else {
- /*
- * We check if the paths are in the sparse-checkout
- * definition as a very final check, since that
- * allows us to point the user to the --sparse
- * option as a way to have a successful run.
- */
- if (!ignore_sparse &&
- !path_in_sparse_checkout(src, &the_index)) {
- string_list_append(&only_match_skip_worktree, src);
- skip_sparse = 1;
- }
- if (!ignore_sparse &&
- !path_in_sparse_checkout(dst, &the_index)) {
- string_list_append(&only_match_skip_worktree, dst);
- skip_sparse = 1;
- }
-
- if (skip_sparse)
- goto remove_entry;
+ goto act_on_entry;
+ }
- string_list_insert(&src_for_dst, dst);
+ /*
+ * We check if the paths are in the sparse-checkout
+ * definition as a very final check, since that
+ * allows us to point the user to the --sparse
+ * option as a way to have a successful run.
+ */
+ if (!ignore_sparse &&
+ !path_in_sparse_checkout(src, &the_index)) {
+ string_list_append(&only_match_skip_worktree, src);
+ skip_sparse = 1;
}
+ if (!ignore_sparse &&
+ !path_in_sparse_checkout(dst, &the_index)) {
+ string_list_append(&only_match_skip_worktree, dst);
+ skip_sparse = 1;
+ }
+
+ if (skip_sparse)
+ goto remove_entry;
+
+ string_list_insert(&src_for_dst, dst);
+act_on_entry:
if (!bad)
continue;
if (!ignore_errors)
@@ -282,14 +377,11 @@ int cmd_mv(int argc, const char **argv, const char *prefix)
remove_entry:
if (--argc > 0) {
int n = argc - i;
- memmove(source + i, source + i + 1,
- n * sizeof(char *));
- memmove(destination + i, destination + i + 1,
- n * sizeof(char *));
- memmove(modes + i, modes + i + 1,
- n * sizeof(enum update_mode));
- memmove(submodule_gitfile + i, submodule_gitfile + i + 1,
- n * sizeof(char *));
+ MOVE_ARRAY(source + i, source + i + 1, n);
+ MOVE_ARRAY(destination + i, destination + i + 1, n);
+ MOVE_ARRAY(modes + i, modes + i + 1, n);
+ MOVE_ARRAY(submodule_gitfile + i,
+ submodule_gitfile + i + 1, n);
i--;
}
}
@@ -304,11 +396,17 @@ remove_entry:
const char *src = source[i], *dst = destination[i];
enum update_mode mode = modes[i];
int pos;
+ struct checkout state = CHECKOUT_INIT;
+ state.istate = &the_index;
+
+ if (force)
+ state.force = 1;
if (show_only || verbose)
printf(_("Renaming %s to %s\n"), src, dst);
if (show_only)
continue;
- if (mode != INDEX && mode != SPARSE && rename(src, dst) < 0) {
+ if (!(mode & (INDEX | SPARSE | SKIP_WORKTREE_DIR)) &&
+ rename(src, dst) < 0) {
if (ignore_errors)
continue;
die_errno(_("renaming '%s' failed"), src);
@@ -322,12 +420,23 @@ remove_entry:
1);
}
- if (mode == WORKING_DIRECTORY)
+ if (mode & (WORKING_DIRECTORY | SKIP_WORKTREE_DIR))
continue;
pos = cache_name_pos(src, strlen(src));
assert(pos >= 0);
rename_cache_entry_at(pos, dst);
+
+ if ((mode & SPARSE) &&
+ (path_in_sparse_checkout(dst, &the_index))) {
+ int dst_pos;
+
+ dst_pos = cache_name_pos(dst, strlen(dst));
+ active_cache[dst_pos]->ce_flags &= ~CE_SKIP_WORKTREE;
+
+ if (checkout_entry(active_cache[dst_pos], &state, NULL, NULL))
+ die(_("cannot checkout %s"), active_cache[dst_pos]->name);
+ }
}
if (gitmodules_modified)
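
check_dir_in_index() above probes the sorted index for "<dir>/" and lets the first entry under that prefix decide whether the directory is fully sparsified. The sketch below mimics the idea on a plain sorted string array; the array, names, and simplified return convention are illustrative only.

/*
 * Standalone sketch of the check_dir_in_index() idea above: probe a sorted
 * index for "<dir>/" and let the first entry under that prefix decide
 * whether the directory is fully sparsified. The array below stands in for
 * git's index, and the return convention is simplified.
 */
#include <stdio.h>
#include <string.h>

struct entry { const char *name; int skip_worktree; };

static const struct entry index_entries[] = {
	{ "a/file.txt", 0 },
	{ "sparse-dir/one.txt", 1 },
	{ "sparse-dir/two.txt", 1 },
};

static int dir_is_sparsified(const char *dir)
{
	char prefix[64];
	size_t i, len;

	snprintf(prefix, sizeof(prefix), "%s/", dir);
	len = strlen(prefix);

	for (i = 0; i < sizeof(index_entries) / sizeof(index_entries[0]); i++) {
		if (strncmp(index_entries[i].name, prefix, len))
			continue;
		/* first entry under the directory decides */
		return index_entries[i].skip_worktree;
	}
	return 0;	/* not in the index at all */
}

int main(void)
{
	printf("sparse-dir: %d\n", dir_is_sparsified("sparse-dir"));
	printf("a:          %d\n", dir_is_sparsified("a"));
	return 0;
}
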
diff --git a/builtin/name-rev.c b/builtin/name-rev.c
index c59b569..580b1eb 100644
--- a/builtin/name-rev.c
+++ b/builtin/name-rev.c
@@ -18,7 +18,7 @@
#define CUTOFF_DATE_SLOP 86400
struct rev_name {
- char *tip_name;
+ const char *tip_name;
timestamp_t taggerdate;
int generation;
int distance;
@@ -84,7 +84,7 @@ static int commit_is_before_cutoff(struct commit *commit)
static int is_valid_rev_name(const struct rev_name *name)
{
- return name && (name->generation || name->tip_name);
+ return name && name->tip_name;
}
static struct rev_name *get_commit_rev_name(const struct commit *commit)
@@ -146,20 +146,9 @@ static struct rev_name *create_or_update_name(struct commit *commit,
{
struct rev_name *name = commit_rev_name_at(&rev_names, commit);
- if (is_valid_rev_name(name)) {
- if (!is_better_name(name, taggerdate, generation, distance, from_tag))
- return NULL;
-
- /*
- * This string might still be shared with ancestors
- * (generation > 0). We can release it here regardless,
- * because the new name that has just won will be better
- * for them as well, so name_rev() will replace these
- * stale pointers when it processes the parents.
- */
- if (!name->generation)
- free(name->tip_name);
- }
+ if (is_valid_rev_name(name) &&
+ !is_better_name(name, taggerdate, generation, distance, from_tag))
+ return NULL;
name->taggerdate = taggerdate;
name->generation = generation;
@@ -588,7 +577,7 @@ int cmd_name_rev(int argc, const char **argv, const char *prefix)
N_("ignore refs matching <pattern>")),
OPT_GROUP(""),
OPT_BOOL(0, "all", &all, N_("list all commits reachable from all refs")),
- OPT_BOOL(0, "stdin", &transform_stdin, N_("deprecated: use annotate-stdin instead")),
+ OPT_BOOL(0, "stdin", &transform_stdin, N_("deprecated: use --annotate-stdin instead")),
OPT_BOOL(0, "annotate-stdin", &annotate_stdin, N_("annotate text from stdin")),
OPT_BOOL(0, "undefined", &allow_undefined, N_("allow to print `undefined` names (default)")),
OPT_BOOL(0, "always", &always,
diff --git a/builtin/pack-objects.c b/builtin/pack-objects.c
index c23b788..39e28cf 100644
--- a/builtin/pack-objects.c
+++ b/builtin/pack-objects.c
@@ -36,6 +36,7 @@
#include "trace2.h"
#include "shallow.h"
#include "promisor-remote.h"
+#include "pack-mtimes.h"
/*
* Objects we are going to pack are collected in the `to_pack` structure.
@@ -194,6 +195,8 @@ static int reuse_delta = 1, reuse_object = 1;
static int keep_unreachable, unpack_unreachable, include_tag;
static timestamp_t unpack_unreachable_expiration;
static int pack_loose_unreachable;
+static int cruft;
+static timestamp_t cruft_expiration;
static int local;
static int have_non_local_packs;
static int incremental;
@@ -237,8 +240,6 @@ static unsigned long cache_max_small_delta_size = 1000;
static unsigned long window_memory_limit = 0;
-static struct list_objects_filter_options filter_options;
-
static struct string_list uri_protocols = STRING_LIST_INIT_NODUP;
enum missing_action {
@@ -1262,9 +1263,13 @@ static void write_pack_file(void)
&to_pack, written_list, nr_written);
}
+ if (cruft)
+ pack_idx_opts.flags |= WRITE_MTIMES;
+
stage_tmp_packfiles(&tmpname, pack_tmp_name,
written_list, nr_written,
- &pack_idx_opts, hash, &idx_tmp_name);
+ &to_pack, &pack_idx_opts, hash,
+ &idx_tmp_name);
if (write_bitmap_index) {
size_t tmpname_len = tmpname.len;
@@ -1359,6 +1364,9 @@ static int want_found_object(const struct object_id *oid, int exclude,
if (incremental)
return 0;
+ if (!is_pack_valid(p))
+ return -1;
+
/*
* When asked to do --local (do not include an object that appears in a
* pack we borrow from elsewhere) or --honor-pack-keep (do not include
@@ -1474,6 +1482,9 @@ static int want_object_in_pack(const struct object_id *oid,
want = want_found_object(oid, exclude, *found_pack);
if (want != -1)
return want;
+
+ *found_pack = NULL;
+ *found_offset = 0;
}
for (m = get_multi_pack_index(the_repository); m; m = m->next) {
@@ -1517,13 +1528,13 @@ static int want_object_in_pack(const struct object_id *oid,
return 1;
}
-static void create_object_entry(const struct object_id *oid,
- enum object_type type,
- uint32_t hash,
- int exclude,
- int no_try_delta,
- struct packed_git *found_pack,
- off_t found_offset)
+static struct object_entry *create_object_entry(const struct object_id *oid,
+ enum object_type type,
+ uint32_t hash,
+ int exclude,
+ int no_try_delta,
+ struct packed_git *found_pack,
+ off_t found_offset)
{
struct object_entry *entry;
@@ -1540,6 +1551,8 @@ static void create_object_entry(const struct object_id *oid,
}
entry->no_try_delta = no_try_delta;
+
+ return entry;
}
static const char no_closure_warning[] = N_(
@@ -3157,7 +3170,7 @@ static int git_pack_config(const char *k, const char *v, void *cb)
if (!strcmp(k, "pack.indexversion")) {
pack_idx_opts.version = git_config_int(k, v);
if (pack_idx_opts.version > 2)
- die(_("bad pack.indexversion=%"PRIu32),
+ die(_("bad pack.indexVersion=%"PRIu32),
pack_idx_opts.version);
return 0;
}
@@ -3203,10 +3216,8 @@ static int add_object_entry_from_pack(const struct object_id *oid,
uint32_t pos,
void *_data)
{
- struct rev_info *revs = _data;
- struct object_info oi = OBJECT_INFO_INIT;
off_t ofs;
- enum object_type type;
+ enum object_type type = OBJ_NONE;
display_progress(progress_state, ++nr_seen);
@@ -3217,19 +3228,24 @@ static int add_object_entry_from_pack(const struct object_id *oid,
if (!want_object_in_pack(oid, 0, &p, &ofs))
return 0;
- oi.typep = &type;
- if (packed_object_info(the_repository, p, ofs, &oi) < 0)
- die(_("could not get type of object %s in pack %s"),
- oid_to_hex(oid), p->pack_name);
- else if (type == OBJ_COMMIT) {
- /*
- * commits in included packs are used as starting points for the
- * subsequent revision walk
- */
- add_pending_oid(revs, NULL, oid, 0);
- }
+ if (p) {
+ struct rev_info *revs = _data;
+ struct object_info oi = OBJECT_INFO_INIT;
+
+ oi.typep = &type;
+ if (packed_object_info(the_repository, p, ofs, &oi) < 0) {
+ die(_("could not get type of object %s in pack %s"),
+ oid_to_hex(oid), p->pack_name);
+ } else if (type == OBJ_COMMIT) {
+ /*
+ * commits in included packs are used as starting points for the
+ * subsequent revision walk
+ */
+ add_pending_oid(revs, NULL, oid, 0);
+ }
- stdin_packs_found_nr++;
+ stdin_packs_found_nr++;
+ }
create_object_entry(oid, type, 0, 0, 0, p, ofs);
@@ -3348,6 +3364,8 @@ static void read_packs_list_from_stdin(void)
struct packed_git *p = item->util;
if (!p)
die(_("could not find pack '%s'"), item->string);
+ if (!is_pack_valid(p))
+ die(_("packfile %s cannot be accessed"), p->pack_name);
}
/*
@@ -3371,8 +3389,6 @@ static void read_packs_list_from_stdin(void)
for_each_string_list_item(item, &include_packs) {
struct packed_git *p = item->util;
- if (!p)
- die(_("could not find pack '%s'"), item->string);
for_each_object_in_pack(p,
add_object_entry_from_pack,
&revs,
@@ -3396,6 +3412,217 @@ static void read_packs_list_from_stdin(void)
string_list_clear(&exclude_packs, 0);
}
+static void add_cruft_object_entry(const struct object_id *oid, enum object_type type,
+ struct packed_git *pack, off_t offset,
+ const char *name, uint32_t mtime)
+{
+ struct object_entry *entry;
+
+ display_progress(progress_state, ++nr_seen);
+
+ entry = packlist_find(&to_pack, oid);
+ if (entry) {
+ if (name) {
+ entry->hash = pack_name_hash(name);
+ entry->no_try_delta = no_try_delta(name);
+ }
+ } else {
+ if (!want_object_in_pack(oid, 0, &pack, &offset))
+ return;
+ if (!pack && type == OBJ_BLOB && !has_loose_object(oid)) {
+ /*
+ * If a traversed tree has a missing blob then we want
+ * to avoid adding that missing object to our pack.
+ *
+ * This only applies to missing blobs, not trees,
+ * because the traversal needs to parse sub-trees but
+ * not blobs.
+ *
+ * Note we only perform this check when we couldn't
+ * already find the object in a pack, so we're really
+ * limited to "ensure non-tip blobs which don't exist in
+ * packs do exist via loose objects". Confused?
+ */
+ return;
+ }
+
+ entry = create_object_entry(oid, type, pack_name_hash(name),
+ 0, name && no_try_delta(name),
+ pack, offset);
+ }
+
+ if (mtime > oe_cruft_mtime(&to_pack, entry))
+ oe_set_cruft_mtime(&to_pack, entry, mtime);
+ return;
+}
+
+static void show_cruft_object(struct object *obj, const char *name, void *data)
+{
+ /*
+ * if we did not record it earlier, it's at least as old as our
+ * expiration value. Rather than find it exactly, just use that
+ * value. This may bump it forward from its real mtime, but it
+ * will still be "too old" next time we run with the same
+ * expiration.
+ *
+ * if obj does appear in the packing list, this call is a noop (or may
+ * set the namehash).
+ */
+ add_cruft_object_entry(&obj->oid, obj->type, NULL, 0, name, cruft_expiration);
+}
+
+static void show_cruft_commit(struct commit *commit, void *data)
+{
+ show_cruft_object((struct object*)commit, NULL, data);
+}
+
+static int cruft_include_check_obj(struct object *obj, void *data)
+{
+ return !has_object_kept_pack(&obj->oid, IN_CORE_KEEP_PACKS);
+}
+
+static int cruft_include_check(struct commit *commit, void *data)
+{
+ return cruft_include_check_obj((struct object*)commit, data);
+}
+
+static void set_cruft_mtime(const struct object *object,
+ struct packed_git *pack,
+ off_t offset, time_t mtime)
+{
+ add_cruft_object_entry(&object->oid, object->type, pack, offset, NULL,
+ mtime);
+}
+
+static void mark_pack_kept_in_core(struct string_list *packs, unsigned keep)
+{
+ struct string_list_item *item = NULL;
+ for_each_string_list_item(item, packs) {
+ struct packed_git *p = item->util;
+ if (!p)
+ die(_("could not find pack '%s'"), item->string);
+ p->pack_keep_in_core = keep;
+ }
+}
+
+static void add_unreachable_loose_objects(void);
+static void add_objects_in_unpacked_packs(void);
+
+static void enumerate_cruft_objects(void)
+{
+ if (progress)
+ progress_state = start_progress(_("Enumerating cruft objects"), 0);
+
+ add_objects_in_unpacked_packs();
+ add_unreachable_loose_objects();
+
+ stop_progress(&progress_state);
+}
+
+static void enumerate_and_traverse_cruft_objects(struct string_list *fresh_packs)
+{
+ struct packed_git *p;
+ struct rev_info revs;
+ int ret;
+
+ repo_init_revisions(the_repository, &revs, NULL);
+
+ revs.tag_objects = 1;
+ revs.tree_objects = 1;
+ revs.blob_objects = 1;
+
+ revs.include_check = cruft_include_check;
+ revs.include_check_obj = cruft_include_check_obj;
+
+ revs.ignore_missing_links = 1;
+
+ if (progress)
+ progress_state = start_progress(_("Enumerating cruft objects"), 0);
+ ret = add_unseen_recent_objects_to_traversal(&revs, cruft_expiration,
+ set_cruft_mtime, 1);
+ stop_progress(&progress_state);
+
+ if (ret)
+ die(_("unable to add cruft objects"));
+
+ /*
+ * Re-mark only the fresh packs as kept so that objects in
+ * unknown packs do not halt the reachability traversal early.
+ */
+ for (p = get_all_packs(the_repository); p; p = p->next)
+ p->pack_keep_in_core = 0;
+ mark_pack_kept_in_core(fresh_packs, 1);
+
+ if (prepare_revision_walk(&revs))
+ die(_("revision walk setup failed"));
+ if (progress)
+ progress_state = start_progress(_("Traversing cruft objects"), 0);
+ nr_seen = 0;
+ traverse_commit_list(&revs, show_cruft_commit, show_cruft_object, NULL);
+
+ stop_progress(&progress_state);
+}
+
+static void read_cruft_objects(void)
+{
+ struct strbuf buf = STRBUF_INIT;
+ struct string_list discard_packs = STRING_LIST_INIT_DUP;
+ struct string_list fresh_packs = STRING_LIST_INIT_DUP;
+ struct packed_git *p;
+
+ ignore_packed_keep_in_core = 1;
+
+ while (strbuf_getline(&buf, stdin) != EOF) {
+ if (!buf.len)
+ continue;
+
+ if (*buf.buf == '-')
+ string_list_append(&discard_packs, buf.buf + 1);
+ else
+ string_list_append(&fresh_packs, buf.buf);
+ strbuf_reset(&buf);
+ }
+
+ string_list_sort(&discard_packs);
+ string_list_sort(&fresh_packs);
+
+ for (p = get_all_packs(the_repository); p; p = p->next) {
+ const char *pack_name = pack_basename(p);
+ struct string_list_item *item;
+
+ item = string_list_lookup(&fresh_packs, pack_name);
+ if (!item)
+ item = string_list_lookup(&discard_packs, pack_name);
+
+ if (item) {
+ item->util = p;
+ } else {
+ /*
+ * This pack wasn't mentioned in either the "fresh" or
+ * "discard" list, so the caller didn't know about it.
+ *
+ * Mark it as kept so that its objects are ignored by
+ * add_unseen_recent_objects_to_traversal(). We'll
+ * unmark it before starting the traversal so it doesn't
+ * halt the traversal early.
+ */
+ p->pack_keep_in_core = 1;
+ }
+ }
+
+ mark_pack_kept_in_core(&fresh_packs, 1);
+ mark_pack_kept_in_core(&discard_packs, 0);
+
+ if (cruft_expiration)
+ enumerate_and_traverse_cruft_objects(&fresh_packs);
+ else
+ enumerate_cruft_objects();
+
+ strbuf_release(&buf);
+ string_list_clear(&discard_packs, 0);
+ string_list_clear(&fresh_packs, 0);
+}
+
static void read_object_list_from_stdin(void)
{
char line[GIT_MAX_HEXSZ + 1 + PATH_MAX + 2];
@@ -3528,7 +3755,24 @@ static int add_object_in_unpacked_pack(const struct object_id *oid,
uint32_t pos,
void *_data)
{
- add_object_entry(oid, OBJ_NONE, "", 0);
+ if (cruft) {
+ off_t offset;
+ time_t mtime;
+
+ if (pack->is_cruft) {
+ if (load_pack_mtimes(pack) < 0)
+ die(_("could not load cruft pack .mtimes"));
+ mtime = nth_packed_mtime(pack, pos);
+ } else {
+ mtime = pack->mtime;
+ }
+ offset = nth_packed_object_offset(pack, pos);
+
+ add_cruft_object_entry(oid, OBJ_NONE, pack, offset,
+ NULL, mtime);
+ } else {
+ add_object_entry(oid, OBJ_NONE, "", 0);
+ }
return 0;
}
@@ -3552,7 +3796,19 @@ static int add_loose_object(const struct object_id *oid, const char *path,
return 0;
}
- add_object_entry(oid, type, "", 0);
+ if (cruft) {
+ struct stat st;
+ if (stat(path, &st) < 0) {
+ if (errno == ENOENT)
+ return 0;
+ return error_errno("unable to stat %s", oid_to_hex(oid));
+ }
+
+ add_cruft_object_entry(oid, type, NULL, 0, NULL,
+ st.st_mtime);
+ } else {
+ add_object_entry(oid, type, "", 0);
+ }
return 0;
}
@@ -3724,9 +3980,8 @@ static void mark_bitmap_preferred_tips(void)
}
}
-static void get_object_list(int ac, const char **av)
+static void get_object_list(struct rev_info *revs, int ac, const char **av)
{
- struct rev_info revs;
struct setup_revision_opt s_r_opt = {
.allow_exclude_promisor_objects = 1,
};
@@ -3734,10 +3989,8 @@ static void get_object_list(int ac, const char **av)
int flags = 0;
int save_warning;
- repo_init_revisions(the_repository, &revs, NULL);
save_commit_buffer = 0;
- setup_revisions(ac, av, &revs, &s_r_opt);
- list_objects_filter_copy(&revs.filter, &filter_options);
+ setup_revisions(ac, av, revs, &s_r_opt);
/* make sure shallows are read */
is_repository_shallow(the_repository);
@@ -3767,13 +4020,13 @@ static void get_object_list(int ac, const char **av)
}
die(_("not a rev '%s'"), line);
}
- if (handle_revision_arg(line, &revs, flags, REVARG_CANNOT_BE_FILENAME))
+ if (handle_revision_arg(line, revs, flags, REVARG_CANNOT_BE_FILENAME))
die(_("bad revision '%s'"), line);
}
warn_on_object_refname_ambiguity = save_warning;
- if (use_bitmap_index && !get_object_list_from_bitmap(&revs))
+ if (use_bitmap_index && !get_object_list_from_bitmap(revs))
return;
if (use_delta_islands)
@@ -3782,24 +4035,24 @@ static void get_object_list(int ac, const char **av)
if (write_bitmap_index)
mark_bitmap_preferred_tips();
- if (prepare_revision_walk(&revs))
+ if (prepare_revision_walk(revs))
die(_("revision walk setup failed"));
- mark_edges_uninteresting(&revs, show_edge, sparse);
+ mark_edges_uninteresting(revs, show_edge, sparse);
if (!fn_show_object)
fn_show_object = show_object;
- traverse_commit_list(&revs,
+ traverse_commit_list(revs,
show_commit, fn_show_object,
NULL);
if (unpack_unreachable_expiration) {
- revs.ignore_missing_links = 1;
- if (add_unseen_recent_objects_to_traversal(&revs,
- unpack_unreachable_expiration))
+ revs->ignore_missing_links = 1;
+ if (add_unseen_recent_objects_to_traversal(revs,
+ unpack_unreachable_expiration, NULL, 0))
die(_("unable to add recent objects"));
- if (prepare_revision_walk(&revs))
+ if (prepare_revision_walk(revs))
die(_("revision walk setup failed"));
- traverse_commit_list(&revs, record_recent_commit,
+ traverse_commit_list(revs, record_recent_commit,
record_recent_object, NULL);
}
@@ -3872,6 +4125,35 @@ static int option_parse_unpack_unreachable(const struct option *opt,
return 0;
}
+static int option_parse_cruft_expiration(const struct option *opt,
+ const char *arg, int unset)
+{
+ if (unset) {
+ cruft = 0;
+ cruft_expiration = 0;
+ } else {
+ cruft = 1;
+ if (arg)
+ cruft_expiration = approxidate(arg);
+ }
+ return 0;
+}
+
+struct po_filter_data {
+ unsigned have_revs:1;
+ struct rev_info revs;
+};
+
+static struct list_objects_filter_options *po_filter_revs_init(void *value)
+{
+ struct po_filter_data *data = value;
+
+ repo_init_revisions(the_repository, &data->revs, NULL);
+ data->have_revs = 1;
+
+ return &data->revs.filter;
+}
+
int cmd_pack_objects(int argc, const char **argv, const char *prefix)
{
int use_internal_rev_list = 0;
@@ -3882,6 +4164,8 @@ int cmd_pack_objects(int argc, const char **argv, const char *prefix)
int rev_list_index = 0;
int stdin_packs = 0;
struct string_list keep_pack_list = STRING_LIST_INIT_NODUP;
+ struct po_filter_data pfd = { .have_revs = 0 };
+
struct option pack_objects_options[] = {
OPT_SET_INT('q', "quiet", &progress,
N_("do not show progress meter"), 0),
@@ -3944,6 +4228,10 @@ int cmd_pack_objects(int argc, const char **argv, const char *prefix)
OPT_CALLBACK_F(0, "unpack-unreachable", NULL, N_("time"),
N_("unpack unreachable objects newer than <time>"),
PARSE_OPT_OPTARG, option_parse_unpack_unreachable),
+ OPT_BOOL(0, "cruft", &cruft, N_("create a cruft pack")),
+ OPT_CALLBACK_F(0, "cruft-expiration", NULL, N_("time"),
+ N_("expire cruft objects older than <time>"),
+ PARSE_OPT_OPTARG, option_parse_cruft_expiration),
OPT_BOOL(0, "sparse", &sparse,
N_("use the sparse reachability algorithm")),
OPT_BOOL(0, "thin", &thin,
@@ -3967,7 +4255,7 @@ int cmd_pack_objects(int argc, const char **argv, const char *prefix)
&write_bitmap_index,
N_("write a bitmap index if possible"),
WRITE_BITMAP_QUIET, PARSE_OPT_HIDDEN),
- OPT_PARSE_LIST_OBJECTS_FILTER(&filter_options),
+ OPT_PARSE_LIST_OBJECTS_FILTER_INIT(&pfd, po_filter_revs_init),
OPT_CALLBACK_F(0, "missing", NULL, N_("action"),
N_("handling for missing objects"), PARSE_OPT_NONEG,
option_parse_missing_action),
@@ -4070,7 +4358,7 @@ int cmd_pack_objects(int argc, const char **argv, const char *prefix)
if (!HAVE_THREADS && delta_search_threads != 1)
warning(_("no threads support, ignoring --threads"));
- if (!pack_to_stdout && !pack_size_limit)
+ if (!pack_to_stdout && !pack_size_limit && !cruft)
pack_size_limit = pack_size_limit_cfg;
if (pack_to_stdout && pack_size_limit)
die(_("--max-pack-size cannot be used to build a pack for transfer"));
@@ -4087,7 +4375,7 @@ int cmd_pack_objects(int argc, const char **argv, const char *prefix)
if (!rev_list_all || !rev_list_reflog || !rev_list_index)
unpack_unreachable_expiration = 0;
- if (filter_options.choice) {
+ if (pfd.have_revs && pfd.revs.filter.choice) {
if (!pack_to_stdout)
die(_("cannot use --filter without --stdout"));
if (stdin_packs)
@@ -4097,6 +4385,15 @@ int cmd_pack_objects(int argc, const char **argv, const char *prefix)
if (stdin_packs && use_internal_rev_list)
die(_("cannot use internal rev list with --stdin-packs"));
+ if (cruft) {
+ if (use_internal_rev_list)
+ die(_("cannot use internal rev list with --cruft"));
+ if (stdin_packs)
+ die(_("cannot use --stdin-packs with --cruft"));
+ if (pack_size_limit)
+ die(_("cannot use --max-pack-size with --cruft"));
+ }
+
/*
* "soft" reasons not to use bitmaps - for on-disk repack by default we want
*
@@ -4153,7 +4450,7 @@ int cmd_pack_objects(int argc, const char **argv, const char *prefix)
the_repository);
prepare_packing_data(the_repository, &to_pack);
- if (progress)
+ if (progress && !cruft)
progress_state = start_progress(_("Enumerating objects"), 0);
if (stdin_packs) {
/* avoids adding objects in excluded packs */
@@ -4161,10 +4458,19 @@ int cmd_pack_objects(int argc, const char **argv, const char *prefix)
read_packs_list_from_stdin();
if (rev_list_unpacked)
add_unreachable_loose_objects();
+ } else if (cruft) {
+ read_cruft_objects();
} else if (!use_internal_rev_list) {
read_object_list_from_stdin();
+ } else if (pfd.have_revs) {
+ get_object_list(&pfd.revs, rp.nr, rp.v);
+ release_revisions(&pfd.revs);
} else {
- get_object_list(rp.nr, rp.v);
+ struct rev_info revs;
+
+ repo_init_revisions(the_repository, &revs, NULL);
+ get_object_list(&revs, rp.nr, rp.v);
+ release_revisions(&revs);
}
cleanup_preferred_base();
if (include_tag && nr_result)
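
read_cruft_objects() above consumes a simple line protocol on stdin: each line names a pack, and a leading '-' marks a pack whose objects are being discarded rather than kept fresh. A minimal reader for that protocol, using plain stdio and no git types:

/*
 * Minimal reader for the stdin protocol consumed by read_cruft_objects()
 * above: one pack name per line, a leading '-' marks a pack whose objects
 * are being discarded, anything else is a "fresh" pack.
 */
#include <stdio.h>
#include <string.h>

int main(void)
{
	char line[4096];

	while (fgets(line, sizeof(line), stdin)) {
		line[strcspn(line, "\n")] = '\0';
		if (!*line)
			continue;
		if (*line == '-')
			printf("discard pack: %s\n", line + 1);
		else
			printf("fresh pack:   %s\n", line);
	}
	return 0;
}
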
diff --git a/builtin/pack-redundant.c b/builtin/pack-redundant.c
index 8bf5c0a..ed9b901 100644
--- a/builtin/pack-redundant.c
+++ b/builtin/pack-redundant.c
@@ -101,7 +101,7 @@ static inline struct llist_item *llist_insert(struct llist *list,
oidread(&new_item->oid, oid);
new_item->next = NULL;
- if (after != NULL) {
+ if (after) {
new_item->next = after->next;
after->next = new_item;
if (after == list->back)
@@ -157,7 +157,7 @@ redo_from_start:
if (cmp > 0) /* not in list, since sorted */
return prev;
if (!cmp) { /* found */
- if (prev == NULL) {
+ if (!prev) {
if (hint != NULL && hint != list->front) {
/* we don't know the previous element */
hint = NULL;
@@ -219,7 +219,7 @@ static struct pack_list * pack_list_difference(const struct pack_list *A,
struct pack_list *ret;
const struct pack_list *pl;
- if (A == NULL)
+ if (!A)
return NULL;
pl = B;
@@ -317,7 +317,7 @@ static size_t get_pack_redundancy(struct pack_list *pl)
struct pack_list *subset;
size_t ret = 0;
- if (pl == NULL)
+ if (!pl)
return 0;
while ((subset = pl->next)) {
@@ -611,7 +611,7 @@ int cmd_pack_redundant(int argc, const char **argv, const char *prefix)
while (*(argv + i) != NULL)
add_pack_file(*(argv + i++));
- if (local_packs == NULL)
+ if (!local_packs)
die("Zero packs found!");
load_all_objects();
diff --git a/builtin/prune.c b/builtin/prune.c
index c2bcdc0..df376b2 100644
--- a/builtin/prune.c
+++ b/builtin/prune.c
@@ -196,5 +196,6 @@ int cmd_prune(int argc, const char **argv, const char *prefix)
prune_shallow(show_only ? PRUNE_SHOW_ONLY : 0);
}
+ release_revisions(&revs);
return 0;
}
diff --git a/builtin/pull.c b/builtin/pull.c
index 4d667ab..403a24d 100644
--- a/builtin/pull.c
+++ b/builtin/pull.c
@@ -72,6 +72,7 @@ static const char * const pull_usage[] = {
static int opt_verbosity;
static char *opt_progress;
static int recurse_submodules = RECURSE_SUBMODULES_DEFAULT;
+static int recurse_submodules_cli = RECURSE_SUBMODULES_DEFAULT;
/* Options passed to git-merge or git-rebase */
static enum rebase_type opt_rebase = -1;
@@ -120,7 +121,7 @@ static struct option pull_options[] = {
N_("force progress reporting"),
PARSE_OPT_NOARG),
OPT_CALLBACK_F(0, "recurse-submodules",
- &recurse_submodules, N_("on-demand"),
+ &recurse_submodules_cli, N_("on-demand"),
N_("control for recursive fetching of submodules"),
PARSE_OPT_OPTARG, option_fetch_parse_recurse_submodules),
@@ -536,8 +537,8 @@ static int run_fetch(const char *repo, const char **refspecs)
strvec_push(&args, opt_tags);
if (opt_prune)
strvec_push(&args, opt_prune);
- if (recurse_submodules != RECURSE_SUBMODULES_DEFAULT)
- switch (recurse_submodules) {
+ if (recurse_submodules_cli != RECURSE_SUBMODULES_DEFAULT)
+ switch (recurse_submodules_cli) {
case RECURSE_SUBMODULES_ON:
strvec_push(&args, "--recurse-submodules=on");
break;
@@ -989,6 +990,7 @@ int cmd_pull(int argc, const char **argv, const char *prefix)
int rebase_unspecified = 0;
int can_ff;
int divergent;
+ int ret;
if (!getenv("GIT_REFLOG_ACTION"))
set_reflog_message(argc, argv);
@@ -1001,6 +1003,9 @@ int cmd_pull(int argc, const char **argv, const char *prefix)
argc = parse_options(argc, argv, prefix, pull_options, pull_usage, 0);
+ if (recurse_submodules_cli != RECURSE_SUBMODULES_DEFAULT)
+ recurse_submodules = recurse_submodules_cli;
+
if (cleanup_arg)
/*
* this only checks the validity of cleanup_arg; we don't need
@@ -1096,7 +1101,8 @@ int cmd_pull(int argc, const char **argv, const char *prefix)
if (is_null_oid(&orig_head)) {
if (merge_heads.nr > 1)
die(_("Cannot merge multiple branches into empty head."));
- return pull_into_void(merge_heads.oid, &curr_head);
+ ret = pull_into_void(merge_heads.oid, &curr_head);
+ goto cleanup;
}
if (merge_heads.nr > 1) {
if (opt_rebase)
@@ -1121,8 +1127,6 @@ int cmd_pull(int argc, const char **argv, const char *prefix)
}
if (opt_rebase) {
- int ret = 0;
-
struct object_id newbase;
struct object_id upstream;
get_rebase_newbase_and_upstream(&newbase, &upstream, &curr_head,
@@ -1145,12 +1149,16 @@ int cmd_pull(int argc, const char **argv, const char *prefix)
recurse_submodules == RECURSE_SUBMODULES_ON_DEMAND))
ret = rebase_submodules();
- return ret;
+ goto cleanup;
} else {
- int ret = run_merge();
+ ret = run_merge();
if (!ret && (recurse_submodules == RECURSE_SUBMODULES_ON ||
recurse_submodules == RECURSE_SUBMODULES_ON_DEMAND))
ret = update_submodules();
- return ret;
+ goto cleanup;
}
+
+cleanup:
+ oid_array_clear(&merge_heads);
+ return ret;
}
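
The new recurse_submodules_cli variable keeps the value parsed from the command line separate from the effective setting, so a configuration default can never be mistaken for an explicit --recurse-submodules. A tiny sketch of that split, with illustrative names and values:

/*
 * Sketch of the CLI-vs-effective split introduced above: the value parsed
 * from the command line is kept apart from the working value, so a config
 * default can never be mistaken for an explicit option.
 */
#include <stdio.h>

enum recurse { RECURSE_DEFAULT = -1, RECURSE_OFF = 0, RECURSE_ON = 1 };

int main(void)
{
	enum recurse from_cli = RECURSE_DEFAULT;	/* set only by the option */
	enum recurse from_config = RECURSE_OFF;		/* e.g. a config default */
	enum recurse effective;

	/* forward the option to the underlying fetch only if given explicitly */
	if (from_cli != RECURSE_DEFAULT)
		printf("pass --recurse-submodules through to fetch\n");

	effective = (from_cli != RECURSE_DEFAULT) ? from_cli : from_config;
	printf("effective setting: %d\n", effective);
	return 0;
}
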
diff --git a/builtin/push.c b/builtin/push.c
index cad9979..df0d68e 100644
--- a/builtin/push.c
+++ b/builtin/push.c
@@ -2,6 +2,7 @@
* "git push"
*/
#include "cache.h"
+#include "branch.h"
#include "config.h"
#include "refs.h"
#include "refspec.h"
@@ -151,7 +152,8 @@ static NORETURN void die_push_simple(struct branch *branch,
* upstream to a non-branch, we should probably be showing
* them the big ugly fully qualified ref.
*/
- const char *advice_maybe = "";
+ const char *advice_pushdefault_maybe = "";
+ const char *advice_automergesimple_maybe = "";
const char *short_upstream = branch->merge[0]->src;
skip_prefix(short_upstream, "refs/heads/", &short_upstream);
@@ -161,9 +163,16 @@ static NORETURN void die_push_simple(struct branch *branch,
* push.default.
*/
if (push_default == PUSH_DEFAULT_UNSPECIFIED)
- advice_maybe = _("\n"
+ advice_pushdefault_maybe = _("\n"
"To choose either option permanently, "
- "see push.default in 'git help config'.");
+ "see push.default in 'git help config'.\n");
+ if (git_branch_track != BRANCH_TRACK_SIMPLE)
+ advice_automergesimple_maybe = _("\n"
+ "To avoid automatically configuring "
+ "upstream branches when their name\n"
+ "doesn't match the local branch, see option "
+ "'simple' of branch.autoSetupMerge\n"
+ "in 'git help config'.\n");
die(_("The upstream branch of your current branch does not match\n"
"the name of your current branch. To push to the upstream branch\n"
"on the remote, use\n"
@@ -173,9 +182,10 @@ static NORETURN void die_push_simple(struct branch *branch,
"To push to the branch of the same name on the remote, use\n"
"\n"
" git push %s HEAD\n"
- "%s"),
+ "%s%s"),
remote->name, short_upstream,
- remote->name, advice_maybe);
+ remote->name, advice_pushdefault_maybe,
+ advice_automergesimple_maybe);
}
static const char message_detached_head_die[] =
@@ -185,16 +195,32 @@ static const char message_detached_head_die[] =
"\n"
" git push %s HEAD:<name-of-remote-branch>\n");
-static const char *get_upstream_ref(struct branch *branch, const char *remote_name)
+static const char *get_upstream_ref(int flags, struct branch *branch, const char *remote_name)
{
- if (!branch->merge_nr || !branch->merge || !branch->remote_name)
+ if (branch->merge_nr == 0 && (flags & TRANSPORT_PUSH_AUTO_UPSTREAM)) {
+		/* no upstream configured: push to a branch of the same name; set-upstream is arranged later */
+ return branch->refname;
+ }
+
+ if (!branch->merge_nr || !branch->merge || !branch->remote_name) {
+ const char *advice_autosetup_maybe = "";
+ if (!(flags & TRANSPORT_PUSH_AUTO_UPSTREAM)) {
+ advice_autosetup_maybe = _("\n"
+ "To have this happen automatically for "
+ "branches without a tracking\n"
+ "upstream, see 'push.autoSetupRemote' "
+ "in 'git help config'.\n");
+ }
die(_("The current branch %s has no upstream branch.\n"
"To push the current branch and set the remote as upstream, use\n"
"\n"
- " git push --set-upstream %s %s\n"),
+ " git push --set-upstream %s %s\n"
+ "%s"),
branch->name,
remote_name,
- branch->name);
+ branch->name,
+ advice_autosetup_maybe);
+ }
if (branch->merge_nr != 1)
die(_("The current branch %s has multiple upstream branches, "
"refusing to push."), branch->name);
@@ -202,7 +228,7 @@ static const char *get_upstream_ref(struct branch *branch, const char *remote_na
return branch->merge[0]->src;
}
-static void setup_default_push_refspecs(struct remote *remote)
+static void setup_default_push_refspecs(int *flags, struct remote *remote)
{
struct branch *branch;
const char *dst;
@@ -234,7 +260,7 @@ static void setup_default_push_refspecs(struct remote *remote)
case PUSH_DEFAULT_SIMPLE:
if (!same_remote)
break;
- if (strcmp(branch->refname, get_upstream_ref(branch, remote->name)))
+ if (strcmp(branch->refname, get_upstream_ref(*flags, branch, remote->name)))
die_push_simple(branch, remote);
break;
@@ -244,13 +270,21 @@ static void setup_default_push_refspecs(struct remote *remote)
"your current branch '%s', without telling me what to push\n"
"to update which remote branch."),
remote->name, branch->name);
- dst = get_upstream_ref(branch, remote->name);
+ dst = get_upstream_ref(*flags, branch, remote->name);
break;
case PUSH_DEFAULT_CURRENT:
break;
}
+ /*
+ * this is a default push - if auto-upstream is enabled and there is
+ * no upstream defined, then set it (with options 'simple', 'upstream',
+ * and 'current').
+ */
+ if ((*flags & TRANSPORT_PUSH_AUTO_UPSTREAM) && branch->merge_nr == 0)
+ *flags |= TRANSPORT_PUSH_SET_UPSTREAM;
+
refspec_appendf(&rs, "%s:%s", branch->refname, dst);
}
@@ -401,7 +435,7 @@ static int do_push(int flags,
if (remote->push.nr) {
push_refspec = &remote->push;
} else if (!(flags & TRANSPORT_PUSH_MIRROR))
- setup_default_push_refspecs(remote);
+ setup_default_push_refspecs(&flags, remote);
}
errs = 0;
url_nr = push_url_of_remote(remote, &url);
@@ -472,6 +506,10 @@ static int git_push_config(const char *k, const char *v, void *cb)
else
*flags &= ~TRANSPORT_PUSH_FOLLOW_TAGS;
return 0;
+ } else if (!strcmp(k, "push.autosetupremote")) {
+ if (git_config_bool(k, v))
+ *flags |= TRANSPORT_PUSH_AUTO_UPSTREAM;
+ return 0;
} else if (!strcmp(k, "push.gpgsign")) {
const char *value;
if (!git_config_get_value("push.gpgsign", &value)) {
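
With push.autoSetupRemote enabled, a branch that has no upstream is pushed to a branch of the same name and the set-upstream flag is added on the fly. A hedged sketch of that flow; the flag names below are stand-ins, not git's transport constants.

/*
 * Sketch of the push.autoSetupRemote flow wired up above: with the auto
 * flag set and no upstream configured, the push targets the branch's own
 * name and additionally requests set-upstream. Flag names are stand-ins.
 */
#include <stdio.h>

#define PUSH_AUTO_UPSTREAM (1 << 0)
#define PUSH_SET_UPSTREAM  (1 << 1)

static const char *upstream_ref(int *flags, const char *branch, const char *upstream)
{
	if (!upstream && (*flags & PUSH_AUTO_UPSTREAM)) {
		*flags |= PUSH_SET_UPSTREAM;	/* remember to configure tracking */
		return branch;			/* push to the same name */
	}
	return upstream;			/* NULL means the caller dies with advice */
}

int main(void)
{
	int flags = PUSH_AUTO_UPSTREAM;
	const char *dst = upstream_ref(&flags, "refs/heads/topic", NULL);

	printf("dst=%s set-upstream=%d\n", dst, !!(flags & PUSH_SET_UPSTREAM));
	return 0;
}
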
diff --git a/builtin/rebase.c b/builtin/rebase.c
index b29ad2b..56e4214 100644
--- a/builtin/rebase.c
+++ b/builtin/rebase.c
@@ -102,6 +102,7 @@ struct rebase_options {
int reschedule_failed_exec;
int reapply_cherry_picks;
int fork_point;
+ int update_refs;
};
#define REBASE_OPTIONS_INIT { \
@@ -298,6 +299,7 @@ static int do_interactive_rebase(struct rebase_options *opts, unsigned flags)
ret = complete_action(the_repository, &replay, flags,
shortrevisions, opts->onto_name, opts->onto,
&opts->orig_head, &commands, opts->autosquash,
+ opts->update_refs,
&todo_list);
}
@@ -800,6 +802,11 @@ static int rebase_config(const char *var, const char *value, void *data)
return 0;
}
+ if (!strcmp(var, "rebase.updaterefs")) {
+ opts->update_refs = git_config_bool(var, value);
+ return 0;
+ }
+
if (!strcmp(var, "rebase.reschedulefailedexec")) {
opts->reschedule_failed_exec = git_config_bool(var, value);
return 0;
@@ -829,6 +836,8 @@ static int checkout_up_to_date(struct rebase_options *options)
ropts.oid = &options->orig_head;
ropts.branch = options->head_name;
ropts.flags = RESET_HEAD_RUN_POST_CHECKOUT_HOOK;
+ if (!ropts.branch)
+ ropts.flags |= RESET_HEAD_DETACH;
ropts.head_msg = buf.buf;
if (reset_head(the_repository, &ropts) < 0)
ret = error(_("could not switch to %s"), options->switch_to);
@@ -1108,8 +1117,8 @@ int cmd_rebase(int argc, const char **argv, const char *prefix)
PARSE_OPT_NOARG | PARSE_OPT_NONEG,
parse_opt_interactive),
OPT_SET_INT_F('p', "preserve-merges", &preserve_merges_selected,
- N_("(DEPRECATED) try to recreate merges instead of "
- "ignoring them"),
+ N_("(REMOVED) was: try to recreate merges "
+ "instead of ignoring them"),
1, PARSE_OPT_HIDDEN),
OPT_RERERE_AUTOUPDATE(&options.allow_rerere_autoupdate),
OPT_CALLBACK_F(0, "empty", &options, "{drop,keep,ask}",
@@ -1122,6 +1131,9 @@ int cmd_rebase(int argc, const char **argv, const char *prefix)
OPT_BOOL(0, "autosquash", &options.autosquash,
N_("move commits that begin with "
"squash!/fixup! under -i")),
+ OPT_BOOL(0, "update-refs", &options.update_refs,
+ N_("update branches that point to commits "
+ "that are being rebased")),
{ OPTION_STRING, 'S', "gpg-sign", &gpg_sign, N_("key-id"),
N_("GPG-sign commits"),
PARSE_OPT_OPTARG, NULL, (intptr_t) "" },
@@ -1180,16 +1192,16 @@ int cmd_rebase(int argc, const char **argv, const char *prefix)
} else if (is_directory(merge_dir())) {
strbuf_reset(&buf);
strbuf_addf(&buf, "%s/rewritten", merge_dir());
- if (is_directory(buf.buf)) {
- die("`rebase -p` is no longer supported");
+ if (!(action == ACTION_ABORT) && is_directory(buf.buf)) {
+ die(_("`rebase --preserve-merges` (-p) is no longer supported.\n"
+ "Use `git rebase --abort` to terminate current rebase.\n"
+ "Or downgrade to v2.33, or earlier, to complete the rebase."));
} else {
strbuf_reset(&buf);
strbuf_addf(&buf, "%s/interactive", merge_dir());
- if(file_exists(buf.buf)) {
- options.type = REBASE_MERGE;
+ options.type = REBASE_MERGE;
+ if (file_exists(buf.buf))
options.flags |= REBASE_INTERACTIVE_EXPLICIT;
- } else
- options.type = REBASE_MERGE;
}
options.state_dir = merge_dir();
}
@@ -1203,7 +1215,9 @@ int cmd_rebase(int argc, const char **argv, const char *prefix)
builtin_rebase_usage, 0);
if (preserve_merges_selected)
- die(_("--preserve-merges was replaced by --rebase-merges"));
+ die(_("--preserve-merges was replaced by --rebase-merges\n"
+ "Note: Your `pull.rebase` configuration may also be set to 'preserve',\n"
+ "which is no longer supported; use 'merges' instead"));
if (action != ACTION_NONE && total_argc != 2) {
usage_with_options(builtin_rebase_usage,
@@ -1581,33 +1595,6 @@ int cmd_rebase(int argc, const char **argv, const char *prefix)
options.upstream_arg = "--root";
}
- /* Make sure the branch to rebase onto is valid. */
- if (keep_base) {
- strbuf_reset(&buf);
- strbuf_addstr(&buf, options.upstream_name);
- strbuf_addstr(&buf, "...");
- options.onto_name = xstrdup(buf.buf);
- } else if (!options.onto_name)
- options.onto_name = options.upstream_name;
- if (strstr(options.onto_name, "...")) {
- if (get_oid_mb(options.onto_name, &merge_base) < 0) {
- if (keep_base)
- die(_("'%s': need exactly one merge base with branch"),
- options.upstream_name);
- else
- die(_("'%s': need exactly one merge base"),
- options.onto_name);
- }
- options.onto = lookup_commit_or_die(&merge_base,
- options.onto_name);
- } else {
- options.onto =
- lookup_commit_reference_by_name(options.onto_name);
- if (!options.onto)
- die(_("Does not point to a valid commit '%s'"),
- options.onto_name);
- }
-
/*
* If the branch to rebase is given, that is the branch we will rebase
* branch_name -- branch/commit being rebased, or
@@ -1657,6 +1644,34 @@ int cmd_rebase(int argc, const char **argv, const char *prefix)
} else
BUG("unexpected number of arguments left to parse");
+ /* Make sure the branch to rebase onto is valid. */
+ if (keep_base) {
+ strbuf_reset(&buf);
+ strbuf_addstr(&buf, options.upstream_name);
+ strbuf_addstr(&buf, "...");
+ strbuf_addstr(&buf, branch_name);
+ options.onto_name = xstrdup(buf.buf);
+ } else if (!options.onto_name)
+ options.onto_name = options.upstream_name;
+ if (strstr(options.onto_name, "...")) {
+ if (get_oid_mb(options.onto_name, &merge_base) < 0) {
+ if (keep_base)
+ die(_("'%s': need exactly one merge base with branch"),
+ options.upstream_name);
+ else
+ die(_("'%s': need exactly one merge base"),
+ options.onto_name);
+ }
+ options.onto = lookup_commit_or_die(&merge_base,
+ options.onto_name);
+ } else {
+ options.onto =
+ lookup_commit_reference_by_name(options.onto_name);
+ if (!options.onto)
+ die(_("Does not point to a valid commit '%s'"),
+ options.onto_name);
+ }
+
if (options.fork_point > 0) {
struct commit *head =
lookup_commit_reference(the_repository,
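In the rebase.c change, the --keep-base spec now also names the branch being rebased ("<upstream>...<branch>"), which is why the onto-validation block moved below the code that determines branch_name. A small standalone sketch of composing such a revision spec string; make_keep_base_spec() is a hypothetical helper, not Git's.

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Hypothetical helper: build an "<upstream>...<branch>" spec string. */
static char *make_keep_base_spec(const char *upstream, const char *branch)
{
	size_t n = strlen(upstream) + 3 + strlen(branch) + 1;
	char *spec = malloc(n);

	if (!spec)
		return NULL;
	snprintf(spec, n, "%s...%s", upstream, branch);
	return spec;
}

int main(void)
{
	char *spec = make_keep_base_spec("origin/main", "topic");

	if (spec) {
		printf("%s\n", spec); /* origin/main...topic */
		free(spec);
	}
	return 0;
}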
diff --git a/builtin/receive-pack.c b/builtin/receive-pack.c
index 9aabffa..31b48e7 100644
--- a/builtin/receive-pack.c
+++ b/builtin/receive-pack.c
@@ -764,23 +764,23 @@ static void prepare_push_cert_sha1(struct child_process *proc)
nonce_status = check_nonce(push_cert.buf, bogs);
}
if (!is_null_oid(&push_cert_oid)) {
- strvec_pushf(&proc->env_array, "GIT_PUSH_CERT=%s",
+ strvec_pushf(&proc->env, "GIT_PUSH_CERT=%s",
oid_to_hex(&push_cert_oid));
- strvec_pushf(&proc->env_array, "GIT_PUSH_CERT_SIGNER=%s",
+ strvec_pushf(&proc->env, "GIT_PUSH_CERT_SIGNER=%s",
sigcheck.signer ? sigcheck.signer : "");
- strvec_pushf(&proc->env_array, "GIT_PUSH_CERT_KEY=%s",
+ strvec_pushf(&proc->env, "GIT_PUSH_CERT_KEY=%s",
sigcheck.key ? sigcheck.key : "");
- strvec_pushf(&proc->env_array, "GIT_PUSH_CERT_STATUS=%c",
+ strvec_pushf(&proc->env, "GIT_PUSH_CERT_STATUS=%c",
sigcheck.result);
if (push_cert_nonce) {
- strvec_pushf(&proc->env_array,
+ strvec_pushf(&proc->env,
"GIT_PUSH_CERT_NONCE=%s",
push_cert_nonce);
- strvec_pushf(&proc->env_array,
+ strvec_pushf(&proc->env,
"GIT_PUSH_CERT_NONCE_STATUS=%s",
nonce_status);
if (nonce_status == NONCE_SLOP)
- strvec_pushf(&proc->env_array,
+ strvec_pushf(&proc->env,
"GIT_PUSH_CERT_NONCE_SLOP=%ld",
nonce_stamp_slop);
}
@@ -815,17 +815,17 @@ static int run_and_feed_hook(const char *hook_name, feed_fn feed,
if (feed_state->push_options) {
size_t i;
for (i = 0; i < feed_state->push_options->nr; i++)
- strvec_pushf(&proc.env_array,
+ strvec_pushf(&proc.env,
"GIT_PUSH_OPTION_%"PRIuMAX"=%s",
(uintmax_t)i,
feed_state->push_options->items[i].string);
- strvec_pushf(&proc.env_array, "GIT_PUSH_OPTION_COUNT=%"PRIuMAX"",
+ strvec_pushf(&proc.env, "GIT_PUSH_OPTION_COUNT=%"PRIuMAX"",
(uintmax_t)feed_state->push_options->nr);
} else
- strvec_pushf(&proc.env_array, "GIT_PUSH_OPTION_COUNT");
+ strvec_pushf(&proc.env, "GIT_PUSH_OPTION_COUNT");
if (tmp_objdir)
- strvec_pushv(&proc.env_array, tmp_objdir_env(tmp_objdir));
+ strvec_pushv(&proc.env, tmp_objdir_env(tmp_objdir));
if (use_sideband) {
memset(&muxer, 0, sizeof(muxer));
@@ -1357,7 +1357,7 @@ static const char *push_to_deploy(unsigned char *sha1,
strvec_pushl(&child.args, "update-index", "-q", "--ignore-submodules",
"--refresh", NULL);
- strvec_pushv(&child.env_array, env->v);
+ strvec_pushv(&child.env, env->v);
child.dir = work_tree;
child.no_stdin = 1;
child.stdout_to_stderr = 1;
@@ -1369,7 +1369,7 @@ static const char *push_to_deploy(unsigned char *sha1,
child_process_init(&child);
strvec_pushl(&child.args, "diff-files", "--quiet",
"--ignore-submodules", "--", NULL);
- strvec_pushv(&child.env_array, env->v);
+ strvec_pushv(&child.env, env->v);
child.dir = work_tree;
child.no_stdin = 1;
child.stdout_to_stderr = 1;
@@ -1383,7 +1383,7 @@ static const char *push_to_deploy(unsigned char *sha1,
/* diff-index with either HEAD or an empty tree */
head_has_history() ? "HEAD" : empty_tree_oid_hex(),
"--", NULL);
- strvec_pushv(&child.env_array, env->v);
+ strvec_pushv(&child.env, env->v);
child.no_stdin = 1;
child.no_stdout = 1;
child.stdout_to_stderr = 0;
@@ -1394,7 +1394,7 @@ static const char *push_to_deploy(unsigned char *sha1,
child_process_init(&child);
strvec_pushl(&child.args, "read-tree", "-u", "-m", hash_to_hex(sha1),
NULL);
- strvec_pushv(&child.env_array, env->v);
+ strvec_pushv(&child.env, env->v);
child.dir = work_tree;
child.no_stdin = 1;
child.no_stdout = 1;
@@ -1664,7 +1664,7 @@ static void check_aliased_update_internal(struct command *cmd,
}
dst_name = strip_namespace(dst_name);
- if ((item = string_list_lookup(list, dst_name)) == NULL)
+ if (!(item = string_list_lookup(list, dst_name)))
return;
cmd->skip_update = 1;
@@ -1810,21 +1810,17 @@ static int should_process_cmd(struct command *cmd)
return !cmd->error_string && !cmd->skip_update;
}
-static void warn_if_skipped_connectivity_check(struct command *commands,
+static void BUG_if_skipped_connectivity_check(struct command *commands,
struct shallow_info *si)
{
struct command *cmd;
- int checked_connectivity = 1;
for (cmd = commands; cmd; cmd = cmd->next) {
- if (should_process_cmd(cmd) && si->shallow_ref[cmd->index]) {
- error("BUG: connectivity check has not been run on ref %s",
- cmd->ref_name);
- checked_connectivity = 0;
- }
+ if (should_process_cmd(cmd) && si->shallow_ref[cmd->index])
+ bug("connectivity check has not been run on ref %s",
+ cmd->ref_name);
}
- if (!checked_connectivity)
- BUG("connectivity check skipped???");
+ BUG_if_bug("connectivity check skipped???");
}
static void execute_commands_non_atomic(struct command *commands,
@@ -2005,7 +2001,7 @@ static void execute_commands(struct command *commands,
execute_commands_non_atomic(commands, si);
if (shallow_update)
- warn_if_skipped_connectivity_check(commands, si);
+ BUG_if_skipped_connectivity_check(commands, si);
}
static struct command **queue_command(struct command **tail,
@@ -2214,8 +2210,7 @@ static const char *unpack(int err_fd, struct shallow_info *si)
close(err_fd);
return "unable to create temporary object directory";
}
- if (tmp_objdir)
- strvec_pushv(&child.env_array, tmp_objdir_env(tmp_objdir));
+ strvec_pushv(&child.env, tmp_objdir_env(tmp_objdir));
/*
* Normally we just pass the tmp_objdir environment to the child
@@ -2538,7 +2533,7 @@ int cmd_receive_pack(int argc, const char **argv, const char *prefix)
PACKET_READ_CHOMP_NEWLINE |
PACKET_READ_DIE_ON_ERR_PACKET);
- if ((commands = read_head_info(&reader, &shallow)) != NULL) {
+ if ((commands = read_head_info(&reader, &shallow))) {
const char *unpack_status = NULL;
struct string_list push_options = STRING_LIST_INIT_DUP;
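The receive-pack.c hunks are mostly the mechanical env_array -> env rename on struct child_process; the hooks still receive their push options as GIT_PUSH_OPTION_<n> variables appended to the child's environment vector. A standalone sketch of assembling such a "KEY=VALUE" vector with plain libc (this is not Git's strvec API).

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

int main(void)
{
	const char *push_options[] = { "ci.skip", "notify=none" };
	size_t n = sizeof(push_options) / sizeof(push_options[0]);
	/* one slot per option, one for the count, one for the NULL terminator */
	char **env = calloc(n + 2, sizeof(*env));
	size_t i;

	if (!env)
		return 1;
	for (i = 0; i < n; i++) {
		char buf[256];
		snprintf(buf, sizeof(buf), "GIT_PUSH_OPTION_%zu=%s",
			 i, push_options[i]);
		env[i] = strdup(buf);
	}
	{
		char buf[64];
		snprintf(buf, sizeof(buf), "GIT_PUSH_OPTION_COUNT=%zu", n);
		env[n] = strdup(buf);
	}
	env[n + 1] = NULL; /* ready to hand to an execve()-style API */

	for (i = 0; env[i]; i++) {
		puts(env[i]);
		free(env[i]);
	}
	free(env);
	return 0;
}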
diff --git a/builtin/reflog.c b/builtin/reflog.c
index 9407f83..4dd297d 100644
--- a/builtin/reflog.c
+++ b/builtin/reflog.c
@@ -5,8 +5,48 @@
#include "worktree.h"
#include "reflog.h"
-static const char reflog_exists_usage[] =
-N_("git reflog exists <ref>");
+#define BUILTIN_REFLOG_SHOW_USAGE \
+ N_("git reflog [show] [<log-options>] [<ref>]")
+
+#define BUILTIN_REFLOG_EXPIRE_USAGE \
+ N_("git reflog expire [--expire=<time>] [--expire-unreachable=<time>]\n" \
+ " [--rewrite] [--updateref] [--stale-fix]\n" \
+ " [--dry-run | -n] [--verbose] [--all [--single-worktree] | <refs>...]")
+
+#define BUILTIN_REFLOG_DELETE_USAGE \
+ N_("git reflog delete [--rewrite] [--updateref]\n" \
+ " [--dry-run | -n] [--verbose] <ref>@{<specifier>}...")
+
+#define BUILTIN_REFLOG_EXISTS_USAGE \
+ N_("git reflog exists <ref>")
+
+static const char *const reflog_show_usage[] = {
+ BUILTIN_REFLOG_SHOW_USAGE,
+ NULL,
+};
+
+static const char *const reflog_expire_usage[] = {
+ BUILTIN_REFLOG_EXPIRE_USAGE,
+ NULL
+};
+
+static const char *const reflog_delete_usage[] = {
+ BUILTIN_REFLOG_DELETE_USAGE,
+ NULL
+};
+
+static const char *const reflog_exists_usage[] = {
+ BUILTIN_REFLOG_EXISTS_USAGE,
+ NULL,
+};
+
+static const char *const reflog_usage[] = {
+ BUILTIN_REFLOG_SHOW_USAGE,
+ BUILTIN_REFLOG_EXPIRE_USAGE,
+ BUILTIN_REFLOG_DELETE_USAGE,
+ BUILTIN_REFLOG_EXISTS_USAGE,
+ NULL
+};
static timestamp_t default_reflog_expire;
static timestamp_t default_reflog_expire_unreachable;
@@ -147,14 +187,6 @@ static void set_reflog_expiry_param(struct cmd_reflog_expire_cb *cb, const char
cb->expire_unreachable = default_reflog_expire_unreachable;
}
-static const char * reflog_expire_usage[] = {
- N_("git reflog expire [--expire=<time>] "
- "[--expire-unreachable=<time>] "
- "[--rewrite] [--updateref] [--stale-fix] [--dry-run | -n] "
- "[--verbose] [--all] <refs>..."),
- NULL
-};
-
static int expire_unreachable_callback(const struct option *opt,
const char *arg,
int unset)
@@ -183,6 +215,19 @@ static int expire_total_callback(const struct option *opt,
return 0;
}
+static int cmd_reflog_show(int argc, const char **argv, const char *prefix)
+{
+ struct option options[] = {
+ OPT_END()
+ };
+
+ parse_options(argc, argv, prefix, options, reflog_show_usage,
+ PARSE_OPT_KEEP_DASHDASH | PARSE_OPT_KEEP_ARGV0 |
+ PARSE_OPT_KEEP_UNKNOWN);
+
+ return cmd_log_reflog(argc, argv, prefix);
+}
+
static int cmd_reflog_expire(int argc, const char **argv, const char *prefix)
{
struct cmd_reflog_expire_cb cmd = { 0 };
@@ -248,6 +293,7 @@ static int cmd_reflog_expire(int argc, const char **argv, const char *prefix)
if (verbose)
printf(_("Marking reachable objects..."));
mark_reachable_objects(&revs, 0, 0, NULL);
+ release_revisions(&revs);
if (verbose)
putchar('\n');
}
@@ -304,12 +350,6 @@ static int cmd_reflog_expire(int argc, const char **argv, const char *prefix)
return status;
}
-static const char * reflog_delete_usage[] = {
- N_("git reflog delete [--rewrite] [--updateref] "
- "[--dry-run | -n] [--verbose] <refs>..."),
- NULL
-};
-
static int cmd_reflog_delete(int argc, const char **argv, const char *prefix)
{
int i, status = 0;
@@ -342,57 +382,62 @@ static int cmd_reflog_delete(int argc, const char **argv, const char *prefix)
static int cmd_reflog_exists(int argc, const char **argv, const char *prefix)
{
- int i, start = 0;
-
- for (i = 1; i < argc; i++) {
- const char *arg = argv[i];
- if (!strcmp(arg, "--")) {
- i++;
- break;
- }
- else if (arg[0] == '-')
- usage(_(reflog_exists_usage));
- else
- break;
- }
-
- start = i;
+ struct option options[] = {
+ OPT_END()
+ };
+ const char *refname;
- if (argc - start != 1)
- usage(_(reflog_exists_usage));
+ argc = parse_options(argc, argv, prefix, options, reflog_exists_usage,
+ 0);
+ if (!argc)
+ usage_with_options(reflog_exists_usage, options);
- if (check_refname_format(argv[start], REFNAME_ALLOW_ONELEVEL))
- die(_("invalid ref format: %s"), argv[start]);
- return !reflog_exists(argv[start]);
+ refname = argv[0];
+ if (check_refname_format(refname, REFNAME_ALLOW_ONELEVEL))
+ die(_("invalid ref format: %s"), refname);
+ return !reflog_exists(refname);
}
/*
* main "reflog"
*/
-static const char reflog_usage[] =
-"git reflog [ show | expire | delete | exists ]";
-
int cmd_reflog(int argc, const char **argv, const char *prefix)
{
- if (argc > 1 && !strcmp(argv[1], "-h"))
- usage(_(reflog_usage));
+ struct option options[] = {
+ OPT_END()
+ };
- /* With no command, we default to showing it. */
- if (argc < 2 || *argv[1] == '-')
- return cmd_log_reflog(argc, argv, prefix);
+ argc = parse_options(argc, argv, prefix, options, reflog_usage,
+ PARSE_OPT_KEEP_DASHDASH | PARSE_OPT_KEEP_ARGV0 |
+ PARSE_OPT_KEEP_UNKNOWN |
+ PARSE_OPT_NO_INTERNAL_HELP);
- if (!strcmp(argv[1], "show"))
- return cmd_log_reflog(argc - 1, argv + 1, prefix);
+ /*
+ * With "git reflog" we default to showing it. !argc is
+ * impossible with PARSE_OPT_KEEP_ARGV0.
+ */
+ if (argc == 1)
+ goto log_reflog;
- if (!strcmp(argv[1], "expire"))
- return cmd_reflog_expire(argc - 1, argv + 1, prefix);
+ if (!strcmp(argv[1], "-h"))
+ usage_with_options(reflog_usage, options);
+ else if (*argv[1] == '-')
+ goto log_reflog;
- if (!strcmp(argv[1], "delete"))
+ if (!strcmp(argv[1], "show"))
+ return cmd_reflog_show(argc - 1, argv + 1, prefix);
+ else if (!strcmp(argv[1], "expire"))
+ return cmd_reflog_expire(argc - 1, argv + 1, prefix);
+ else if (!strcmp(argv[1], "delete"))
return cmd_reflog_delete(argc - 1, argv + 1, prefix);
-
- if (!strcmp(argv[1], "exists"))
+ else if (!strcmp(argv[1], "exists"))
return cmd_reflog_exists(argc - 1, argv + 1, prefix);
+ /*
+ * Fall-through for e.g. "git reflog -1", "git reflog master",
+ * as well as the plain "git reflog" above goto above.
+ */
+log_reflog:
return cmd_log_reflog(argc, argv, prefix);
}
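The reflog rewrite above defines each subcommand's synopsis once as a macro, so the per-subcommand usage arrays and the combined reflog_usage array cannot drift apart. A standalone sketch of the same macro-sharing pattern plus a tiny argv[1] dispatcher; all names here are invented for the example.

#include <stdio.h>
#include <string.h>

#define DEMO_SHOW_USAGE   "demo show [<options>]"
#define DEMO_EXPIRE_USAGE "demo expire [--dry-run]"

static const char *const show_usage[]   = { DEMO_SHOW_USAGE, NULL };
static const char *const expire_usage[] = { DEMO_EXPIRE_USAGE, NULL };
static const char *const demo_usage[]   = {
	DEMO_SHOW_USAGE,
	DEMO_EXPIRE_USAGE,
	NULL
};

static void print_usage(const char *const *usage)
{
	for (; *usage; usage++)
		fprintf(stderr, "usage: %s\n", *usage);
}

int main(int argc, char **argv)
{
	if (argc < 2 || !strcmp(argv[1], "-h"))
		print_usage(demo_usage);   /* combined synopsis */
	else if (!strcmp(argv[1], "show"))
		print_usage(show_usage);   /* subcommand-specific synopsis */
	else if (!strcmp(argv[1], "expire"))
		print_usage(expire_usage);
	return 0;
}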
diff --git a/builtin/remote.c b/builtin/remote.c
index 5f4cde9..d9b8746 100644
--- a/builtin/remote.c
+++ b/builtin/remote.c
@@ -344,12 +344,13 @@ static void read_branches(void)
struct ref_states {
struct remote *remote;
- struct string_list new_refs, stale, tracked, heads, push;
+ struct string_list new_refs, skipped, stale, tracked, heads, push;
int queried;
};
#define REF_STATES_INIT { \
.new_refs = STRING_LIST_INIT_DUP, \
+ .skipped = STRING_LIST_INIT_DUP, \
.stale = STRING_LIST_INIT_DUP, \
.tracked = STRING_LIST_INIT_DUP, \
.heads = STRING_LIST_INIT_DUP, \
@@ -368,7 +369,9 @@ static int get_ref_states(const struct ref *remote_refs, struct ref_states *stat
states->remote->fetch.raw[i]);
for (ref = fetch_map; ref; ref = ref->next) {
- if (!ref->peer_ref || !ref_exists(ref->peer_ref->name))
+ if (omit_name_by_refspec(ref->name, &states->remote->fetch))
+ string_list_append(&states->skipped, abbrev_branch(ref->name));
+ else if (!ref->peer_ref || !ref_exists(ref->peer_ref->name))
string_list_append(&states->new_refs, abbrev_branch(ref->name));
else
string_list_append(&states->tracked, abbrev_branch(ref->name));
@@ -383,6 +386,7 @@ static int get_ref_states(const struct ref *remote_refs, struct ref_states *stat
free_refs(fetch_map);
string_list_sort(&states->new_refs);
+ string_list_sort(&states->skipped);
string_list_sort(&states->tracked);
string_list_sort(&states->stale);
@@ -941,6 +945,7 @@ static void clear_push_info(void *util, const char *string)
static void free_remote_ref_states(struct ref_states *states)
{
string_list_clear(&states->new_refs, 0);
+ string_list_clear(&states->skipped, 0);
string_list_clear(&states->stale, 1);
string_list_clear(&states->tracked, 0);
string_list_clear(&states->heads, 0);
@@ -1035,6 +1040,8 @@ static int show_remote_info_item(struct string_list_item *item, void *cb_data)
arg = states->remote->name;
} else if (string_list_has_string(&states->tracked, name))
arg = _(" tracked");
+ else if (string_list_has_string(&states->skipped, name))
+ arg = _(" skipped");
else if (string_list_has_string(&states->stale, name))
arg = _(" stale (use 'git remote prune' to remove)");
else
@@ -1185,14 +1192,22 @@ static int show_push_info_item(struct string_list_item *item, void *cb_data)
static int get_one_entry(struct remote *remote, void *priv)
{
struct string_list *list = priv;
- struct strbuf url_buf = STRBUF_INIT;
+ struct strbuf remote_info_buf = STRBUF_INIT;
const char **url;
int i, url_nr;
if (remote->url_nr > 0) {
- strbuf_addf(&url_buf, "%s (fetch)", remote->url[0]);
+ struct strbuf promisor_config = STRBUF_INIT;
+ const char *partial_clone_filter = NULL;
+
+ strbuf_addf(&promisor_config, "remote.%s.partialclonefilter", remote->name);
+ strbuf_addf(&remote_info_buf, "%s (fetch)", remote->url[0]);
+ if (!git_config_get_string_tmp(promisor_config.buf, &partial_clone_filter))
+ strbuf_addf(&remote_info_buf, " [%s]", partial_clone_filter);
+
+ strbuf_release(&promisor_config);
string_list_append(list, remote->name)->util =
- strbuf_detach(&url_buf, NULL);
+ strbuf_detach(&remote_info_buf, NULL);
} else
string_list_append(list, remote->name)->util = NULL;
if (remote->pushurl_nr) {
@@ -1204,9 +1219,9 @@ static int get_one_entry(struct remote *remote, void *priv)
}
for (i = 0; i < url_nr; i++)
{
- strbuf_addf(&url_buf, "%s (push)", url[i]);
+ strbuf_addf(&remote_info_buf, "%s (push)", url[i]);
string_list_append(list, remote->name)->util =
- strbuf_detach(&url_buf, NULL);
+ strbuf_detach(&remote_info_buf, NULL);
}
return 0;
@@ -1300,6 +1315,7 @@ static int show(int argc, const char **argv)
/* remote branch info */
info.width = 0;
for_each_string_list(&info.states.new_refs, add_remote_to_show_info, &info);
+ for_each_string_list(&info.states.skipped, add_remote_to_show_info, &info);
for_each_string_list(&info.states.tracked, add_remote_to_show_info, &info);
for_each_string_list(&info.states.stale, add_remote_to_show_info, &info);
if (info.list.nr)
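With the remote.c change, "git remote show" sorts remote refs into an extra "skipped" bucket when the configured fetch refspecs exclude them, alongside the existing new/tracked/stale buckets. A standalone sketch of bucketing names by a predicate; omitted() is only a stand-in for omit_name_by_refspec().

#include <stdio.h>
#include <string.h>

/* Stand-in predicate: pretend refs under "wip/" are excluded by refspecs. */
static int omitted(const char *name)
{
	return !strncmp(name, "wip/", 4);
}

int main(void)
{
	const char *refs[] = { "main", "wip/experiment", "topic" };
	size_t i;

	for (i = 0; i < sizeof(refs) / sizeof(refs[0]); i++) {
		if (omitted(refs[i]))
			printf("%-16s skipped\n", refs[i]);
		else
			printf("%-16s tracked or new\n", refs[i]);
	}
	return 0;
}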
diff --git a/builtin/repack.c b/builtin/repack.c
index d1a563d..482b66f 100644
--- a/builtin/repack.c
+++ b/builtin/repack.c
@@ -18,12 +18,21 @@
#include "pack-bitmap.h"
#include "refs.h"
+#define ALL_INTO_ONE 1
+#define LOOSEN_UNREACHABLE 2
+#define PACK_CRUFT 4
+
+#define DELETE_PACK 1
+#define CRUFT_PACK 2
+
+static int pack_everything;
static int delta_base_offset = 1;
static int pack_kept_objects = -1;
static int write_bitmaps = -1;
static int use_delta_islands;
static int run_update_server_info = 1;
static char *packdir, *packtmp_name, *packtmp;
+static char *cruft_expiration;
static const char *const git_repack_usage[] = {
N_("git repack [<options>]"),
@@ -32,12 +41,24 @@ static const char *const git_repack_usage[] = {
static const char incremental_bitmap_conflict_error[] = N_(
"Incremental repacks are incompatible with bitmap indexes. Use\n"
-"--no-write-bitmap-index or disable the pack.writebitmaps configuration."
+"--no-write-bitmap-index or disable the pack.writeBitmaps configuration."
);
+struct pack_objects_args {
+ const char *window;
+ const char *window_memory;
+ const char *depth;
+ const char *threads;
+ const char *max_pack_size;
+ int no_reuse_delta;
+ int no_reuse_object;
+ int quiet;
+ int local;
+};
static int repack_config(const char *var, const char *value, void *cb)
{
+ struct pack_objects_args *cruft_po_args = cb;
if (!strcmp(var, "repack.usedeltabaseoffset")) {
delta_base_offset = git_config_bool(var, value);
return 0;
@@ -59,6 +80,14 @@ static int repack_config(const char *var, const char *value, void *cb)
run_update_server_info = git_config_bool(var, value);
return 0;
}
+ if (!strcmp(var, "repack.cruftwindow"))
+ return git_config_string(&cruft_po_args->window, var, value);
+ if (!strcmp(var, "repack.cruftwindowmemory"))
+ return git_config_string(&cruft_po_args->window_memory, var, value);
+ if (!strcmp(var, "repack.cruftdepth"))
+ return git_config_string(&cruft_po_args->depth, var, value);
+ if (!strcmp(var, "repack.cruftthreads"))
+ return git_config_string(&cruft_po_args->threads, var, value);
return git_default_config(var, value, cb);
}
@@ -131,12 +160,19 @@ static void collect_pack_filenames(struct string_list *fname_nonkept_list,
fname = xmemdupz(e->d_name, len);
if ((extra_keep->nr > 0 && i < extra_keep->nr) ||
- (file_exists(mkpath("%s/%s.keep", packdir, fname))))
+ (file_exists(mkpath("%s/%s.keep", packdir, fname)))) {
string_list_append_nodup(fname_kept_list, fname);
- else
- string_list_append_nodup(fname_nonkept_list, fname);
+ } else {
+ struct string_list_item *item;
+ item = string_list_append_nodup(fname_nonkept_list,
+ fname);
+ if (file_exists(mkpath("%s/%s.mtimes", packdir, fname)))
+ item->util = (void*)(uintptr_t)CRUFT_PACK;
+ }
}
closedir(dir);
+
+ string_list_sort(fname_kept_list);
}
static void remove_redundant_pack(const char *dir_name, const char *base_name)
@@ -151,18 +187,6 @@ static void remove_redundant_pack(const char *dir_name, const char *base_name)
strbuf_release(&buf);
}
-struct pack_objects_args {
- const char *window;
- const char *window_memory;
- const char *depth;
- const char *threads;
- const char *max_pack_size;
- int no_reuse_delta;
- int no_reuse_object;
- int quiet;
- int local;
-};
-
static void prepare_pack_objects(struct child_process *cmd,
const struct pack_objects_args *args)
{
@@ -217,6 +241,7 @@ static struct {
} exts[] = {
{".pack"},
{".rev", 1},
+ {".mtimes", 1},
{".bitmap", 1},
{".promisor", 1},
{".idx"},
@@ -304,9 +329,6 @@ static void repack_promisor_objects(const struct pack_objects_args *args,
die(_("could not finish pack-objects to repack promisor objects"));
}
-#define ALL_INTO_ONE 1
-#define LOOSEN_UNREACHABLE 2
-
struct pack_geometry {
struct packed_git **pack;
uint32_t pack_nr, pack_alloc;
@@ -332,16 +354,39 @@ static int geometry_cmp(const void *va, const void *vb)
return 0;
}
-static void init_pack_geometry(struct pack_geometry **geometry_p)
+static void init_pack_geometry(struct pack_geometry **geometry_p,
+ struct string_list *existing_kept_packs)
{
struct packed_git *p;
struct pack_geometry *geometry;
+ struct strbuf buf = STRBUF_INIT;
*geometry_p = xcalloc(1, sizeof(struct pack_geometry));
geometry = *geometry_p;
for (p = get_all_packs(the_repository); p; p = p->next) {
- if (!pack_kept_objects && p->pack_keep)
+ if (!pack_kept_objects) {
+ /*
+ * Any pack that has its pack_keep bit set will appear
+ * in existing_kept_packs below, but this saves us from
+ * doing a more expensive check.
+ */
+ if (p->pack_keep)
+ continue;
+
+ /*
+ * The pack may be kept via the --keep-pack option;
+ * check 'existing_kept_packs' to determine whether to
+ * ignore it.
+ */
+ strbuf_reset(&buf);
+ strbuf_addstr(&buf, pack_basename(p));
+ strbuf_strip_suffix(&buf, ".pack");
+
+ if (string_list_has_string(existing_kept_packs, buf.buf))
+ continue;
+ }
+ if (p->is_cruft)
continue;
ALLOC_GROW(geometry->pack,
@@ -353,6 +398,7 @@ static void init_pack_geometry(struct pack_geometry **geometry_p)
}
QSORT(geometry->pack, geometry->pack_nr, geometry_cmp);
+ strbuf_release(&buf);
}
static void split_pack_geometry(struct pack_geometry *geometry, int factor)
@@ -548,9 +594,20 @@ static void midx_included_packs(struct string_list *include,
string_list_insert(include, strbuf_detach(&buf, NULL));
}
+
+ for_each_string_list_item(item, existing_nonkept_packs) {
+ if (!((uintptr_t)item->util & CRUFT_PACK)) {
+ /*
+ * no need to check DELETE_PACK, since we're not
+ * doing an ALL_INTO_ONE repack
+ */
+ continue;
+ }
+ string_list_insert(include, xstrfmt("%s.idx", item->string));
+ }
} else {
for_each_string_list_item(item, existing_nonkept_packs) {
- if (item->util)
+ if ((uintptr_t)item->util & DELETE_PACK)
continue;
string_list_insert(include, xstrfmt("%s.idx", item->string));
}
@@ -604,12 +661,72 @@ static int write_midx_included_packs(struct string_list *include,
return finish_command(&cmd);
}
+static int write_cruft_pack(const struct pack_objects_args *args,
+ const char *pack_prefix,
+ struct string_list *names,
+ struct string_list *existing_packs,
+ struct string_list *existing_kept_packs)
+{
+ struct child_process cmd = CHILD_PROCESS_INIT;
+ struct strbuf line = STRBUF_INIT;
+ struct string_list_item *item;
+ FILE *in, *out;
+ int ret;
+
+ prepare_pack_objects(&cmd, args);
+
+ strvec_push(&cmd.args, "--cruft");
+ if (cruft_expiration)
+ strvec_pushf(&cmd.args, "--cruft-expiration=%s",
+ cruft_expiration);
+
+ strvec_push(&cmd.args, "--honor-pack-keep");
+ strvec_push(&cmd.args, "--non-empty");
+ strvec_push(&cmd.args, "--max-pack-size=0");
+
+ cmd.in = -1;
+
+ ret = start_command(&cmd);
+ if (ret)
+ return ret;
+
+ /*
+ * names has a confusing double use: it both provides the list
+ * of just-written new packs, and accepts the name of the cruft
+ * pack we are writing.
+ *
+ * By the time it is read here, it contains only the pack(s)
+ * that were just written, which is exactly the set of packs we
+ * want to consider kept.
+ */
+ in = xfdopen(cmd.in, "w");
+ for_each_string_list_item(item, names)
+ fprintf(in, "%s-%s.pack\n", pack_prefix, item->string);
+ for_each_string_list_item(item, existing_packs)
+ fprintf(in, "-%s.pack\n", item->string);
+ for_each_string_list_item(item, existing_kept_packs)
+ fprintf(in, "%s.pack\n", item->string);
+ fclose(in);
+
+ out = xfdopen(cmd.out, "r");
+ while (strbuf_getline_lf(&line, out) != EOF) {
+ if (line.len != the_hash_algo->hexsz)
+ die(_("repack: Expecting full hex object ID lines only "
+ "from pack-objects."));
+ string_list_append(names, line.buf);
+ }
+ fclose(out);
+
+ strbuf_release(&line);
+
+ return finish_command(&cmd);
+}
+
int cmd_repack(int argc, const char **argv, const char *prefix)
{
struct child_process cmd = CHILD_PROCESS_INIT;
struct string_list_item *item;
struct string_list names = STRING_LIST_INIT_DUP;
- struct string_list rollback = STRING_LIST_INIT_NODUP;
struct string_list existing_nonkept_packs = STRING_LIST_INIT_DUP;
struct string_list existing_kept_packs = STRING_LIST_INIT_DUP;
struct pack_geometry *geometry = NULL;
@@ -620,12 +737,12 @@ int cmd_repack(int argc, const char **argv, const char *prefix)
int show_progress;
/* variables to be filled by option parsing */
- int pack_everything = 0;
int delete_redundant = 0;
const char *unpack_unreachable = NULL;
int keep_unreachable = 0;
struct string_list keep_pack_list = STRING_LIST_INIT_NODUP;
struct pack_objects_args po_args = {NULL};
+ struct pack_objects_args cruft_po_args = {NULL};
int geometric_factor = 0;
int write_midx = 0;
@@ -635,6 +752,11 @@ int cmd_repack(int argc, const char **argv, const char *prefix)
OPT_BIT('A', NULL, &pack_everything,
N_("same as -a, and turn unreachable objects loose"),
LOOSEN_UNREACHABLE | ALL_INTO_ONE),
+ OPT_BIT(0, "cruft", &pack_everything,
+ N_("same as -a, pack unreachable cruft objects separately"),
+ PACK_CRUFT),
+ OPT_STRING(0, "cruft-expiration", &cruft_expiration, N_("approxidate"),
+ N_("with -C, expire objects older than this")),
OPT_BOOL('d', NULL, &delete_redundant,
N_("remove redundant packs, and run git-prune-packed")),
OPT_BOOL('f', NULL, &po_args.no_reuse_delta,
@@ -675,7 +797,7 @@ int cmd_repack(int argc, const char **argv, const char *prefix)
OPT_END()
};
- git_config(repack_config, NULL);
+ git_config(repack_config, &cruft_po_args);
argc = parse_options(argc, argv, prefix, builtin_repack_options,
git_repack_usage, 0);
@@ -687,6 +809,15 @@ int cmd_repack(int argc, const char **argv, const char *prefix)
(unpack_unreachable || (pack_everything & LOOSEN_UNREACHABLE)))
die(_("options '%s' and '%s' cannot be used together"), "--keep-unreachable", "-A");
+ if (pack_everything & PACK_CRUFT) {
+ pack_everything |= ALL_INTO_ONE;
+
+ if (unpack_unreachable || (pack_everything & LOOSEN_UNREACHABLE))
+ die(_("options '%s' and '%s' cannot be used together"), "--cruft", "-A");
+ if (keep_unreachable)
+ die(_("options '%s' and '%s' cannot be used together"), "--cruft", "-k");
+ }
+
if (write_bitmaps < 0) {
if (!write_midx &&
(!(pack_everything & ALL_INTO_ONE) || !is_bare_repository()))
@@ -714,17 +845,20 @@ int cmd_repack(int argc, const char **argv, const char *prefix)
strbuf_release(&path);
}
+ packdir = mkpathdup("%s/pack", get_object_directory());
+ packtmp_name = xstrfmt(".tmp-%d-pack", (int)getpid());
+ packtmp = mkpathdup("%s/%s", packdir, packtmp_name);
+
+ collect_pack_filenames(&existing_nonkept_packs, &existing_kept_packs,
+ &keep_pack_list);
+
if (geometric_factor) {
if (pack_everything)
die(_("options '%s' and '%s' cannot be used together"), "--geometric", "-A/-a");
- init_pack_geometry(&geometry);
+ init_pack_geometry(&geometry, &existing_kept_packs);
split_pack_geometry(geometry, geometric_factor);
}
- packdir = mkpathdup("%s/pack", get_object_directory());
- packtmp_name = xstrfmt(".tmp-%d-pack", (int)getpid());
- packtmp = mkpathdup("%s/%s", packdir, packtmp_name);
-
sigchain_push_common(remove_pack_on_signal);
prepare_pack_objects(&cmd, &po_args);
@@ -764,13 +898,11 @@ int cmd_repack(int argc, const char **argv, const char *prefix)
if (use_delta_islands)
strvec_push(&cmd.args, "--delta-islands");
- collect_pack_filenames(&existing_nonkept_packs, &existing_kept_packs,
- &keep_pack_list);
-
if (pack_everything & ALL_INTO_ONE) {
repack_promisor_objects(&po_args, &names);
- if (existing_nonkept_packs.nr && delete_redundant) {
+ if (existing_nonkept_packs.nr && delete_redundant &&
+ !(pack_everything & PACK_CRUFT)) {
for_each_string_list_item(item, &names) {
strvec_pushf(&cmd.args, "--keep-pack=%s-%s.pack",
packtmp_name, item->string);
@@ -832,6 +964,35 @@ int cmd_repack(int argc, const char **argv, const char *prefix)
if (!names.nr && !po_args.quiet)
printf_ln(_("Nothing new to pack."));
+ if (pack_everything & PACK_CRUFT) {
+ const char *pack_prefix;
+ if (!skip_prefix(packtmp, packdir, &pack_prefix))
+ die(_("pack prefix %s does not begin with objdir %s"),
+ packtmp, packdir);
+ if (*pack_prefix == '/')
+ pack_prefix++;
+
+ if (!cruft_po_args.window)
+ cruft_po_args.window = po_args.window;
+ if (!cruft_po_args.window_memory)
+ cruft_po_args.window_memory = po_args.window_memory;
+ if (!cruft_po_args.depth)
+ cruft_po_args.depth = po_args.depth;
+ if (!cruft_po_args.threads)
+ cruft_po_args.threads = po_args.threads;
+
+ cruft_po_args.local = po_args.local;
+ cruft_po_args.quiet = po_args.quiet;
+
+ ret = write_cruft_pack(&cruft_po_args, pack_prefix, &names,
+ &existing_nonkept_packs,
+ &existing_kept_packs);
+ if (ret)
+ return ret;
+ }
+
+ string_list_sort(&names);
+
for_each_string_list_item(item, &names) {
item->util = (void *)(uintptr_t)populate_pack_exts(item->string);
}
@@ -872,7 +1033,6 @@ int cmd_repack(int argc, const char **argv, const char *prefix)
if (delete_redundant && pack_everything & ALL_INTO_ONE) {
const int hexsz = the_hash_algo->hexsz;
- string_list_sort(&names);
for_each_string_list_item(item, &existing_nonkept_packs) {
char *sha1;
size_t len = strlen(item->string);
@@ -885,7 +1045,8 @@ int cmd_repack(int argc, const char **argv, const char *prefix)
* was given) and that we will actually delete this pack
* (if `-d` was given).
*/
- item->util = (void*)(intptr_t)!string_list_has_string(&names, sha1);
+ if (!string_list_has_string(&names, sha1))
+ item->util = (void*)(uintptr_t)((size_t)item->util | DELETE_PACK);
}
}
@@ -909,7 +1070,7 @@ int cmd_repack(int argc, const char **argv, const char *prefix)
if (delete_redundant) {
int opts = 0;
for_each_string_list_item(item, &existing_nonkept_packs) {
- if (!item->util)
+ if (!((uintptr_t)item->util & DELETE_PACK))
continue;
remove_redundant_pack(packdir, item->string);
}
@@ -955,7 +1116,6 @@ int cmd_repack(int argc, const char **argv, const char *prefix)
}
string_list_clear(&names, 0);
- string_list_clear(&rollback, 0);
string_list_clear(&existing_nonkept_packs, 0);
string_list_clear(&existing_kept_packs, 0);
clear_pack_geometry(geometry);
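A notable pattern in the repack.c change: entries in existing_nonkept_packs are now tagged with small bit flags (DELETE_PACK, CRUFT_PACK) stored directly in the string list's util pointer, replacing the old 0/1 boolean. A standalone sketch of the cast-through-uintptr_t idiom, using a plain struct in place of Git's string_list item.

#include <stdint.h>
#include <stdio.h>

#define DELETE_PACK 1
#define CRUFT_PACK  2

struct item {
	const char *name;
	void *util; /* holds flag bits, not a real pointer */
};

static void set_flag(struct item *it, uintptr_t flag)
{
	it->util = (void *)((uintptr_t)it->util | flag);
}

static int has_flag(const struct item *it, uintptr_t flag)
{
	return !!((uintptr_t)it->util & flag);
}

int main(void)
{
	struct item pack = { "pack-1234", NULL };

	set_flag(&pack, CRUFT_PACK);
	set_flag(&pack, DELETE_PACK);
	printf("%s: delete=%d cruft=%d\n", pack.name,
	       has_flag(&pack, DELETE_PACK), has_flag(&pack, CRUFT_PACK));
	return 0;
}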
diff --git a/builtin/replace.c b/builtin/replace.c
index 5068f4f..583702a 100644
--- a/builtin/replace.c
+++ b/builtin/replace.c
@@ -72,7 +72,7 @@ static int list_replace_refs(const char *pattern, const char *format)
{
struct show_data data;
- if (pattern == NULL)
+ if (!pattern)
pattern = "*";
data.pattern = pattern;
diff --git a/builtin/reset.c b/builtin/reset.c
index 6e65e90..344fff8 100644
--- a/builtin/reset.c
+++ b/builtin/reset.c
@@ -392,6 +392,7 @@ static int git_reset_config(const char *var, const char *value, void *cb)
int cmd_reset(int argc, const char **argv, const char *prefix)
{
int reset_type = NONE, update_ref_status = 0, quiet = 0;
+ int no_refresh = 0;
int patch_mode = 0, pathspec_file_nul = 0, unborn;
const char *rev, *pathspec_from_file = NULL;
struct object_id oid;
@@ -399,6 +400,8 @@ int cmd_reset(int argc, const char **argv, const char *prefix)
int intent_to_add = 0;
const struct option options[] = {
OPT__QUIET(&quiet, N_("be quiet, only report errors")),
+ OPT_BOOL(0, "no-refresh", &no_refresh,
+ N_("skip refreshing the index after reset")),
OPT_SET_INT(0, "mixed", &reset_type,
N_("reset HEAD and index"), MIXED),
OPT_SET_INT(0, "soft", &reset_type, N_("reset only HEAD"), SOFT),
@@ -420,7 +423,6 @@ int cmd_reset(int argc, const char **argv, const char *prefix)
};
git_config(git_reset_config, NULL);
- git_config_get_bool("reset.quiet", &quiet);
argc = parse_options(argc, argv, prefix, options, git_reset_usage,
PARSE_OPT_KEEP_DASHDASH);
@@ -517,17 +519,16 @@ int cmd_reset(int argc, const char **argv, const char *prefix)
if (read_from_tree(&pathspec, &oid, intent_to_add))
return 1;
the_index.updated_skipworktree = 1;
- if (!quiet && get_git_work_tree()) {
+ if (!no_refresh && get_git_work_tree()) {
uint64_t t_begin, t_delta_in_ms;
t_begin = getnanotime();
refresh_index(&the_index, flags, NULL, NULL,
_("Unstaged changes after reset:"));
t_delta_in_ms = (getnanotime() - t_begin) / 1000000;
- if (advice_enabled(ADVICE_RESET_QUIET_WARNING) && t_delta_in_ms > REFRESH_INDEX_DELAY_WARNING_IN_MS) {
- printf(_("\nIt took %.2f seconds to enumerate unstaged changes after reset. You can\n"
- "use '--quiet' to avoid this. Set the config setting reset.quiet to true\n"
- "to make this the default.\n"), t_delta_in_ms / 1000.0);
+ if (!quiet && advice_enabled(ADVICE_RESET_NO_REFRESH_WARNING) && t_delta_in_ms > REFRESH_INDEX_DELAY_WARNING_IN_MS) {
+ advise(_("It took %.2f seconds to refresh the index after reset. You can use\n"
+ "'--no-refresh' to avoid this."), t_delta_in_ms / 1000.0);
}
}
} else {
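The reset.c change replaces the refresh-skipping side effect of --quiet/reset.quiet with an explicit --no-refresh option, while still timing the index refresh and advising when it is slow. A standalone sketch of the "measure elapsed milliseconds, advise above a threshold" pattern; the threshold value here is illustrative, not Git's actual constant.

#include <stdio.h>
#include <time.h>

#define DELAY_WARNING_MS 2000.0

/* POSIX monotonic clock, returned in milliseconds. */
static double now_ms(void)
{
	struct timespec ts;

	clock_gettime(CLOCK_MONOTONIC, &ts);
	return ts.tv_sec * 1000.0 + ts.tv_nsec / 1e6;
}

int main(void)
{
	double begin = now_ms();
	double delta;

	/* ... refresh the index here ... */

	delta = now_ms() - begin;
	if (delta > DELAY_WARNING_MS)
		fprintf(stderr,
			"hint: it took %.2f seconds to refresh the index; "
			"use --no-refresh to skip it\n", delta / 1000.0);
	return 0;
}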
diff --git a/builtin/rev-list.c b/builtin/rev-list.c
index 572da14..30fd8e8 100644
--- a/builtin/rev-list.c
+++ b/builtin/rev-list.c
@@ -213,10 +213,8 @@ static void show_commit(struct commit *commit, void *data)
static void finish_commit(struct commit *commit)
{
- if (commit->parents) {
- free_commit_list(commit->parents);
- commit->parents = NULL;
- }
+ free_commit_list(commit->parents);
+ commit->parents = NULL;
free_commit_buffer(the_repository->parsed_objects,
commit);
}
@@ -502,6 +500,7 @@ int cmd_rev_list(int argc, const char **argv, const char *prefix)
int use_bitmap_index = 0;
int filter_provided_objects = 0;
const char *show_progress = NULL;
+ int ret = 0;
if (argc == 2 && !strcmp(argv[1], "-h"))
usage(rev_list_usage);
@@ -585,7 +584,7 @@ int cmd_rev_list(int argc, const char **argv, const char *prefix)
}
if (!strcmp(arg, "--test-bitmap")) {
test_bitmap_walk(&revs);
- return 0;
+ goto cleanup;
}
if (skip_prefix(arg, "--progress=", &arg)) {
show_progress = arg;
@@ -674,11 +673,11 @@ int cmd_rev_list(int argc, const char **argv, const char *prefix)
if (use_bitmap_index) {
if (!try_bitmap_count(&revs, filter_provided_objects))
- return 0;
+ goto cleanup;
if (!try_bitmap_disk_usage(&revs, filter_provided_objects))
- return 0;
+ goto cleanup;
if (!try_bitmap_traversal(&revs, filter_provided_objects))
- return 0;
+ goto cleanup;
}
if (prepare_revision_walk(&revs))
@@ -698,8 +697,10 @@ int cmd_rev_list(int argc, const char **argv, const char *prefix)
find_bisection(&revs.commits, &reaches, &all, bisect_flags);
- if (bisect_show_vars)
- return show_bisect_vars(&info, reaches, all);
+ if (bisect_show_vars) {
+ ret = show_bisect_vars(&info, reaches, all);
+ goto cleanup;
+ }
}
if (filter_provided_objects) {
@@ -754,5 +755,7 @@ int cmd_rev_list(int argc, const char **argv, const char *prefix)
if (show_disk_usage)
printf("%"PRIuMAX"\n", (uintmax_t)total_disk_usage);
- return 0;
+cleanup:
+ release_revisions(&revs);
+ return ret;
}
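The rev-list.c hunks convert several early "return 0" exits into jumps to a single cleanup: label so release_revisions() runs on every path. A standalone sketch of the goto-cleanup idiom; here the released resource is just a heap buffer standing in for struct rev_info.

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

static int process(const char *arg)
{
	int ret = 0;
	char *buf = malloc(64); /* stands in for struct rev_info */

	if (!buf)
		return -1;

	if (!strcmp(arg, "--test")) {
		/* early exit still flows through the cleanup label */
		goto cleanup;
	}

	snprintf(buf, 64, "processed %s", arg);
	puts(buf);

cleanup:
	free(buf); /* the single place the resource is released */
	return ret;
}

int main(void)
{
	return process("--test") || process("HEAD");
}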
diff --git a/builtin/rev-parse.c b/builtin/rev-parse.c
index 8480a59..b259d89 100644
--- a/builtin/rev-parse.c
+++ b/builtin/rev-parse.c
@@ -476,7 +476,7 @@ static int cmd_parseopt(int argc, const char **argv, const char *prefix)
/* name(s) */
s = strpbrk(sb.buf, flag_chars);
- if (s == NULL)
+ if (!s)
s = help;
if (s - sb.buf == 1) /* short option only */
@@ -723,6 +723,9 @@ int cmd_rev_parse(int argc, const char **argv, const char *prefix)
prefix = setup_git_directory();
git_config(git_default_config, NULL);
did_repo_setup = 1;
+
+ prepare_repo_settings(the_repository);
+ the_repository->settings.command_requires_full_index = 0;
}
if (!strcmp(arg, "--")) {
diff --git a/builtin/revert.c b/builtin/revert.c
index 51776ab..2554f90 100644
--- a/builtin/revert.c
+++ b/builtin/revert.c
@@ -130,6 +130,13 @@ static int run_sequencer(int argc, const char **argv, struct replay_opts *opts)
OPT_END(),
};
options = parse_options_concat(options, cp_extra);
+ } else if (opts->action == REPLAY_REVERT) {
+ struct option cp_extra[] = {
+ OPT_BOOL(0, "reference", &opts->commit_use_reference,
+ N_("use the 'reference' format to refer to commits")),
+ OPT_END(),
+ };
+ options = parse_options_concat(options, cp_extra);
}
argc = parse_options(argc, argv, NULL, options, usage_str,
@@ -239,6 +246,9 @@ int cmd_revert(int argc, const char **argv, const char *prefix)
res = run_sequencer(argc, argv, &opts);
if (res < 0)
die(_("revert failed"));
+ if (opts.revs)
+ release_revisions(opts.revs);
+ free(opts.revs);
return res;
}
diff --git a/builtin/shortlog.c b/builtin/shortlog.c
index 26c5c0c..086dfee 100644
--- a/builtin/shortlog.c
+++ b/builtin/shortlog.c
@@ -81,8 +81,10 @@ static void insert_one_record(struct shortlog *log,
format_subject(&subject, oneline, " ");
buffer = strbuf_detach(&subject, NULL);
- if (item->util == NULL)
- item->util = xcalloc(1, sizeof(struct string_list));
+ if (!item->util) {
+ item->util = xmalloc(sizeof(struct string_list));
+ string_list_init_nodup(item->util);
+ }
string_list_append(item->util, buffer);
}
}
@@ -420,6 +422,8 @@ parse_done:
else
get_from_rev(&rev, &log);
+ release_revisions(&rev);
+
shortlog_output(&log);
if (log.file != stdout)
fclose(log.file);
@@ -439,7 +443,7 @@ void shortlog_output(struct shortlog *log)
struct strbuf sb = STRBUF_INIT;
if (log->sort_by_number)
- QSORT(log->list.items, log->list.nr,
+ STABLE_QSORT(log->list.items, log->list.nr,
log->summary ? compare_by_counter : compare_by_list);
for (i = 0; i < log->list.nr; i++) {
const struct string_list_item *item = &log->list.items[i];
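shortlog now uses STABLE_QSORT instead of QSORT so entries that compare equal (authors with the same commit count) keep their prior, name-sorted order. Plain qsort() gives no such guarantee; a common trick, sketched standalone below, is to record each element's original index and break ties on it.

#include <stdio.h>
#include <stdlib.h>

struct entry {
	const char *name;
	int count;
	size_t idx; /* original position, used to break ties */
};

static int cmp_by_count_stable(const void *a, const void *b)
{
	const struct entry *x = a, *y = b;

	if (x->count != y->count)
		return y->count - x->count; /* larger counts first */
	/* tie: preserve the original order to emulate a stable sort */
	return (x->idx > y->idx) - (x->idx < y->idx);
}

int main(void)
{
	struct entry e[] = {
		{ "alice", 3 }, { "bob", 5 }, { "carol", 3 },
	};
	size_t i, n = sizeof(e) / sizeof(e[0]);

	for (i = 0; i < n; i++)
		e[i].idx = i;
	qsort(e, n, sizeof(e[0]), cmp_by_count_stable);
	for (i = 0; i < n; i++)
		printf("%5d\t%s\n", e[i].count, e[i].name);
	return 0;
}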
diff --git a/builtin/show-branch.c b/builtin/show-branch.c
index 330b055..64c649c 100644
--- a/builtin/show-branch.c
+++ b/builtin/show-branch.c
@@ -712,6 +712,10 @@ int cmd_show_branch(int ac, const char **av, const char *prefix)
"--all/--remotes/--independent/--merge-base");
}
+ if (with_current_branch && reflog)
+ die(_("options '%s' and '%s' cannot be used together"),
+ "--reflog", "--current");
+
/* If nothing is specified, show all branches by default */
if (ac <= topics && all_heads + all_remotes == 0)
all_heads = 1;
diff --git a/builtin/show-ref.c b/builtin/show-ref.c
index 7f8a533..5fa207a 100644
--- a/builtin/show-ref.c
+++ b/builtin/show-ref.c
@@ -52,14 +52,6 @@ static int show_ref(const char *refname, const struct object_id *oid,
if (show_head && !strcmp(refname, "HEAD"))
goto match;
- if (tags_only || heads_only) {
- int match;
-
- match = heads_only && starts_with(refname, "refs/heads/");
- match |= tags_only && starts_with(refname, "refs/tags/");
- if (!match)
- return 0;
- }
if (pattern) {
int reflen = strlen(refname);
const char **p = pattern, *m;
@@ -216,7 +208,14 @@ int cmd_show_ref(int argc, const char **argv, const char *prefix)
if (show_head)
head_ref(show_ref, NULL);
- for_each_ref(show_ref, NULL);
+ if (heads_only || tags_only) {
+ if (heads_only)
+ for_each_fullref_in("refs/heads/", show_ref, NULL);
+ if (tags_only)
+ for_each_fullref_in("refs/tags/", show_ref, NULL);
+ } else {
+ for_each_ref(show_ref, NULL);
+ }
if (!found_match) {
if (verify && !quiet)
die("No match");
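The show-ref.c change stops filtering every ref by prefix after the fact; with --heads or --tags it now iterates only the refs under "refs/heads/" or "refs/tags/". A standalone sketch of that shape using a callback-based iterator over a small array; for_each_ref_in() here is an invented stand-in for Git's for_each_fullref_in().

#include <stdio.h>
#include <string.h>

typedef int (*ref_fn)(const char *refname);

static const char *all_refs[] = {
	"refs/heads/main", "refs/tags/v1.0", "refs/heads/topic", NULL,
};

/* Stand-in iterator: visit only refs under the given prefix. */
static void for_each_ref_in(const char *prefix, ref_fn fn)
{
	size_t len = strlen(prefix);
	const char **p;

	for (p = all_refs; *p; p++)
		if (!strncmp(*p, prefix, len))
			fn(*p);
}

static int show_ref(const char *refname)
{
	puts(refname);
	return 0;
}

int main(void)
{
	for_each_ref_in("refs/heads/", show_ref); /* --heads */
	for_each_ref_in("refs/tags/", show_ref);  /* --tags */
	return 0;
}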
diff --git a/builtin/sparse-checkout.c b/builtin/sparse-checkout.c
index 0217d44..f91e29b 100644
--- a/builtin/sparse-checkout.c
+++ b/builtin/sparse-checkout.c
@@ -128,7 +128,7 @@ static void clean_tracked_sparse_directories(struct repository *r)
* sparse index will not delete directories that contain
* conflicted entries or submodules.
*/
- if (!r->index->sparse_index) {
+ if (r->index->sparse_index == INDEX_EXPANDED) {
/*
* If something, such as a merge conflict or other concern,
* prevents us from converting to a sparse index, then do
@@ -395,7 +395,7 @@ static int update_modes(int *cone_mode, int *sparse_index)
/* Set cone/non-cone mode appropriately */
core_apply_sparse_checkout = 1;
- if (*cone_mode == 1) {
+ if (*cone_mode == 1 || *cone_mode == -1) {
mode = MODE_CONE_PATTERNS;
core_sparse_checkout_cone = 1;
} else {
@@ -413,6 +413,9 @@ static int update_modes(int *cone_mode, int *sparse_index)
/* force an index rewrite */
repo_read_index(the_repository);
the_repository->index->updated_workdir = 1;
+
+ if (!*sparse_index)
+ ensure_full_index(the_repository->index);
}
return 0;
@@ -934,6 +937,9 @@ int cmd_sparse_checkout(int argc, const char **argv, const char *prefix)
git_config(git_default_config, NULL);
+ prepare_repo_settings(the_repository);
+ the_repository->settings.command_requires_full_index = 0;
+
if (argc > 0) {
if (!strcmp(argv[0], "list"))
return sparse_checkout_list(argc, argv);
diff --git a/builtin/stash.c b/builtin/stash.c
index ccdfdab..30fa101 100644
--- a/builtin/stash.c
+++ b/builtin/stash.c
@@ -7,6 +7,7 @@
#include "cache-tree.h"
#include "unpack-trees.h"
#include "merge-recursive.h"
+#include "merge-ort-wrappers.h"
#include "strvec.h"
#include "run-command.h"
#include "dir.h"
@@ -116,6 +117,10 @@ struct stash_info {
int has_u;
};
+#define STASH_INFO_INIT { \
+ .revision = STRBUF_INIT, \
+}
+
static void free_stash_info(struct stash_info *info)
{
strbuf_release(&info->revision);
@@ -157,10 +162,8 @@ static int get_stash_info(struct stash_info *info, int argc, const char **argv)
if (argc == 1)
commit = argv[0];
- strbuf_init(&info->revision, 0);
if (!commit) {
if (!ref_exists(ref_stash)) {
- free_stash_info(info);
fprintf_ln(stderr, _("No stash entries found."));
return -1;
}
@@ -174,11 +177,8 @@ static int get_stash_info(struct stash_info *info, int argc, const char **argv)
revision = info->revision.buf;
- if (get_oid(revision, &info->w_commit)) {
- error(_("%s is not a valid reference"), revision);
- free_stash_info(info);
- return -1;
- }
+ if (get_oid(revision, &info->w_commit))
+ return error(_("%s is not a valid reference"), revision);
assert_stash_like(info, revision);
@@ -197,7 +197,7 @@ static int get_stash_info(struct stash_info *info, int argc, const char **argv)
info->is_stash_ref = !strcmp(expanded_ref, ref_stash);
break;
default: /* Invalid or ambiguous */
- free_stash_info(info);
+ break;
}
free(expanded_ref);
@@ -310,7 +310,7 @@ static int reset_head(void)
* API for resetting.
*/
cp.git_cmd = 1;
- strvec_push(&cp.args, "reset");
+ strvec_pushl(&cp.args, "reset", "--quiet", "--refresh", NULL);
return run_command(&cp);
}
@@ -356,7 +356,7 @@ static int restore_untracked(struct object_id *u_tree)
cp.git_cmd = 1;
strvec_push(&cp.args, "read-tree");
strvec_push(&cp.args, oid_to_hex(u_tree));
- strvec_pushf(&cp.env_array, "GIT_INDEX_FILE=%s",
+ strvec_pushf(&cp.env, "GIT_INDEX_FILE=%s",
stash_index_path.buf);
if (run_command(&cp)) {
remove_path(stash_index_path.buf);
@@ -366,7 +366,7 @@ static int restore_untracked(struct object_id *u_tree)
child_process_init(&cp);
cp.git_cmd = 1;
strvec_pushl(&cp.args, "checkout-index", "--all", NULL);
- strvec_pushf(&cp.env_array, "GIT_INDEX_FILE=%s",
+ strvec_pushf(&cp.env, "GIT_INDEX_FILE=%s",
stash_index_path.buf);
res = run_command(&cp);
@@ -492,13 +492,13 @@ static void unstage_changes_unless_new(struct object_id *orig_tree)
static int do_apply_stash(const char *prefix, struct stash_info *info,
int index, int quiet)
{
- int ret;
+ int clean, ret;
int has_index = index;
struct merge_options o;
struct object_id c_tree;
struct object_id index_tree;
- struct commit *result;
- const struct object_id *bases[1];
+ struct tree *head, *merge, *merge_base;
+ struct lock_file lock = LOCK_INIT;
read_cache_preload(NULL);
if (refresh_and_write_cache(REFRESH_QUIET, 0, 0))
@@ -541,6 +541,7 @@ static int do_apply_stash(const char *prefix, struct stash_info *info,
o.branch1 = "Updated upstream";
o.branch2 = "Stashed changes";
+ o.ancestor = "Stash base";
if (oideq(&info->b_tree, &c_tree))
o.branch1 = "Version stash was based on";
@@ -551,10 +552,26 @@ static int do_apply_stash(const char *prefix, struct stash_info *info,
if (o.verbosity >= 3)
printf_ln(_("Merging %s with %s"), o.branch1, o.branch2);
- bases[0] = &info->b_tree;
+ head = lookup_tree(o.repo, &c_tree);
+ merge = lookup_tree(o.repo, &info->w_tree);
+ merge_base = lookup_tree(o.repo, &info->b_tree);
+
+ repo_hold_locked_index(o.repo, &lock, LOCK_DIE_ON_ERROR);
+ clean = merge_ort_nonrecursive(&o, head, merge, merge_base);
+
+ /*
+ * If 'clean' >= 0, reverse the value for 'ret' so 'ret' is 0 when the
+ * merge was clean, and nonzero if the merge was unclean or encountered
+ * an error.
+ */
+ ret = clean >= 0 ? !clean : clean;
+
+ if (ret < 0)
+ rollback_lock_file(&lock);
+ else if (write_locked_index(o.repo->index, &lock,
+ COMMIT_LOCK | SKIP_IF_UNCHANGED))
+ ret = error(_("could not write index"));
- ret = merge_recursive_generic(&o, &c_tree, &info->w_tree, 1, bases,
- &result);
if (ret) {
rerere(0);
@@ -585,9 +602,9 @@ restore_untracked:
*/
cp.git_cmd = 1;
cp.dir = prefix;
- strvec_pushf(&cp.env_array, GIT_WORK_TREE_ENVIRONMENT"=%s",
+ strvec_pushf(&cp.env, GIT_WORK_TREE_ENVIRONMENT"=%s",
absolute_path(get_git_work_tree()));
- strvec_pushf(&cp.env_array, GIT_DIR_ENVIRONMENT"=%s",
+ strvec_pushf(&cp.env, GIT_DIR_ENVIRONMENT"=%s",
absolute_path(get_git_dir()));
strvec_push(&cp.args, "status");
run_command(&cp);
@@ -598,10 +615,10 @@ restore_untracked:
static int apply_stash(int argc, const char **argv, const char *prefix)
{
- int ret;
+ int ret = -1;
int quiet = 0;
int index = 0;
- struct stash_info info;
+ struct stash_info info = STASH_INFO_INIT;
struct option options[] = {
OPT__QUIET(&quiet, N_("be quiet, only report errors")),
OPT_BOOL(0, "index", &index,
@@ -613,9 +630,10 @@ static int apply_stash(int argc, const char **argv, const char *prefix)
git_stash_apply_usage, 0);
if (get_stash_info(&info, argc, argv))
- return -1;
+ goto cleanup;
ret = do_apply_stash(prefix, &info, index, quiet);
+cleanup:
free_stash_info(&info);
return ret;
}
@@ -651,20 +669,25 @@ static int do_drop_stash(struct stash_info *info, int quiet)
return 0;
}
-static void assert_stash_ref(struct stash_info *info)
+static int get_stash_info_assert(struct stash_info *info, int argc,
+ const char **argv)
{
- if (!info->is_stash_ref) {
- error(_("'%s' is not a stash reference"), info->revision.buf);
- free_stash_info(info);
- exit(1);
- }
+ int ret = get_stash_info(info, argc, argv);
+
+ if (ret < 0)
+ return ret;
+
+ if (!info->is_stash_ref)
+ return error(_("'%s' is not a stash reference"), info->revision.buf);
+
+ return 0;
}
static int drop_stash(int argc, const char **argv, const char *prefix)
{
- int ret;
+ int ret = -1;
int quiet = 0;
- struct stash_info info;
+ struct stash_info info = STASH_INFO_INIT;
struct option options[] = {
OPT__QUIET(&quiet, N_("be quiet, only report errors")),
OPT_END()
@@ -673,22 +696,21 @@ static int drop_stash(int argc, const char **argv, const char *prefix)
argc = parse_options(argc, argv, prefix, options,
git_stash_drop_usage, 0);
- if (get_stash_info(&info, argc, argv))
- return -1;
-
- assert_stash_ref(&info);
+ if (get_stash_info_assert(&info, argc, argv))
+ goto cleanup;
ret = do_drop_stash(&info, quiet);
+cleanup:
free_stash_info(&info);
return ret;
}
static int pop_stash(int argc, const char **argv, const char *prefix)
{
- int ret;
+ int ret = -1;
int index = 0;
int quiet = 0;
- struct stash_info info;
+ struct stash_info info = STASH_INFO_INIT;
struct option options[] = {
OPT__QUIET(&quiet, N_("be quiet, only report errors")),
OPT_BOOL(0, "index", &index,
@@ -699,25 +721,25 @@ static int pop_stash(int argc, const char **argv, const char *prefix)
argc = parse_options(argc, argv, prefix, options,
git_stash_pop_usage, 0);
- if (get_stash_info(&info, argc, argv))
- return -1;
+ if (get_stash_info_assert(&info, argc, argv))
+ goto cleanup;
- assert_stash_ref(&info);
if ((ret = do_apply_stash(prefix, &info, index, quiet)))
printf_ln(_("The stash entry is kept in case "
"you need it again."));
else
ret = do_drop_stash(&info, quiet);
+cleanup:
free_stash_info(&info);
return ret;
}
static int branch_stash(int argc, const char **argv, const char *prefix)
{
- int ret;
+ int ret = -1;
const char *branch = NULL;
- struct stash_info info;
+ struct stash_info info = STASH_INFO_INIT;
struct child_process cp = CHILD_PROCESS_INIT;
struct option options[] = {
OPT_END()
@@ -734,7 +756,7 @@ static int branch_stash(int argc, const char **argv, const char *prefix)
branch = argv[0];
if (get_stash_info(&info, argc - 1, argv + 1))
- return -1;
+ goto cleanup;
cp.git_cmd = 1;
strvec_pushl(&cp.args, "checkout", "-b", NULL);
@@ -746,8 +768,8 @@ static int branch_stash(int argc, const char **argv, const char *prefix)
if (!ret && info.is_stash_ref)
ret = do_drop_stash(&info, 0);
+cleanup:
free_stash_info(&info);
-
return ret;
}
@@ -825,8 +847,8 @@ static void diff_include_untracked(const struct stash_info *info, struct diff_op
static int show_stash(int argc, const char **argv, const char *prefix)
{
int i;
- int ret = 0;
- struct stash_info info;
+ int ret = -1;
+ struct stash_info info = STASH_INFO_INIT;
struct rev_info rev;
struct strvec stash_args = STRVEC_INIT;
struct strvec revision_args = STRVEC_INIT;
@@ -844,6 +866,7 @@ static int show_stash(int argc, const char **argv, const char *prefix)
UNTRACKED_ONLY, PARSE_OPT_NONEG),
OPT_END()
};
+ int do_usage = 0;
init_diff_ui_defaults();
git_config(git_diff_ui_config, NULL);
@@ -861,10 +884,8 @@ static int show_stash(int argc, const char **argv, const char *prefix)
strvec_push(&revision_args, argv[i]);
}
- ret = get_stash_info(&info, stash_args.nr, stash_args.v);
- strvec_clear(&stash_args);
- if (ret)
- return -1;
+ if (get_stash_info(&info, stash_args.nr, stash_args.v))
+ goto cleanup;
/*
* The config settings are applied only if there are not passed
@@ -878,16 +899,14 @@ static int show_stash(int argc, const char **argv, const char *prefix)
rev.diffopt.output_format |= DIFF_FORMAT_PATCH;
if (!show_stat && !show_patch) {
- free_stash_info(&info);
- return 0;
+ ret = 0;
+ goto cleanup;
}
}
argc = setup_revisions(revision_args.nr, revision_args.v, &rev, NULL);
- if (argc > 1) {
- free_stash_info(&info);
- usage_with_options(git_stash_show_usage, options);
- }
+ if (argc > 1)
+ goto usage;
if (!rev.diffopt.output_format) {
rev.diffopt.output_format = DIFF_FORMAT_PATCH;
diff_setup_done(&rev.diffopt);
@@ -912,8 +931,17 @@ static int show_stash(int argc, const char **argv, const char *prefix)
}
log_tree_diff_flush(&rev);
+ ret = diff_result_code(&rev.diffopt, 0);
+cleanup:
+ strvec_clear(&stash_args);
free_stash_info(&info);
- return diff_result_code(&rev.diffopt, 0);
+ release_revisions(&rev);
+ if (do_usage)
+ usage_with_options(git_stash_show_usage, options);
+ return ret;
+usage:
+ do_usage = 1;
+ goto cleanup;
}
static int do_store_stash(const struct object_id *w_commit, const char *stash_msg,
@@ -1047,7 +1075,6 @@ static int check_changes_tracked_files(const struct pathspec *ps)
goto done;
}
- object_array_clear(&rev.pending);
result = run_diff_files(&rev, 0);
if (diff_result_code(&rev.diffopt, result)) {
ret = 1;
@@ -1055,7 +1082,7 @@ static int check_changes_tracked_files(const struct pathspec *ps)
}
done:
- clear_pathspec(&rev.prune_data);
+ release_revisions(&rev);
return ret;
}
@@ -1088,7 +1115,7 @@ static int save_untracked_files(struct stash_info *info, struct strbuf *msg,
cp_upd_index.git_cmd = 1;
strvec_pushl(&cp_upd_index.args, "update-index", "-z", "--add",
"--remove", "--stdin", NULL);
- strvec_pushf(&cp_upd_index.env_array, "GIT_INDEX_FILE=%s",
+ strvec_pushf(&cp_upd_index.env, "GIT_INDEX_FILE=%s",
stash_index_path.buf);
strbuf_addf(&untracked_msg, "untracked files on %s\n", msg->buf);
@@ -1162,7 +1189,7 @@ static int stash_patch(struct stash_info *info, const struct pathspec *ps,
cp_read_tree.git_cmd = 1;
strvec_pushl(&cp_read_tree.args, "read-tree", "HEAD", NULL);
- strvec_pushf(&cp_read_tree.env_array, "GIT_INDEX_FILE=%s",
+ strvec_pushf(&cp_read_tree.env, "GIT_INDEX_FILE=%s",
stash_index_path.buf);
if (run_command(&cp_read_tree)) {
ret = -1;
@@ -1249,7 +1276,7 @@ static int stash_working_tree(struct stash_info *info, const struct pathspec *ps
strvec_pushl(&cp_upd_index.args, "update-index",
"--ignore-skip-worktree-entries",
"-z", "--add", "--remove", "--stdin", NULL);
- strvec_pushf(&cp_upd_index.env_array, "GIT_INDEX_FILE=%s",
+ strvec_pushf(&cp_upd_index.env, "GIT_INDEX_FILE=%s",
stash_index_path.buf);
if (pipe_command(&cp_upd_index, diff_output.buf, diff_output.len,
@@ -1266,9 +1293,7 @@ static int stash_working_tree(struct stash_info *info, const struct pathspec *ps
done:
discard_index(&istate);
- UNLEAK(rev);
- object_array_clear(&rev.pending);
- clear_pathspec(&rev.prune_data);
+ release_revisions(&rev);
strbuf_release(&diff_output);
remove_path(stash_index_path.buf);
return ret;
@@ -1410,9 +1435,9 @@ done:
static int create_stash(int argc, const char **argv, const char *prefix)
{
- int ret = 0;
+ int ret;
struct strbuf stash_msg_buf = STRBUF_INIT;
- struct stash_info info;
+ struct stash_info info = STASH_INFO_INIT;
struct pathspec ps;
/* Starting with argv[1], since argv[0] is "create" */
@@ -1427,6 +1452,7 @@ static int create_stash(int argc, const char **argv, const char *prefix)
if (!ret)
printf_ln("%s", oid_to_hex(&info.w_commit));
+ free_stash_info(&info);
strbuf_release(&stash_msg_buf);
return ret;
}
@@ -1435,7 +1461,7 @@ static int do_push_stash(const struct pathspec *ps, const char *stash_msg, int q
int keep_index, int patch_mode, int include_untracked, int only_staged)
{
int ret = 0;
- struct stash_info info;
+ struct stash_info info = STASH_INFO_INIT;
struct strbuf patch = STRBUF_INIT;
struct strbuf stash_msg_buf = STRBUF_INIT;
struct strbuf untracked_files = STRBUF_INIT;
@@ -1525,7 +1551,7 @@ static int do_push_stash(const struct pathspec *ps, const char *stash_msg, int q
cp.git_cmd = 1;
if (startup_info->original_cwd) {
cp.dir = startup_info->original_cwd;
- strvec_pushf(&cp.env_array, "%s=%s",
+ strvec_pushf(&cp.env, "%s=%s",
GIT_WORK_TREE_ENVIRONMENT,
the_repository->worktree);
}
@@ -1622,7 +1648,8 @@ static int do_push_stash(const struct pathspec *ps, const char *stash_msg, int q
struct child_process cp = CHILD_PROCESS_INIT;
cp.git_cmd = 1;
- strvec_pushl(&cp.args, "reset", "-q", "--", NULL);
+ strvec_pushl(&cp.args, "reset", "-q", "--refresh", "--",
+ NULL);
add_pathspecs(&cp.args, ps);
if (run_command(&cp)) {
ret = -1;
@@ -1633,6 +1660,7 @@ static int do_push_stash(const struct pathspec *ps, const char *stash_msg, int q
}
done:
+ free_stash_info(&info);
strbuf_release(&stash_msg_buf);
return ret;
}
@@ -1769,6 +1797,9 @@ int cmd_stash(int argc, const char **argv, const char *prefix)
argc = parse_options(argc, argv, prefix, options, git_stash_usage,
PARSE_OPT_KEEP_UNKNOWN | PARSE_OPT_KEEP_DASHDASH);
+ prepare_repo_settings(the_repository);
+ the_repository->settings.command_requires_full_index = 0;
+
index_file = get_index_file();
strbuf_addf(&stash_index_path, "%s.stash.%" PRIuMAX, index_file,
(uintmax_t)pid);
diff --git a/builtin/submodule--helper.c b/builtin/submodule--helper.c
index 5301612..fac52ad 100644
--- a/builtin/submodule--helper.c
+++ b/builtin/submodule--helper.c
@@ -72,135 +72,6 @@ static char *get_default_remote(void)
return repo_get_default_remote(the_repository);
}
-static int starts_with_dot_slash(const char *str)
-{
- return str[0] == '.' && is_dir_sep(str[1]);
-}
-
-static int starts_with_dot_dot_slash(const char *str)
-{
- return str[0] == '.' && str[1] == '.' && is_dir_sep(str[2]);
-}
-
-/*
- * Returns 1 if it was the last chop before ':'.
- */
-static int chop_last_dir(char **remoteurl, int is_relative)
-{
- char *rfind = find_last_dir_sep(*remoteurl);
- if (rfind) {
- *rfind = '\0';
- return 0;
- }
-
- rfind = strrchr(*remoteurl, ':');
- if (rfind) {
- *rfind = '\0';
- return 1;
- }
-
- if (is_relative || !strcmp(".", *remoteurl))
- die(_("cannot strip one component off url '%s'"),
- *remoteurl);
-
- free(*remoteurl);
- *remoteurl = xstrdup(".");
- return 0;
-}
-
-/*
- * The `url` argument is the URL that navigates to the submodule origin
- * repo. When relative, this URL is relative to the superproject origin
- * URL repo. The `up_path` argument, if specified, is the relative
- * path that navigates from the submodule working tree to the superproject
- * working tree. Returns the origin URL of the submodule.
- *
- * Return either an absolute URL or filesystem path (if the superproject
- * origin URL is an absolute URL or filesystem path, respectively) or a
- * relative file system path (if the superproject origin URL is a relative
- * file system path).
- *
- * When the output is a relative file system path, the path is either
- * relative to the submodule working tree, if up_path is specified, or to
- * the superproject working tree otherwise.
- *
- * NEEDSWORK: This works incorrectly on the domain and protocol part.
- * remote_url url outcome expectation
- * http://a.com/b ../c http://a.com/c as is
- * http://a.com/b/ ../c http://a.com/c same as previous line, but
- * ignore trailing slash in url
- * http://a.com/b ../../c http://c error out
- * http://a.com/b ../../../c http:/c error out
- * http://a.com/b ../../../../c http:c error out
- * http://a.com/b ../../../../../c .:c error out
- * NEEDSWORK: Given how chop_last_dir() works, this function is broken
- * when a local part has a colon in its path component, too.
- */
-static char *relative_url(const char *remote_url,
- const char *url,
- const char *up_path)
-{
- int is_relative = 0;
- int colonsep = 0;
- char *out;
- char *remoteurl = xstrdup(remote_url);
- struct strbuf sb = STRBUF_INIT;
- size_t len = strlen(remoteurl);
-
- if (is_dir_sep(remoteurl[len-1]))
- remoteurl[len-1] = '\0';
-
- if (!url_is_local_not_ssh(remoteurl) || is_absolute_path(remoteurl))
- is_relative = 0;
- else {
- is_relative = 1;
- /*
- * Prepend a './' to ensure all relative
- * remoteurls start with './' or '../'
- */
- if (!starts_with_dot_slash(remoteurl) &&
- !starts_with_dot_dot_slash(remoteurl)) {
- strbuf_reset(&sb);
- strbuf_addf(&sb, "./%s", remoteurl);
- free(remoteurl);
- remoteurl = strbuf_detach(&sb, NULL);
- }
- }
- /*
- * When the url starts with '../', remove that and the
- * last directory in remoteurl.
- */
- while (url) {
- if (starts_with_dot_dot_slash(url)) {
- url += 3;
- colonsep |= chop_last_dir(&remoteurl, is_relative);
- } else if (starts_with_dot_slash(url))
- url += 2;
- else
- break;
- }
- strbuf_reset(&sb);
- strbuf_addf(&sb, "%s%s%s", remoteurl, colonsep ? ":" : "/", url);
- if (ends_with(url, "/"))
- strbuf_setlen(&sb, sb.len - 1);
- free(remoteurl);
-
- if (starts_with_dot_slash(sb.buf))
- out = xstrdup(sb.buf + 2);
- else
- out = xstrdup(sb.buf);
-
- if (!up_path || !is_relative) {
- strbuf_release(&sb);
- return out;
- }
-
- strbuf_reset(&sb);
- strbuf_addf(&sb, "%s%s", up_path, out);
- free(out);
- return strbuf_detach(&sb, NULL);
-}
-
static char *resolve_relative_url(const char *rel_url, const char *up_path, int quiet)
{
char *remoteurl, *resolved_url;
@@ -247,10 +118,11 @@ static int resolve_relative_url_test(int argc, const char **argv, const char *pr
return 0;
}
-static char *do_get_submodule_displaypath(const char *path,
- const char *prefix,
- const char *super_prefix)
+/* the result should be freed by the caller. */
+static char *get_submodule_displaypath(const char *path, const char *prefix)
{
+ const char *super_prefix = get_super_prefix();
+
if (prefix && super_prefix) {
BUG("cannot have prefix '%s' and superprefix '%s'",
prefix, super_prefix);
@@ -266,13 +138,6 @@ static char *do_get_submodule_displaypath(const char *path,
}
}
-/* the result should be freed by the caller. */
-static char *get_submodule_displaypath(const char *path, const char *prefix)
-{
- const char *super_prefix = get_super_prefix();
- return do_get_submodule_displaypath(path, prefix, super_prefix);
-}
-
static char *compute_rev_name(const char *sub_path, const char* object_id)
{
struct strbuf sb = STRBUF_INIT;
@@ -292,7 +157,7 @@ static char *compute_rev_name(const char *sub_path, const char* object_id)
for (d = describe_argv; *d; d++) {
struct child_process cp = CHILD_PROCESS_INIT;
- prepare_submodule_repo_env(&cp.env_array);
+ prepare_submodule_repo_env(&cp.env);
cp.dir = sub_path;
cp.git_cmd = 1;
cp.no_stderr = 1;
@@ -479,7 +344,7 @@ static void runcommand_in_submodule_cb(const struct cache_entry *list_item,
if (!is_submodule_populated_gently(path, NULL))
goto cleanup;
- prepare_submodule_repo_env(&cp.env_array);
+ prepare_submodule_repo_env(&cp.env);
/*
* For the purpose of executing <command> in the submodule,
@@ -499,12 +364,12 @@ static void runcommand_in_submodule_cb(const struct cache_entry *list_item,
char *toplevel = xgetcwd();
struct strbuf sb = STRBUF_INIT;
- strvec_pushf(&cp.env_array, "name=%s", sub->name);
- strvec_pushf(&cp.env_array, "sm_path=%s", path);
- strvec_pushf(&cp.env_array, "displaypath=%s", displaypath);
- strvec_pushf(&cp.env_array, "sha1=%s",
+ strvec_pushf(&cp.env, "name=%s", sub->name);
+ strvec_pushf(&cp.env, "sm_path=%s", path);
+ strvec_pushf(&cp.env, "displaypath=%s", displaypath);
+ strvec_pushf(&cp.env, "sha1=%s",
oid_to_hex(ce_oid));
- strvec_pushf(&cp.env_array, "toplevel=%s", toplevel);
+ strvec_pushf(&cp.env, "toplevel=%s", toplevel);
/*
* Since the path variable was accessible from the script
@@ -513,7 +378,7 @@ static void runcommand_in_submodule_cb(const struct cache_entry *list_item,
* on windows. And since environment variables are
* case-insensitive in windows, it interferes with the
* existing PATH variable. Hence, to avoid that, we expose
- * path via the args strvec and not via env_array.
+ * path via the args strvec and not via env.
*/
sq_quote_buf(&sb, path);
strvec_pushf(&cp.args, "path=%s; %s",
@@ -536,7 +401,7 @@ static void runcommand_in_submodule_cb(const struct cache_entry *list_item,
cpr.git_cmd = 1;
cpr.dir = path;
- prepare_submodule_repo_env(&cpr.env_array);
+ prepare_submodule_repo_env(&cpr.env);
strvec_pushl(&cpr.args, "--super-prefix", NULL);
strvec_pushf(&cpr.args, "%s/", displaypath);
@@ -573,7 +438,7 @@ static int module_foreach(int argc, const char **argv, const char *prefix)
};
const char *const git_submodule_helper_usage[] = {
- N_("git submodule--helper foreach [--quiet] [--recursive] [--] <command>"),
+ N_("git submodule foreach [--quiet] [--recursive] [--] <command>"),
NULL
};
@@ -592,24 +457,32 @@ static int module_foreach(int argc, const char **argv, const char *prefix)
return 0;
}
+static int starts_with_dot_slash(const char *const path)
+{
+ return path_match_flags(path, PATH_MATCH_STARTS_WITH_DOT_SLASH |
+ PATH_MATCH_XPLATFORM);
+}
+
+static int starts_with_dot_dot_slash(const char *const path)
+{
+ return path_match_flags(path, PATH_MATCH_STARTS_WITH_DOT_DOT_SLASH |
+ PATH_MATCH_XPLATFORM);
+}
+
struct init_cb {
const char *prefix;
- const char *superprefix;
unsigned int flags;
};
#define INIT_CB_INIT { 0 }
static void init_submodule(const char *path, const char *prefix,
- const char *superprefix, unsigned int flags)
+ unsigned int flags)
{
const struct submodule *sub;
struct strbuf sb = STRBUF_INIT;
char *upd = NULL, *url = NULL, *displaypath;
- /* try superprefix from the environment, if it is not passed explicitly */
- if (!superprefix)
- superprefix = get_super_prefix();
- displaypath = do_get_submodule_displaypath(path, prefix, superprefix);
+ displaypath = get_submodule_displaypath(path, prefix);
sub = submodule_from_path(the_repository, null_oid(), path);
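The removed relative_url() documentation and the new starts_with_dot_slash()/starts_with_dot_dot_slash() helpers both hinge on classifying whether a .gitmodules URL is relative. A sketch of the same path_match_flags() calls, written as if it sat next to the helpers in this file (so it relies on the includes already present there); the classify_url() name is made up for illustration:

/*
 * Classify a .gitmodules URL the way the new helpers do.
 * PATH_MATCH_XPLATFORM makes a backslash count as a directory
 * separator on every platform, not only on Windows.
 */
static const char *classify_url(const char *url)
{
	if (path_match_flags(url, PATH_MATCH_STARTS_WITH_DOT_DOT_SLASH |
				  PATH_MATCH_XPLATFORM))
		return "relative to the parent of the superproject remote";
	if (path_match_flags(url, PATH_MATCH_STARTS_WITH_DOT_SLASH |
				  PATH_MATCH_XPLATFORM))
		return "relative to the superproject remote";
	return "absolute URL or filesystem path";
}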
@@ -683,7 +556,7 @@ static void init_submodule(const char *path, const char *prefix,
static void init_submodule_cb(const struct cache_entry *list_item, void *cb_data)
{
struct init_cb *info = cb_data;
- init_submodule(list_item->name, info->prefix, info->superprefix, info->flags);
+ init_submodule(list_item->name, info->prefix, info->flags);
}
static int module_init(int argc, const char **argv, const char *prefix)
@@ -699,7 +572,7 @@ static int module_init(int argc, const char **argv, const char *prefix)
};
const char *const git_submodule_helper_usage[] = {
- N_("git submodule--helper init [<options>] [<path>]"),
+ N_("git submodule init [<options>] [<path>]"),
NULL
};
@@ -766,7 +639,7 @@ static void status_submodule(const char *path, const struct object_id *ce_oid,
{
char *displaypath;
struct strvec diff_files_args = STRVEC_INIT;
- struct rev_info rev;
+ struct rev_info rev = REV_INFO_INIT;
int diff_files_result;
struct strbuf buf = STRBUF_INIT;
const char *git_dir;
@@ -833,7 +706,7 @@ static void status_submodule(const char *path, const struct object_id *ce_oid,
cpr.git_cmd = 1;
cpr.dir = path;
- prepare_submodule_repo_env(&cpr.env_array);
+ prepare_submodule_repo_env(&cpr.env);
strvec_push(&cpr.args, "--super-prefix");
strvec_pushf(&cpr.args, "%s/", displaypath);
@@ -853,6 +726,7 @@ static void status_submodule(const char *path, const struct object_id *ce_oid,
cleanup:
strvec_clear(&diff_files_args);
free(displaypath);
+ release_revisions(&rev);
}
static void status_submodule_cb(const struct cache_entry *list_item,
@@ -955,7 +829,7 @@ static char *verify_submodule_committish(const char *sm_path,
cp_rev_parse.git_cmd = 1;
cp_rev_parse.dir = sm_path;
- prepare_submodule_repo_env(&cp_rev_parse.env_array);
+ prepare_submodule_repo_env(&cp_rev_parse.env);
strvec_pushl(&cp_rev_parse.args, "rev-parse", "-q", "--short", NULL);
strvec_pushf(&cp_rev_parse.args, "%s^0", committish);
strvec_push(&cp_rev_parse.args, "--");
@@ -996,7 +870,7 @@ static void print_submodule_summary(struct summary_cb *info, char *errmsg,
cp_log.git_cmd = 1;
cp_log.dir = p->sm_path;
- prepare_submodule_repo_env(&cp_log.env_array);
+ prepare_submodule_repo_env(&cp_log.env);
strvec_pushl(&cp_log.args, "log", NULL);
if (S_ISGITLINK(p->mod_src) && S_ISGITLINK(p->mod_dst)) {
@@ -1113,7 +987,7 @@ static void generate_submodule_summary(struct summary_cb *info,
cp_rev_list.git_cmd = 1;
cp_rev_list.dir = p->sm_path;
- prepare_submodule_repo_env(&cp_rev_list.env_array);
+ prepare_submodule_repo_env(&cp_rev_list.env);
if (!capture_command(&cp_rev_list, &sb_rev_list, 0))
total_commits = atoi(sb_rev_list.buf);
@@ -1231,6 +1105,7 @@ static int compute_summary_module_list(struct object_id *head_oid,
struct strvec diff_args = STRVEC_INIT;
struct rev_info rev;
struct module_cb_list list = MODULE_CB_LIST_INIT;
+ int ret = 0;
strvec_push(&diff_args, get_diff_cmd(diff_cmd));
if (info->cached)
@@ -1256,11 +1131,13 @@ static int compute_summary_module_list(struct object_id *head_oid,
setup_work_tree();
if (read_cache_preload(&rev.diffopt.pathspec) < 0) {
perror("read_cache_preload");
- return -1;
+ ret = -1;
+ goto cleanup;
}
} else if (read_cache() < 0) {
perror("read_cache");
- return -1;
+ ret = -1;
+ goto cleanup;
}
if (diff_cmd == DIFF_INDEX)
@@ -1268,8 +1145,10 @@ static int compute_summary_module_list(struct object_id *head_oid,
else
run_diff_files(&rev, 0);
prepare_submodule_summary(info, &list);
+cleanup:
strvec_clear(&diff_args);
- return 0;
+ release_revisions(&rev);
+ return ret;
}
static int module_summary(int argc, const char **argv, const char *prefix)
@@ -1296,7 +1175,7 @@ static int module_summary(int argc, const char **argv, const char *prefix)
};
const char *const git_submodule_helper_usage[] = {
- N_("git submodule--helper summary [<options>] [<commit>] [--] [<path>]"),
+ N_("git submodule summary [<options>] [<commit>] [--] [<path>]"),
NULL
};
@@ -1414,7 +1293,7 @@ static void sync_submodule(const char *path, const char *prefix,
cpr.git_cmd = 1;
cpr.dir = path;
- prepare_submodule_repo_env(&cpr.env_array);
+ prepare_submodule_repo_env(&cpr.env);
strvec_push(&cpr.args, "--super-prefix");
strvec_pushf(&cpr.args, "%s/", displaypath);
@@ -1460,7 +1339,7 @@ static int module_sync(int argc, const char **argv, const char *prefix)
};
const char *const git_submodule_helper_usage[] = {
- N_("git submodule--helper sync [--quiet] [--recursive] [<path>]"),
+ N_("git submodule sync [--quiet] [--recursive] [<path>]"),
NULL
};
@@ -1643,7 +1522,10 @@ struct module_clone_data {
unsigned int require_init: 1;
int single_branch;
};
-#define MODULE_CLONE_DATA_INIT { .reference = STRING_LIST_INIT_NODUP, .single_branch = -1 }
+#define MODULE_CLONE_DATA_INIT { \
+ .reference = STRING_LIST_INIT_NODUP, \
+ .single_branch = -1, \
+}
struct submodule_alternate_setup {
const char *submodule_name;
@@ -1816,7 +1698,7 @@ static int clone_submodule(struct module_clone_data *clone_data)
strvec_push(&cp.args, clone_data->path);
cp.git_cmd = 1;
- prepare_submodule_repo_env(&cp.env_array);
+ prepare_submodule_repo_env(&cp.env);
cp.no_stdin = 1;
if(run_command(&cp))
@@ -1899,7 +1781,7 @@ static int module_clone(int argc, const char **argv, const char *prefix)
const char *const git_submodule_helper_usage[] = {
N_("git submodule--helper clone [--prefix=<path>] [--quiet] "
"[--reference <repository>] [--name <name>] [--depth <depth>] "
- "[--single-branch] [--filter <filter-spec>]"
+ "[--single-branch] [--filter <filter-spec>] "
"--url <url> --path <path>"),
NULL
};
@@ -1926,7 +1808,7 @@ static int module_clone(int argc, const char **argv, const char *prefix)
static void determine_submodule_update_strategy(struct repository *r,
int just_cloned,
const char *path,
- const char *update,
+ enum submodule_update_type update,
struct submodule_update_strategy *out)
{
const struct submodule *sub = submodule_from_path(r, null_oid(), path);
@@ -1936,9 +1818,7 @@ static void determine_submodule_update_strategy(struct repository *r,
key = xstrfmt("submodule.%s.update", sub->name);
if (update) {
- if (parse_submodule_update_strategy(update, out) < 0)
- die(_("Invalid update mode '%s' for submodule path '%s'"),
- update, path);
+ out->type = update;
} else if (!repo_config_get_string_tmp(r, key, &val)) {
if (parse_submodule_update_strategy(val, out) < 0)
die(_("Invalid update mode '%s' configured for submodule path '%s'"),
@@ -1967,28 +1847,13 @@ struct update_clone_data {
};
struct submodule_update_clone {
- /* index into 'list', the list of submodules to look into for cloning */
+ /* index into 'update_data.list', the list of submodules to look into for cloning */
int current;
- struct module_list list;
- unsigned warn_if_uninitialized : 1;
-
- /* update parameter passed via commandline */
- struct submodule_update_strategy update;
/* configuration parameters which are passed on to the children */
- int progress;
- int quiet;
- int recommend_shallow;
- struct string_list references;
- int dissociate;
- unsigned require_init;
- const char *depth;
- const char *recursive_prefix;
- const char *prefix;
- int single_branch;
- struct list_objects_filter_options *filter_options;
+ struct update_data *update_data;
- /* to be consumed by git-submodule.sh */
+ /* to be consumed by update_submodule() */
struct update_clone_data *update_clone;
int update_clone_nr; int update_clone_alloc;
@@ -1998,34 +1863,46 @@ struct submodule_update_clone {
/* failed clones to be retried again */
const struct cache_entry **failed_clones;
int failed_clones_nr, failed_clones_alloc;
-
- int max_jobs;
- unsigned int init;
};
-#define SUBMODULE_UPDATE_CLONE_INIT { \
- .list = MODULE_LIST_INIT, \
- .update = SUBMODULE_UPDATE_STRATEGY_INIT, \
- .recommend_shallow = -1, \
- .references = STRING_LIST_INIT_DUP, \
- .single_branch = -1, \
- .max_jobs = 1, \
-}
+#define SUBMODULE_UPDATE_CLONE_INIT { 0 }
struct update_data {
- const char *recursive_prefix;
- const char *sm_path;
+ const char *prefix;
const char *displaypath;
- struct object_id oid;
+ enum submodule_update_type update_default;
struct object_id suboid;
+ struct string_list references;
struct submodule_update_strategy update_strategy;
+ struct list_objects_filter_options *filter_options;
+ struct module_list list;
int depth;
+ int max_jobs;
+ int single_branch;
+ int recommend_shallow;
+ unsigned int require_init;
unsigned int force;
unsigned int quiet;
unsigned int nofetch;
- unsigned int just_cloned;
unsigned int remote;
+ unsigned int progress;
+ unsigned int dissociate;
+ unsigned int init;
+ unsigned int warn_if_uninitialized;
+ unsigned int recursive;
+
+ /* copied over from update_clone_data */
+ struct object_id oid;
+ unsigned int just_cloned;
+ const char *sm_path;
};
-#define UPDATE_DATA_INIT { .update_strategy = SUBMODULE_UPDATE_STRATEGY_INIT }
+#define UPDATE_DATA_INIT { \
+ .update_strategy = SUBMODULE_UPDATE_STRATEGY_INIT, \
+ .list = MODULE_LIST_INIT, \
+ .recommend_shallow = -1, \
+ .references = STRING_LIST_INIT_DUP, \
+ .single_branch = -1, \
+ .max_jobs = 1, \
+}
static void next_submodule_warn_missing(struct submodule_update_clone *suc,
struct strbuf *out, const char *displaypath)
@@ -2034,7 +1911,7 @@ static void next_submodule_warn_missing(struct submodule_update_clone *suc,
* Only mention uninitialized submodules when their
* paths have been specified.
*/
- if (suc->warn_if_uninitialized) {
+ if (suc->update_data->warn_if_uninitialized) {
strbuf_addf(out,
_("Submodule path '%s' not initialized"),
displaypath);
@@ -2059,30 +1936,20 @@ static int prepare_to_clone_next_submodule(const struct cache_entry *ce,
const char *update_string;
enum submodule_update_type update_type;
char *key;
- struct strbuf displaypath_sb = STRBUF_INIT;
+ struct update_data *ud = suc->update_data;
+ char *displaypath = get_submodule_displaypath(ce->name, ud->prefix);
struct strbuf sb = STRBUF_INIT;
- const char *displaypath = NULL;
int needs_cloning = 0;
int need_free_url = 0;
if (ce_stage(ce)) {
- if (suc->recursive_prefix)
- strbuf_addf(&sb, "%s/%s", suc->recursive_prefix, ce->name);
- else
- strbuf_addstr(&sb, ce->name);
- strbuf_addf(out, _("Skipping unmerged submodule %s"), sb.buf);
+ strbuf_addf(out, _("Skipping unmerged submodule %s"), displaypath);
strbuf_addch(out, '\n');
goto cleanup;
}
sub = submodule_from_path(the_repository, null_oid(), ce->name);
- if (suc->recursive_prefix)
- displaypath = relative_path(suc->recursive_prefix,
- ce->name, &displaypath_sb);
- else
- displaypath = ce->name;
-
if (!sub) {
next_submodule_warn_missing(suc, out, displaypath);
goto cleanup;
@@ -2096,8 +1963,8 @@ static int prepare_to_clone_next_submodule(const struct cache_entry *ce,
}
free(key);
- if (suc->update.type == SM_UPDATE_NONE
- || (suc->update.type == SM_UPDATE_UNSPECIFIED
+ if (suc->update_data->update_strategy.type == SM_UPDATE_NONE
+ || (suc->update_data->update_strategy.type == SM_UPDATE_UNSPECIFIED
&& update_type == SM_UPDATE_NONE)) {
strbuf_addf(out, _("Skipping submodule '%s'"), displaypath);
strbuf_addch(out, '\n');
@@ -2141,38 +2008,38 @@ static int prepare_to_clone_next_submodule(const struct cache_entry *ce,
child->err = -1;
strvec_push(&child->args, "submodule--helper");
strvec_push(&child->args, "clone");
- if (suc->progress)
+ if (suc->update_data->progress)
strvec_push(&child->args, "--progress");
- if (suc->quiet)
+ if (suc->update_data->quiet)
strvec_push(&child->args, "--quiet");
- if (suc->prefix)
- strvec_pushl(&child->args, "--prefix", suc->prefix, NULL);
- if (suc->recommend_shallow && sub->recommend_shallow == 1)
+ if (suc->update_data->prefix)
+ strvec_pushl(&child->args, "--prefix", suc->update_data->prefix, NULL);
+ if (suc->update_data->recommend_shallow && sub->recommend_shallow == 1)
strvec_push(&child->args, "--depth=1");
- if (suc->filter_options && suc->filter_options->choice)
+ else if (suc->update_data->depth)
+ strvec_pushf(&child->args, "--depth=%d", suc->update_data->depth);
+ if (suc->update_data->filter_options && suc->update_data->filter_options->choice)
strvec_pushf(&child->args, "--filter=%s",
- expand_list_objects_filter_spec(suc->filter_options));
- if (suc->require_init)
+ expand_list_objects_filter_spec(suc->update_data->filter_options));
+ if (suc->update_data->require_init)
strvec_push(&child->args, "--require-init");
strvec_pushl(&child->args, "--path", sub->path, NULL);
strvec_pushl(&child->args, "--name", sub->name, NULL);
strvec_pushl(&child->args, "--url", url, NULL);
- if (suc->references.nr) {
+ if (suc->update_data->references.nr) {
struct string_list_item *item;
- for_each_string_list_item(item, &suc->references)
+ for_each_string_list_item(item, &suc->update_data->references)
strvec_pushl(&child->args, "--reference", item->string, NULL);
}
- if (suc->dissociate)
+ if (suc->update_data->dissociate)
strvec_push(&child->args, "--dissociate");
- if (suc->depth)
- strvec_push(&child->args, suc->depth);
- if (suc->single_branch >= 0)
- strvec_push(&child->args, suc->single_branch ?
+ if (suc->update_data->single_branch >= 0)
+ strvec_push(&child->args, suc->update_data->single_branch ?
"--single-branch" :
"--no-single-branch");
cleanup:
- strbuf_release(&displaypath_sb);
+ free(displaypath);
strbuf_release(&sb);
if (need_free_url)
free((void*)url);
@@ -2189,8 +2056,8 @@ static int update_clone_get_next_task(struct child_process *child,
const struct cache_entry *ce;
int index;
- for (; suc->current < suc->list.nr; suc->current++) {
- ce = suc->list.entries[suc->current];
+ for (; suc->current < suc->update_data->list.nr; suc->current++) {
+ ce = suc->update_data->list.entries[suc->current];
if (prepare_to_clone_next_submodule(ce, child, suc, err)) {
int *p = xmalloc(sizeof(*p));
*p = suc->current;
@@ -2205,7 +2072,7 @@ static int update_clone_get_next_task(struct child_process *child,
* stragglers again, which we can imagine as an extension of the
* entry list.
*/
- index = suc->current - suc->list.nr;
+ index = suc->current - suc->update_data->list.nr;
if (index < suc->failed_clones_nr) {
int *p;
ce = suc->failed_clones[index];
@@ -2250,8 +2117,8 @@ static int update_clone_task_finished(int result,
if (!result)
return 0;
- if (idx < suc->list.nr) {
- ce = suc->list.entries[idx];
+ if (idx < suc->update_data->list.nr) {
+ ce = suc->update_data->list.entries[idx];
strbuf_addf(err, _("Failed to clone '%s'. Retry scheduled"),
ce->name);
strbuf_addch(err, '\n');
@@ -2261,7 +2128,7 @@ static int update_clone_task_finished(int result,
suc->failed_clones[suc->failed_clones_nr++] = ce;
return 0;
} else {
- idx -= suc->list.nr;
+ idx -= suc->update_data->list.nr;
ce = suc->failed_clones[idx];
strbuf_addf(err, _("Failed to clone '%s' a second time, aborting"),
ce->name);
@@ -2293,7 +2160,7 @@ static int is_tip_reachable(const char *path, struct object_id *oid)
cp.no_stderr = 1;
strvec_pushl(&cp.args, "rev-list", "-n", "1", hex, "--not", "--all", NULL);
- prepare_submodule_repo_env(&cp.env_array);
+ prepare_submodule_repo_env(&cp.env);
if (capture_command(&cp, &rev, GIT_MAX_HEXSZ + 1) || rev.len)
return 0;
@@ -2305,7 +2172,7 @@ static int fetch_in_submodule(const char *module_path, int depth, int quiet, str
{
struct child_process cp = CHILD_PROCESS_INIT;
- prepare_submodule_repo_env(&cp.env_array);
+ prepare_submodule_repo_env(&cp.env);
cp.git_cmd = 1;
cp.dir = xstrdup(module_path);
@@ -2318,6 +2185,7 @@ static int fetch_in_submodule(const char *module_path, int depth, int quiet, str
char *hex = oid_to_hex(oid);
char *remote = get_default_remote();
strvec_pushl(&cp.args, remote, hex, NULL);
+ free(remote);
}
return run_command(&cp);
@@ -2325,83 +2193,76 @@ static int fetch_in_submodule(const char *module_path, int depth, int quiet, str
static int run_update_command(struct update_data *ud, int subforce)
{
- struct strvec args = STRVEC_INIT;
- struct strvec child_env = STRVEC_INIT;
+ struct child_process cp = CHILD_PROCESS_INIT;
char *oid = oid_to_hex(&ud->oid);
int must_die_on_failure = 0;
- int git_cmd;
switch (ud->update_strategy.type) {
case SM_UPDATE_CHECKOUT:
- git_cmd = 1;
- strvec_pushl(&args, "checkout", "-q", NULL);
+ cp.git_cmd = 1;
+ strvec_pushl(&cp.args, "checkout", "-q", NULL);
if (subforce)
- strvec_push(&args, "-f");
+ strvec_push(&cp.args, "-f");
break;
case SM_UPDATE_REBASE:
- git_cmd = 1;
- strvec_push(&args, "rebase");
+ cp.git_cmd = 1;
+ strvec_push(&cp.args, "rebase");
if (ud->quiet)
- strvec_push(&args, "--quiet");
+ strvec_push(&cp.args, "--quiet");
must_die_on_failure = 1;
break;
case SM_UPDATE_MERGE:
- git_cmd = 1;
- strvec_push(&args, "merge");
+ cp.git_cmd = 1;
+ strvec_push(&cp.args, "merge");
if (ud->quiet)
- strvec_push(&args, "--quiet");
+ strvec_push(&cp.args, "--quiet");
must_die_on_failure = 1;
break;
case SM_UPDATE_COMMAND:
- git_cmd = 0;
- strvec_push(&args, ud->update_strategy.command);
+ cp.use_shell = 1;
+ strvec_push(&cp.args, ud->update_strategy.command);
must_die_on_failure = 1;
break;
default:
BUG("unexpected update strategy type: %s",
submodule_strategy_to_string(&ud->update_strategy));
}
- strvec_push(&args, oid);
+ strvec_push(&cp.args, oid);
- prepare_submodule_repo_env(&child_env);
- if (run_command_v_opt_cd_env(args.v, git_cmd ? RUN_GIT_CMD : RUN_USING_SHELL,
- ud->sm_path, child_env.v)) {
+ cp.dir = xstrdup(ud->sm_path);
+ prepare_submodule_repo_env(&cp.env);
+ if (run_command(&cp)) {
switch (ud->update_strategy.type) {
case SM_UPDATE_CHECKOUT:
- printf(_("Unable to checkout '%s' in submodule path '%s'"),
- oid, ud->displaypath);
+ die_message(_("Unable to checkout '%s' in submodule path '%s'"),
+ oid, ud->displaypath);
break;
case SM_UPDATE_REBASE:
- printf(_("Unable to rebase '%s' in submodule path '%s'"),
- oid, ud->displaypath);
+ die_message(_("Unable to rebase '%s' in submodule path '%s'"),
+ oid, ud->displaypath);
break;
case SM_UPDATE_MERGE:
- printf(_("Unable to merge '%s' in submodule path '%s'"),
- oid, ud->displaypath);
+ die_message(_("Unable to merge '%s' in submodule path '%s'"),
+ oid, ud->displaypath);
break;
case SM_UPDATE_COMMAND:
- printf(_("Execution of '%s %s' failed in submodule path '%s'"),
- ud->update_strategy.command, oid, ud->displaypath);
+ die_message(_("Execution of '%s %s' failed in submodule path '%s'"),
+ ud->update_strategy.command, oid, ud->displaypath);
break;
default:
BUG("unexpected update strategy type: %s",
submodule_strategy_to_string(&ud->update_strategy));
}
- /*
- * NEEDSWORK: We are currently printing to stdout with error
- * return so that the shell caller handles the error output
- * properly. Once we start handling the error messages within
- * C, we should use die() instead.
- */
if (must_die_on_failure)
- return 2;
- /*
- * This signifies to the caller in shell that the command
- * failed without dying
- */
+ exit(128);
+
+ /* the command failed, but update must continue */
return 1;
}
+ if (ud->quiet)
+ return 0;
+
switch (ud->update_strategy.type) {
case SM_UPDATE_CHECKOUT:
printf(_("Submodule path '%s': checked out '%s'\n"),
@@ -2427,7 +2288,7 @@ static int run_update_command(struct update_data *ud, int subforce)
return 0;
}
-static int do_run_update_procedure(struct update_data *ud)
+static int run_update_procedure(struct update_data *ud)
{
int subforce = is_null_oid(&ud->suboid) || ud->force;
@@ -2457,31 +2318,216 @@ static int do_run_update_procedure(struct update_data *ud)
return run_update_command(ud, subforce);
}
-/*
- * NEEDSWORK: As we convert "git submodule update" to C,
- * update_submodule2() will invoke more and more functions, making it
- * difficult to preserve the function ordering without forward
- * declarations.
- *
- * When the conversion is complete, this forward declaration will be
- * unnecessary and should be removed.
- */
-static int update_submodule2(struct update_data *update_data);
-static void update_submodule(struct update_clone_data *ucd)
+static const char *remote_submodule_branch(const char *path)
{
- fprintf(stdout, "dummy %s %d\t%s\n",
- oid_to_hex(&ucd->oid),
- ucd->just_cloned,
- ucd->sub->path);
+ const struct submodule *sub;
+ const char *branch = NULL;
+ char *key;
+
+ sub = submodule_from_path(the_repository, null_oid(), path);
+ if (!sub)
+ return NULL;
+
+ key = xstrfmt("submodule.%s.branch", sub->name);
+ if (repo_config_get_string_tmp(the_repository, key, &branch))
+ branch = sub->branch;
+ free(key);
+
+ if (!branch)
+ return "HEAD";
+
+ if (!strcmp(branch, ".")) {
+ const char *refname = resolve_ref_unsafe("HEAD", 0, NULL, NULL);
+
+ if (!refname)
+ die(_("No such ref: %s"), "HEAD");
+
+ /* detached HEAD */
+ if (!strcmp(refname, "HEAD"))
+ die(_("Submodule (%s) branch configured to inherit "
+ "branch from superproject, but the superproject "
+ "is not on any branch"), sub->name);
+
+ if (!skip_prefix(refname, "refs/heads/", &refname))
+ die(_("Expecting a full ref name, got %s"), refname);
+ return refname;
+ }
+
+ return branch;
}
-static int update_submodules(struct submodule_update_clone *suc)
+static void ensure_core_worktree(const char *path)
{
- int i;
+ const char *cw;
+ struct repository subrepo;
+
+ if (repo_submodule_init(&subrepo, the_repository, path, null_oid()))
+ die(_("could not get a repository handle for submodule '%s'"), path);
+
+ if (!repo_config_get_string_tmp(&subrepo, "core.worktree", &cw)) {
+ char *cfg_file, *abs_path;
+ const char *rel_path;
+ struct strbuf sb = STRBUF_INIT;
+
+ cfg_file = repo_git_path(&subrepo, "config");
+
+ abs_path = absolute_pathdup(path);
+ rel_path = relative_path(abs_path, subrepo.gitdir, &sb);
- run_processes_parallel_tr2(suc->max_jobs, update_clone_get_next_task,
+ git_config_set_in_file(cfg_file, "core.worktree", rel_path);
+
+ free(cfg_file);
+ free(abs_path);
+ strbuf_release(&sb);
+ }
+}
+
+static const char *submodule_update_type_to_label(enum submodule_update_type type)
+{
+ switch (type) {
+ case SM_UPDATE_CHECKOUT:
+ return "checkout";
+ case SM_UPDATE_MERGE:
+ return "merge";
+ case SM_UPDATE_REBASE:
+ return "rebase";
+ case SM_UPDATE_UNSPECIFIED:
+ case SM_UPDATE_NONE:
+ case SM_UPDATE_COMMAND:
+ break;
+ }
+ BUG("unreachable with type %d", type);
+}
+
+static void update_data_to_args(struct update_data *update_data, struct strvec *args)
+{
+ enum submodule_update_type update_type = update_data->update_default;
+
+ if (update_data->displaypath) {
+ strvec_push(args, "--super-prefix");
+ strvec_pushf(args, "%s/", update_data->displaypath);
+ }
+ strvec_pushl(args, "submodule--helper", "update", "--recursive", NULL);
+ strvec_pushf(args, "--jobs=%d", update_data->max_jobs);
+ if (update_data->quiet)
+ strvec_push(args, "--quiet");
+ if (update_data->force)
+ strvec_push(args, "--force");
+ if (update_data->init)
+ strvec_push(args, "--init");
+ if (update_data->remote)
+ strvec_push(args, "--remote");
+ if (update_data->nofetch)
+ strvec_push(args, "--no-fetch");
+ if (update_data->dissociate)
+ strvec_push(args, "--dissociate");
+ if (update_data->progress)
+ strvec_push(args, "--progress");
+ if (update_data->require_init)
+ strvec_push(args, "--require-init");
+ if (update_data->depth)
+ strvec_pushf(args, "--depth=%d", update_data->depth);
+ if (update_type != SM_UPDATE_UNSPECIFIED)
+ strvec_pushf(args, "--%s",
+ submodule_update_type_to_label(update_type));
+
+ if (update_data->references.nr) {
+ struct string_list_item *item;
+ for_each_string_list_item(item, &update_data->references)
+ strvec_pushl(args, "--reference", item->string, NULL);
+ }
+ if (update_data->filter_options && update_data->filter_options->choice)
+ strvec_pushf(args, "--filter=%s",
+ expand_list_objects_filter_spec(
+ update_data->filter_options));
+ if (update_data->recommend_shallow == 0)
+ strvec_push(args, "--no-recommend-shallow");
+ else if (update_data->recommend_shallow == 1)
+ strvec_push(args, "--recommend-shallow");
+ if (update_data->single_branch >= 0)
+ strvec_push(args, update_data->single_branch ?
+ "--single-branch" :
+ "--no-single-branch");
+}
+
+static int update_submodule(struct update_data *update_data)
+{
+ ensure_core_worktree(update_data->sm_path);
+
+ update_data->displaypath = get_submodule_displaypath(
+ update_data->sm_path, update_data->prefix);
+
+ determine_submodule_update_strategy(the_repository, update_data->just_cloned,
+ update_data->sm_path, update_data->update_default,
+ &update_data->update_strategy);
+
+ if (update_data->just_cloned)
+ oidcpy(&update_data->suboid, null_oid());
+ else if (resolve_gitlink_ref(update_data->sm_path, "HEAD", &update_data->suboid))
+ die(_("Unable to find current revision in submodule path '%s'"),
+ update_data->displaypath);
+
+ if (update_data->remote) {
+ char *remote_name = get_default_remote_submodule(update_data->sm_path);
+ const char *branch = remote_submodule_branch(update_data->sm_path);
+ char *remote_ref = xstrfmt("refs/remotes/%s/%s", remote_name, branch);
+
+ if (!update_data->nofetch) {
+ if (fetch_in_submodule(update_data->sm_path, update_data->depth,
+ 0, NULL))
+ die(_("Unable to fetch in submodule path '%s'"),
+ update_data->sm_path);
+ }
+
+ if (resolve_gitlink_ref(update_data->sm_path, remote_ref, &update_data->oid))
+ die(_("Unable to find %s revision in submodule path '%s'"),
+ remote_ref, update_data->sm_path);
+
+ free(remote_ref);
+ }
+
+ if (!oideq(&update_data->oid, &update_data->suboid) || update_data->force)
+ if (run_update_procedure(update_data))
+ return 1;
+
+ if (update_data->recursive) {
+ struct child_process cp = CHILD_PROCESS_INIT;
+ struct update_data next = *update_data;
+ int res;
+
+ next.prefix = NULL;
+ oidcpy(&next.oid, null_oid());
+ oidcpy(&next.suboid, null_oid());
+
+ cp.dir = update_data->sm_path;
+ cp.git_cmd = 1;
+ prepare_submodule_repo_env(&cp.env);
+ update_data_to_args(&next, &cp.args);
+
+ /* die() if child process die()'d */
+ res = run_command(&cp);
+ if (!res)
+ return 0;
+ die_message(_("Failed to recurse into submodule path '%s'"),
+ update_data->displaypath);
+ if (res == 128)
+ exit(res);
+ else if (res)
+ return 1;
+ }
+
+ return 0;
+}
+
+static int update_submodules(struct update_data *update_data)
+{
+ int i, res = 0;
+ struct submodule_update_clone suc = SUBMODULE_UPDATE_CLONE_INIT;
+
+ suc.update_data = update_data;
+ run_processes_parallel_tr2(suc.update_data->max_jobs, update_clone_get_next_task,
update_clone_start_failure,
- update_clone_task_finished, suc, "submodule",
+ update_clone_task_finished, &suc, "submodule",
"parallel/update");
/*
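With --recursive, update_submodule() re-invokes `git submodule--helper update` inside each submodule, rebuilding the command line from the update_data struct via update_data_to_args(). A trimmed sketch of that re-invocation, forwarding only a few of the options (the function name and parameter list are illustrative):

#include "cache.h"
#include "run-command.h"
#include "strvec.h"
#include "submodule.h"

/* Re-run the helper inside a submodule, forwarding a subset of options. */
static int recurse_into_submodule(const char *sm_path, const char *displaypath,
				  int quiet, int max_jobs)
{
	struct child_process cp = CHILD_PROCESS_INIT;

	cp.dir = sm_path;
	cp.git_cmd = 1;
	prepare_submodule_repo_env(&cp.env);

	strvec_push(&cp.args, "--super-prefix");
	strvec_pushf(&cp.args, "%s/", displaypath);
	strvec_pushl(&cp.args, "submodule--helper", "update", "--recursive", NULL);
	strvec_pushf(&cp.args, "--jobs=%d", max_jobs);
	if (quiet)
		strvec_push(&cp.args, "--quiet");

	return run_command(&cp);
}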
@@ -2492,53 +2538,73 @@ static int update_submodules(struct submodule_update_clone *suc)
* checkout involve more straightforward sequential I/O.
* - the listener can avoid doing any work if fetching failed.
*/
- if (suc->quickstop)
- return 1;
+ if (suc.quickstop) {
+ res = 1;
+ goto cleanup;
+ }
- for (i = 0; i < suc->update_clone_nr; i++)
- update_submodule(&suc->update_clone[i]);
+ for (i = 0; i < suc.update_clone_nr; i++) {
+ struct update_clone_data ucd = suc.update_clone[i];
- return 0;
+ oidcpy(&update_data->oid, &ucd.oid);
+ update_data->just_cloned = ucd.just_cloned;
+ update_data->sm_path = ucd.sub->path;
+
+ if (update_submodule(update_data))
+ res = 1;
+ }
+
+cleanup:
+ string_list_clear(&update_data->references, 0);
+ return res;
}
-static int update_clone(int argc, const char **argv, const char *prefix)
+static int module_update(int argc, const char **argv, const char *prefix)
{
- const char *update = NULL;
struct pathspec pathspec;
- struct submodule_update_clone suc = SUBMODULE_UPDATE_CLONE_INIT;
+ struct update_data opt = UPDATE_DATA_INIT;
struct list_objects_filter_options filter_options;
int ret;
- struct option module_update_clone_options[] = {
- OPT_BOOL(0, "init", &suc.init,
+ struct option module_update_options[] = {
+ OPT__FORCE(&opt.force, N_("force checkout updates"), 0),
+ OPT_BOOL(0, "init", &opt.init,
N_("initialize uninitialized submodules before update")),
- OPT_STRING(0, "prefix", &prefix,
+ OPT_BOOL(0, "remote", &opt.remote,
+ N_("use SHA-1 of submodule's remote tracking branch")),
+ OPT_BOOL(0, "recursive", &opt.recursive,
+ N_("traverse submodules recursively")),
+ OPT_BOOL('N', "no-fetch", &opt.nofetch,
+ N_("don't fetch new objects from the remote site")),
+ OPT_STRING(0, "prefix", &opt.prefix,
N_("path"),
N_("path into the working tree")),
- OPT_STRING(0, "recursive-prefix", &suc.recursive_prefix,
- N_("path"),
- N_("path into the working tree, across nested "
- "submodule boundaries")),
- OPT_STRING(0, "update", &update,
- N_("string"),
- N_("rebase, merge, checkout or none")),
- OPT_STRING_LIST(0, "reference", &suc.references, N_("repo"),
+ OPT_SET_INT(0, "checkout", &opt.update_default,
+ N_("use the 'checkout' update strategy (default)"),
+ SM_UPDATE_CHECKOUT),
+ OPT_SET_INT('m', "merge", &opt.update_default,
+ N_("use the 'merge' update strategy"),
+ SM_UPDATE_MERGE),
+ OPT_SET_INT('r', "rebase", &opt.update_default,
+ N_("use the 'rebase' update strategy"),
+ SM_UPDATE_REBASE),
+ OPT_STRING_LIST(0, "reference", &opt.references, N_("repo"),
N_("reference repository")),
- OPT_BOOL(0, "dissociate", &suc.dissociate,
+ OPT_BOOL(0, "dissociate", &opt.dissociate,
N_("use --reference only while cloning")),
- OPT_STRING(0, "depth", &suc.depth, "<depth>",
+ OPT_INTEGER(0, "depth", &opt.depth,
N_("create a shallow clone truncated to the "
"specified number of revisions")),
- OPT_INTEGER('j', "jobs", &suc.max_jobs,
+ OPT_INTEGER('j', "jobs", &opt.max_jobs,
N_("parallel jobs")),
- OPT_BOOL(0, "recommend-shallow", &suc.recommend_shallow,
+ OPT_BOOL(0, "recommend-shallow", &opt.recommend_shallow,
N_("whether the initial clone should follow the shallow recommendation")),
- OPT__QUIET(&suc.quiet, N_("don't print cloning progress")),
- OPT_BOOL(0, "progress", &suc.progress,
+ OPT__QUIET(&opt.quiet, N_("don't print cloning progress")),
+ OPT_BOOL(0, "progress", &opt.progress,
N_("force cloning progress")),
- OPT_BOOL(0, "require-init", &suc.require_init,
- N_("disallow cloning into non-empty directory")),
- OPT_BOOL(0, "single-branch", &suc.single_branch,
+ OPT_BOOL(0, "require-init", &opt.require_init,
+ N_("disallow cloning into non-empty directory, implies --init")),
+ OPT_BOOL(0, "single-branch", &opt.single_branch,
N_("clone only one branch, HEAD or --branch")),
OPT_PARSE_LIST_OBJECTS_FILTER(&filter_options),
OPT_END()
@@ -2553,46 +2619,40 @@ static int update_clone(int argc, const char **argv, const char *prefix)
" [--recursive] [--[no-]single-branch] [--] [<path>...]"),
NULL
};
- suc.prefix = prefix;
- update_clone_config_from_gitmodules(&suc.max_jobs);
- git_config(git_update_clone_config, &suc.max_jobs);
+ update_clone_config_from_gitmodules(&opt.max_jobs);
+ git_config(git_update_clone_config, &opt.max_jobs);
memset(&filter_options, 0, sizeof(filter_options));
- argc = parse_options(argc, argv, prefix, module_update_clone_options,
+ argc = parse_options(argc, argv, prefix, module_update_options,
git_submodule_helper_usage, 0);
- if (filter_options.choice && !suc.init) {
- /*
- * NEEDSWORK: Don't use usage_with_options() because the
- * usage string is for "git submodule update", but the
- * options are for "git submodule--helper update-clone".
- *
- * This will no longer be an issue when "update-clone"
- * is replaced by "git submodule--helper update".
- */
- usage(git_submodule_helper_usage[0]);
+ if (opt.require_init)
+ opt.init = 1;
+
+ if (filter_options.choice && !opt.init) {
+ usage_with_options(git_submodule_helper_usage,
+ module_update_options);
}
- suc.filter_options = &filter_options;
+ opt.filter_options = &filter_options;
- if (update)
- if (parse_submodule_update_strategy(update, &suc.update) < 0)
- die(_("bad value for update parameter"));
+ if (opt.update_default)
+ opt.update_strategy.type = opt.update_default;
- if (module_list_compute(argc, argv, prefix, &pathspec, &suc.list) < 0) {
+ if (module_list_compute(argc, argv, prefix, &pathspec, &opt.list) < 0) {
list_objects_filter_release(&filter_options);
return 1;
}
if (pathspec.nr)
- suc.warn_if_uninitialized = 1;
+ opt.warn_if_uninitialized = 1;
- if (suc.init) {
+ if (opt.init) {
struct module_list list = MODULE_LIST_INIT;
struct init_cb info = INIT_CB_INIT;
- if (module_list_compute(argc, argv, suc.prefix,
+ if (module_list_compute(argc, argv, opt.prefix,
&pathspec, &list) < 0)
return 1;
@@ -2603,127 +2663,18 @@ static int update_clone(int argc, const char **argv, const char *prefix)
if (!argc && git_config_get_value_multi("submodule.active"))
module_list_active(&list);
- info.prefix = suc.prefix;
- info.superprefix = suc.recursive_prefix;
- if (suc.quiet)
+ info.prefix = opt.prefix;
+ if (opt.quiet)
info.flags |= OPT_QUIET;
for_each_listed_submodule(&list, init_submodule_cb, &info);
}
- ret = update_submodules(&suc);
+ ret = update_submodules(&opt);
list_objects_filter_release(&filter_options);
return ret;
}
-static int run_update_procedure(int argc, const char **argv, const char *prefix)
-{
- char *prefixed_path, *update = NULL;
- struct update_data update_data = UPDATE_DATA_INIT;
-
- struct option options[] = {
- OPT__QUIET(&update_data.quiet,
- N_("suppress output for update by rebase or merge")),
- OPT__FORCE(&update_data.force, N_("force checkout updates"),
- 0),
- OPT_BOOL('N', "no-fetch", &update_data.nofetch,
- N_("don't fetch new objects from the remote site")),
- OPT_BOOL(0, "just-cloned", &update_data.just_cloned,
- N_("overrides update mode in case the repository is a fresh clone")),
- OPT_INTEGER(0, "depth", &update_data.depth, N_("depth for shallow fetch")),
- OPT_STRING(0, "prefix", &prefix,
- N_("path"),
- N_("path into the working tree")),
- OPT_STRING(0, "update", &update,
- N_("string"),
- N_("rebase, merge, checkout or none")),
- OPT_STRING(0, "recursive-prefix", &update_data.recursive_prefix, N_("path"),
- N_("path into the working tree, across nested "
- "submodule boundaries")),
- OPT_CALLBACK_F(0, "oid", &update_data.oid, N_("sha1"),
- N_("SHA1 expected by superproject"), PARSE_OPT_NONEG,
- parse_opt_object_id),
- OPT_BOOL(0, "remote", &update_data.remote,
- N_("use SHA-1 of submodule's remote tracking branch")),
- OPT_END()
- };
-
- const char *const usage[] = {
- N_("git submodule--helper run-update-procedure [<options>] <path>"),
- NULL
- };
-
- argc = parse_options(argc, argv, prefix, options, usage, 0);
-
- if (argc != 1)
- usage_with_options(usage, options);
-
- update_data.sm_path = argv[0];
-
- if (update_data.recursive_prefix)
- prefixed_path = xstrfmt("%s%s", update_data.recursive_prefix, update_data.sm_path);
- else
- prefixed_path = xstrdup(update_data.sm_path);
-
- update_data.displaypath = get_submodule_displaypath(prefixed_path, prefix);
-
- determine_submodule_update_strategy(the_repository, update_data.just_cloned,
- update_data.sm_path, update,
- &update_data.update_strategy);
-
- free(prefixed_path);
- return update_submodule2(&update_data);
-}
-
-static int resolve_relative_path(int argc, const char **argv, const char *prefix)
-{
- struct strbuf sb = STRBUF_INIT;
- if (argc != 3)
- die("submodule--helper relative-path takes exactly 2 arguments, got %d", argc);
-
- printf("%s", relative_path(argv[1], argv[2], &sb));
- strbuf_release(&sb);
- return 0;
-}
-
-static const char *remote_submodule_branch(const char *path)
-{
- const struct submodule *sub;
- const char *branch = NULL;
- char *key;
-
- sub = submodule_from_path(the_repository, null_oid(), path);
- if (!sub)
- return NULL;
-
- key = xstrfmt("submodule.%s.branch", sub->name);
- if (repo_config_get_string_tmp(the_repository, key, &branch))
- branch = sub->branch;
- free(key);
-
- if (!branch)
- return "HEAD";
-
- if (!strcmp(branch, ".")) {
- const char *refname = resolve_ref_unsafe("HEAD", 0, NULL, NULL);
-
- if (!refname)
- die(_("No such ref: %s"), "HEAD");
-
- /* detached HEAD */
- if (!strcmp(refname, "HEAD"))
- die(_("Submodule (%s) branch configured to inherit "
- "branch from superproject, but the superproject "
- "is not on any branch"), sub->name);
-
- if (!skip_prefix(refname, "refs/heads/", &refname))
- die(_("Expecting a full ref name, got %s"), refname);
- return refname;
- }
-
- return branch;
-}
-
static int push_check(int argc, const char **argv, const char *prefix)
{
struct remote *remote;
@@ -2801,32 +2752,6 @@ static int push_check(int argc, const char **argv, const char *prefix)
return 0;
}
-static void ensure_core_worktree(const char *path)
-{
- const char *cw;
- struct repository subrepo;
-
- if (repo_submodule_init(&subrepo, the_repository, path, null_oid()))
- die(_("could not get a repository handle for submodule '%s'"), path);
-
- if (!repo_config_get_string_tmp(&subrepo, "core.worktree", &cw)) {
- char *cfg_file, *abs_path;
- const char *rel_path;
- struct strbuf sb = STRBUF_INIT;
-
- cfg_file = repo_git_path(&subrepo, "config");
-
- abs_path = absolute_pathdup(path);
- rel_path = relative_path(abs_path, subrepo.gitdir, &sb);
-
- git_config_set_in_file(cfg_file, "core.worktree", rel_path);
-
- free(cfg_file);
- free(abs_path);
- strbuf_release(&sb);
- }
-}
-
static int absorb_git_dirs(int argc, const char **argv, const char *prefix)
{
int i;
@@ -2844,7 +2769,7 @@ static int absorb_git_dirs(int argc, const char **argv, const char *prefix)
};
const char *const git_submodule_helper_usage[] = {
- N_("git submodule--helper absorb-git-dirs [<options>] [<path>...]"),
+ N_("git submodule absorbgitdirs [<options>] [<path>...]"),
NULL
};
@@ -2949,7 +2874,7 @@ static int module_set_url(int argc, const char **argv, const char *prefix)
OPT_END()
};
const char *const usage[] = {
- N_("git submodule--helper set-url [--quiet] <path> <newurl>"),
+ N_("git submodule set-url [--quiet] <path> <newurl>"),
NULL
};
@@ -2988,8 +2913,8 @@ static int module_set_branch(int argc, const char **argv, const char *prefix)
OPT_END()
};
const char *const usage[] = {
- N_("git submodule--helper set-branch [-q|--quiet] (-d|--default) <path>"),
- N_("git submodule--helper set-branch [-q|--quiet] (-b|--branch) <branch> <path>"),
+ N_("git submodule set-branch [-q|--quiet] (-d|--default) <path>"),
+ N_("git submodule set-branch [-q|--quiet] (-b|--branch) <branch> <path>"),
NULL
};
@@ -3021,15 +2946,16 @@ static int module_create_branch(int argc, const char **argv, const char *prefix)
OPT__FORCE(&force, N_("force creation"), 0),
OPT_BOOL(0, "create-reflog", &reflog,
N_("create the branch's reflog")),
- OPT_SET_INT('t', "track", &track,
- N_("set up tracking mode (see git-pull(1))"),
- BRANCH_TRACK_EXPLICIT),
+ OPT_CALLBACK_F('t', "track", &track, "(direct|inherit)",
+ N_("set branch tracking configuration"),
+ PARSE_OPT_OPTARG,
+ parse_opt_tracking_mode),
OPT__DRY_RUN(&dry_run,
N_("show whether the branch would be created")),
OPT_END()
};
const char *const usage[] = {
- N_("git submodule--helper create-branch [-f|--force] [--create-reflog] [-q|--quiet] [-t|--track] [-n|--dry-run] <name> <start_oid> <start_name>"),
+ N_("git submodule--helper create-branch [-f|--force] [--create-reflog] [-q|--quiet] [-t|--track] [-n|--dry-run] <name> <start-oid> <start-name>"),
NULL
};
@@ -3048,41 +2974,6 @@ static int module_create_branch(int argc, const char **argv, const char *prefix)
return 0;
}
-/* NEEDSWORK: this is a temporary name until we delete update_submodule() */
-static int update_submodule2(struct update_data *update_data)
-{
- ensure_core_worktree(update_data->sm_path);
- if (update_data->just_cloned)
- oidcpy(&update_data->suboid, null_oid());
- else if (resolve_gitlink_ref(update_data->sm_path, "HEAD", &update_data->suboid))
- die(_("Unable to find current revision in submodule path '%s'"),
- update_data->displaypath);
-
- if (update_data->remote) {
- char *remote_name = get_default_remote_submodule(update_data->sm_path);
- const char *branch = remote_submodule_branch(update_data->sm_path);
- char *remote_ref = xstrfmt("refs/remotes/%s/%s", remote_name, branch);
-
- if (!update_data->nofetch) {
- if (fetch_in_submodule(update_data->sm_path, update_data->depth,
- 0, NULL))
- die(_("Unable to fetch in submodule path '%s'"),
- update_data->sm_path);
- }
-
- if (resolve_gitlink_ref(update_data->sm_path, remote_ref, &update_data->oid))
- die(_("Unable to find %s revision in submodule path '%s'"),
- remote_ref, update_data->sm_path);
-
- free(remote_ref);
- }
-
- if (!oideq(&update_data->oid, &update_data->suboid) || update_data->force)
- return do_run_update_procedure(update_data);
-
- return 3;
-}
-
struct add_data {
const char *prefix;
const char *branch;
@@ -3105,9 +2996,9 @@ static void append_fetch_remotes(struct strbuf *msg, const char *git_dir_path)
struct strbuf sb_remote_out = STRBUF_INIT;
cp_remote.git_cmd = 1;
- strvec_pushf(&cp_remote.env_array,
+ strvec_pushf(&cp_remote.env,
"GIT_DIR=%s", git_dir_path);
- strvec_push(&cp_remote.env_array, "GIT_WORK_TREE=.");
+ strvec_push(&cp_remote.env, "GIT_WORK_TREE=.");
strvec_pushl(&cp_remote.args, "remote", "-v", NULL);
if (!capture_command(&cp_remote, &sb_remote_out, 0)) {
char *next_line;
@@ -3191,7 +3082,7 @@ static int add_submodule(const struct add_data *add_data)
if (clone_submodule(&clone_data))
return -1;
- prepare_submodule_repo_env(&cp.env_array);
+ prepare_submodule_repo_env(&cp.env);
cp.git_cmd = 1;
cp.dir = add_data->sm_path;
/*
@@ -3360,14 +3251,14 @@ static int module_add(int argc, const char **argv, const char *prefix)
N_("reference repository")),
OPT_BOOL(0, "dissociate", &dissociate, N_("borrow the objects from reference repositories")),
OPT_STRING(0, "name", &add_data.sm_name, N_("name"),
- N_("sets the submodule’s name to the given string "
+ N_("sets the submodule's name to the given string "
"instead of defaulting to its path")),
OPT_INTEGER(0, "depth", &add_data.depth, N_("depth for shallow clones")),
OPT_END()
};
const char *const usage[] = {
- N_("git submodule--helper add [<options>] [--] <repository> [<path>]"),
+ N_("git submodule add [<options>] [--] <repository> [<path>]"),
NULL
};
@@ -3469,20 +3360,18 @@ struct cmd_struct {
static struct cmd_struct commands[] = {
{"list", module_list, 0},
{"name", module_name, 0},
- {"clone", module_clone, 0},
- {"add", module_add, SUPPORT_SUPER_PREFIX},
- {"update-clone", update_clone, 0},
- {"run-update-procedure", run_update_procedure, 0},
- {"relative-path", resolve_relative_path, 0},
+ {"clone", module_clone, SUPPORT_SUPER_PREFIX},
+ {"add", module_add, 0},
+ {"update", module_update, SUPPORT_SUPER_PREFIX},
{"resolve-relative-url-test", resolve_relative_url_test, 0},
{"foreach", module_foreach, SUPPORT_SUPER_PREFIX},
- {"init", module_init, SUPPORT_SUPER_PREFIX},
+ {"init", module_init, 0},
{"status", module_status, SUPPORT_SUPER_PREFIX},
{"sync", module_sync, SUPPORT_SUPER_PREFIX},
{"deinit", module_deinit, 0},
- {"summary", module_summary, SUPPORT_SUPER_PREFIX},
+ {"summary", module_summary, 0},
{"push-check", push_check, 0},
- {"absorb-git-dirs", absorb_git_dirs, SUPPORT_SUPER_PREFIX},
+ {"absorbgitdirs", absorb_git_dirs, SUPPORT_SUPER_PREFIX},
{"is-active", is_active, 0},
{"check-name", check_name, 0},
{"config", module_config, 0},
diff --git a/builtin/tag.c b/builtin/tag.c
index e5a8f85..75dece0 100644
--- a/builtin/tag.c
+++ b/builtin/tag.c
@@ -364,7 +364,7 @@ static void create_reflog_msg(const struct object_id *oid, struct strbuf *sb)
strbuf_addstr(sb, "object of unknown type");
break;
case OBJ_COMMIT:
- if ((buf = read_object_file(oid, &type, &size)) != NULL) {
+ if ((buf = read_object_file(oid, &type, &size))) {
subject_len = find_commit_subject(buf, &subject_start);
strbuf_insert(sb, sb->len, subject_start, subject_len);
} else {
@@ -372,7 +372,7 @@ static void create_reflog_msg(const struct object_id *oid, struct strbuf *sb)
}
free(buf);
- if ((c = lookup_commit_reference(the_repository, oid)) != NULL)
+ if ((c = lookup_commit_reference(the_repository, oid)))
strbuf_addf(sb, ", %s", show_date(c->date, 0, DATE_MODE(SHORT)));
break;
case OBJ_TREE:
diff --git a/builtin/unpack-objects.c b/builtin/unpack-objects.c
index dbeb068..43789b8 100644
--- a/builtin/unpack-objects.c
+++ b/builtin/unpack-objects.c
@@ -1,5 +1,6 @@
#include "builtin.h"
#include "cache.h"
+#include "bulk-checkin.h"
#include "config.h"
#include "object-store.h"
#include "object.h"
@@ -96,15 +97,27 @@ static void use(int bytes)
display_throughput(progress, consumed_bytes);
}
+/*
+ * Decompress a zstream from the standard input into a newly
+ * allocated buffer of the specified size and return the buffer.
+ * The caller is responsible for freeing the returned buffer.
+ *
+ * In dry_run mode, however, "get_data()" is only used to check the
+ * integrity of the data, and the returned buffer is not used at all.
+ * Therefore, in dry_run mode, "get_data()" releases the small
+ * buffer it reuses to hold temporary zstream output and returns
+ * NULL instead of garbage data.
+ */
static void *get_data(unsigned long size)
{
git_zstream stream;
- void *buf = xmallocz(size);
+ unsigned long bufsize = dry_run && size > 8192 ? 8192 : size;
+ void *buf = xmallocz(bufsize);
memset(&stream, 0, sizeof(stream));
stream.next_out = buf;
- stream.avail_out = size;
+ stream.avail_out = bufsize;
stream.next_in = fill(1);
stream.avail_in = len;
git_inflate_init(&stream);
@@ -124,8 +137,17 @@ static void *get_data(unsigned long size)
}
stream.next_in = fill(1);
stream.avail_in = len;
+ if (dry_run) {
+ /* reuse the buffer in dry_run mode */
+ stream.next_out = buf;
+ stream.avail_out = bufsize > size - stream.total_out ?
+ size - stream.total_out :
+ bufsize;
+ }
}
git_inflate_end(&stream);
+ if (dry_run)
+ FREE_AND_NULL(buf);
return buf;
}
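In dry-run mode the inflated bytes are never consumed, so get_data() now caps its buffer at 8 KiB and rewinds it whenever it fills, checking stream integrity without allocating the full object size. A self-contained sketch of the same rewinding idea, using zlib directly rather than the in-tree git_zstream wrapper:

/* cc demo.c -lz */
#include <stdio.h>
#include <string.h>
#include <zlib.h>

/*
 * Inflate a zlib stream from 'in', discarding the output, using a
 * fixed 8 KiB window the way get_data() does under --dry-run.
 * Returns 0 if the stream inflates cleanly, -1 otherwise.
 */
static int check_zstream(FILE *in)
{
	unsigned char inbuf[8192], outbuf[8192];
	z_stream zs;
	int status = Z_OK;

	memset(&zs, 0, sizeof(zs));
	if (inflateInit(&zs) != Z_OK)
		return -1;

	while (status == Z_OK) {
		if (!zs.avail_in) {
			zs.avail_in = fread(inbuf, 1, sizeof(inbuf), in);
			zs.next_in = inbuf;
			if (!zs.avail_in)
				break;	/* truncated input */
		}
		/* Rewind the output window instead of growing it. */
		zs.next_out = outbuf;
		zs.avail_out = sizeof(outbuf);
		status = inflate(&zs, Z_NO_FLUSH);
	}

	inflateEnd(&zs);
	return status == Z_STREAM_END ? 0 : -1;
}

int main(void)
{
	return check_zstream(stdin) ? 1 : 0;
}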
@@ -325,10 +347,70 @@ static void unpack_non_delta_entry(enum object_type type, unsigned long size,
{
void *buf = get_data(size);
- if (!dry_run && buf)
+ if (buf)
write_object(nr, type, buf, size);
- else
- free(buf);
+}
+
+struct input_zstream_data {
+ git_zstream *zstream;
+ unsigned char buf[8192];
+ int status;
+};
+
+static const void *feed_input_zstream(struct input_stream *in_stream,
+ unsigned long *readlen)
+{
+ struct input_zstream_data *data = in_stream->data;
+ git_zstream *zstream = data->zstream;
+ void *in = fill(1);
+
+ if (in_stream->is_finished) {
+ *readlen = 0;
+ return NULL;
+ }
+
+ zstream->next_out = data->buf;
+ zstream->avail_out = sizeof(data->buf);
+ zstream->next_in = in;
+ zstream->avail_in = len;
+
+ data->status = git_inflate(zstream, 0);
+
+ in_stream->is_finished = data->status != Z_OK;
+ use(len - zstream->avail_in);
+ *readlen = sizeof(data->buf) - zstream->avail_out;
+
+ return data->buf;
+}
+
+static void stream_blob(unsigned long size, unsigned nr)
+{
+ git_zstream zstream = { 0 };
+ struct input_zstream_data data = { 0 };
+ struct input_stream in_stream = {
+ .read = feed_input_zstream,
+ .data = &data,
+ };
+ struct obj_info *info = &obj_list[nr];
+
+ data.zstream = &zstream;
+ git_inflate_init(&zstream);
+
+ if (stream_loose_object(&in_stream, size, &info->oid))
+ die(_("failed to write object in stream"));
+
+ if (data.status != Z_STREAM_END)
+ die(_("inflate returned (%d)"), data.status);
+ git_inflate_end(&zstream);
+
+ if (strict) {
+ struct blob *blob = lookup_blob(the_repository, &info->oid);
+
+ if (!blob)
+ die(_("invalid blob object from stream"));
+ blob->object.flags |= FLAG_WRITTEN;
+ }
+ info->obj = NULL;
}
static int resolve_against_held(unsigned nr, const struct object_id *base,
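stream_blob() feeds stream_loose_object() through the input_stream callback interface instead of materializing the whole blob. As a sketch of that interface, here is a hypothetical reader that streams a blob already held in memory, written as if it sat next to stream_blob() in this file (so it relies on the includes already present there):

/* Hypothetical: stream a blob that is already in memory, for illustration. */
struct memory_data {
	const char *buf;
	unsigned long len;
};

static const void *feed_memory(struct input_stream *in_stream,
			       unsigned long *readlen)
{
	struct memory_data *data = in_stream->data;

	/* Hand the whole buffer over in one call, then signal EOF. */
	*readlen = in_stream->is_finished ? 0 : data->len;
	in_stream->is_finished = 1;
	return *readlen ? data->buf : NULL;
}

static int write_blob_from_memory(const char *buf, unsigned long len,
				  struct object_id *oid)
{
	struct memory_data data = { .buf = buf, .len = len };
	struct input_stream in_stream = {
		.read = feed_memory,
		.data = &data,
	};

	return stream_loose_object(&in_stream, len, oid);
}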
@@ -358,10 +440,8 @@ static void unpack_delta_entry(enum object_type type, unsigned long delta_size,
oidread(&base_oid, fill(the_hash_algo->rawsz));
use(the_hash_algo->rawsz);
delta_data = get_data(delta_size);
- if (dry_run || !delta_data) {
- free(delta_data);
+ if (!delta_data)
return;
- }
if (has_object_file(&base_oid))
; /* Ok we have this one */
else if (resolve_against_held(nr, &base_oid,
@@ -397,10 +477,8 @@ static void unpack_delta_entry(enum object_type type, unsigned long delta_size,
die("offset value out of bound for delta base object");
delta_data = get_data(delta_size);
- if (dry_run || !delta_data) {
- free(delta_data);
+ if (!delta_data)
return;
- }
lo = 0;
hi = nr;
while (lo < hi) {
@@ -467,9 +545,14 @@ static void unpack_one(unsigned nr)
}
switch (type) {
+ case OBJ_BLOB:
+ if (!dry_run && size > big_file_threshold) {
+ stream_blob(size, nr);
+ return;
+ }
+ /* fallthrough */
case OBJ_COMMIT:
case OBJ_TREE:
- case OBJ_BLOB:
case OBJ_TAG:
unpack_non_delta_entry(type, size, nr);
return;
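
Note: the dispatch above streams only blobs, only outside dry_run, and only past core.bigFileThreshold; everything else keeps the existing in-core path. The predicate, written out as a tiny helper (the helper name is hypothetical; "dry_run" and "big_file_threshold" are the existing globals used above):

static int should_stream_blob(enum object_type type, unsigned long size)
{
	return type == OBJ_BLOB && !dry_run && size > big_file_threshold;
}
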
@@ -503,10 +586,12 @@ static void unpack_all(void)
if (!quiet)
progress = start_progress(_("Unpacking objects"), nr_objects);
CALLOC_ARRAY(obj_list, nr_objects);
+ begin_odb_transaction();
for (i = 0; i < nr_objects; i++) {
unpack_one(i);
display_progress(progress, i + 1);
}
+ end_odb_transaction();
stop_progress(&progress);
if (delta_list)
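
Note: wrapping the whole unpack loop in a single ODB transaction lets the object layer batch work across all of the new objects instead of paying it per object. The same bracketing pattern, reduced to a minimal sketch using only the transaction calls shown in this patch (add_one_object() is a hypothetical per-object writer):

static void add_many_objects(unsigned int nr)
{
	unsigned int i;

	begin_odb_transaction();	/* batch work across all additions */
	for (i = 0; i < nr; i++)
		add_one_object(i);	/* hypothetical: writes one object */
	end_odb_transaction();		/* flush; objects become visible */
}
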
diff --git a/builtin/update-index.c b/builtin/update-index.c
index aafe7ee..b622499 100644
--- a/builtin/update-index.c
+++ b/builtin/update-index.c
@@ -5,6 +5,7 @@
*/
#define USE_THE_INDEX_COMPATIBILITY_MACROS
#include "cache.h"
+#include "bulk-checkin.h"
#include "config.h"
#include "lockfile.h"
#include "quote.h"
@@ -57,6 +58,14 @@ static void report(const char *fmt, ...)
if (!verbose)
return;
+ /*
+ * It is possible, though unlikely, that a caller could use the verbose
+	 * output to synchronize with the addition of objects to the object
+ * database. The current implementation of ODB transactions leaves
+ * objects invisible while a transaction is active, so flush the
+ * transaction here before reporting a change made by update-index.
+ */
+ flush_odb_transaction();
va_start(vp, fmt);
vprintf(fmt, vp);
putchar('\n');
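
Note: the flush matters because an open transaction can keep freshly added objects invisible to other processes, so any message that implies "this object now exists" should publish pending objects first. A hedged sketch of that ordering (only flush_odb_transaction() is from this patch; the other names are placeholders):

static void add_and_announce(const char *path)
{
	add_one_path(path);		/* placeholder: stores a new object */
	flush_odb_transaction();	/* publish objects added so far */
	printf("add '%s'\n", path);	/* observers may now rely on the object */
}
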
@@ -1116,6 +1125,12 @@ int cmd_update_index(int argc, const char **argv, const char *prefix)
*/
parse_options_start(&ctx, argc, argv, prefix,
options, PARSE_OPT_STOP_AT_NON_OPTION);
+
+ /*
+ * Allow the object layer to optimize adding multiple objects in
+ * a batch.
+ */
+ begin_odb_transaction();
while (ctx.argc) {
if (parseopt_state != PARSE_OPT_DONE)
parseopt_state = parse_options_step(&ctx, options,
@@ -1190,6 +1205,11 @@ int cmd_update_index(int argc, const char **argv, const char *prefix)
strbuf_release(&buf);
}
+ /*
+	 * By now we have added all of the new objects.
+ */
+ end_odb_transaction();
+
if (split_index > 0) {
if (git_config_get_split_index() == 0)
warning(_("core.splitIndex is set to false; "
@@ -1236,14 +1256,33 @@ int cmd_update_index(int argc, const char **argv, const char *prefix)
}
if (fsmonitor > 0) {
- if (git_config_get_fsmonitor() == 0)
+ enum fsmonitor_mode fsm_mode = fsm_settings__get_mode(r);
+ enum fsmonitor_reason reason = fsm_settings__get_reason(r);
+
+ /*
+	 * The user wants to turn on FSMonitor using the command-line
+	 * argument. (We don't know (or care) whether that is the IPC
+	 * or HOOK version.)
+	 *
+	 * Use one of the __get routines to force-load the FSMonitor
+	 * config settings into the repo-settings. That will detect
+	 * whether the file system is compatible so that we can stop
+	 * here with a nice error message.
+ */
+ if (reason > FSMONITOR_REASON_OK)
+ die("%s",
+ fsm_settings__get_incompatible_msg(r, reason));
+
+ if (fsm_mode == FSMONITOR_MODE_DISABLED) {
warning(_("core.fsmonitor is unset; "
"set it if you really want to "
"enable fsmonitor"));
+ }
add_fsmonitor(&the_index);
report(_("fsmonitor enabled"));
} else if (!fsmonitor) {
- if (git_config_get_fsmonitor() == 1)
+ enum fsmonitor_mode fsm_mode = fsm_settings__get_mode(r);
+ if (fsm_mode > FSMONITOR_MODE_DISABLED)
warning(_("core.fsmonitor is set; "
"remove it if you really want to "
"disable fsmonitor"));
diff --git a/builtin/worktree.c b/builtin/worktree.c
index 4eaba2a..cd62eef 100644
--- a/builtin/worktree.c
+++ b/builtin/worktree.c
@@ -22,6 +22,7 @@ static const char * const worktree_usage[] = {
N_("git worktree move <worktree> <new-path>"),
N_("git worktree prune [<options>]"),
N_("git worktree remove [<options>] <worktree>"),
+ N_("git worktree repair [<path>...]"),
N_("git worktree unlock <path>"),
NULL
};
@@ -300,7 +301,7 @@ static int checkout_worktree(const struct add_opts *opts,
strvec_pushl(&cp.args, "reset", "--hard", "--no-recurse-submodules", NULL);
if (opts->quiet)
strvec_push(&cp.args, "--quiet");
- strvec_pushv(&cp.env_array, child_env->v);
+ strvec_pushv(&cp.env, child_env->v);
return run_command(&cp);
}
@@ -432,7 +433,7 @@ static int add_worktree(const char *path, const char *refname,
strvec_push(&cp.args, "--quiet");
}
- strvec_pushv(&cp.env_array, child_env.v);
+ strvec_pushv(&cp.env, child_env.v);
ret = run_command(&cp);
if (ret)
goto done;
@@ -650,35 +651,37 @@ static int add(int ac, const char **av, const char *prefix)
return add_worktree(path, branch, &opts);
}
-static void show_worktree_porcelain(struct worktree *wt)
+static void show_worktree_porcelain(struct worktree *wt, int line_terminator)
{
const char *reason;
- printf("worktree %s\n", wt->path);
+ printf("worktree %s%c", wt->path, line_terminator);
if (wt->is_bare)
- printf("bare\n");
+ printf("bare%c", line_terminator);
else {
- printf("HEAD %s\n", oid_to_hex(&wt->head_oid));
+ printf("HEAD %s%c", oid_to_hex(&wt->head_oid), line_terminator);
if (wt->is_detached)
- printf("detached\n");
+ printf("detached%c", line_terminator);
else if (wt->head_ref)
- printf("branch %s\n", wt->head_ref);
+ printf("branch %s%c", wt->head_ref, line_terminator);
}
reason = worktree_lock_reason(wt);
- if (reason && *reason) {
- struct strbuf sb = STRBUF_INIT;
- quote_c_style(reason, &sb, NULL, 0);
- printf("locked %s\n", sb.buf);
- strbuf_release(&sb);
- } else if (reason)
- printf("locked\n");
+ if (reason) {
+ fputs("locked", stdout);
+ if (*reason) {
+ fputc(' ', stdout);
+ write_name_quoted(reason, stdout, line_terminator);
+ } else {
+ fputc(line_terminator, stdout);
+ }
+ }
reason = worktree_prune_reason(wt, expire);
if (reason)
- printf("prunable %s\n", reason);
+ printf("prunable %s%c", reason, line_terminator);
- printf("\n");
+ fputc(line_terminator, stdout);
}
static void show_worktree(struct worktree *wt, int path_maxlen, int abbrev_len)
@@ -756,12 +759,15 @@ static void pathsort(struct worktree **wt)
static int list(int ac, const char **av, const char *prefix)
{
int porcelain = 0;
+ int line_terminator = '\n';
struct option options[] = {
OPT_BOOL(0, "porcelain", &porcelain, N_("machine-readable output")),
OPT__VERBOSE(&verbose, N_("show extended annotations and reasons, if available")),
OPT_EXPIRY_DATE(0, "expire", &expire,
N_("add 'prunable' annotation to worktrees older than <time>")),
+ OPT_SET_INT('z', NULL, &line_terminator,
+ N_("terminate records with a NUL character"), '\0'),
OPT_END()
};
@@ -771,6 +777,8 @@ static int list(int ac, const char **av, const char *prefix)
usage_with_options(worktree_usage, options);
else if (verbose && porcelain)
die(_("options '%s' and '%s' cannot be used together"), "--verbose", "--porcelain");
+ else if (!line_terminator && !porcelain)
+ die(_("the option '%s' requires '%s'"), "-z", "--porcelain");
else {
struct worktree **worktrees = get_worktrees();
int path_maxlen = 0, abbrev = DEFAULT_ABBREV, i;
@@ -783,7 +791,8 @@ static int list(int ac, const char **av, const char *prefix)
for (i = 0; worktrees[i]; i++) {
if (porcelain)
- show_worktree_porcelain(worktrees[i]);
+ show_worktree_porcelain(worktrees[i],
+ line_terminator);
else
show_worktree(worktrees[i], path_maxlen, abbrev);
}
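
Note: with the new '-z' mode, --porcelain output terminates every attribute with NUL instead of LF and ends each worktree record with an extra NUL, so paths containing newlines round-trip safely. A hedged, standalone consumer sketch (not code from this patch) that splits such a stream read from stdin:

#include <stdio.h>
#include <string.h>

int main(void)
{
	char field[4096];
	size_t n = 0;
	int c;

	while ((c = getchar()) != EOF) {
		if (c != '\0') {
			if (n + 1 < sizeof(field))
				field[n++] = (char)c;
			continue;
		}
		field[n] = '\0';
		if (!n)
			puts("-- end of record --");	/* empty field = record separator */
		else if (!strncmp(field, "worktree ", 9))
			printf("path: %s\n", field + 9);
		n = 0;
	}
	return 0;
}
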
@@ -980,9 +989,9 @@ static void check_clean_worktree(struct worktree *wt,
validate_no_submodules(wt);
child_process_init(&cp);
- strvec_pushf(&cp.env_array, "%s=%s/.git",
+ strvec_pushf(&cp.env, "%s=%s/.git",
GIT_DIR_ENVIRONMENT, wt->path);
- strvec_pushf(&cp.env_array, "%s=%s",
+ strvec_pushf(&cp.env, "%s=%s",
GIT_WORK_TREE_ENVIRONMENT, wt->path);
strvec_pushl(&cp.args, "status",
"--porcelain", "--ignore-submodules=none",