Diffstat (limited to 't/perf')
-rw-r--r--   t/perf/README                        25
-rwxr-xr-x   t/perf/aggregate.perl                69
-rwxr-xr-x   t/perf/p1450-fsck.sh                 13
-rwxr-xr-x   t/perf/p1451-fsck-skip-list.sh       40
-rwxr-xr-x   t/perf/p5311-pack-bitmaps-fetch.sh   45
-rw-r--r--   t/perf/perf-lib.sh                   74
6 files changed, 226 insertions, 40 deletions
diff --git a/t/perf/README b/t/perf/README
index 21321a0..be12090 100644
--- a/t/perf/README
+++ b/t/perf/README
@@ -168,3 +168,28 @@ that
While we have tried to make sure that it can cope with embedded
whitespace and other special characters, it will not work with
multi-line data.
+
+Rather than tracking the performance by run-time as `test_perf` does, you
+may also track output size by using `test_size`. The stdout of the
+function should be a single numeric value, which will be captured and
+shown in the aggregated output. For example:
+
+	test_perf 'time foo' '
+		./foo >foo.out
+	'
+
+	test_size 'output size' '
+		wc -c <foo.out
+	'
+
+might produce output like:
+
+	Test                origin           HEAD
+	-------------------------------------------------------------
+	1234.1 time foo     0.37(0.79+0.02)  0.26(0.51+0.02) -29.7%
+	1234.2 output size             4.3M             3.6M -14.7%
+
+The item being measured (and its units) is up to the test; the context
+and the test title should make it clear to the user whether bigger or
+smaller numbers are better. Unlike test_perf, the test code will only be
+run once, since output sizes tend to be more deterministic than timings.
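
As a fuller illustration, a complete perf script wiring both helpers together
might look like the sketch below, in the same shape as
p5311-pack-bitmaps-fetch.sh further down. The repack command and the size
measurement are only examples, and the glob assumes the repack leaves a
single pack file:

	#!/bin/sh

	test_description='sketch: time a repack and record the resulting pack size'

	. ./perf-lib.sh

	test_perf_default_repo

	test_perf 'repack' '
		git repack -ad
	'

	test_size 'pack size' '
		wc -c <.git/objects/pack/pack-*.pack
	'

	test_done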
diff --git a/t/perf/aggregate.perl b/t/perf/aggregate.perl
index bc86516..494907a 100755
--- a/t/perf/aggregate.perl
+++ b/t/perf/aggregate.perl
@@ -13,27 +13,42 @@ sub get_times {
my $line = <$fh>;
return undef if not defined $line;
close $fh or die "cannot close $name: $!";
- $line =~ /^(?:(\d+):)?(\d+):(\d+(?:\.\d+)?) (\d+(?:\.\d+)?) (\d+(?:\.\d+)?)$/
- or die "bad input line: $line";
- my $rt = ((defined $1 ? $1 : 0.0)*60+$2)*60+$3;
- return ($rt, $4, $5);
+ # times
+ if ($line =~ /^(?:(\d+):)?(\d+):(\d+(?:\.\d+)?) (\d+(?:\.\d+)?) (\d+(?:\.\d+)?)$/) {
+ my $rt = ((defined $1 ? $1 : 0.0)*60+$2)*60+$3;
+ return ($rt, $4, $5);
+ # size
+ } elsif ($line =~ /^\d+$/) {
+ return $&;
+ } else {
+ die "bad input line: $line";
+ }
+}
+
+sub relative_change {
+ my ($r, $firstr) = @_;
+ if ($firstr > 0) {
+ return sprintf "%+.1f%%", 100.0*($r-$firstr)/$firstr;
+ } elsif ($r == 0) {
+ return "=";
+ } else {
+ return "+inf";
+ }
}
sub format_times {
my ($r, $u, $s, $firstr) = @_;
+ # no value means we did not finish the test
if (!defined $r) {
return "<missing>";
}
- my $out = sprintf "%.2f(%.2f+%.2f)", $r, $u, $s;
- if (defined $firstr) {
- if ($firstr > 0) {
- $out .= sprintf " %+.1f%%", 100.0*($r-$firstr)/$firstr;
- } elsif ($r == 0) {
- $out .= " =";
- } else {
- $out .= " +inf";
- }
+ # a single value means we have a size, not times
+ if (!defined $u) {
+ return format_size($r, $firstr);
}
+ # otherwise, we have real/user/system times
+ my $out = sprintf "%.2f(%.2f+%.2f)", $r, $u, $s;
+ $out .= ' ' . relative_change($r, $firstr) if defined $firstr;
return $out;
}
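
With this change get_times understands two result-file formats: the
real/user/system line produced for test_perf runs, and a bare integer
produced for test_size. Roughly, the files it reads look like this
(hypothetical file names and values, matching the regexes above):

	$ cat p0000-example.1.times    # test_perf result: [h:]m:s real, then user and sys seconds
	0:01.23 0.79 0.02
	$ cat p0000-example.2.size     # test_size result: a single number
	61984976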
@@ -51,6 +66,25 @@ EOT
exit(1);
}
+sub human_size {
+ my $n = shift;
+ my @units = ('', qw(K M G));
+ while ($n > 900 && @units > 1) {
+ $n /= 1000;
+ shift @units;
+ }
+ return $n unless length $units[0];
+ return sprintf '%.1f%s', $n, $units[0];
+}
+
+sub format_size {
+ my ($size, $first) = @_;
+ # match the width of a time: 0.00(0.00+0.00)
+ my $out = sprintf '%15s', human_size($size);
+ $out .= ' ' . relative_change($size, $first) if defined $first;
+ return $out;
+}
+
my (@dirs, %dirnames, %dirabbrevs, %prefixes, @tests,
$codespeed, $sortby, $subsection, $reponame);
@@ -181,7 +215,14 @@ sub print_default_results {
my $firstr;
for my $i (0..$#dirs) {
my $d = $dirs[$i];
- $times{$prefixes{$d}.$t} = [get_times("$resultsdir/$prefixes{$d}$t.times")];
+ my $base = "$resultsdir/$prefixes{$d}$t";
+ $times{$prefixes{$d}.$t} = [];
+ foreach my $type (qw(times size)) {
+ if (-e "$base.$type") {
+ $times{$prefixes{$d}.$t} = [get_times("$base.$type")];
+ last;
+ }
+ }
my ($r,$u,$s) = @{$times{$prefixes{$d}.$t}};
my $w = length format_times($r,$u,$s,$firstr);
$colwidth[$i] = $w if $w > $colwidth[$i];
diff --git a/t/perf/p1450-fsck.sh b/t/perf/p1450-fsck.sh
new file mode 100755
index 0000000..ae1b841
--- /dev/null
+++ b/t/perf/p1450-fsck.sh
@@ -0,0 +1,13 @@
+#!/bin/sh
+
+test_description='Test fsck performance'
+
+. ./perf-lib.sh
+
+test_perf_large_repo
+
+test_perf 'fsck' '
+ git fsck
+'
+
+test_done
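
Like the other p* scripts, this one would normally be driven through the
./run helper described in t/perf/README; for example (the revisions here are
only illustrative), comparing two builds might look like:

	# from t/perf/; ./run prints the aggregated table when all runs finish
	./run origin/master HEAD -- p1450-fsck.sh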
diff --git a/t/perf/p1451-fsck-skip-list.sh b/t/perf/p1451-fsck-skip-list.sh
new file mode 100755
index 0000000..c2b97d2
--- /dev/null
+++ b/t/perf/p1451-fsck-skip-list.sh
@@ -0,0 +1,40 @@
+#!/bin/sh
+
+test_description='Test fsck skipList performance'
+
+. ./perf-lib.sh
+
+test_perf_fresh_repo
+
+n=1000000
+
+test_expect_success "setup $n bad commits" '
+ for i in $(test_seq 1 $n)
+ do
+ echo "commit refs/heads/master" &&
+ echo "committer C <c@example.com> 1234567890 +0000" &&
+ echo "data <<EOF" &&
+ echo "$i.Q." &&
+ echo "EOF"
+ done | q_to_nul | git fast-import
+'
+
+skip=0
+while test $skip -le $n
+do
+ test_expect_success "create skipList for $skip bad commits" '
+ git log --format=%H --max-count=$skip |
+ sort >skiplist
+ '
+
+ test_perf "fsck with $skip skipped bad commits" '
+ git -c fsck.skipList=skiplist fsck
+ '
+
+ case $skip in
+ 0) skip=1 ;;
+ *) skip=${skip}0 ;;
+ esac
+done
+
+test_done
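
The case arm `*) skip=${skip}0` appends a zero, so each pass multiplies the
skip count by ten. Run on its own, the loop's progression for n=1000000 can
be seen like this:

	skip=0 n=1000000
	while test $skip -le $n
	do
		printf "%s " $skip
		case $skip in
		0) skip=1 ;;
		*) skip=${skip}0 ;;
		esac
	done; echo
	# prints: 0 1 10 100 1000 10000 100000 1000000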
diff --git a/t/perf/p5311-pack-bitmaps-fetch.sh b/t/perf/p5311-pack-bitmaps-fetch.sh
new file mode 100755
index 0000000..b045759
--- /dev/null
+++ b/t/perf/p5311-pack-bitmaps-fetch.sh
@@ -0,0 +1,45 @@
+#!/bin/sh
+
+test_description='performance of fetches from bitmapped packs'
+. ./perf-lib.sh
+
+test_perf_default_repo
+
+test_expect_success 'create bitmapped server repo' '
+ git config pack.writebitmaps true &&
+ git config pack.writebitmaphashcache true &&
+ git repack -ad
+'
+
+# simulate a fetch from a repository that last fetched N days ago, for
+# various values of N. We do so by following the first-parent chain,
+# and assume the first entry in the chain that is N days older than the current
+# HEAD is where the HEAD would have been then.
+for days in 1 2 4 8 16 32 64 128; do
+ title=$(printf '%10s' "($days days)")
+ test_expect_success "setup revs from $days days ago" '
+ now=$(git log -1 --format=%ct HEAD) &&
+ then=$(($now - ($days * 86400))) &&
+ tip=$(git rev-list -1 --first-parent --until=$then HEAD) &&
+ {
+ echo HEAD &&
+ echo ^$tip
+ } >revs
+ '
+
+ test_perf "server $title" '
+ git pack-objects --stdout --revs \
+ --thin --delta-base-offset \
+ <revs >tmp.pack
+ '
+
+ test_size "size $title" '
+ wc -c <tmp.pack
+ '
+
+ test_perf "client $title" '
+ git index-pack --stdin --fix-thin <tmp.pack
+ '
+done
+
+test_done
diff --git a/t/perf/perf-lib.sh b/t/perf/perf-lib.sh
index e4c343a..11d1922 100644
--- a/t/perf/perf-lib.sh
+++ b/t/perf/perf-lib.sh
@@ -179,8 +179,8 @@ exit $ret' >&3 2>&4
return "$eval_ret"
}
-
-test_perf () {
+test_wrapper_ () {
+ test_wrapper_func_=$1; shift
test_start_
test "$#" = 3 && { test_prereq=$1; shift; } || test_prereq=
test "$#" = 2 ||
@@ -191,35 +191,57 @@ test_perf () {
base=$(basename "$0" .sh)
echo "$test_count" >>"$perf_results_dir"/$base.subtests
echo "$1" >"$perf_results_dir"/$base.$test_count.descr
- if test -z "$verbose"; then
- printf "%s" "perf $test_count - $1:"
- else
- echo "perf $test_count - $1:"
- fi
- for i in $(test_seq 1 $GIT_PERF_REPEAT_COUNT); do
- say >&3 "running: $2"
- if test_run_perf_ "$2"
- then
- if test -z "$verbose"; then
- printf " %s" "$i"
- else
- echo "* timing run $i/$GIT_PERF_REPEAT_COUNT:"
- fi
+ base="$perf_results_dir"/"$perf_results_prefix$(basename "$0" .sh)"."$test_count"
+ "$test_wrapper_func_" "$@"
+ fi
+
+ test_finish_
+}
+
+test_perf_ () {
+ if test -z "$verbose"; then
+ printf "%s" "perf $test_count - $1:"
+ else
+ echo "perf $test_count - $1:"
+ fi
+ for i in $(test_seq 1 $GIT_PERF_REPEAT_COUNT); do
+ say >&3 "running: $2"
+ if test_run_perf_ "$2"
+ then
+ if test -z "$verbose"; then
+ printf " %s" "$i"
else
- test -z "$verbose" && echo
- test_failure_ "$@"
- break
+ echo "* timing run $i/$GIT_PERF_REPEAT_COUNT:"
fi
- done
- if test -z "$verbose"; then
- echo " ok"
else
- test_ok_ "$1"
+ test -z "$verbose" && echo
+ test_failure_ "$@"
+ break
fi
- base="$perf_results_dir"/"$perf_results_prefix$(basename "$0" .sh)"."$test_count"
- "$TEST_DIRECTORY"/perf/min_time.perl test_time.* >"$base".times
+ done
+ if test -z "$verbose"; then
+ echo " ok"
+ else
+ test_ok_ "$1"
fi
- test_finish_
+ "$TEST_DIRECTORY"/perf/min_time.perl test_time.* >"$base".times
+}
+
+test_perf () {
+ test_wrapper_ test_perf_ "$@"
+}
+
+test_size_ () {
+ say >&3 "running: $2"
+ if test_eval_ "$2" 3>"$base".size; then
+ test_ok_ "$1"
+ else
+ test_failure_ "$@"
+ fi
+}
+
+test_size () {
+ test_wrapper_ test_size_ "$@"
}
# We extend test_done to print timings at the end (./run disables this
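
Since the argument parsing now lives in test_wrapper_, test_size accepts the
same optional prerequisite argument as test_perf. A sketch of that form
(EXPENSIVE is the usual test-lib prerequisite; the measurement itself is only
an example):

	test_size EXPENSIVE 'size of object store (in KB)' '
		du -sk .git/objects | cut -f1
	'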