Diffstat (limited to 'reftable')
-rw-r--r--  reftable/LICENSE | 31
-rw-r--r--  reftable/basics.c | 126
-rw-r--r--  reftable/basics.h | 74
-rw-r--r--  reftable/basics_test.c | 105
-rw-r--r--  reftable/block.c | 531
-rw-r--r--  reftable/block.h | 142
-rw-r--r--  reftable/block_test.c | 123
-rw-r--r--  reftable/blocksource.c | 144
-rw-r--r--  reftable/blocksource.h | 22
-rw-r--r--  reftable/constants.h | 21
-rw-r--r--  reftable/dump.c | 105
-rw-r--r--  reftable/error.c | 46
-rw-r--r--  reftable/generic.c | 179
-rw-r--r--  reftable/generic.h | 32
-rw-r--r--  reftable/iter.c | 188
-rw-r--r--  reftable/iter.h | 65
-rw-r--r--  reftable/merged.c | 355
-rw-r--r--  reftable/merged.h | 29
-rw-r--r--  reftable/merged_test.c | 454
-rw-r--r--  reftable/pq.c | 77
-rw-r--r--  reftable/pq.h | 41
-rw-r--r--  reftable/pq_test.c | 74
-rw-r--r--  reftable/publicbasics.c | 66
-rw-r--r--  reftable/reader.c | 858
-rw-r--r--  reftable/reader.h | 64
-rw-r--r--  reftable/readwrite_test.c | 966
-rw-r--r--  reftable/record.c | 1342
-rw-r--r--  reftable/record.h | 165
-rw-r--r--  reftable/record_test.c | 419
-rw-r--r--  reftable/refname.c | 206
-rw-r--r--  reftable/refname.h | 29
-rw-r--r--  reftable/refname_test.c | 101
-rw-r--r--  reftable/reftable-blocksource.h | 49
-rw-r--r--  reftable/reftable-error.h | 69
-rw-r--r--  reftable/reftable-generic.h | 47
-rw-r--r--  reftable/reftable-iterator.h | 39
-rw-r--r--  reftable/reftable-malloc.h | 18
-rw-r--r--  reftable/reftable-merged.h | 72
-rw-r--r--  reftable/reftable-reader.h | 101
-rw-r--r--  reftable/reftable-record.h | 118
-rw-r--r--  reftable/reftable-stack.h | 128
-rw-r--r--  reftable/reftable-tests.h | 23
-rw-r--r--  reftable/reftable-writer.h | 155
-rw-r--r--  reftable/stack.c | 1518
-rw-r--r--  reftable/stack.h | 40
-rw-r--r--  reftable/stack_test.c | 1112
-rw-r--r--  reftable/system.h | 23
-rw-r--r--  reftable/test_framework.c | 27
-rw-r--r--  reftable/test_framework.h | 61
-rw-r--r--  reftable/tree.c | 63
-rw-r--r--  reftable/tree.h | 34
-rw-r--r--  reftable/tree_test.c | 60
-rw-r--r--  reftable/writer.c | 713
-rw-r--r--  reftable/writer.h | 51
54 files changed, 11701 insertions, 0 deletions
diff --git a/reftable/LICENSE b/reftable/LICENSE
new file mode 100644
index 0000000..402e0f9
--- /dev/null
+++ b/reftable/LICENSE
@@ -0,0 +1,31 @@
+BSD License
+
+Copyright (c) 2020, Google LLC
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are
+met:
+
+* Redistributions of source code must retain the above copyright notice,
+this list of conditions and the following disclaimer.
+
+* Redistributions in binary form must reproduce the above copyright
+notice, this list of conditions and the following disclaimer in the
+documentation and/or other materials provided with the distribution.
+
+* Neither the name of Google LLC nor the names of its contributors may
+be used to endorse or promote products derived from this software
+without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/reftable/basics.c b/reftable/basics.c
new file mode 100644
index 0000000..fea711d
--- /dev/null
+++ b/reftable/basics.c
@@ -0,0 +1,126 @@
+/*
+Copyright 2020 Google LLC
+
+Use of this source code is governed by a BSD-style
+license that can be found in the LICENSE file or at
+https://developers.google.com/open-source/licenses/bsd
+*/
+
+#include "basics.h"
+
+void put_be24(uint8_t *out, uint32_t i)
+{
+ out[0] = (uint8_t)((i >> 16) & 0xff);
+ out[1] = (uint8_t)((i >> 8) & 0xff);
+ out[2] = (uint8_t)(i & 0xff);
+}
+
+uint32_t get_be24(uint8_t *in)
+{
+ return (uint32_t)(in[0]) << 16 | (uint32_t)(in[1]) << 8 |
+ (uint32_t)(in[2]);
+}
+
+void put_be16(uint8_t *out, uint16_t i)
+{
+ out[0] = (uint8_t)((i >> 8) & 0xff);
+ out[1] = (uint8_t)(i & 0xff);
+}
+
+size_t binsearch(size_t sz, int (*f)(size_t k, void *args), void *args)
+{
+ size_t lo = 0;
+ size_t hi = sz;
+
+ /* Invariants:
+ *
+ * (hi == sz) || f(hi) == true
+	 * (lo == 0 && f(0) == true) || f(lo) == false
+ */
+ while (hi - lo > 1) {
+ size_t mid = lo + (hi - lo) / 2;
+ int ret = f(mid, args);
+ if (ret < 0)
+ return sz;
+
+ if (ret > 0)
+ hi = mid;
+ else
+ lo = mid;
+ }
+
+ if (lo)
+ return hi;
+
+ return f(0, args) ? 0 : 1;
+}
+
+void free_names(char **a)
+{
+ char **p;
+ if (!a) {
+ return;
+ }
+ for (p = a; *p; p++) {
+ reftable_free(*p);
+ }
+ reftable_free(a);
+}
+
+size_t names_length(char **names)
+{
+ char **p = names;
+ while (*p)
+ p++;
+ return p - names;
+}
+
+void parse_names(char *buf, int size, char ***namesp)
+{
+ char **names = NULL;
+ size_t names_cap = 0;
+ size_t names_len = 0;
+
+ char *p = buf;
+ char *end = buf + size;
+ while (p < end) {
+ char *next = strchr(p, '\n');
+ if (next && next < end) {
+ *next = 0;
+ } else {
+ next = end;
+ }
+ if (p < next) {
+ REFTABLE_ALLOC_GROW(names, names_len + 1, names_cap);
+ names[names_len++] = xstrdup(p);
+ }
+ p = next + 1;
+ }
+
+ REFTABLE_REALLOC_ARRAY(names, names_len + 1);
+ names[names_len] = NULL;
+ *namesp = names;
+}
+
+int names_equal(char **a, char **b)
+{
+ int i = 0;
+ for (; a[i] && b[i]; i++) {
+ if (strcmp(a[i], b[i])) {
+ return 0;
+ }
+ }
+
+ return a[i] == b[i];
+}
+
+int common_prefix_size(struct strbuf *a, struct strbuf *b)
+{
+ int p = 0;
+ for (; p < a->len && p < b->len; p++) {
+ if (a->buf[p] != b->buf[p])
+ break;
+ }
+
+ return p;
+}
diff --git a/reftable/basics.h b/reftable/basics.h
new file mode 100644
index 0000000..523ecd5
--- /dev/null
+++ b/reftable/basics.h
@@ -0,0 +1,74 @@
+/*
+Copyright 2020 Google LLC
+
+Use of this source code is governed by a BSD-style
+license that can be found in the LICENSE file or at
+https://developers.google.com/open-source/licenses/bsd
+*/
+
+#ifndef BASICS_H
+#define BASICS_H
+
+/*
+ * miscellaneous utilities that are not provided by Git.
+ */
+
+#include "system.h"
+
+/* Bigendian en/decoding of integers */
+
+void put_be24(uint8_t *out, uint32_t i);
+uint32_t get_be24(uint8_t *in);
+void put_be16(uint8_t *out, uint16_t i);
+
+/*
+ * find smallest index i in [0, sz) at which `f(i) > 0`, assuming that f is
+ * ascending. Return sz if `f(i) == 0` for all indices. The search is aborted
+ * and `sz` is returned in case `f(i) < 0`.
+ *
+ * Contrary to bsearch(3), this returns something useful if the argument is not
+ * found.
+ */
+size_t binsearch(size_t sz, int (*f)(size_t k, void *args), void *args);
+
+/*
+ * Frees a NULL terminated array of malloced strings. The array itself is also
+ * freed.
+ */
+void free_names(char **a);
+
+/* parse a newline separated list of names. `size` is the length of the buffer,
+ * without terminating '\0'. Empty names are discarded. */
+void parse_names(char *buf, int size, char ***namesp);
+
+/* compares two NULL-terminated arrays of strings. */
+int names_equal(char **a, char **b);
+
+/* returns the array size of a NULL-terminated array of strings. */
+size_t names_length(char **names);
+
+/* Allocation routines; they invoke the functions set through
+ * reftable_set_alloc() */
+void *reftable_malloc(size_t sz);
+void *reftable_realloc(void *p, size_t sz);
+void reftable_free(void *p);
+void *reftable_calloc(size_t nelem, size_t elsize);
+
+#define REFTABLE_ALLOC_ARRAY(x, alloc) (x) = reftable_malloc(st_mult(sizeof(*(x)), (alloc)))
+#define REFTABLE_CALLOC_ARRAY(x, alloc) (x) = reftable_calloc((alloc), sizeof(*(x)))
+#define REFTABLE_REALLOC_ARRAY(x, alloc) (x) = reftable_realloc((x), st_mult(sizeof(*(x)), (alloc)))
+#define REFTABLE_ALLOC_GROW(x, nr, alloc) \
+ do { \
+ if ((nr) > alloc) { \
+ alloc = 2 * (alloc) + 1; \
+ if (alloc < (nr)) \
+ alloc = (nr); \
+ REFTABLE_REALLOC_ARRAY(x, alloc); \
+ } \
+ } while (0)
+
+/* Find the longest shared prefix size of `a` and `b` */
+struct strbuf;
+int common_prefix_size(struct strbuf *a, struct strbuf *b);
+
+#endif
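
The helpers declared above are used throughout the library for name lists and growable arrays. A minimal usage sketch follows; the example function and its sample input are hypothetical and not part of the patch:

#include <stdio.h>
#include <string.h>
#include "basics.h"

/* Hypothetical example: split a newline-separated buffer into names,
 * print them, and free the result. */
static void print_names_example(void)
{
	char buf[] = "refs/heads/main\nrefs/tags/v1.0\n";
	char **names = NULL;
	size_t i, n;

	/* parse_names() modifies `buf` in place and drops empty lines. */
	parse_names(buf, strlen(buf), &names);
	n = names_length(names);
	for (i = 0; i < n; i++)
		printf("%s\n", names[i]);
	free_names(names);
}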
diff --git a/reftable/basics_test.c b/reftable/basics_test.c
new file mode 100644
index 0000000..997c4d9
--- /dev/null
+++ b/reftable/basics_test.c
@@ -0,0 +1,105 @@
+/*
+Copyright 2020 Google LLC
+
+Use of this source code is governed by a BSD-style
+license that can be found in the LICENSE file or at
+https://developers.google.com/open-source/licenses/bsd
+*/
+
+#include "system.h"
+
+#include "basics.h"
+#include "test_framework.h"
+#include "reftable-tests.h"
+
+struct integer_needle_lesseq_args {
+ int needle;
+ int *haystack;
+};
+
+static int integer_needle_lesseq(size_t i, void *_args)
+{
+ struct integer_needle_lesseq_args *args = _args;
+ return args->needle <= args->haystack[i];
+}
+
+static void test_binsearch(void)
+{
+ int haystack[] = { 2, 4, 6, 8, 10 };
+ struct {
+ int needle;
+ size_t expected_idx;
+ } testcases[] = {
+ {-9000, 0},
+ {-1, 0},
+ {0, 0},
+ {2, 0},
+ {3, 1},
+ {4, 1},
+ {7, 3},
+ {9, 4},
+ {10, 4},
+ {11, 5},
+ {9000, 5},
+ };
+ size_t i = 0;
+
+ for (i = 0; i < ARRAY_SIZE(testcases); i++) {
+ struct integer_needle_lesseq_args args = {
+ .haystack = haystack,
+ .needle = testcases[i].needle,
+ };
+ size_t idx;
+
+ idx = binsearch(ARRAY_SIZE(haystack), &integer_needle_lesseq, &args);
+ EXPECT(idx == testcases[i].expected_idx);
+ }
+}
+
+static void test_names_length(void)
+{
+ char *a[] = { "a", "b", NULL };
+ EXPECT(names_length(a) == 2);
+}
+
+static void test_parse_names_normal(void)
+{
+ char in[] = "a\nb\n";
+ char **out = NULL;
+ parse_names(in, strlen(in), &out);
+ EXPECT(!strcmp(out[0], "a"));
+ EXPECT(!strcmp(out[1], "b"));
+ EXPECT(!out[2]);
+ free_names(out);
+}
+
+static void test_parse_names_drop_empty(void)
+{
+ char in[] = "a\n\n";
+ char **out = NULL;
+ parse_names(in, strlen(in), &out);
+ EXPECT(!strcmp(out[0], "a"));
+ EXPECT(!out[1]);
+ free_names(out);
+}
+
+static void test_common_prefix(void)
+{
+ struct strbuf s1 = STRBUF_INIT;
+ struct strbuf s2 = STRBUF_INIT;
+ strbuf_addstr(&s1, "abcdef");
+ strbuf_addstr(&s2, "abc");
+ EXPECT(common_prefix_size(&s1, &s2) == 3);
+ strbuf_release(&s1);
+ strbuf_release(&s2);
+}
+
+int basics_test_main(int argc, const char *argv[])
+{
+ RUN_TEST(test_common_prefix);
+ RUN_TEST(test_parse_names_normal);
+ RUN_TEST(test_parse_names_drop_empty);
+ RUN_TEST(test_binsearch);
+ RUN_TEST(test_names_length);
+ return 0;
+}
diff --git a/reftable/block.c b/reftable/block.c
new file mode 100644
index 0000000..3e87460
--- /dev/null
+++ b/reftable/block.c
@@ -0,0 +1,531 @@
+/*
+Copyright 2020 Google LLC
+
+Use of this source code is governed by a BSD-style
+license that can be found in the LICENSE file or at
+https://developers.google.com/open-source/licenses/bsd
+*/
+
+#include "block.h"
+
+#include "blocksource.h"
+#include "constants.h"
+#include "record.h"
+#include "reftable-error.h"
+#include "system.h"
+#include <zlib.h>
+
+int header_size(int version)
+{
+ switch (version) {
+ case 1:
+ return 24;
+ case 2:
+ return 28;
+ }
+ abort();
+}
+
+int footer_size(int version)
+{
+ switch (version) {
+ case 1:
+ return 68;
+ case 2:
+ return 72;
+ }
+ abort();
+}
+
+static int block_writer_register_restart(struct block_writer *w, int n,
+ int is_restart, struct strbuf *key)
+{
+ int rlen = w->restart_len;
+ if (rlen >= MAX_RESTARTS) {
+ is_restart = 0;
+ }
+
+ if (is_restart) {
+ rlen++;
+ }
+ if (2 + 3 * rlen + n > w->block_size - w->next)
+ return -1;
+ if (is_restart) {
+ REFTABLE_ALLOC_GROW(w->restarts, w->restart_len + 1, w->restart_cap);
+ w->restarts[w->restart_len++] = w->next;
+ }
+
+ w->next += n;
+
+ strbuf_reset(&w->last_key);
+ strbuf_addbuf(&w->last_key, key);
+ w->entries++;
+ return 0;
+}
+
+void block_writer_init(struct block_writer *bw, uint8_t typ, uint8_t *buf,
+ uint32_t block_size, uint32_t header_off, int hash_size)
+{
+ bw->buf = buf;
+ bw->hash_size = hash_size;
+ bw->block_size = block_size;
+ bw->header_off = header_off;
+ bw->buf[header_off] = typ;
+ bw->next = header_off + 4;
+ bw->restart_interval = 16;
+ bw->entries = 0;
+ bw->restart_len = 0;
+ bw->last_key.len = 0;
+}
+
+uint8_t block_writer_type(struct block_writer *bw)
+{
+ return bw->buf[bw->header_off];
+}
+
+/* Adds the reftable_record to the block. Returns -1 if it does not fit, 0 on
+ success. Returns REFTABLE_API_ERROR if attempting to write a record with
+   an empty key. */
+int block_writer_add(struct block_writer *w, struct reftable_record *rec)
+{
+ struct strbuf empty = STRBUF_INIT;
+ struct strbuf last =
+ w->entries % w->restart_interval == 0 ? empty : w->last_key;
+ struct string_view out = {
+ .buf = w->buf + w->next,
+ .len = w->block_size - w->next,
+ };
+
+ struct string_view start = out;
+
+ int is_restart = 0;
+ struct strbuf key = STRBUF_INIT;
+ int n = 0;
+ int err = -1;
+
+ reftable_record_key(rec, &key);
+ if (!key.len) {
+ err = REFTABLE_API_ERROR;
+ goto done;
+ }
+
+ n = reftable_encode_key(&is_restart, out, last, key,
+ reftable_record_val_type(rec));
+ if (n < 0)
+ goto done;
+ string_view_consume(&out, n);
+
+ n = reftable_record_encode(rec, out, w->hash_size);
+ if (n < 0)
+ goto done;
+ string_view_consume(&out, n);
+
+ err = block_writer_register_restart(w, start.len - out.len, is_restart,
+ &key);
+done:
+ strbuf_release(&key);
+ return err;
+}
+
+int block_writer_finish(struct block_writer *w)
+{
+ int i;
+ for (i = 0; i < w->restart_len; i++) {
+ put_be24(w->buf + w->next, w->restarts[i]);
+ w->next += 3;
+ }
+
+ put_be16(w->buf + w->next, w->restart_len);
+ w->next += 2;
+ put_be24(w->buf + 1 + w->header_off, w->next);
+
+ if (block_writer_type(w) == BLOCK_TYPE_LOG) {
+ int block_header_skip = 4 + w->header_off;
+ uLongf src_len = w->next - block_header_skip;
+ uLongf dest_cap = src_len * 1.001 + 12;
+ uint8_t *compressed;
+
+ REFTABLE_ALLOC_ARRAY(compressed, dest_cap);
+
+ while (1) {
+ uLongf out_dest_len = dest_cap;
+ int zresult = compress2(compressed, &out_dest_len,
+ w->buf + block_header_skip,
+ src_len, 9);
+ if (zresult == Z_BUF_ERROR && dest_cap < LONG_MAX) {
+ dest_cap *= 2;
+ compressed =
+ reftable_realloc(compressed, dest_cap);
+ if (compressed)
+ continue;
+ }
+
+ if (Z_OK != zresult) {
+ reftable_free(compressed);
+ return REFTABLE_ZLIB_ERROR;
+ }
+
+ memcpy(w->buf + block_header_skip, compressed,
+ out_dest_len);
+ w->next = out_dest_len + block_header_skip;
+ reftable_free(compressed);
+ break;
+ }
+ }
+ return w->next;
+}
+
+int block_reader_init(struct block_reader *br, struct reftable_block *block,
+ uint32_t header_off, uint32_t table_block_size,
+ int hash_size)
+{
+ uint32_t full_block_size = table_block_size;
+ uint8_t typ = block->data[header_off];
+ uint32_t sz = get_be24(block->data + header_off + 1);
+ int err = 0;
+ uint16_t restart_count = 0;
+ uint32_t restart_start = 0;
+ uint8_t *restart_bytes = NULL;
+
+ reftable_block_done(&br->block);
+
+ if (!reftable_is_block_type(typ)) {
+ err = REFTABLE_FORMAT_ERROR;
+ goto done;
+ }
+
+ if (typ == BLOCK_TYPE_LOG) {
+ uint32_t block_header_skip = 4 + header_off;
+ uLong dst_len = sz - block_header_skip;
+ uLong src_len = block->len - block_header_skip;
+
+ /* Log blocks specify the *uncompressed* size in their header. */
+ REFTABLE_ALLOC_GROW(br->uncompressed_data, sz,
+ br->uncompressed_cap);
+
+ /* Copy over the block header verbatim. It's not compressed. */
+ memcpy(br->uncompressed_data, block->data, block_header_skip);
+
+ if (!br->zstream) {
+ REFTABLE_CALLOC_ARRAY(br->zstream, 1);
+ err = inflateInit(br->zstream);
+ } else {
+ err = inflateReset(br->zstream);
+ }
+ if (err != Z_OK) {
+ err = REFTABLE_ZLIB_ERROR;
+ goto done;
+ }
+
+ br->zstream->next_in = block->data + block_header_skip;
+ br->zstream->avail_in = src_len;
+ br->zstream->next_out = br->uncompressed_data + block_header_skip;
+ br->zstream->avail_out = dst_len;
+
+ /*
+ * We know both input as well as output size, and we know that
+ * the sizes should never be bigger than `uInt_MAX` because
+ * blocks can at most be 16MB large. We can thus use `Z_FINISH`
+ * here to instruct zlib to inflate the data in one go, which
+ * is more efficient than using `Z_NO_FLUSH`.
+ */
+ err = inflate(br->zstream, Z_FINISH);
+ if (err != Z_STREAM_END) {
+ err = REFTABLE_ZLIB_ERROR;
+ goto done;
+ }
+ err = 0;
+
+ if (br->zstream->total_out + block_header_skip != sz) {
+ err = REFTABLE_FORMAT_ERROR;
+ goto done;
+ }
+
+ /* We're done with the input data. */
+ reftable_block_done(block);
+ block->data = br->uncompressed_data;
+ block->len = sz;
+ full_block_size = src_len + block_header_skip - br->zstream->avail_in;
+ } else if (full_block_size == 0) {
+ full_block_size = sz;
+ } else if (sz < full_block_size && sz < block->len &&
+ block->data[sz] != 0) {
+ /* If the block is smaller than the full block size, it is
+ padded (data followed by '\0') or the next block is
+ unaligned. */
+ full_block_size = sz;
+ }
+
+ restart_count = get_be16(block->data + sz - 2);
+ restart_start = sz - 2 - 3 * restart_count;
+ restart_bytes = block->data + restart_start;
+
+ /* transfer ownership. */
+ br->block = *block;
+ block->data = NULL;
+ block->len = 0;
+
+ br->hash_size = hash_size;
+ br->block_len = restart_start;
+ br->full_block_size = full_block_size;
+ br->header_off = header_off;
+ br->restart_count = restart_count;
+ br->restart_bytes = restart_bytes;
+
+done:
+ return err;
+}
+
+void block_reader_release(struct block_reader *br)
+{
+ inflateEnd(br->zstream);
+ reftable_free(br->zstream);
+ reftable_free(br->uncompressed_data);
+ reftable_block_done(&br->block);
+}
+
+uint8_t block_reader_type(const struct block_reader *r)
+{
+ return r->block.data[r->header_off];
+}
+
+int block_reader_first_key(const struct block_reader *br, struct strbuf *key)
+{
+ int off = br->header_off + 4, n;
+ struct string_view in = {
+ .buf = br->block.data + off,
+ .len = br->block_len - off,
+ };
+ uint8_t extra = 0;
+
+ strbuf_reset(key);
+
+ n = reftable_decode_key(key, &extra, in);
+ if (n < 0)
+ return n;
+ if (!key->len)
+ return REFTABLE_FORMAT_ERROR;
+
+ return 0;
+}
+
+static uint32_t block_reader_restart_offset(const struct block_reader *br, int i)
+{
+ return get_be24(br->restart_bytes + 3 * i);
+}
+
+void block_iter_seek_start(struct block_iter *it, const struct block_reader *br)
+{
+ it->block = br->block.data;
+ it->block_len = br->block_len;
+ it->hash_size = br->hash_size;
+ strbuf_reset(&it->last_key);
+ it->next_off = br->header_off + 4;
+}
+
+struct restart_needle_less_args {
+ int error;
+ struct strbuf needle;
+ const struct block_reader *reader;
+};
+
+static int restart_needle_less(size_t idx, void *_args)
+{
+ struct restart_needle_less_args *args = _args;
+ uint32_t off = block_reader_restart_offset(args->reader, idx);
+ struct string_view in = {
+ .buf = args->reader->block.data + off,
+ .len = args->reader->block_len - off,
+ };
+ uint64_t prefix_len, suffix_len;
+ uint8_t extra;
+ int n;
+
+ /*
+ * Records at restart points are stored without prefix compression, so
+ * there is no need to fully decode the record key here. This removes
+ * the need for allocating memory.
+ */
+ n = reftable_decode_keylen(in, &prefix_len, &suffix_len, &extra);
+ if (n < 0 || prefix_len) {
+ args->error = 1;
+ return -1;
+ }
+
+ string_view_consume(&in, n);
+ if (suffix_len > in.len) {
+ args->error = 1;
+ return -1;
+ }
+
+ n = memcmp(args->needle.buf, in.buf,
+ args->needle.len < suffix_len ? args->needle.len : suffix_len);
+ if (n)
+ return n < 0;
+ return args->needle.len < suffix_len;
+}
+
+int block_iter_next(struct block_iter *it, struct reftable_record *rec)
+{
+ struct string_view in = {
+ .buf = (unsigned char *) it->block + it->next_off,
+ .len = it->block_len - it->next_off,
+ };
+ struct string_view start = in;
+ uint8_t extra = 0;
+ int n = 0;
+
+ if (it->next_off >= it->block_len)
+ return 1;
+
+ n = reftable_decode_key(&it->last_key, &extra, in);
+ if (n < 0)
+ return -1;
+ if (!it->last_key.len)
+ return REFTABLE_FORMAT_ERROR;
+
+ string_view_consume(&in, n);
+ n = reftable_record_decode(rec, it->last_key, extra, in, it->hash_size,
+ &it->scratch);
+ if (n < 0)
+ return -1;
+ string_view_consume(&in, n);
+
+ it->next_off += start.len - in.len;
+ return 0;
+}
+
+void block_iter_reset(struct block_iter *it)
+{
+ strbuf_reset(&it->last_key);
+ it->next_off = 0;
+ it->block = NULL;
+ it->block_len = 0;
+ it->hash_size = 0;
+}
+
+void block_iter_close(struct block_iter *it)
+{
+ strbuf_release(&it->last_key);
+ strbuf_release(&it->scratch);
+}
+
+int block_iter_seek_key(struct block_iter *it, const struct block_reader *br,
+ struct strbuf *want)
+{
+ struct restart_needle_less_args args = {
+ .needle = *want,
+ .reader = br,
+ };
+ struct reftable_record rec;
+ int err = 0;
+ size_t i;
+
+ /*
+ * Perform a binary search over the block's restart points, which
+ * avoids doing a linear scan over the whole block. Like this, we
+ * identify the section of the block that should contain our key.
+ *
+ * Note that we explicitly search for the first restart point _greater_
+ * than the sought-after record, not _greater or equal_ to it. In case
+ * the sought-after record is located directly at the restart point we
+ * would otherwise start doing the linear search at the preceding
+ * restart point. While that works alright, we would end up scanning
+	 * too many records.
+ */
+ i = binsearch(br->restart_count, &restart_needle_less, &args);
+ if (args.error) {
+ err = REFTABLE_FORMAT_ERROR;
+ goto done;
+ }
+
+ /*
+ * Now there are multiple cases:
+ *
+ * - `i == 0`: The wanted record is smaller than the record found at
+ * the first restart point. As the first restart point is the first
+ * record in the block, our wanted record cannot be located in this
+ * block at all. We still need to position the iterator so that the
+ * next call to `block_iter_next()` will yield an end-of-iterator
+ * signal.
+ *
+ * - `i == restart_count`: The wanted record was not found at any of
+ * the restart points. As there is no restart point at the end of
+ * the section the record may thus be contained in the last block.
+ *
+ * - `i > 0`: The wanted record must be contained in the section
+ * before the found restart point. We thus do a linear search
+ * starting from the preceding restart point.
+ */
+ if (i > 0)
+ it->next_off = block_reader_restart_offset(br, i - 1);
+ else
+ it->next_off = br->header_off + 4;
+ it->block = br->block.data;
+ it->block_len = br->block_len;
+ it->hash_size = br->hash_size;
+
+ reftable_record_init(&rec, block_reader_type(br));
+
+ /*
+ * We're looking for the last entry less than the wanted key so that
+	 * the next call to `block_iter_next()` would yield the wanted
+ * record. We thus don't want to position our reader at the sought
+ * after record, but one before. To do so, we have to go one entry too
+ * far and then back up.
+ */
+ while (1) {
+ size_t prev_off = it->next_off;
+
+ err = block_iter_next(it, &rec);
+ if (err < 0)
+ goto done;
+ if (err > 0) {
+ it->next_off = prev_off;
+ err = 0;
+ goto done;
+ }
+
+ /*
+ * Check whether the current key is greater or equal to the
+ * sought-after key. In case it is greater we know that the
+ * record does not exist in the block and can thus abort early.
+ * In case it is equal to the sought-after key we have found
+ * the desired record.
+ *
+	 * Note that we store the next record's key directly in
+ * `last_key` without restoring the key of the preceding record
+ * in case we need to go one record back. This is safe to do as
+ * `block_iter_next()` would return the ref whose key is equal
+ * to `last_key` now, and naturally all keys share a prefix
+ * with themselves.
+ */
+ reftable_record_key(&rec, &it->last_key);
+ if (strbuf_cmp(&it->last_key, want) >= 0) {
+ it->next_off = prev_off;
+ goto done;
+ }
+ }
+
+done:
+ reftable_record_release(&rec);
+ return err;
+}
+
+void block_writer_release(struct block_writer *bw)
+{
+ FREE_AND_NULL(bw->restarts);
+ strbuf_release(&bw->last_key);
+ /* the block is not owned. */
+}
+
+void reftable_block_done(struct reftable_block *blockp)
+{
+ struct reftable_block_source source = blockp->source;
+ if (blockp && source.ops)
+ source.ops->return_block(source.arg, blockp);
+ blockp->data = NULL;
+ blockp->len = 0;
+ blockp->source.ops = NULL;
+ blockp->source.arg = NULL;
+}
diff --git a/reftable/block.h b/reftable/block.h
new file mode 100644
index 0000000..ea4384a
--- /dev/null
+++ b/reftable/block.h
@@ -0,0 +1,142 @@
+/*
+Copyright 2020 Google LLC
+
+Use of this source code is governed by a BSD-style
+license that can be found in the LICENSE file or at
+https://developers.google.com/open-source/licenses/bsd
+*/
+
+#ifndef BLOCK_H
+#define BLOCK_H
+
+#include "basics.h"
+#include "record.h"
+#include "reftable-blocksource.h"
+
+/*
+ * Writes reftable blocks. The block_writer is reused across blocks to minimize
+ * allocation overhead.
+ */
+struct block_writer {
+ uint8_t *buf;
+ uint32_t block_size;
+
+ /* Offset of the global header. Nonzero in the first block only. */
+ uint32_t header_off;
+
+ /* How often to restart keys. */
+ int restart_interval;
+ int hash_size;
+
+ /* Offset of next uint8_t to write. */
+ uint32_t next;
+ uint32_t *restarts;
+ uint32_t restart_len;
+ uint32_t restart_cap;
+
+ struct strbuf last_key;
+ int entries;
+};
+
+/*
+ * initializes the blockwriter to write `typ` entries, using `buf` as temporary
+ * storage. `buf` is not owned by the block_writer. */
+void block_writer_init(struct block_writer *bw, uint8_t typ, uint8_t *buf,
+ uint32_t block_size, uint32_t header_off, int hash_size);
+
+/* returns the block type (eg. 'r' for ref records). */
+uint8_t block_writer_type(struct block_writer *bw);
+
+/* appends the record; returns -1 if it doesn't fit. */
+int block_writer_add(struct block_writer *w, struct reftable_record *rec);
+
+/* appends the key restarts, and compresses the block if necessary. */
+int block_writer_finish(struct block_writer *w);
+
+/* clears out internally allocated block_writer members. */
+void block_writer_release(struct block_writer *bw);
+
+struct z_stream;
+
+/* Read a block. */
+struct block_reader {
+ /* offset of the block header; nonzero for the first block in a
+ * reftable. */
+ uint32_t header_off;
+
+ /* the memory block */
+ struct reftable_block block;
+ int hash_size;
+
+ /* Uncompressed data for log entries. */
+ z_stream *zstream;
+ unsigned char *uncompressed_data;
+ size_t uncompressed_cap;
+
+ /* size of the data, excluding restart data. */
+ uint32_t block_len;
+ uint8_t *restart_bytes;
+ uint16_t restart_count;
+
+ /* size of the data in the file. For log blocks, this is the compressed
+ * size. */
+ uint32_t full_block_size;
+};
+
+/* initializes a block reader. */
+int block_reader_init(struct block_reader *br, struct reftable_block *bl,
+ uint32_t header_off, uint32_t table_block_size,
+ int hash_size);
+
+void block_reader_release(struct block_reader *br);
+
+/* Returns the block type (eg. 'r' for refs) */
+uint8_t block_reader_type(const struct block_reader *r);
+
+/* Decodes the first key in the block */
+int block_reader_first_key(const struct block_reader *br, struct strbuf *key);
+
+/* Iterate over entries in a block */
+struct block_iter {
+ /* offset within the block of the next entry to read. */
+ uint32_t next_off;
+ const unsigned char *block;
+ size_t block_len;
+ int hash_size;
+
+ /* key for last entry we read. */
+ struct strbuf last_key;
+ struct strbuf scratch;
+};
+
+#define BLOCK_ITER_INIT { \
+ .last_key = STRBUF_INIT, \
+ .scratch = STRBUF_INIT, \
+}
+
+/* Position `it` at start of the block */
+void block_iter_seek_start(struct block_iter *it, const struct block_reader *br);
+
+/* Position `it` to the `want` key in the block */
+int block_iter_seek_key(struct block_iter *it, const struct block_reader *br,
+ struct strbuf *want);
+
+/* return < 0 for error, 0 for OK, > 0 for EOF. */
+int block_iter_next(struct block_iter *it, struct reftable_record *rec);
+
+/* Reset the block iterator to pristine state without releasing its memory. */
+void block_iter_reset(struct block_iter *it);
+
+/* deallocate memory for `it`. The block reader and its block is left intact. */
+void block_iter_close(struct block_iter *it);
+
+/* size of file header, depending on format version */
+int header_size(int version);
+
+/* size of file footer, depending on format version */
+int footer_size(int version);
+
+/* returns a block to its source. */
+void reftable_block_done(struct reftable_block *ret);
+
+#endif
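
The write side of this API is exercised by block_test.c below; as a condensed sketch of the expected call sequence, assuming the caller supplies the buffer, the records and a SHA-1 hash size of 20 (all assumptions of the example):

#include "block.h"
#include "constants.h"
#include "record.h"

/* Sketch: fill one ref block with as many records as fit and finish it.
 * Returns the number of bytes used in `buf`, or < 0 on error. */
static int write_one_block(uint8_t *buf, uint32_t block_size,
			   struct reftable_record *recs, size_t n)
{
	struct block_writer bw = { .last_key = STRBUF_INIT };
	size_t i;
	int bytes;

	/* header_off is nonzero only for the first block of a file;
	 * 20 is the SHA-1 hash size. */
	block_writer_init(&bw, BLOCK_TYPE_REF, buf, block_size, 0, 20);

	for (i = 0; i < n; i++)
		if (block_writer_add(&bw, &recs[i]) < 0)
			break; /* block is full; remaining records go to the next block */

	/* Appends the restart offsets; log blocks also get deflated here. */
	bytes = block_writer_finish(&bw);
	block_writer_release(&bw);
	return bytes;
}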
diff --git a/reftable/block_test.c b/reftable/block_test.c
new file mode 100644
index 0000000..26a9cfb
--- /dev/null
+++ b/reftable/block_test.c
@@ -0,0 +1,123 @@
+/*
+Copyright 2020 Google LLC
+
+Use of this source code is governed by a BSD-style
+license that can be found in the LICENSE file or at
+https://developers.google.com/open-source/licenses/bsd
+*/
+
+#include "block.h"
+
+#include "system.h"
+#include "blocksource.h"
+#include "basics.h"
+#include "constants.h"
+#include "record.h"
+#include "test_framework.h"
+#include "reftable-tests.h"
+
+static void test_block_read_write(void)
+{
+ const int header_off = 21; /* random */
+ char *names[30];
+ const int N = ARRAY_SIZE(names);
+ const int block_size = 1024;
+ struct reftable_block block = { NULL };
+ struct block_writer bw = {
+ .last_key = STRBUF_INIT,
+ };
+ struct reftable_record rec = {
+ .type = BLOCK_TYPE_REF,
+ };
+ int i = 0;
+ int n;
+ struct block_reader br = { 0 };
+ struct block_iter it = BLOCK_ITER_INIT;
+ int j = 0;
+ struct strbuf want = STRBUF_INIT;
+
+ REFTABLE_CALLOC_ARRAY(block.data, block_size);
+ block.len = block_size;
+ block.source = malloc_block_source();
+ block_writer_init(&bw, BLOCK_TYPE_REF, block.data, block_size,
+ header_off, hash_size(GIT_SHA1_FORMAT_ID));
+
+ rec.u.ref.refname = "";
+ rec.u.ref.value_type = REFTABLE_REF_DELETION;
+ n = block_writer_add(&bw, &rec);
+ EXPECT(n == REFTABLE_API_ERROR);
+
+ for (i = 0; i < N; i++) {
+ char name[100];
+ snprintf(name, sizeof(name), "branch%02d", i);
+
+ rec.u.ref.refname = name;
+ rec.u.ref.value_type = REFTABLE_REF_VAL1;
+ memset(rec.u.ref.value.val1, i, GIT_SHA1_RAWSZ);
+
+ names[i] = xstrdup(name);
+ n = block_writer_add(&bw, &rec);
+ rec.u.ref.refname = NULL;
+ rec.u.ref.value_type = REFTABLE_REF_DELETION;
+ EXPECT(n == 0);
+ }
+
+ n = block_writer_finish(&bw);
+ EXPECT(n > 0);
+
+ block_writer_release(&bw);
+
+ block_reader_init(&br, &block, header_off, block_size, GIT_SHA1_RAWSZ);
+
+ block_iter_seek_start(&it, &br);
+
+ while (1) {
+ int r = block_iter_next(&it, &rec);
+ EXPECT(r >= 0);
+ if (r > 0) {
+ break;
+ }
+ EXPECT_STREQ(names[j], rec.u.ref.refname);
+ j++;
+ }
+
+ reftable_record_release(&rec);
+ block_iter_close(&it);
+
+ for (i = 0; i < N; i++) {
+ struct block_iter it = BLOCK_ITER_INIT;
+ strbuf_reset(&want);
+ strbuf_addstr(&want, names[i]);
+
+ n = block_iter_seek_key(&it, &br, &want);
+ EXPECT(n == 0);
+
+ n = block_iter_next(&it, &rec);
+ EXPECT(n == 0);
+
+ EXPECT_STREQ(names[i], rec.u.ref.refname);
+
+ want.len--;
+ n = block_iter_seek_key(&it, &br, &want);
+ EXPECT(n == 0);
+
+ n = block_iter_next(&it, &rec);
+ EXPECT(n == 0);
+ EXPECT_STREQ(names[10 * (i / 10)], rec.u.ref.refname);
+
+ block_iter_close(&it);
+ }
+
+ reftable_record_release(&rec);
+ reftable_block_done(&br.block);
+ strbuf_release(&want);
+ for (i = 0; i < N; i++) {
+ reftable_free(names[i]);
+ }
+}
+
+int block_test_main(int argc, const char *argv[])
+{
+ RUN_TEST(test_block_read_write);
+ return 0;
+}
diff --git a/reftable/blocksource.c b/reftable/blocksource.c
new file mode 100644
index 0000000..eeed254
--- /dev/null
+++ b/reftable/blocksource.c
@@ -0,0 +1,144 @@
+/*
+Copyright 2020 Google LLC
+
+Use of this source code is governed by a BSD-style
+license that can be found in the LICENSE file or at
+https://developers.google.com/open-source/licenses/bsd
+*/
+
+#include "system.h"
+
+#include "basics.h"
+#include "blocksource.h"
+#include "reftable-blocksource.h"
+#include "reftable-error.h"
+
+static void strbuf_return_block(void *b, struct reftable_block *dest)
+{
+ if (dest->len)
+ memset(dest->data, 0xff, dest->len);
+ reftable_free(dest->data);
+}
+
+static void strbuf_close(void *b)
+{
+}
+
+static int strbuf_read_block(void *v, struct reftable_block *dest, uint64_t off,
+ uint32_t size)
+{
+ struct strbuf *b = v;
+ assert(off + size <= b->len);
+ REFTABLE_CALLOC_ARRAY(dest->data, size);
+ memcpy(dest->data, b->buf + off, size);
+ dest->len = size;
+ return size;
+}
+
+static uint64_t strbuf_size(void *b)
+{
+ return ((struct strbuf *)b)->len;
+}
+
+static struct reftable_block_source_vtable strbuf_vtable = {
+ .size = &strbuf_size,
+ .read_block = &strbuf_read_block,
+ .return_block = &strbuf_return_block,
+ .close = &strbuf_close,
+};
+
+void block_source_from_strbuf(struct reftable_block_source *bs,
+ struct strbuf *buf)
+{
+ assert(!bs->ops);
+ bs->ops = &strbuf_vtable;
+ bs->arg = buf;
+}
+
+static void malloc_return_block(void *b, struct reftable_block *dest)
+{
+ if (dest->len)
+ memset(dest->data, 0xff, dest->len);
+ reftable_free(dest->data);
+}
+
+static struct reftable_block_source_vtable malloc_vtable = {
+ .return_block = &malloc_return_block,
+};
+
+static struct reftable_block_source malloc_block_source_instance = {
+ .ops = &malloc_vtable,
+};
+
+struct reftable_block_source malloc_block_source(void)
+{
+ return malloc_block_source_instance;
+}
+
+struct file_block_source {
+ uint64_t size;
+ unsigned char *data;
+};
+
+static uint64_t file_size(void *b)
+{
+ return ((struct file_block_source *)b)->size;
+}
+
+static void file_return_block(void *b, struct reftable_block *dest)
+{
+}
+
+static void file_close(void *v)
+{
+ struct file_block_source *b = v;
+ munmap(b->data, b->size);
+ reftable_free(b);
+}
+
+static int file_read_block(void *v, struct reftable_block *dest, uint64_t off,
+ uint32_t size)
+{
+ struct file_block_source *b = v;
+ assert(off + size <= b->size);
+ dest->data = b->data + off;
+ dest->len = size;
+ return size;
+}
+
+static struct reftable_block_source_vtable file_vtable = {
+ .size = &file_size,
+ .read_block = &file_read_block,
+ .return_block = &file_return_block,
+ .close = &file_close,
+};
+
+int reftable_block_source_from_file(struct reftable_block_source *bs,
+ const char *name)
+{
+ struct file_block_source *p;
+ struct stat st;
+ int fd;
+
+ fd = open(name, O_RDONLY);
+ if (fd < 0) {
+ if (errno == ENOENT)
+ return REFTABLE_NOT_EXIST_ERROR;
+ return -1;
+ }
+
+ if (fstat(fd, &st) < 0) {
+ close(fd);
+ return REFTABLE_IO_ERROR;
+ }
+
+ REFTABLE_CALLOC_ARRAY(p, 1);
+ p->size = st.st_size;
+ p->data = xmmap(NULL, st.st_size, PROT_READ, MAP_PRIVATE, fd, 0);
+ close(fd);
+
+ assert(!bs->ops);
+ bs->ops = &file_vtable;
+ bs->arg = p;
+ return 0;
+}
diff --git a/reftable/blocksource.h b/reftable/blocksource.h
new file mode 100644
index 0000000..072e272
--- /dev/null
+++ b/reftable/blocksource.h
@@ -0,0 +1,22 @@
+/*
+Copyright 2020 Google LLC
+
+Use of this source code is governed by a BSD-style
+license that can be found in the LICENSE file or at
+https://developers.google.com/open-source/licenses/bsd
+*/
+
+#ifndef BLOCKSOURCE_H
+#define BLOCKSOURCE_H
+
+#include "system.h"
+
+struct reftable_block_source;
+
+/* Create an in-memory block source for reading reftables */
+void block_source_from_strbuf(struct reftable_block_source *bs,
+ struct strbuf *buf);
+
+struct reftable_block_source malloc_block_source(void);
+
+#endif
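
Block sources abstract where the table bytes come from. A short sketch of how a caller might pick between the file-backed and the in-memory variant; the helper name is hypothetical:

#include "blocksource.h"
#include "reftable-blocksource.h"
#include "reftable-error.h"

/* Sketch: open `path` as a block source, or fall back to an in-memory
 * buffer when no path is given. Returns 0 on success, < 0 on error. */
static int open_source(struct reftable_block_source *src,
		       const char *path, struct strbuf *mem)
{
	if (path)
		/* Maps the file; returns REFTABLE_NOT_EXIST_ERROR if missing. */
		return reftable_block_source_from_file(src, path);

	/* Borrows `mem`; the buffer must outlive the block source. */
	block_source_from_strbuf(src, mem);
	return 0;
}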
diff --git a/reftable/constants.h b/reftable/constants.h
new file mode 100644
index 0000000..5eee72c
--- /dev/null
+++ b/reftable/constants.h
@@ -0,0 +1,21 @@
+/*
+Copyright 2020 Google LLC
+
+Use of this source code is governed by a BSD-style
+license that can be found in the LICENSE file or at
+https://developers.google.com/open-source/licenses/bsd
+*/
+
+#ifndef CONSTANTS_H
+#define CONSTANTS_H
+
+#define BLOCK_TYPE_LOG 'g'
+#define BLOCK_TYPE_INDEX 'i'
+#define BLOCK_TYPE_REF 'r'
+#define BLOCK_TYPE_OBJ 'o'
+#define BLOCK_TYPE_ANY 0
+
+#define MAX_RESTARTS ((1 << 16) - 1)
+#define DEFAULT_BLOCK_SIZE 4096
+
+#endif
diff --git a/reftable/dump.c b/reftable/dump.c
new file mode 100644
index 0000000..26e0393
--- /dev/null
+++ b/reftable/dump.c
@@ -0,0 +1,105 @@
+/*
+Copyright 2020 Google LLC
+
+Use of this source code is governed by a BSD-style
+license that can be found in the LICENSE file or at
+https://developers.google.com/open-source/licenses/bsd
+*/
+
+#include "git-compat-util.h"
+#include "hash-ll.h"
+
+#include "reftable-blocksource.h"
+#include "reftable-error.h"
+#include "reftable-record.h"
+#include "reftable-tests.h"
+#include "reftable-writer.h"
+#include "reftable-iterator.h"
+#include "reftable-reader.h"
+#include "reftable-stack.h"
+
+#include <stddef.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <unistd.h>
+#include <string.h>
+
+static int compact_stack(const char *stackdir)
+{
+ struct reftable_stack *stack = NULL;
+ struct reftable_write_options cfg = { 0 };
+
+ int err = reftable_new_stack(&stack, stackdir, cfg);
+ if (err < 0)
+ goto done;
+
+ err = reftable_stack_compact_all(stack, NULL);
+ if (err < 0)
+ goto done;
+done:
+ if (stack) {
+ reftable_stack_destroy(stack);
+ }
+ return err;
+}
+
+static void print_help(void)
+{
+ printf("usage: dump [-cst] arg\n\n"
+ "options: \n"
+ " -c compact\n"
+ " -t dump table\n"
+ " -s dump stack\n"
+ " -6 sha256 hash format\n"
+ " -h this help\n"
+ "\n");
+}
+
+int reftable_dump_main(int argc, char *const *argv)
+{
+ int err = 0;
+ int opt_dump_table = 0;
+ int opt_dump_stack = 0;
+ int opt_compact = 0;
+ uint32_t opt_hash_id = GIT_SHA1_FORMAT_ID;
+ const char *arg = NULL, *argv0 = argv[0];
+
+ for (; argc > 1; argv++, argc--)
+ if (*argv[1] != '-')
+ break;
+ else if (!strcmp("-t", argv[1]))
+ opt_dump_table = 1;
+ else if (!strcmp("-6", argv[1]))
+ opt_hash_id = GIT_SHA256_FORMAT_ID;
+ else if (!strcmp("-s", argv[1]))
+ opt_dump_stack = 1;
+ else if (!strcmp("-c", argv[1]))
+ opt_compact = 1;
+ else if (!strcmp("-?", argv[1]) || !strcmp("-h", argv[1])) {
+ print_help();
+ return 2;
+ }
+
+ if (argc != 2) {
+ fprintf(stderr, "need argument\n");
+ print_help();
+ return 2;
+ }
+
+ arg = argv[1];
+
+ if (opt_dump_table) {
+ err = reftable_reader_print_file(arg);
+ } else if (opt_dump_stack) {
+ err = reftable_stack_print_directory(arg, opt_hash_id);
+ } else if (opt_compact) {
+ err = compact_stack(arg);
+ }
+
+ if (err < 0) {
+ fprintf(stderr, "%s: %s: %s\n", argv0, arg,
+ reftable_error_str(err));
+ return 1;
+ }
+ return 0;
+}
diff --git a/reftable/error.c b/reftable/error.c
new file mode 100644
index 0000000..cfb7a0f
--- /dev/null
+++ b/reftable/error.c
@@ -0,0 +1,46 @@
+/*
+Copyright 2020 Google LLC
+
+Use of this source code is governed by a BSD-style
+license that can be found in the LICENSE file or at
+https://developers.google.com/open-source/licenses/bsd
+*/
+
+#include "system.h"
+#include "reftable-error.h"
+
+#include <stdio.h>
+
+const char *reftable_error_str(int err)
+{
+ static char buf[250];
+ switch (err) {
+ case REFTABLE_IO_ERROR:
+ return "I/O error";
+ case REFTABLE_FORMAT_ERROR:
+ return "corrupt reftable file";
+ case REFTABLE_NOT_EXIST_ERROR:
+ return "file does not exist";
+ case REFTABLE_LOCK_ERROR:
+ return "data is locked";
+ case REFTABLE_API_ERROR:
+ return "misuse of the reftable API";
+ case REFTABLE_ZLIB_ERROR:
+ return "zlib failure";
+ case REFTABLE_NAME_CONFLICT:
+ return "file/directory conflict";
+ case REFTABLE_EMPTY_TABLE_ERROR:
+ return "wrote empty table";
+ case REFTABLE_REFNAME_ERROR:
+ return "invalid refname";
+ case REFTABLE_ENTRY_TOO_BIG_ERROR:
+ return "entry too large";
+ case REFTABLE_OUTDATED_ERROR:
+ return "data concurrently modified";
+ case -1:
+ return "general error";
+ default:
+ snprintf(buf, sizeof(buf), "unknown error code %d", err);
+ return buf;
+ }
+}
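
Callers are expected to funnel any negative return code through reftable_error_str() when reporting failures; a minimal sketch with a hypothetical helper name:

#include <stdio.h>
#include "reftable-error.h"

/* Sketch: report a reftable error code and pass it through. */
static int report(int err, const char *what)
{
	if (err < 0)
		fprintf(stderr, "%s: %s\n", what, reftable_error_str(err));
	return err;
}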
diff --git a/reftable/generic.c b/reftable/generic.c
new file mode 100644
index 0000000..b9f1c7c
--- /dev/null
+++ b/reftable/generic.c
@@ -0,0 +1,179 @@
+/*
+Copyright 2020 Google LLC
+
+Use of this source code is governed by a BSD-style
+license that can be found in the LICENSE file or at
+https://developers.google.com/open-source/licenses/bsd
+*/
+
+#include "constants.h"
+#include "record.h"
+#include "generic.h"
+#include "reftable-iterator.h"
+#include "reftable-generic.h"
+
+int reftable_table_seek_ref(struct reftable_table *tab,
+ struct reftable_iterator *it, const char *name)
+{
+ struct reftable_record rec = { .type = BLOCK_TYPE_REF,
+ .u.ref = {
+ .refname = (char *)name,
+ } };
+ return tab->ops->seek_record(tab->table_arg, it, &rec);
+}
+
+int reftable_table_seek_log(struct reftable_table *tab,
+ struct reftable_iterator *it, const char *name)
+{
+ struct reftable_record rec = { .type = BLOCK_TYPE_LOG,
+ .u.log = {
+ .refname = (char *)name,
+ .update_index = ~((uint64_t)0),
+ } };
+ return tab->ops->seek_record(tab->table_arg, it, &rec);
+}
+
+int reftable_table_read_ref(struct reftable_table *tab, const char *name,
+ struct reftable_ref_record *ref)
+{
+ struct reftable_iterator it = { NULL };
+ int err = reftable_table_seek_ref(tab, &it, name);
+ if (err)
+ goto done;
+
+ err = reftable_iterator_next_ref(&it, ref);
+ if (err)
+ goto done;
+
+ if (strcmp(ref->refname, name) ||
+ reftable_ref_record_is_deletion(ref)) {
+ reftable_ref_record_release(ref);
+ err = 1;
+ goto done;
+ }
+
+done:
+ reftable_iterator_destroy(&it);
+ return err;
+}
+
+int reftable_table_print(struct reftable_table *tab) {
+ struct reftable_iterator it = { NULL };
+ struct reftable_ref_record ref = { NULL };
+ struct reftable_log_record log = { NULL };
+ uint32_t hash_id = reftable_table_hash_id(tab);
+ int err = reftable_table_seek_ref(tab, &it, "");
+ if (err < 0) {
+ return err;
+ }
+
+ while (1) {
+ err = reftable_iterator_next_ref(&it, &ref);
+ if (err > 0) {
+ break;
+ }
+ if (err < 0) {
+ return err;
+ }
+ reftable_ref_record_print(&ref, hash_id);
+ }
+ reftable_iterator_destroy(&it);
+ reftable_ref_record_release(&ref);
+
+ err = reftable_table_seek_log(tab, &it, "");
+ if (err < 0) {
+ return err;
+ }
+ while (1) {
+ err = reftable_iterator_next_log(&it, &log);
+ if (err > 0) {
+ break;
+ }
+ if (err < 0) {
+ return err;
+ }
+ reftable_log_record_print(&log, hash_id);
+ }
+ reftable_iterator_destroy(&it);
+ reftable_log_record_release(&log);
+ return 0;
+}
+
+uint64_t reftable_table_max_update_index(struct reftable_table *tab)
+{
+ return tab->ops->max_update_index(tab->table_arg);
+}
+
+uint64_t reftable_table_min_update_index(struct reftable_table *tab)
+{
+ return tab->ops->min_update_index(tab->table_arg);
+}
+
+uint32_t reftable_table_hash_id(struct reftable_table *tab)
+{
+ return tab->ops->hash_id(tab->table_arg);
+}
+
+void reftable_iterator_destroy(struct reftable_iterator *it)
+{
+ if (!it->ops) {
+ return;
+ }
+ it->ops->close(it->iter_arg);
+ it->ops = NULL;
+ FREE_AND_NULL(it->iter_arg);
+}
+
+int reftable_iterator_next_ref(struct reftable_iterator *it,
+ struct reftable_ref_record *ref)
+{
+ struct reftable_record rec = {
+ .type = BLOCK_TYPE_REF,
+ .u = {
+ .ref = *ref
+ },
+ };
+ int err = iterator_next(it, &rec);
+ *ref = rec.u.ref;
+ return err;
+}
+
+int reftable_iterator_next_log(struct reftable_iterator *it,
+ struct reftable_log_record *log)
+{
+ struct reftable_record rec = {
+ .type = BLOCK_TYPE_LOG,
+ .u = {
+ .log = *log,
+ },
+ };
+ int err = iterator_next(it, &rec);
+ *log = rec.u.log;
+ return err;
+}
+
+int iterator_next(struct reftable_iterator *it, struct reftable_record *rec)
+{
+ return it->ops->next(it->iter_arg, rec);
+}
+
+static int empty_iterator_next(void *arg, struct reftable_record *rec)
+{
+ return 1;
+}
+
+static void empty_iterator_close(void *arg)
+{
+}
+
+static struct reftable_iterator_vtable empty_vtable = {
+ .next = &empty_iterator_next,
+ .close = &empty_iterator_close,
+};
+
+void iterator_set_empty(struct reftable_iterator *it)
+{
+ assert(!it->ops);
+ it->iter_arg = NULL;
+ it->ops = &empty_vtable;
+}
diff --git a/reftable/generic.h b/reftable/generic.h
new file mode 100644
index 0000000..98886a0
--- /dev/null
+++ b/reftable/generic.h
@@ -0,0 +1,32 @@
+/*
+Copyright 2020 Google LLC
+
+Use of this source code is governed by a BSD-style
+license that can be found in the LICENSE file or at
+https://developers.google.com/open-source/licenses/bsd
+*/
+
+#ifndef GENERIC_H
+#define GENERIC_H
+
+#include "record.h"
+#include "reftable-generic.h"
+
+/* generic interface to reftables */
+struct reftable_table_vtable {
+ int (*seek_record)(void *tab, struct reftable_iterator *it,
+ struct reftable_record *);
+ uint32_t (*hash_id)(void *tab);
+ uint64_t (*min_update_index)(void *tab);
+ uint64_t (*max_update_index)(void *tab);
+};
+
+struct reftable_iterator_vtable {
+ int (*next)(void *iter_arg, struct reftable_record *rec);
+ void (*close)(void *iter_arg);
+};
+
+void iterator_set_empty(struct reftable_iterator *it);
+int iterator_next(struct reftable_iterator *it, struct reftable_record *rec);
+
+#endif
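
Through this vtable a caller can look up a single ref without knowing whether the table is backed by a single reader or a merged stack. A sketch, with table setup elided and the helper name hypothetical:

#include "reftable-generic.h"
#include "reftable-record.h"

/* Sketch: look up one ref through the generic table interface.
 * Returns 0 if found, > 0 if absent or deleted, < 0 on error. */
static int lookup_ref(struct reftable_table *tab, const char *name)
{
	struct reftable_ref_record ref = { NULL };
	int err = reftable_table_read_ref(tab, name, &ref);

	if (!err) {
		/* ref.value_type distinguishes direct and symbolic refs. */
		reftable_ref_record_release(&ref);
	}
	return err;
}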
diff --git a/reftable/iter.c b/reftable/iter.c
new file mode 100644
index 0000000..aa9ac19
--- /dev/null
+++ b/reftable/iter.c
@@ -0,0 +1,188 @@
+/*
+Copyright 2020 Google LLC
+
+Use of this source code is governed by a BSD-style
+license that can be found in the LICENSE file or at
+https://developers.google.com/open-source/licenses/bsd
+*/
+
+#include "iter.h"
+
+#include "system.h"
+
+#include "block.h"
+#include "generic.h"
+#include "constants.h"
+#include "reader.h"
+#include "reftable-error.h"
+
+static void filtering_ref_iterator_close(void *iter_arg)
+{
+ struct filtering_ref_iterator *fri = iter_arg;
+ strbuf_release(&fri->oid);
+ reftable_iterator_destroy(&fri->it);
+}
+
+static int filtering_ref_iterator_next(void *iter_arg,
+ struct reftable_record *rec)
+{
+ struct filtering_ref_iterator *fri = iter_arg;
+ struct reftable_ref_record *ref = &rec->u.ref;
+ int err = 0;
+ while (1) {
+ err = reftable_iterator_next_ref(&fri->it, ref);
+ if (err != 0) {
+ break;
+ }
+
+ if (fri->double_check) {
+ struct reftable_iterator it = { NULL };
+
+ err = reftable_table_seek_ref(&fri->tab, &it,
+ ref->refname);
+ if (err == 0) {
+ err = reftable_iterator_next_ref(&it, ref);
+ }
+
+ reftable_iterator_destroy(&it);
+
+ if (err < 0) {
+ break;
+ }
+
+ if (err > 0) {
+ continue;
+ }
+ }
+
+ if (ref->value_type == REFTABLE_REF_VAL2 &&
+ (!memcmp(fri->oid.buf, ref->value.val2.target_value,
+ fri->oid.len) ||
+ !memcmp(fri->oid.buf, ref->value.val2.value,
+ fri->oid.len)))
+ return 0;
+
+ if (ref->value_type == REFTABLE_REF_VAL1 &&
+ !memcmp(fri->oid.buf, ref->value.val1, fri->oid.len)) {
+ return 0;
+ }
+ }
+
+ reftable_ref_record_release(ref);
+ return err;
+}
+
+static struct reftable_iterator_vtable filtering_ref_iterator_vtable = {
+ .next = &filtering_ref_iterator_next,
+ .close = &filtering_ref_iterator_close,
+};
+
+void iterator_from_filtering_ref_iterator(struct reftable_iterator *it,
+ struct filtering_ref_iterator *fri)
+{
+ assert(!it->ops);
+ it->iter_arg = fri;
+ it->ops = &filtering_ref_iterator_vtable;
+}
+
+static void indexed_table_ref_iter_close(void *p)
+{
+ struct indexed_table_ref_iter *it = p;
+ block_iter_close(&it->cur);
+ reftable_block_done(&it->block_reader.block);
+ reftable_free(it->offsets);
+ strbuf_release(&it->oid);
+}
+
+static int indexed_table_ref_iter_next_block(struct indexed_table_ref_iter *it)
+{
+ uint64_t off;
+ int err = 0;
+ if (it->offset_idx == it->offset_len) {
+ it->is_finished = 1;
+ return 1;
+ }
+
+ reftable_block_done(&it->block_reader.block);
+
+ off = it->offsets[it->offset_idx++];
+ err = reader_init_block_reader(it->r, &it->block_reader, off,
+ BLOCK_TYPE_REF);
+ if (err < 0) {
+ return err;
+ }
+ if (err > 0) {
+ /* indexed block does not exist. */
+ return REFTABLE_FORMAT_ERROR;
+ }
+ block_iter_seek_start(&it->cur, &it->block_reader);
+ return 0;
+}
+
+static int indexed_table_ref_iter_next(void *p, struct reftable_record *rec)
+{
+ struct indexed_table_ref_iter *it = p;
+ struct reftable_ref_record *ref = &rec->u.ref;
+
+ while (1) {
+ int err = block_iter_next(&it->cur, rec);
+ if (err < 0) {
+ return err;
+ }
+
+ if (err > 0) {
+ err = indexed_table_ref_iter_next_block(it);
+ if (err < 0) {
+ return err;
+ }
+
+ if (it->is_finished) {
+ return 1;
+ }
+ continue;
+ }
+ /* BUG */
+ if (!memcmp(it->oid.buf, ref->value.val2.target_value,
+ it->oid.len) ||
+ !memcmp(it->oid.buf, ref->value.val2.value, it->oid.len)) {
+ return 0;
+ }
+ }
+}
+
+int new_indexed_table_ref_iter(struct indexed_table_ref_iter **dest,
+ struct reftable_reader *r, uint8_t *oid,
+ int oid_len, uint64_t *offsets, int offset_len)
+{
+ struct indexed_table_ref_iter empty = INDEXED_TABLE_REF_ITER_INIT;
+ struct indexed_table_ref_iter *itr = reftable_calloc(1, sizeof(*itr));
+ int err = 0;
+
+ *itr = empty;
+ itr->r = r;
+ strbuf_add(&itr->oid, oid, oid_len);
+
+ itr->offsets = offsets;
+ itr->offset_len = offset_len;
+
+ err = indexed_table_ref_iter_next_block(itr);
+ if (err < 0) {
+ reftable_free(itr);
+ } else {
+ *dest = itr;
+ }
+ return err;
+}
+
+static struct reftable_iterator_vtable indexed_table_ref_iter_vtable = {
+ .next = &indexed_table_ref_iter_next,
+ .close = &indexed_table_ref_iter_close,
+};
+
+void iterator_from_indexed_table_ref_iter(struct reftable_iterator *it,
+ struct indexed_table_ref_iter *itr)
+{
+ assert(!it->ops);
+ it->iter_arg = itr;
+ it->ops = &indexed_table_ref_iter_vtable;
+}
diff --git a/reftable/iter.h b/reftable/iter.h
new file mode 100644
index 0000000..537431b
--- /dev/null
+++ b/reftable/iter.h
@@ -0,0 +1,65 @@
+/*
+Copyright 2020 Google LLC
+
+Use of this source code is governed by a BSD-style
+license that can be found in the LICENSE file or at
+https://developers.google.com/open-source/licenses/bsd
+*/
+
+#ifndef ITER_H
+#define ITER_H
+
+#include "system.h"
+#include "block.h"
+#include "record.h"
+
+#include "reftable-iterator.h"
+#include "reftable-generic.h"
+
+/* iterator that produces only ref records that point to `oid` */
+struct filtering_ref_iterator {
+ int double_check;
+ struct reftable_table tab;
+ struct strbuf oid;
+ struct reftable_iterator it;
+};
+#define FILTERING_REF_ITERATOR_INIT \
+ { \
+ .oid = STRBUF_INIT \
+ }
+
+void iterator_from_filtering_ref_iterator(struct reftable_iterator *,
+ struct filtering_ref_iterator *);
+
+/* iterator that produces only ref records that point to `oid`,
+ * but using the object index.
+ */
+struct indexed_table_ref_iter {
+ struct reftable_reader *r;
+ struct strbuf oid;
+
+ /* mutable */
+ uint64_t *offsets;
+
+ /* Points to the next offset to read. */
+ int offset_idx;
+ int offset_len;
+ struct block_reader block_reader;
+ struct block_iter cur;
+ int is_finished;
+};
+
+#define INDEXED_TABLE_REF_ITER_INIT { \
+ .cur = BLOCK_ITER_INIT, \
+ .oid = STRBUF_INIT, \
+}
+
+void iterator_from_indexed_table_ref_iter(struct reftable_iterator *it,
+ struct indexed_table_ref_iter *itr);
+
+/* Takes ownership of `offsets` */
+int new_indexed_table_ref_iter(struct indexed_table_ref_iter **dest,
+ struct reftable_reader *r, uint8_t *oid,
+ int oid_len, uint64_t *offsets, int offset_len);
+
+#endif
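
To illustrate how the filtering iterator above is meant to be wired up (the reader code that does this lives elsewhere in the series; the helper below and its parameters are a sketch):

#include "iter.h"
#include "basics.h"

/* Sketch: wrap `inner` so that only refs pointing at `oid` are returned. */
static void wrap_with_oid_filter(struct reftable_iterator *out,
				 struct reftable_table tab,
				 struct reftable_iterator inner,
				 uint8_t *oid, size_t oid_len)
{
	struct filtering_ref_iterator empty = FILTERING_REF_ITERATOR_INIT;
	struct filtering_ref_iterator *fri = reftable_malloc(sizeof(*fri));

	*fri = empty;
	strbuf_add(&fri->oid, oid, oid_len);
	fri->tab = tab;
	fri->it = inner; /* the filter takes ownership of the inner iterator */
	fri->double_check = 0;

	/* `out` is torn down with reftable_iterator_destroy(), which also
	 * frees `fri`. */
	iterator_from_filtering_ref_iterator(out, fri);
}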
diff --git a/reftable/merged.c b/reftable/merged.c
new file mode 100644
index 0000000..f85a24c
--- /dev/null
+++ b/reftable/merged.c
@@ -0,0 +1,355 @@
+/*
+Copyright 2020 Google LLC
+
+Use of this source code is governed by a BSD-style
+license that can be found in the LICENSE file or at
+https://developers.google.com/open-source/licenses/bsd
+*/
+
+#include "merged.h"
+
+#include "constants.h"
+#include "iter.h"
+#include "pq.h"
+#include "record.h"
+#include "generic.h"
+#include "reftable-merged.h"
+#include "reftable-error.h"
+#include "system.h"
+
+struct merged_subiter {
+ struct reftable_iterator iter;
+ struct reftable_record rec;
+};
+
+struct merged_iter {
+ struct merged_subiter *subiters;
+ struct merged_iter_pqueue pq;
+ uint32_t hash_id;
+ size_t stack_len;
+ uint8_t typ;
+ int suppress_deletions;
+ ssize_t advance_index;
+};
+
+static int merged_iter_init(struct merged_iter *mi)
+{
+ for (size_t i = 0; i < mi->stack_len; i++) {
+ struct pq_entry e = {
+ .index = i,
+ .rec = &mi->subiters[i].rec,
+ };
+ int err;
+
+ reftable_record_init(&mi->subiters[i].rec, mi->typ);
+ err = iterator_next(&mi->subiters[i].iter,
+ &mi->subiters[i].rec);
+ if (err < 0)
+ return err;
+ if (err > 0)
+ continue;
+
+ merged_iter_pqueue_add(&mi->pq, &e);
+ }
+
+ return 0;
+}
+
+static void merged_iter_close(void *p)
+{
+ struct merged_iter *mi = p;
+
+ merged_iter_pqueue_release(&mi->pq);
+ for (size_t i = 0; i < mi->stack_len; i++) {
+ reftable_iterator_destroy(&mi->subiters[i].iter);
+ reftable_record_release(&mi->subiters[i].rec);
+ }
+ reftable_free(mi->subiters);
+}
+
+static int merged_iter_advance_subiter(struct merged_iter *mi, size_t idx)
+{
+ struct pq_entry e = {
+ .index = idx,
+ .rec = &mi->subiters[idx].rec,
+ };
+ int err;
+
+ err = iterator_next(&mi->subiters[idx].iter, &mi->subiters[idx].rec);
+ if (err)
+ return err;
+
+ merged_iter_pqueue_add(&mi->pq, &e);
+ return 0;
+}
+
+static int merged_iter_next_entry(struct merged_iter *mi,
+ struct reftable_record *rec)
+{
+ struct pq_entry entry = { 0 };
+ int err = 0, empty;
+
+ empty = merged_iter_pqueue_is_empty(mi->pq);
+
+ if (mi->advance_index >= 0) {
+ /*
+ * When there are no pqueue entries then we only have a single
+ * subiter left. There is no need to use the pqueue in that
+ * case anymore as we know that the subiter will return entries
+ * in the correct order already.
+ *
+ * While this may sound like a very specific edge case, it may
+ * happen more frequently than you think. Most repositories
+ * will end up having a single large base table that contains
+ * most of the refs. It's thus likely that we exhaust all
+ * subiters but the one from that base ref.
+ */
+ if (empty)
+ return iterator_next(&mi->subiters[mi->advance_index].iter,
+ rec);
+
+ err = merged_iter_advance_subiter(mi, mi->advance_index);
+ if (err < 0)
+ return err;
+ if (!err)
+ empty = 0;
+ mi->advance_index = -1;
+ }
+
+ if (empty)
+ return 1;
+
+ entry = merged_iter_pqueue_remove(&mi->pq);
+
+ /*
+ One can also use reftable as datacenter-local storage, where the ref
+	 database is maintained in a globally consistent database (eg.
+ CockroachDB or Spanner). In this scenario, replication delays together
+ with compaction may cause newer tables to contain older entries. In
+ such a deployment, the loop below must be changed to collect all
+	 entries for the same key, and return only the newest one.
+ */
+ while (!merged_iter_pqueue_is_empty(mi->pq)) {
+ struct pq_entry top = merged_iter_pqueue_top(mi->pq);
+ int cmp;
+
+ cmp = reftable_record_cmp(top.rec, entry.rec);
+ if (cmp > 0)
+ break;
+
+ merged_iter_pqueue_remove(&mi->pq);
+ err = merged_iter_advance_subiter(mi, top.index);
+ if (err < 0)
+ return err;
+ }
+
+ mi->advance_index = entry.index;
+ SWAP(*rec, *entry.rec);
+ return 0;
+}
+
+static int merged_iter_next_void(void *p, struct reftable_record *rec)
+{
+ struct merged_iter *mi = p;
+ while (1) {
+ int err = merged_iter_next_entry(mi, rec);
+ if (err)
+ return err;
+ if (mi->suppress_deletions && reftable_record_is_deletion(rec))
+ continue;
+ return 0;
+ }
+}
+
+static struct reftable_iterator_vtable merged_iter_vtable = {
+ .next = &merged_iter_next_void,
+ .close = &merged_iter_close,
+};
+
+static void iterator_from_merged_iter(struct reftable_iterator *it,
+ struct merged_iter *mi)
+{
+ assert(!it->ops);
+ it->iter_arg = mi;
+ it->ops = &merged_iter_vtable;
+}
+
+int reftable_new_merged_table(struct reftable_merged_table **dest,
+ struct reftable_table *stack, size_t n,
+ uint32_t hash_id)
+{
+ struct reftable_merged_table *m = NULL;
+ uint64_t last_max = 0;
+ uint64_t first_min = 0;
+
+ for (size_t i = 0; i < n; i++) {
+ uint64_t min = reftable_table_min_update_index(&stack[i]);
+ uint64_t max = reftable_table_max_update_index(&stack[i]);
+
+ if (reftable_table_hash_id(&stack[i]) != hash_id) {
+ return REFTABLE_FORMAT_ERROR;
+ }
+ if (i == 0 || min < first_min) {
+ first_min = min;
+ }
+ if (i == 0 || max > last_max) {
+ last_max = max;
+ }
+ }
+
+ REFTABLE_CALLOC_ARRAY(m, 1);
+ m->stack = stack;
+ m->stack_len = n;
+ m->min = first_min;
+ m->max = last_max;
+ m->hash_id = hash_id;
+ *dest = m;
+ return 0;
+}
+
+/* clears the list of subtables, without affecting the readers themselves. */
+void merged_table_release(struct reftable_merged_table *mt)
+{
+ FREE_AND_NULL(mt->stack);
+ mt->stack_len = 0;
+}
+
+void reftable_merged_table_free(struct reftable_merged_table *mt)
+{
+ if (!mt) {
+ return;
+ }
+ merged_table_release(mt);
+ reftable_free(mt);
+}
+
+uint64_t
+reftable_merged_table_max_update_index(struct reftable_merged_table *mt)
+{
+ return mt->max;
+}
+
+uint64_t
+reftable_merged_table_min_update_index(struct reftable_merged_table *mt)
+{
+ return mt->min;
+}
+
+static int reftable_table_seek_record(struct reftable_table *tab,
+ struct reftable_iterator *it,
+ struct reftable_record *rec)
+{
+ return tab->ops->seek_record(tab->table_arg, it, rec);
+}
+
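+/*
+ * Seek each subtable to the wanted key. Only subiterators whose seek
+ * succeeded are kept; those are then merged through the priority queue.
+ */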
+static int merged_table_seek_record(struct reftable_merged_table *mt,
+ struct reftable_iterator *it,
+ struct reftable_record *rec)
+{
+ struct merged_iter merged = {
+ .typ = reftable_record_type(rec),
+ .hash_id = mt->hash_id,
+ .suppress_deletions = mt->suppress_deletions,
+ .advance_index = -1,
+ };
+ struct merged_iter *p;
+ int err;
+
+ REFTABLE_CALLOC_ARRAY(merged.subiters, mt->stack_len);
+ for (size_t i = 0; i < mt->stack_len; i++) {
+ err = reftable_table_seek_record(&mt->stack[i],
+ &merged.subiters[merged.stack_len].iter, rec);
+ if (err < 0)
+ goto out;
+ if (!err)
+ merged.stack_len++;
+ }
+
+ err = merged_iter_init(&merged);
+ if (err < 0)
+ goto out;
+
+ p = reftable_malloc(sizeof(struct merged_iter));
+ *p = merged;
+ iterator_from_merged_iter(it, p);
+
+out:
+ if (err < 0)
+ merged_iter_close(&merged);
+ return err;
+}
+
+int reftable_merged_table_seek_ref(struct reftable_merged_table *mt,
+ struct reftable_iterator *it,
+ const char *name)
+{
+ struct reftable_record rec = {
+ .type = BLOCK_TYPE_REF,
+ .u.ref = {
+ .refname = (char *)name,
+ },
+ };
+ return merged_table_seek_record(mt, it, &rec);
+}
+
+int reftable_merged_table_seek_log_at(struct reftable_merged_table *mt,
+ struct reftable_iterator *it,
+ const char *name, uint64_t update_index)
+{
+ struct reftable_record rec = { .type = BLOCK_TYPE_LOG,
+ .u.log = {
+ .refname = (char *)name,
+ .update_index = update_index,
+ } };
+ return merged_table_seek_record(mt, it, &rec);
+}
+
+int reftable_merged_table_seek_log(struct reftable_merged_table *mt,
+ struct reftable_iterator *it,
+ const char *name)
+{
+ uint64_t max = ~((uint64_t)0);
+ return reftable_merged_table_seek_log_at(mt, it, name, max);
+}
+
+uint32_t reftable_merged_table_hash_id(struct reftable_merged_table *mt)
+{
+ return mt->hash_id;
+}
+
+static int reftable_merged_table_seek_void(void *tab,
+ struct reftable_iterator *it,
+ struct reftable_record *rec)
+{
+ return merged_table_seek_record(tab, it, rec);
+}
+
+static uint32_t reftable_merged_table_hash_id_void(void *tab)
+{
+ return reftable_merged_table_hash_id(tab);
+}
+
+static uint64_t reftable_merged_table_min_update_index_void(void *tab)
+{
+ return reftable_merged_table_min_update_index(tab);
+}
+
+static uint64_t reftable_merged_table_max_update_index_void(void *tab)
+{
+ return reftable_merged_table_max_update_index(tab);
+}
+
+static struct reftable_table_vtable merged_table_vtable = {
+ .seek_record = reftable_merged_table_seek_void,
+ .hash_id = reftable_merged_table_hash_id_void,
+ .min_update_index = reftable_merged_table_min_update_index_void,
+ .max_update_index = reftable_merged_table_max_update_index_void,
+};
+
+void reftable_table_from_merged_table(struct reftable_table *tab,
+ struct reftable_merged_table *merged)
+{
+ assert(!tab->ops);
+ tab->ops = &merged_table_vtable;
+ tab->table_arg = merged;
+}
diff --git a/reftable/merged.h b/reftable/merged.h
new file mode 100644
index 0000000..a2571db
--- /dev/null
+++ b/reftable/merged.h
@@ -0,0 +1,29 @@
+/*
+Copyright 2020 Google LLC
+
+Use of this source code is governed by a BSD-style
+license that can be found in the LICENSE file or at
+https://developers.google.com/open-source/licenses/bsd
+*/
+
+#ifndef MERGED_H
+#define MERGED_H
+
+#include "system.h"
+
+struct reftable_merged_table {
+ struct reftable_table *stack;
+ size_t stack_len;
+ uint32_t hash_id;
+
+	/* If unset, produce deletion records. This is useful for compaction.
+	 * When reading the full stack, deletions should be suppressed. */
+ int suppress_deletions;
+
+ uint64_t min;
+ uint64_t max;
+};
+
+void merged_table_release(struct reftable_merged_table *mt);
+
+#endif
diff --git a/reftable/merged_test.c b/reftable/merged_test.c
new file mode 100644
index 0000000..530fc82
--- /dev/null
+++ b/reftable/merged_test.c
@@ -0,0 +1,454 @@
+/*
+Copyright 2020 Google LLC
+
+Use of this source code is governed by a BSD-style
+license that can be found in the LICENSE file or at
+https://developers.google.com/open-source/licenses/bsd
+*/
+
+#include "merged.h"
+
+#include "system.h"
+
+#include "basics.h"
+#include "blocksource.h"
+#include "reader.h"
+#include "record.h"
+#include "test_framework.h"
+#include "reftable-merged.h"
+#include "reftable-tests.h"
+#include "reftable-generic.h"
+#include "reftable-writer.h"
+
+static void write_test_table(struct strbuf *buf,
+ struct reftable_ref_record refs[], int n)
+{
+ uint64_t min = 0xffffffff;
+ uint64_t max = 0;
+ int i = 0;
+ int err;
+
+ struct reftable_write_options opts = {
+ .block_size = 256,
+ };
+ struct reftable_writer *w = NULL;
+ for (i = 0; i < n; i++) {
+ uint64_t ui = refs[i].update_index;
+ if (ui > max) {
+ max = ui;
+ }
+ if (ui < min) {
+ min = ui;
+ }
+ }
+
+ w = reftable_new_writer(&strbuf_add_void, &noop_flush, buf, &opts);
+ reftable_writer_set_limits(w, min, max);
+
+ for (i = 0; i < n; i++) {
+ uint64_t before = refs[i].update_index;
+ int n = reftable_writer_add_ref(w, &refs[i]);
+ EXPECT(n == 0);
+ EXPECT(before == refs[i].update_index);
+ }
+
+ err = reftable_writer_close(w);
+ EXPECT_ERR(err);
+
+ reftable_writer_free(w);
+}
+
+static void write_test_log_table(struct strbuf *buf,
+ struct reftable_log_record logs[], int n,
+ uint64_t update_index)
+{
+ int i = 0;
+ int err;
+
+ struct reftable_write_options opts = {
+ .block_size = 256,
+ .exact_log_message = 1,
+ };
+ struct reftable_writer *w = NULL;
+ w = reftable_new_writer(&strbuf_add_void, &noop_flush, buf, &opts);
+ reftable_writer_set_limits(w, update_index, update_index);
+
+ for (i = 0; i < n; i++) {
+ int err = reftable_writer_add_log(w, &logs[i]);
+ EXPECT_ERR(err);
+ }
+
+ err = reftable_writer_close(w);
+ EXPECT_ERR(err);
+
+ reftable_writer_free(w);
+}
+
+static struct reftable_merged_table *
+merged_table_from_records(struct reftable_ref_record **refs,
+ struct reftable_block_source **source,
+ struct reftable_reader ***readers, int *sizes,
+ struct strbuf *buf, size_t n)
+{
+ struct reftable_merged_table *mt = NULL;
+ struct reftable_table *tabs;
+ int err;
+
+ REFTABLE_CALLOC_ARRAY(tabs, n);
+ REFTABLE_CALLOC_ARRAY(*readers, n);
+ REFTABLE_CALLOC_ARRAY(*source, n);
+
+ for (size_t i = 0; i < n; i++) {
+ write_test_table(&buf[i], refs[i], sizes[i]);
+ block_source_from_strbuf(&(*source)[i], &buf[i]);
+
+ err = reftable_new_reader(&(*readers)[i], &(*source)[i],
+ "name");
+ EXPECT_ERR(err);
+ reftable_table_from_reader(&tabs[i], (*readers)[i]);
+ }
+
+ err = reftable_new_merged_table(&mt, tabs, n, GIT_SHA1_FORMAT_ID);
+ EXPECT_ERR(err);
+ return mt;
+}
+
+static void readers_destroy(struct reftable_reader **readers, size_t n)
+{
+ int i = 0;
+ for (; i < n; i++)
+ reftable_reader_free(readers[i]);
+ reftable_free(readers);
+}
+
+static void test_merged_between(void)
+{
+ struct reftable_ref_record r1[] = { {
+ .refname = "b",
+ .update_index = 1,
+ .value_type = REFTABLE_REF_VAL1,
+ .value.val1 = { 1, 2, 3, 0 },
+ } };
+ struct reftable_ref_record r2[] = { {
+ .refname = "a",
+ .update_index = 2,
+ .value_type = REFTABLE_REF_DELETION,
+ } };
+
+ struct reftable_ref_record *refs[] = { r1, r2 };
+ int sizes[] = { 1, 1 };
+ struct strbuf bufs[2] = { STRBUF_INIT, STRBUF_INIT };
+ struct reftable_block_source *bs = NULL;
+ struct reftable_reader **readers = NULL;
+ struct reftable_merged_table *mt =
+ merged_table_from_records(refs, &bs, &readers, sizes, bufs, 2);
+ int i;
+ struct reftable_ref_record ref = { NULL };
+ struct reftable_iterator it = { NULL };
+ int err = reftable_merged_table_seek_ref(mt, &it, "a");
+ EXPECT_ERR(err);
+
+ err = reftable_iterator_next_ref(&it, &ref);
+ EXPECT_ERR(err);
+ EXPECT(ref.update_index == 2);
+ reftable_ref_record_release(&ref);
+ reftable_iterator_destroy(&it);
+ readers_destroy(readers, 2);
+ reftable_merged_table_free(mt);
+ for (i = 0; i < ARRAY_SIZE(bufs); i++) {
+ strbuf_release(&bufs[i]);
+ }
+ reftable_free(bs);
+}
+
+static void test_merged(void)
+{
+ struct reftable_ref_record r1[] = {
+ {
+ .refname = "a",
+ .update_index = 1,
+ .value_type = REFTABLE_REF_VAL1,
+ .value.val1 = { 1 },
+ },
+ {
+ .refname = "b",
+ .update_index = 1,
+ .value_type = REFTABLE_REF_VAL1,
+ .value.val1 = { 1 },
+ },
+ {
+ .refname = "c",
+ .update_index = 1,
+ .value_type = REFTABLE_REF_VAL1,
+ .value.val1 = { 1 },
+ }
+ };
+ struct reftable_ref_record r2[] = { {
+ .refname = "a",
+ .update_index = 2,
+ .value_type = REFTABLE_REF_DELETION,
+ } };
+ struct reftable_ref_record r3[] = {
+ {
+ .refname = "c",
+ .update_index = 3,
+ .value_type = REFTABLE_REF_VAL1,
+ .value.val1 = { 2 },
+ },
+ {
+ .refname = "d",
+ .update_index = 3,
+ .value_type = REFTABLE_REF_VAL1,
+ .value.val1 = { 1 },
+ },
+ };
+
+ struct reftable_ref_record *want[] = {
+ &r2[0],
+ &r1[1],
+ &r3[0],
+ &r3[1],
+ };
+
+ struct reftable_ref_record *refs[] = { r1, r2, r3 };
+ int sizes[3] = { 3, 1, 2 };
+ struct strbuf bufs[3] = { STRBUF_INIT, STRBUF_INIT, STRBUF_INIT };
+ struct reftable_block_source *bs = NULL;
+ struct reftable_reader **readers = NULL;
+ struct reftable_merged_table *mt =
+ merged_table_from_records(refs, &bs, &readers, sizes, bufs, 3);
+
+ struct reftable_iterator it = { NULL };
+ int err = reftable_merged_table_seek_ref(mt, &it, "a");
+ struct reftable_ref_record *out = NULL;
+ size_t len = 0;
+ size_t cap = 0;
+ int i = 0;
+
+ EXPECT_ERR(err);
+ EXPECT(reftable_merged_table_hash_id(mt) == GIT_SHA1_FORMAT_ID);
+ EXPECT(reftable_merged_table_min_update_index(mt) == 1);
+
+ while (len < 100) { /* cap loops/recursion. */
+ struct reftable_ref_record ref = { NULL };
+ int err = reftable_iterator_next_ref(&it, &ref);
+ if (err > 0)
+ break;
+
+ REFTABLE_ALLOC_GROW(out, len + 1, cap);
+ out[len++] = ref;
+ }
+ reftable_iterator_destroy(&it);
+
+ EXPECT(ARRAY_SIZE(want) == len);
+ for (i = 0; i < len; i++) {
+ EXPECT(reftable_ref_record_equal(want[i], &out[i],
+ GIT_SHA1_RAWSZ));
+ }
+ for (i = 0; i < len; i++) {
+ reftable_ref_record_release(&out[i]);
+ }
+ reftable_free(out);
+
+ for (i = 0; i < 3; i++) {
+ strbuf_release(&bufs[i]);
+ }
+ readers_destroy(readers, 3);
+ reftable_merged_table_free(mt);
+ reftable_free(bs);
+}
+
+static struct reftable_merged_table *
+merged_table_from_log_records(struct reftable_log_record **logs,
+ struct reftable_block_source **source,
+ struct reftable_reader ***readers, int *sizes,
+ struct strbuf *buf, size_t n)
+{
+ struct reftable_merged_table *mt = NULL;
+ struct reftable_table *tabs;
+ int err;
+
+ REFTABLE_CALLOC_ARRAY(tabs, n);
+ REFTABLE_CALLOC_ARRAY(*readers, n);
+ REFTABLE_CALLOC_ARRAY(*source, n);
+
+ for (size_t i = 0; i < n; i++) {
+ write_test_log_table(&buf[i], logs[i], sizes[i], i + 1);
+ block_source_from_strbuf(&(*source)[i], &buf[i]);
+
+ err = reftable_new_reader(&(*readers)[i], &(*source)[i],
+ "name");
+ EXPECT_ERR(err);
+ reftable_table_from_reader(&tabs[i], (*readers)[i]);
+ }
+
+ err = reftable_new_merged_table(&mt, tabs, n, GIT_SHA1_FORMAT_ID);
+ EXPECT_ERR(err);
+ return mt;
+}
+
+static void test_merged_logs(void)
+{
+ struct reftable_log_record r1[] = {
+ {
+ .refname = "a",
+ .update_index = 2,
+ .value_type = REFTABLE_LOG_UPDATE,
+ .value.update = {
+ .old_hash = { 2 },
+ /* deletion */
+ .name = "jane doe",
+ .email = "jane@invalid",
+ .message = "message2",
+ }
+ },
+ {
+ .refname = "a",
+ .update_index = 1,
+ .value_type = REFTABLE_LOG_UPDATE,
+ .value.update = {
+ .old_hash = { 1 },
+ .new_hash = { 2 },
+ .name = "jane doe",
+ .email = "jane@invalid",
+ .message = "message1",
+ }
+ },
+ };
+ struct reftable_log_record r2[] = {
+ {
+ .refname = "a",
+ .update_index = 3,
+ .value_type = REFTABLE_LOG_UPDATE,
+ .value.update = {
+ .new_hash = { 3 },
+ .name = "jane doe",
+ .email = "jane@invalid",
+ .message = "message3",
+ }
+ },
+ };
+ struct reftable_log_record r3[] = {
+ {
+ .refname = "a",
+ .update_index = 2,
+ .value_type = REFTABLE_LOG_DELETION,
+ },
+ };
+ struct reftable_log_record *want[] = {
+ &r2[0],
+ &r3[0],
+ &r1[1],
+ };
+
+ struct reftable_log_record *logs[] = { r1, r2, r3 };
+ int sizes[3] = { 2, 1, 1 };
+ struct strbuf bufs[3] = { STRBUF_INIT, STRBUF_INIT, STRBUF_INIT };
+ struct reftable_block_source *bs = NULL;
+ struct reftable_reader **readers = NULL;
+ struct reftable_merged_table *mt = merged_table_from_log_records(
+ logs, &bs, &readers, sizes, bufs, 3);
+
+ struct reftable_iterator it = { NULL };
+ int err = reftable_merged_table_seek_log(mt, &it, "a");
+ struct reftable_log_record *out = NULL;
+ size_t len = 0;
+ size_t cap = 0;
+ int i = 0;
+
+ EXPECT_ERR(err);
+ EXPECT(reftable_merged_table_hash_id(mt) == GIT_SHA1_FORMAT_ID);
+ EXPECT(reftable_merged_table_min_update_index(mt) == 1);
+
+ while (len < 100) { /* cap loops/recursion. */
+ struct reftable_log_record log = { NULL };
+ int err = reftable_iterator_next_log(&it, &log);
+ if (err > 0)
+ break;
+
+ REFTABLE_ALLOC_GROW(out, len + 1, cap);
+ out[len++] = log;
+ }
+ reftable_iterator_destroy(&it);
+
+ EXPECT(ARRAY_SIZE(want) == len);
+ for (i = 0; i < len; i++) {
+ EXPECT(reftable_log_record_equal(want[i], &out[i],
+ GIT_SHA1_RAWSZ));
+ }
+
+ err = reftable_merged_table_seek_log_at(mt, &it, "a", 2);
+ EXPECT_ERR(err);
+ reftable_log_record_release(&out[0]);
+ err = reftable_iterator_next_log(&it, &out[0]);
+ EXPECT_ERR(err);
+ EXPECT(reftable_log_record_equal(&out[0], &r3[0], GIT_SHA1_RAWSZ));
+ reftable_iterator_destroy(&it);
+
+ for (i = 0; i < len; i++) {
+ reftable_log_record_release(&out[i]);
+ }
+ reftable_free(out);
+
+ for (i = 0; i < 3; i++) {
+ strbuf_release(&bufs[i]);
+ }
+ readers_destroy(readers, 3);
+ reftable_merged_table_free(mt);
+ reftable_free(bs);
+}
+
+static void test_default_write_opts(void)
+{
+ struct reftable_write_options opts = { 0 };
+ struct strbuf buf = STRBUF_INIT;
+ struct reftable_writer *w =
+ reftable_new_writer(&strbuf_add_void, &noop_flush, &buf, &opts);
+
+ struct reftable_ref_record rec = {
+ .refname = "master",
+ .update_index = 1,
+ };
+ int err;
+ struct reftable_block_source source = { NULL };
+ struct reftable_table *tab = reftable_calloc(1, sizeof(*tab));
+ uint32_t hash_id;
+ struct reftable_reader *rd = NULL;
+ struct reftable_merged_table *merged = NULL;
+
+ reftable_writer_set_limits(w, 1, 1);
+
+ err = reftable_writer_add_ref(w, &rec);
+ EXPECT_ERR(err);
+
+ err = reftable_writer_close(w);
+ EXPECT_ERR(err);
+ reftable_writer_free(w);
+
+ block_source_from_strbuf(&source, &buf);
+
+ err = reftable_new_reader(&rd, &source, "filename");
+ EXPECT_ERR(err);
+
+ hash_id = reftable_reader_hash_id(rd);
+ EXPECT(hash_id == GIT_SHA1_FORMAT_ID);
+
+ reftable_table_from_reader(&tab[0], rd);
+ err = reftable_new_merged_table(&merged, tab, 1, GIT_SHA1_FORMAT_ID);
+ EXPECT_ERR(err);
+
+ reftable_reader_free(rd);
+ reftable_merged_table_free(merged);
+ strbuf_release(&buf);
+}
+
+/* XXX test refs_for(oid) */
+
+int merged_test_main(int argc, const char *argv[])
+{
+ RUN_TEST(test_merged_logs);
+ RUN_TEST(test_merged_between);
+ RUN_TEST(test_merged);
+ RUN_TEST(test_default_write_opts);
+ return 0;
+}
diff --git a/reftable/pq.c b/reftable/pq.c
new file mode 100644
index 0000000..7fb45d8
--- /dev/null
+++ b/reftable/pq.c
@@ -0,0 +1,77 @@
+/*
+Copyright 2020 Google LLC
+
+Use of this source code is governed by a BSD-style
+license that can be found in the LICENSE file or at
+https://developers.google.com/open-source/licenses/bsd
+*/
+
+#include "pq.h"
+
+#include "reftable-record.h"
+#include "system.h"
+#include "basics.h"
+
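+/*
+ * Ordering for the merge heap: records compare by key first; on equal keys
+ * the entry with the larger subiterator index sorts first and is therefore
+ * returned before same-key entries from lower-index subiterators.
+ */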
+int pq_less(struct pq_entry *a, struct pq_entry *b)
+{
+ int cmp = reftable_record_cmp(a->rec, b->rec);
+ if (cmp == 0)
+ return a->index > b->index;
+ return cmp < 0;
+}
+
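+/*
+ * Pop the minimum entry: move the last heap element to the root and sift it
+ * down until the binary-heap invariant (per pq_less, parents sort before
+ * their children) holds again.
+ */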
+struct pq_entry merged_iter_pqueue_remove(struct merged_iter_pqueue *pq)
+{
+ int i = 0;
+ struct pq_entry e = pq->heap[0];
+ pq->heap[0] = pq->heap[pq->len - 1];
+ pq->len--;
+
+ i = 0;
+ while (i < pq->len) {
+ int min = i;
+ int j = 2 * i + 1;
+ int k = 2 * i + 2;
+ if (j < pq->len && pq_less(&pq->heap[j], &pq->heap[i])) {
+ min = j;
+ }
+ if (k < pq->len && pq_less(&pq->heap[k], &pq->heap[min])) {
+ min = k;
+ }
+
+ if (min == i) {
+ break;
+ }
+
+ SWAP(pq->heap[i], pq->heap[min]);
+ i = min;
+ }
+
+ return e;
+}
+
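+/*
+ * Append the entry at the end of the heap array and sift it up towards the
+ * root until its parent compares less than it.
+ */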
+void merged_iter_pqueue_add(struct merged_iter_pqueue *pq, const struct pq_entry *e)
+{
+ int i = 0;
+
+ REFTABLE_ALLOC_GROW(pq->heap, pq->len + 1, pq->cap);
+ pq->heap[pq->len++] = *e;
+
+ i = pq->len - 1;
+ while (i > 0) {
+ int j = (i - 1) / 2;
+ if (pq_less(&pq->heap[j], &pq->heap[i])) {
+ break;
+ }
+
+ SWAP(pq->heap[j], pq->heap[i]);
+
+ i = j;
+ }
+}
+
+void merged_iter_pqueue_release(struct merged_iter_pqueue *pq)
+{
+ FREE_AND_NULL(pq->heap);
+ memset(pq, 0, sizeof(*pq));
+}
diff --git a/reftable/pq.h b/reftable/pq.h
new file mode 100644
index 0000000..f796c23
--- /dev/null
+++ b/reftable/pq.h
@@ -0,0 +1,41 @@
+/*
+Copyright 2020 Google LLC
+
+Use of this source code is governed by a BSD-style
+license that can be found in the LICENSE file or at
+https://developers.google.com/open-source/licenses/bsd
+*/
+
+#ifndef PQ_H
+#define PQ_H
+
+#include "record.h"
+
+struct pq_entry {
+ size_t index;
+ struct reftable_record *rec;
+};
+
+struct merged_iter_pqueue {
+ struct pq_entry *heap;
+ size_t len;
+ size_t cap;
+};
+
+void merged_iter_pqueue_check(struct merged_iter_pqueue pq);
+struct pq_entry merged_iter_pqueue_remove(struct merged_iter_pqueue *pq);
+void merged_iter_pqueue_add(struct merged_iter_pqueue *pq, const struct pq_entry *e);
+void merged_iter_pqueue_release(struct merged_iter_pqueue *pq);
+int pq_less(struct pq_entry *a, struct pq_entry *b);
+
+static inline struct pq_entry merged_iter_pqueue_top(struct merged_iter_pqueue pq)
+{
+ return pq.heap[0];
+}
+
+static inline int merged_iter_pqueue_is_empty(struct merged_iter_pqueue pq)
+{
+ return pq.len == 0;
+}
+
+#endif
diff --git a/reftable/pq_test.c b/reftable/pq_test.c
new file mode 100644
index 0000000..b7d3c80
--- /dev/null
+++ b/reftable/pq_test.c
@@ -0,0 +1,74 @@
+/*
+Copyright 2020 Google LLC
+
+Use of this source code is governed by a BSD-style
+license that can be found in the LICENSE file or at
+https://developers.google.com/open-source/licenses/bsd
+*/
+
+#include "system.h"
+
+#include "basics.h"
+#include "constants.h"
+#include "pq.h"
+#include "record.h"
+#include "reftable-tests.h"
+#include "test_framework.h"
+
+void merged_iter_pqueue_check(struct merged_iter_pqueue pq)
+{
+ int i;
+ for (i = 1; i < pq.len; i++) {
+ int parent = (i - 1) / 2;
+
+ EXPECT(pq_less(&pq.heap[parent], &pq.heap[i]));
+ }
+}
+
+static void test_pq(void)
+{
+ struct merged_iter_pqueue pq = { NULL };
+ struct reftable_record recs[54];
+ int N = ARRAY_SIZE(recs) - 1, i;
+ char *last = NULL;
+
+ for (i = 0; i < N; i++) {
+ struct strbuf refname = STRBUF_INIT;
+ strbuf_addf(&refname, "%02d", i);
+
+ reftable_record_init(&recs[i], BLOCK_TYPE_REF);
+ recs[i].u.ref.refname = strbuf_detach(&refname, NULL);
+ }
+
+ i = 1;
+ do {
+ struct pq_entry e = {
+ .rec = &recs[i],
+ };
+
+ merged_iter_pqueue_add(&pq, &e);
+ merged_iter_pqueue_check(pq);
+
+ i = (i * 7) % N;
+ } while (i != 1);
+
+ while (!merged_iter_pqueue_is_empty(pq)) {
+ struct pq_entry e = merged_iter_pqueue_remove(&pq);
+ merged_iter_pqueue_check(pq);
+
+ EXPECT(reftable_record_type(e.rec) == BLOCK_TYPE_REF);
+ if (last)
+ EXPECT(strcmp(last, e.rec->u.ref.refname) < 0);
+ last = e.rec->u.ref.refname;
+ }
+
+ for (i = 0; i < N; i++)
+ reftable_record_release(&recs[i]);
+ merged_iter_pqueue_release(&pq);
+}
+
+int pq_test_main(int argc, const char *argv[])
+{
+ RUN_TEST(test_pq);
+ return 0;
+}
diff --git a/reftable/publicbasics.c b/reftable/publicbasics.c
new file mode 100644
index 0000000..44b84a1
--- /dev/null
+++ b/reftable/publicbasics.c
@@ -0,0 +1,66 @@
+/*
+Copyright 2020 Google LLC
+
+Use of this source code is governed by a BSD-style
+license that can be found in the LICENSE file or at
+https://developers.google.com/open-source/licenses/bsd
+*/
+
+#include "system.h"
+#include "reftable-malloc.h"
+
+#include "basics.h"
+
+static void *(*reftable_malloc_ptr)(size_t sz);
+static void *(*reftable_realloc_ptr)(void *, size_t);
+static void (*reftable_free_ptr)(void *);
+
+void *reftable_malloc(size_t sz)
+{
+ if (reftable_malloc_ptr)
+ return (*reftable_malloc_ptr)(sz);
+ return malloc(sz);
+}
+
+void *reftable_realloc(void *p, size_t sz)
+{
+ if (reftable_realloc_ptr)
+ return (*reftable_realloc_ptr)(p, sz);
+ return realloc(p, sz);
+}
+
+void reftable_free(void *p)
+{
+ if (reftable_free_ptr)
+ reftable_free_ptr(p);
+ else
+ free(p);
+}
+
+void *reftable_calloc(size_t nelem, size_t elsize)
+{
+ size_t sz = st_mult(nelem, elsize);
+ void *p = reftable_malloc(sz);
+ memset(p, 0, sz);
+ return p;
+}
+
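+/*
+ * Install custom allocator callbacks for the library. A hypothetical usage
+ * sketch: call reftable_set_alloc(my_malloc, my_realloc, my_free) once
+ * before invoking any other reftable API; passing NULL pointers falls back
+ * to the stdlib allocator.
+ */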
+void reftable_set_alloc(void *(*malloc)(size_t),
+ void *(*realloc)(void *, size_t), void (*free)(void *))
+{
+ reftable_malloc_ptr = malloc;
+ reftable_realloc_ptr = realloc;
+ reftable_free_ptr = free;
+}
+
+int hash_size(uint32_t id)
+{
+ switch (id) {
+ case 0:
+ case GIT_SHA1_FORMAT_ID:
+ return GIT_SHA1_RAWSZ;
+ case GIT_SHA256_FORMAT_ID:
+ return GIT_SHA256_RAWSZ;
+ }
+ abort();
+}
diff --git a/reftable/reader.c b/reftable/reader.c
new file mode 100644
index 0000000..481dff1
--- /dev/null
+++ b/reftable/reader.c
@@ -0,0 +1,858 @@
+/*
+Copyright 2020 Google LLC
+
+Use of this source code is governed by a BSD-style
+license that can be found in the LICENSE file or at
+https://developers.google.com/open-source/licenses/bsd
+*/
+
+#include "reader.h"
+
+#include "system.h"
+#include "block.h"
+#include "constants.h"
+#include "generic.h"
+#include "iter.h"
+#include "record.h"
+#include "reftable-error.h"
+#include "reftable-generic.h"
+
+uint64_t block_source_size(struct reftable_block_source *source)
+{
+ return source->ops->size(source->arg);
+}
+
+int block_source_read_block(struct reftable_block_source *source,
+ struct reftable_block *dest, uint64_t off,
+ uint32_t size)
+{
+ int result = source->ops->read_block(source->arg, dest, off, size);
+ dest->source = *source;
+ return result;
+}
+
+void block_source_close(struct reftable_block_source *source)
+{
+ if (!source->ops) {
+ return;
+ }
+
+ source->ops->close(source->arg);
+ source->ops = NULL;
+}
+
+static struct reftable_reader_offsets *
+reader_offsets_for(struct reftable_reader *r, uint8_t typ)
+{
+ switch (typ) {
+ case BLOCK_TYPE_REF:
+ return &r->ref_offsets;
+ case BLOCK_TYPE_LOG:
+ return &r->log_offsets;
+ case BLOCK_TYPE_OBJ:
+ return &r->obj_offsets;
+ }
+ abort();
+}
+
+static int reader_get_block(struct reftable_reader *r,
+ struct reftable_block *dest, uint64_t off,
+ uint32_t sz)
+{
+ if (off >= r->size)
+ return 0;
+
+ if (off + sz > r->size) {
+ sz = r->size - off;
+ }
+
+ return block_source_read_block(&r->source, dest, off, sz);
+}
+
+uint32_t reftable_reader_hash_id(struct reftable_reader *r)
+{
+ return r->hash_id;
+}
+
+const char *reader_name(struct reftable_reader *r)
+{
+ return r->name;
+}
+
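+/*
+ * The footer repeats the file header (magic, version, block size, the
+ * update-index range and, for version 2, the hash ID) and then stores the
+ * ref index offset, the obj offset (whose lowest 5 bits hold the object ID
+ * prefix length), the obj index offset, the log offset, the log index
+ * offset and finally a CRC-32 over the preceding footer bytes.
+ */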
+static int parse_footer(struct reftable_reader *r, uint8_t *footer,
+ uint8_t *header)
+{
+ uint8_t *f = footer;
+ uint8_t first_block_typ;
+ int err = 0;
+ uint32_t computed_crc;
+ uint32_t file_crc;
+
+ if (memcmp(f, "REFT", 4)) {
+ err = REFTABLE_FORMAT_ERROR;
+ goto done;
+ }
+ f += 4;
+
+ if (memcmp(footer, header, header_size(r->version))) {
+ err = REFTABLE_FORMAT_ERROR;
+ goto done;
+ }
+
+ f++;
+ r->block_size = get_be24(f);
+
+ f += 3;
+ r->min_update_index = get_be64(f);
+ f += 8;
+ r->max_update_index = get_be64(f);
+ f += 8;
+
+ if (r->version == 1) {
+ r->hash_id = GIT_SHA1_FORMAT_ID;
+ } else {
+ r->hash_id = get_be32(f);
+ switch (r->hash_id) {
+ case GIT_SHA1_FORMAT_ID:
+ break;
+ case GIT_SHA256_FORMAT_ID:
+ break;
+ default:
+ err = REFTABLE_FORMAT_ERROR;
+ goto done;
+ }
+ f += 4;
+ }
+
+ r->ref_offsets.index_offset = get_be64(f);
+ f += 8;
+
+ r->obj_offsets.offset = get_be64(f);
+ f += 8;
+
+ r->object_id_len = r->obj_offsets.offset & ((1 << 5) - 1);
+ r->obj_offsets.offset >>= 5;
+
+ r->obj_offsets.index_offset = get_be64(f);
+ f += 8;
+ r->log_offsets.offset = get_be64(f);
+ f += 8;
+ r->log_offsets.index_offset = get_be64(f);
+ f += 8;
+
+ computed_crc = crc32(0, footer, f - footer);
+ file_crc = get_be32(f);
+ f += 4;
+ if (computed_crc != file_crc) {
+ err = REFTABLE_FORMAT_ERROR;
+ goto done;
+ }
+
+ first_block_typ = header[header_size(r->version)];
+ r->ref_offsets.is_present = (first_block_typ == BLOCK_TYPE_REF);
+ r->ref_offsets.offset = 0;
+ r->log_offsets.is_present = (first_block_typ == BLOCK_TYPE_LOG ||
+ r->log_offsets.offset > 0);
+ r->obj_offsets.is_present = r->obj_offsets.offset > 0;
+ if (r->obj_offsets.is_present && !r->object_id_len) {
+ err = REFTABLE_FORMAT_ERROR;
+ goto done;
+ }
+
+ err = 0;
+done:
+ return err;
+}
+
+int init_reader(struct reftable_reader *r, struct reftable_block_source *source,
+ const char *name)
+{
+ struct reftable_block footer = { NULL };
+ struct reftable_block header = { NULL };
+ int err = 0;
+ uint64_t file_size = block_source_size(source);
+
+ /* Need +1 to read type of first block. */
+ uint32_t read_size = header_size(2) + 1; /* read v2 because it's larger. */
+ memset(r, 0, sizeof(struct reftable_reader));
+
+ if (read_size > file_size) {
+ err = REFTABLE_FORMAT_ERROR;
+ goto done;
+ }
+
+ err = block_source_read_block(source, &header, 0, read_size);
+ if (err != read_size) {
+ err = REFTABLE_IO_ERROR;
+ goto done;
+ }
+
+ if (memcmp(header.data, "REFT", 4)) {
+ err = REFTABLE_FORMAT_ERROR;
+ goto done;
+ }
+ r->version = header.data[4];
+ if (r->version != 1 && r->version != 2) {
+ err = REFTABLE_FORMAT_ERROR;
+ goto done;
+ }
+
+ r->size = file_size - footer_size(r->version);
+ r->source = *source;
+ r->name = xstrdup(name);
+ r->hash_id = 0;
+
+ err = block_source_read_block(source, &footer, r->size,
+ footer_size(r->version));
+ if (err != footer_size(r->version)) {
+ err = REFTABLE_IO_ERROR;
+ goto done;
+ }
+
+ err = parse_footer(r, footer.data, header.data);
+done:
+ reftable_block_done(&footer);
+ reftable_block_done(&header);
+ return err;
+}
+
+struct table_iter {
+ struct reftable_reader *r;
+ uint8_t typ;
+ uint64_t block_off;
+ struct block_reader br;
+ struct block_iter bi;
+ int is_finished;
+};
+#define TABLE_ITER_INIT { \
+ .bi = BLOCK_ITER_INIT \
+}
+
+static int table_iter_next_in_block(struct table_iter *ti,
+ struct reftable_record *rec)
+{
+ int res = block_iter_next(&ti->bi, rec);
+ if (res == 0 && reftable_record_type(rec) == BLOCK_TYPE_REF) {
+ rec->u.ref.update_index += ti->r->min_update_index;
+ }
+
+ return res;
+}
+
+static void table_iter_block_done(struct table_iter *ti)
+{
+ block_reader_release(&ti->br);
+ block_iter_reset(&ti->bi);
+}
+
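+/*
+ * Peek at a block header: the first byte holds the block type and the
+ * following 24-bit big-endian value holds the block length. At offset 0 the
+ * file header has to be skipped first.
+ */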
+static int32_t extract_block_size(uint8_t *data, uint8_t *typ, uint64_t off,
+ int version)
+{
+ int32_t result = 0;
+
+ if (off == 0) {
+ data += header_size(version);
+ }
+
+ *typ = data[0];
+ if (reftable_is_block_type(*typ)) {
+ result = get_be24(data + 1);
+ }
+ return result;
+}
+
+int reader_init_block_reader(struct reftable_reader *r, struct block_reader *br,
+ uint64_t next_off, uint8_t want_typ)
+{
+ int32_t guess_block_size = r->block_size ? r->block_size :
+ DEFAULT_BLOCK_SIZE;
+ struct reftable_block block = { NULL };
+ uint8_t block_typ = 0;
+ int err = 0;
+ uint32_t header_off = next_off ? 0 : header_size(r->version);
+ int32_t block_size = 0;
+
+ if (next_off >= r->size)
+ return 1;
+
+ err = reader_get_block(r, &block, next_off, guess_block_size);
+ if (err < 0)
+ goto done;
+
+ block_size = extract_block_size(block.data, &block_typ, next_off,
+ r->version);
+ if (block_size < 0) {
+ err = block_size;
+ goto done;
+ }
+ if (want_typ != BLOCK_TYPE_ANY && block_typ != want_typ) {
+ err = 1;
+ goto done;
+ }
+
+ if (block_size > guess_block_size) {
+ reftable_block_done(&block);
+ err = reader_get_block(r, &block, next_off, block_size);
+ if (err < 0) {
+ goto done;
+ }
+ }
+
+ err = block_reader_init(br, &block, header_off, r->block_size,
+ hash_size(r->hash_id));
+done:
+ reftable_block_done(&block);
+
+ return err;
+}
+
+static void table_iter_close(struct table_iter *ti)
+{
+ table_iter_block_done(ti);
+ block_iter_close(&ti->bi);
+}
+
+static int table_iter_next_block(struct table_iter *ti)
+{
+ uint64_t next_block_off = ti->block_off + ti->br.full_block_size;
+ int err;
+
+ err = reader_init_block_reader(ti->r, &ti->br, next_block_off, ti->typ);
+ if (err > 0)
+ ti->is_finished = 1;
+ if (err)
+ return err;
+
+ ti->block_off = next_block_off;
+ ti->is_finished = 0;
+ block_iter_seek_start(&ti->bi, &ti->br);
+
+ return 0;
+}
+
+static int table_iter_next(struct table_iter *ti, struct reftable_record *rec)
+{
+ if (reftable_record_type(rec) != ti->typ)
+ return REFTABLE_API_ERROR;
+
+ while (1) {
+ int err;
+
+ if (ti->is_finished)
+ return 1;
+
+ /*
+ * Check whether the current block still has more records. If
+ * so, return it. If the iterator returns positive then the
+ * current block has been exhausted.
+ */
+ err = table_iter_next_in_block(ti, rec);
+ if (err <= 0)
+ return err;
+
+ /*
+ * Otherwise, we need to continue to the next block in the
+ * table and retry. If there are no more blocks then the
+ * iterator is drained.
+ */
+ err = table_iter_next_block(ti);
+ if (err) {
+ ti->is_finished = 1;
+ return err;
+ }
+ }
+}
+
+static int table_iter_next_void(void *ti, struct reftable_record *rec)
+{
+ return table_iter_next(ti, rec);
+}
+
+static void table_iter_close_void(void *ti)
+{
+ table_iter_close(ti);
+}
+
+static struct reftable_iterator_vtable table_iter_vtable = {
+ .next = &table_iter_next_void,
+ .close = &table_iter_close_void,
+};
+
+static void iterator_from_table_iter(struct reftable_iterator *it,
+ struct table_iter *ti)
+{
+ assert(!it->ops);
+ it->iter_arg = ti;
+ it->ops = &table_iter_vtable;
+}
+
+static int reader_table_iter_at(struct reftable_reader *r,
+ struct table_iter *ti, uint64_t off,
+ uint8_t typ)
+{
+ int err;
+
+ err = reader_init_block_reader(r, &ti->br, off, typ);
+ if (err != 0)
+ return err;
+
+ ti->r = r;
+ ti->typ = block_reader_type(&ti->br);
+ ti->block_off = off;
+ block_iter_seek_start(&ti->bi, &ti->br);
+ return 0;
+}
+
+static int reader_start(struct reftable_reader *r, struct table_iter *ti,
+ uint8_t typ, int index)
+{
+ struct reftable_reader_offsets *offs = reader_offsets_for(r, typ);
+ uint64_t off = offs->offset;
+ if (index) {
+ off = offs->index_offset;
+ if (off == 0) {
+ return 1;
+ }
+ typ = BLOCK_TYPE_INDEX;
+ }
+
+ return reader_table_iter_at(r, ti, off, typ);
+}
+
+static int reader_seek_linear(struct table_iter *ti,
+ struct reftable_record *want)
+{
+ struct strbuf want_key = STRBUF_INIT;
+ struct strbuf got_key = STRBUF_INIT;
+ struct reftable_record rec;
+ int err = -1;
+
+ reftable_record_init(&rec, reftable_record_type(want));
+ reftable_record_key(want, &want_key);
+
+ /*
+ * First we need to locate the block that must contain our record. To
+ * do so we scan through blocks linearly until we find the first block
+ * whose first key is bigger than our wanted key. Once we have found
+ * that block we know that the key must be contained in the preceding
+ * block.
+ *
+ * This algorithm is somewhat unfortunate because it means that we
+ * always have to seek one block too far and then back up. But as we
+ * can only decode the _first_ key of a block but not its _last_ key we
+ * have no other way to do this.
+ */
+ while (1) {
+ struct table_iter next = *ti;
+
+ /*
+ * We must be careful to not modify underlying data of `ti`
+ * because we may find that `next` does not contain our desired
+ * block, but that `ti` does. In that case, we would discard
+ * `next` and continue with `ti`.
+ *
+ * This also means that we cannot reuse allocated memory for
+ * `next` here. While it would be great if we could, it should
+ * in practice not be too bad given that we should only ever
+ * end up doing linear seeks with at most three blocks. As soon
+ * as we have more than three blocks we would have an index, so
+ * we would not do a linear search there anymore.
+ */
+ memset(&next.br.block, 0, sizeof(next.br.block));
+ next.br.zstream = NULL;
+ next.br.uncompressed_data = NULL;
+ next.br.uncompressed_cap = 0;
+
+ err = table_iter_next_block(&next);
+ if (err < 0)
+ goto done;
+ if (err > 0)
+ break;
+
+ err = block_reader_first_key(&next.br, &got_key);
+ if (err < 0)
+ goto done;
+
+ if (strbuf_cmp(&got_key, &want_key) > 0) {
+ table_iter_block_done(&next);
+ break;
+ }
+
+ table_iter_block_done(ti);
+ *ti = next;
+ }
+
+ /*
+ * We have located the block that must contain our record, so we seek
+ * the wanted key inside of it. If the block does not contain our key
+ * we know that the corresponding record does not exist.
+ */
+ err = block_iter_seek_key(&ti->bi, &ti->br, &want_key);
+ if (err < 0)
+ goto done;
+ err = 0;
+
+done:
+ reftable_record_release(&rec);
+ strbuf_release(&want_key);
+ strbuf_release(&got_key);
+ return err;
+}
+
+static int reader_seek_indexed(struct reftable_reader *r,
+ struct reftable_iterator *it,
+ struct reftable_record *rec)
+{
+ struct reftable_record want_index = {
+ .type = BLOCK_TYPE_INDEX, .u.idx = { .last_key = STRBUF_INIT }
+ };
+ struct reftable_record index_result = {
+ .type = BLOCK_TYPE_INDEX,
+ .u.idx = { .last_key = STRBUF_INIT },
+ };
+ struct table_iter index_iter = TABLE_ITER_INIT;
+ struct table_iter empty = TABLE_ITER_INIT;
+ struct table_iter next = TABLE_ITER_INIT;
+ int err = 0;
+
+ reftable_record_key(rec, &want_index.u.idx.last_key);
+ err = reader_start(r, &index_iter, reftable_record_type(rec), 1);
+ if (err < 0)
+ goto done;
+
+ /*
+ * The index may consist of multiple levels, where each level may have
+ * multiple index blocks. We start by doing a linear search in the
+ * highest layer that identifies the relevant index block as well as
+ * the record inside that block that corresponds to our wanted key.
+ */
+ err = reader_seek_linear(&index_iter, &want_index);
+ if (err < 0)
+ goto done;
+
+ /*
+ * Traverse down the levels until we find a non-index entry.
+ */
+ while (1) {
+ /*
+ * In case we seek a record that does not exist the index iter
+ * will tell us that the iterator is over. This works because
+ * the last index entry of the current level will contain the
+		 * last key it knows about. So in case the key we seek is larger
+ * than the last indexed key we know that it won't exist.
+ *
+ * There is one subtlety in the layout of the index section
+ * that makes this work as expected: the highest-level index is
+ * at end of the section and will point backwards and thus we
+ * start reading from the end of the index section, not the
+ * beginning.
+ *
+ * If that wasn't the case and the order was reversed then the
+ * linear seek would seek into the lower levels and traverse
+ * all levels of the index only to find out that the key does
+ * not exist.
+ */
+ err = table_iter_next(&index_iter, &index_result);
+ if (err != 0)
+ goto done;
+
+ err = reader_table_iter_at(r, &next, index_result.u.idx.offset,
+ 0);
+ if (err != 0)
+ goto done;
+
+ err = block_iter_seek_key(&next.bi, &next.br, &want_index.u.idx.last_key);
+ if (err < 0)
+ goto done;
+
+ if (next.typ == reftable_record_type(rec)) {
+ err = 0;
+ break;
+ }
+
+ if (next.typ != BLOCK_TYPE_INDEX) {
+ err = REFTABLE_FORMAT_ERROR;
+ break;
+ }
+
+ table_iter_close(&index_iter);
+ index_iter = next;
+ next = empty;
+ }
+
+ if (err == 0) {
+ struct table_iter *malloced = reftable_calloc(1, sizeof(*malloced));
+ *malloced = next;
+ next = empty;
+ iterator_from_table_iter(it, malloced);
+ }
+
+done:
+ table_iter_close(&next);
+ table_iter_close(&index_iter);
+ reftable_record_release(&want_index);
+ reftable_record_release(&index_result);
+ return err;
+}
+
+static int reader_seek_internal(struct reftable_reader *r,
+ struct reftable_iterator *it,
+ struct reftable_record *rec)
+{
+ struct reftable_reader_offsets *offs =
+ reader_offsets_for(r, reftable_record_type(rec));
+ uint64_t idx = offs->index_offset;
+ struct table_iter ti = TABLE_ITER_INIT, *p;
+ int err;
+
+ if (idx > 0)
+ return reader_seek_indexed(r, it, rec);
+
+ err = reader_start(r, &ti, reftable_record_type(rec), 0);
+ if (err < 0)
+ goto out;
+
+ err = reader_seek_linear(&ti, rec);
+ if (err < 0)
+ goto out;
+
+ REFTABLE_ALLOC_ARRAY(p, 1);
+ *p = ti;
+ iterator_from_table_iter(it, p);
+
+out:
+ if (err)
+ table_iter_close(&ti);
+ return err;
+}
+
+static int reader_seek(struct reftable_reader *r, struct reftable_iterator *it,
+ struct reftable_record *rec)
+{
+ uint8_t typ = reftable_record_type(rec);
+
+ struct reftable_reader_offsets *offs = reader_offsets_for(r, typ);
+ if (!offs->is_present) {
+ iterator_set_empty(it);
+ return 0;
+ }
+
+ return reader_seek_internal(r, it, rec);
+}
+
+int reftable_reader_seek_ref(struct reftable_reader *r,
+ struct reftable_iterator *it, const char *name)
+{
+ struct reftable_record rec = {
+ .type = BLOCK_TYPE_REF,
+ .u.ref = {
+ .refname = (char *)name,
+ },
+ };
+ return reader_seek(r, it, &rec);
+}
+
+int reftable_reader_seek_log_at(struct reftable_reader *r,
+ struct reftable_iterator *it, const char *name,
+ uint64_t update_index)
+{
+ struct reftable_record rec = { .type = BLOCK_TYPE_LOG,
+ .u.log = {
+ .refname = (char *)name,
+ .update_index = update_index,
+ } };
+ return reader_seek(r, it, &rec);
+}
+
+int reftable_reader_seek_log(struct reftable_reader *r,
+ struct reftable_iterator *it, const char *name)
+{
+ uint64_t max = ~((uint64_t)0);
+ return reftable_reader_seek_log_at(r, it, name, max);
+}
+
+void reader_close(struct reftable_reader *r)
+{
+ block_source_close(&r->source);
+ FREE_AND_NULL(r->name);
+}
+
+int reftable_new_reader(struct reftable_reader **p,
+ struct reftable_block_source *src, char const *name)
+{
+ struct reftable_reader *rd = reftable_calloc(1, sizeof(*rd));
+ int err = init_reader(rd, src, name);
+ if (err == 0) {
+ *p = rd;
+ } else {
+ block_source_close(src);
+ reftable_free(rd);
+ }
+ return err;
+}
+
+void reftable_reader_free(struct reftable_reader *r)
+{
+ if (!r)
+ return;
+ reader_close(r);
+ reftable_free(r);
+}
+
+static int reftable_reader_refs_for_indexed(struct reftable_reader *r,
+ struct reftable_iterator *it,
+ uint8_t *oid)
+{
+ struct reftable_record want = {
+ .type = BLOCK_TYPE_OBJ,
+ .u.obj = {
+ .hash_prefix = oid,
+ .hash_prefix_len = r->object_id_len,
+ },
+ };
+ struct reftable_iterator oit = { NULL };
+ struct reftable_record got = {
+ .type = BLOCK_TYPE_OBJ,
+ .u.obj = { 0 },
+ };
+ int err = 0;
+ struct indexed_table_ref_iter *itr = NULL;
+
+ /* Look through the reverse index. */
+ err = reader_seek(r, &oit, &want);
+ if (err != 0)
+ goto done;
+
+ /* read out the reftable_obj_record */
+ err = iterator_next(&oit, &got);
+ if (err < 0)
+ goto done;
+
+ if (err > 0 || memcmp(want.u.obj.hash_prefix, got.u.obj.hash_prefix,
+ r->object_id_len)) {
+ /* didn't find it; return empty iterator */
+ iterator_set_empty(it);
+ err = 0;
+ goto done;
+ }
+
+ err = new_indexed_table_ref_iter(&itr, r, oid, hash_size(r->hash_id),
+ got.u.obj.offsets,
+ got.u.obj.offset_len);
+ if (err < 0)
+ goto done;
+ got.u.obj.offsets = NULL;
+ iterator_from_indexed_table_ref_iter(it, itr);
+
+done:
+ reftable_iterator_destroy(&oit);
+ reftable_record_release(&got);
+ return err;
+}
+
+static int reftable_reader_refs_for_unindexed(struct reftable_reader *r,
+ struct reftable_iterator *it,
+ uint8_t *oid)
+{
+ struct table_iter ti_empty = TABLE_ITER_INIT;
+ struct table_iter *ti = reftable_calloc(1, sizeof(*ti));
+ struct filtering_ref_iterator *filter = NULL;
+ struct filtering_ref_iterator empty = FILTERING_REF_ITERATOR_INIT;
+ int oid_len = hash_size(r->hash_id);
+ int err;
+
+ *ti = ti_empty;
+ err = reader_start(r, ti, BLOCK_TYPE_REF, 0);
+ if (err < 0) {
+ reftable_free(ti);
+ return err;
+ }
+
+ filter = reftable_malloc(sizeof(struct filtering_ref_iterator));
+ *filter = empty;
+
+ strbuf_add(&filter->oid, oid, oid_len);
+ reftable_table_from_reader(&filter->tab, r);
+ filter->double_check = 0;
+ iterator_from_table_iter(&filter->it, ti);
+
+ iterator_from_filtering_ref_iterator(it, filter);
+ return 0;
+}
+
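+/*
+ * If the table has an obj section, resolve the object ID via the reverse
+ * index; otherwise fall back to scanning all refs and filtering by value.
+ */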
+int reftable_reader_refs_for(struct reftable_reader *r,
+ struct reftable_iterator *it, uint8_t *oid)
+{
+ if (r->obj_offsets.is_present)
+ return reftable_reader_refs_for_indexed(r, it, oid);
+ return reftable_reader_refs_for_unindexed(r, it, oid);
+}
+
+uint64_t reftable_reader_max_update_index(struct reftable_reader *r)
+{
+ return r->max_update_index;
+}
+
+uint64_t reftable_reader_min_update_index(struct reftable_reader *r)
+{
+ return r->min_update_index;
+}
+
+/* generic table interface. */
+
+static int reftable_reader_seek_void(void *tab, struct reftable_iterator *it,
+ struct reftable_record *rec)
+{
+ return reader_seek(tab, it, rec);
+}
+
+static uint32_t reftable_reader_hash_id_void(void *tab)
+{
+ return reftable_reader_hash_id(tab);
+}
+
+static uint64_t reftable_reader_min_update_index_void(void *tab)
+{
+ return reftable_reader_min_update_index(tab);
+}
+
+static uint64_t reftable_reader_max_update_index_void(void *tab)
+{
+ return reftable_reader_max_update_index(tab);
+}
+
+static struct reftable_table_vtable reader_vtable = {
+ .seek_record = reftable_reader_seek_void,
+ .hash_id = reftable_reader_hash_id_void,
+ .min_update_index = reftable_reader_min_update_index_void,
+ .max_update_index = reftable_reader_max_update_index_void,
+};
+
+void reftable_table_from_reader(struct reftable_table *tab,
+ struct reftable_reader *reader)
+{
+ assert(!tab->ops);
+ tab->ops = &reader_vtable;
+ tab->table_arg = reader;
+}
+
+
+int reftable_reader_print_file(const char *tablename)
+{
+ struct reftable_block_source src = { NULL };
+ int err = reftable_block_source_from_file(&src, tablename);
+ struct reftable_reader *r = NULL;
+ struct reftable_table tab = { NULL };
+ if (err < 0)
+ goto done;
+
+ err = reftable_new_reader(&r, &src, tablename);
+ if (err < 0)
+ goto done;
+
+ reftable_table_from_reader(&tab, r);
+ err = reftable_table_print(&tab);
+done:
+ reftable_reader_free(r);
+ return err;
+}
diff --git a/reftable/reader.h b/reftable/reader.h
new file mode 100644
index 0000000..e869165
--- /dev/null
+++ b/reftable/reader.h
@@ -0,0 +1,64 @@
+/*
+Copyright 2020 Google LLC
+
+Use of this source code is governed by a BSD-style
+license that can be found in the LICENSE file or at
+https://developers.google.com/open-source/licenses/bsd
+*/
+
+#ifndef READER_H
+#define READER_H
+
+#include "block.h"
+#include "record.h"
+#include "reftable-iterator.h"
+#include "reftable-reader.h"
+
+uint64_t block_source_size(struct reftable_block_source *source);
+
+int block_source_read_block(struct reftable_block_source *source,
+ struct reftable_block *dest, uint64_t off,
+ uint32_t size);
+void block_source_close(struct reftable_block_source *source);
+
+/* metadata for a block type */
+struct reftable_reader_offsets {
+ int is_present;
+ uint64_t offset;
+ uint64_t index_offset;
+};
+
+/* The state for reading a reftable file. */
+struct reftable_reader {
+	/* For convenience, associate a name with the instance. */
+ char *name;
+ struct reftable_block_source source;
+
+ /* Size of the file, excluding the footer. */
+ uint64_t size;
+
+ /* 'sha1' for SHA1, 's256' for SHA-256 */
+ uint32_t hash_id;
+
+ uint32_t block_size;
+ uint64_t min_update_index;
+ uint64_t max_update_index;
+ /* Length of the OID keys in the 'o' section */
+ int object_id_len;
+ int version;
+
+ struct reftable_reader_offsets ref_offsets;
+ struct reftable_reader_offsets obj_offsets;
+ struct reftable_reader_offsets log_offsets;
+};
+
+int init_reader(struct reftable_reader *r, struct reftable_block_source *source,
+ const char *name);
+void reader_close(struct reftable_reader *r);
+const char *reader_name(struct reftable_reader *r);
+
+/* initialize a block reader to read from `r` */
+int reader_init_block_reader(struct reftable_reader *r, struct block_reader *br,
+ uint64_t next_off, uint8_t want_typ);
+
+#endif
diff --git a/reftable/readwrite_test.c b/reftable/readwrite_test.c
new file mode 100644
index 0000000..a6dbd21
--- /dev/null
+++ b/reftable/readwrite_test.c
@@ -0,0 +1,966 @@
+/*
+Copyright 2020 Google LLC
+
+Use of this source code is governed by a BSD-style
+license that can be found in the LICENSE file or at
+https://developers.google.com/open-source/licenses/bsd
+*/
+
+#include "system.h"
+
+#include "basics.h"
+#include "block.h"
+#include "blocksource.h"
+#include "reader.h"
+#include "record.h"
+#include "test_framework.h"
+#include "reftable-tests.h"
+#include "reftable-writer.h"
+
+static const int update_index = 5;
+
+static void test_buffer(void)
+{
+ struct strbuf buf = STRBUF_INIT;
+ struct reftable_block_source source = { NULL };
+ struct reftable_block out = { NULL };
+ int n;
+ uint8_t in[] = "hello";
+ strbuf_add(&buf, in, sizeof(in));
+ block_source_from_strbuf(&source, &buf);
+ EXPECT(block_source_size(&source) == 6);
+ n = block_source_read_block(&source, &out, 0, sizeof(in));
+ EXPECT(n == sizeof(in));
+ EXPECT(!memcmp(in, out.data, n));
+ reftable_block_done(&out);
+
+ n = block_source_read_block(&source, &out, 1, 2);
+ EXPECT(n == 2);
+ EXPECT(!memcmp(out.data, "el", 2));
+
+ reftable_block_done(&out);
+ block_source_close(&source);
+ strbuf_release(&buf);
+}
+
+static void write_table(char ***names, struct strbuf *buf, int N,
+ int block_size, uint32_t hash_id)
+{
+ struct reftable_write_options opts = {
+ .block_size = block_size,
+ .hash_id = hash_id,
+ };
+ struct reftable_writer *w =
+ reftable_new_writer(&strbuf_add_void, &noop_flush, buf, &opts);
+ struct reftable_ref_record ref = { NULL };
+ int i = 0, n;
+ struct reftable_log_record log = { NULL };
+ const struct reftable_stats *stats = NULL;
+
+ REFTABLE_CALLOC_ARRAY(*names, N + 1);
+
+ reftable_writer_set_limits(w, update_index, update_index);
+ for (i = 0; i < N; i++) {
+ char name[100];
+ int n;
+
+ snprintf(name, sizeof(name), "refs/heads/branch%02d", i);
+
+ ref.refname = name;
+ ref.update_index = update_index;
+ ref.value_type = REFTABLE_REF_VAL1;
+ set_test_hash(ref.value.val1, i);
+ (*names)[i] = xstrdup(name);
+
+ n = reftable_writer_add_ref(w, &ref);
+ EXPECT(n == 0);
+ }
+
+ for (i = 0; i < N; i++) {
+ char name[100];
+ int n;
+
+ snprintf(name, sizeof(name), "refs/heads/branch%02d", i);
+
+ log.refname = name;
+ log.update_index = update_index;
+ log.value_type = REFTABLE_LOG_UPDATE;
+ set_test_hash(log.value.update.new_hash, i);
+ log.value.update.message = "message";
+
+ n = reftable_writer_add_log(w, &log);
+ EXPECT(n == 0);
+ }
+
+ n = reftable_writer_close(w);
+ EXPECT(n == 0);
+
+ stats = reftable_writer_stats(w);
+ for (i = 0; i < stats->ref_stats.blocks; i++) {
+ int off = i * opts.block_size;
+ if (off == 0) {
+ off = header_size(
+ (hash_id == GIT_SHA256_FORMAT_ID) ? 2 : 1);
+ }
+ EXPECT(buf->buf[off] == 'r');
+ }
+
+ EXPECT(stats->log_stats.blocks > 0);
+ reftable_writer_free(w);
+}
+
+static void test_log_buffer_size(void)
+{
+ struct strbuf buf = STRBUF_INIT;
+ struct reftable_write_options opts = {
+ .block_size = 4096,
+ };
+ int err;
+ int i;
+ struct reftable_log_record
+ log = { .refname = "refs/heads/master",
+ .update_index = 0xa,
+ .value_type = REFTABLE_LOG_UPDATE,
+ .value = { .update = {
+ .name = "Han-Wen Nienhuys",
+ .email = "hanwen@google.com",
+ .tz_offset = 100,
+ .time = 0x5e430672,
+ .message = "commit: 9\n",
+ } } };
+ struct reftable_writer *w =
+ reftable_new_writer(&strbuf_add_void, &noop_flush, &buf, &opts);
+
+	/* This tests buffer extension for log compression. We must use random
+	 * hashes to ensure that the compressed output is larger than the
+	 * original input. */
+ for (i = 0; i < GIT_SHA1_RAWSZ; i++) {
+ log.value.update.old_hash[i] = (uint8_t)(git_rand() % 256);
+ log.value.update.new_hash[i] = (uint8_t)(git_rand() % 256);
+ }
+ reftable_writer_set_limits(w, update_index, update_index);
+ err = reftable_writer_add_log(w, &log);
+ EXPECT_ERR(err);
+ err = reftable_writer_close(w);
+ EXPECT_ERR(err);
+ reftable_writer_free(w);
+ strbuf_release(&buf);
+}
+
+static void test_log_overflow(void)
+{
+ struct strbuf buf = STRBUF_INIT;
+ char msg[256] = { 0 };
+ struct reftable_write_options opts = {
+ .block_size = ARRAY_SIZE(msg),
+ };
+ int err;
+ struct reftable_log_record log = {
+ .refname = "refs/heads/master",
+ .update_index = 0xa,
+ .value_type = REFTABLE_LOG_UPDATE,
+ .value = {
+ .update = {
+ .old_hash = { 1 },
+ .new_hash = { 2 },
+ .name = "Han-Wen Nienhuys",
+ .email = "hanwen@google.com",
+ .tz_offset = 100,
+ .time = 0x5e430672,
+ .message = msg,
+ },
+ },
+ };
+ struct reftable_writer *w =
+ reftable_new_writer(&strbuf_add_void, &noop_flush, &buf, &opts);
+
+ memset(msg, 'x', sizeof(msg) - 1);
+ reftable_writer_set_limits(w, update_index, update_index);
+ err = reftable_writer_add_log(w, &log);
+ EXPECT(err == REFTABLE_ENTRY_TOO_BIG_ERROR);
+ reftable_writer_free(w);
+ strbuf_release(&buf);
+}
+
+static void test_log_write_read(void)
+{
+ int N = 2;
+ char **names = reftable_calloc(N + 1, sizeof(*names));
+ int err;
+ struct reftable_write_options opts = {
+ .block_size = 256,
+ };
+ struct reftable_ref_record ref = { NULL };
+ int i = 0;
+ struct reftable_log_record log = { NULL };
+ int n;
+ struct reftable_iterator it = { NULL };
+ struct reftable_reader rd = { NULL };
+ struct reftable_block_source source = { NULL };
+ struct strbuf buf = STRBUF_INIT;
+ struct reftable_writer *w =
+ reftable_new_writer(&strbuf_add_void, &noop_flush, &buf, &opts);
+ const struct reftable_stats *stats = NULL;
+ reftable_writer_set_limits(w, 0, N);
+ for (i = 0; i < N; i++) {
+ char name[256];
+ struct reftable_ref_record ref = { NULL };
+ snprintf(name, sizeof(name), "b%02d%0*d", i, 130, 7);
+ names[i] = xstrdup(name);
+ ref.refname = name;
+ ref.update_index = i;
+
+ err = reftable_writer_add_ref(w, &ref);
+ EXPECT_ERR(err);
+ }
+ for (i = 0; i < N; i++) {
+ struct reftable_log_record log = { NULL };
+
+ log.refname = names[i];
+ log.update_index = i;
+ log.value_type = REFTABLE_LOG_UPDATE;
+ set_test_hash(log.value.update.old_hash, i);
+ set_test_hash(log.value.update.new_hash, i + 1);
+
+ err = reftable_writer_add_log(w, &log);
+ EXPECT_ERR(err);
+ }
+
+ n = reftable_writer_close(w);
+ EXPECT(n == 0);
+
+ stats = reftable_writer_stats(w);
+ EXPECT(stats->log_stats.blocks > 0);
+ reftable_writer_free(w);
+ w = NULL;
+
+ block_source_from_strbuf(&source, &buf);
+
+ err = init_reader(&rd, &source, "file.log");
+ EXPECT_ERR(err);
+
+ err = reftable_reader_seek_ref(&rd, &it, names[N - 1]);
+ EXPECT_ERR(err);
+
+ err = reftable_iterator_next_ref(&it, &ref);
+ EXPECT_ERR(err);
+
+ /* end of iteration. */
+ err = reftable_iterator_next_ref(&it, &ref);
+ EXPECT(0 < err);
+
+ reftable_iterator_destroy(&it);
+ reftable_ref_record_release(&ref);
+
+ err = reftable_reader_seek_log(&rd, &it, "");
+ EXPECT_ERR(err);
+
+ i = 0;
+ while (1) {
+ int err = reftable_iterator_next_log(&it, &log);
+ if (err > 0) {
+ break;
+ }
+
+ EXPECT_ERR(err);
+ EXPECT_STREQ(names[i], log.refname);
+ EXPECT(i == log.update_index);
+ i++;
+ reftable_log_record_release(&log);
+ }
+
+ EXPECT(i == N);
+ reftable_iterator_destroy(&it);
+
+ /* cleanup. */
+ strbuf_release(&buf);
+ free_names(names);
+ reader_close(&rd);
+}
+
+static void test_log_zlib_corruption(void)
+{
+ struct reftable_write_options opts = {
+ .block_size = 256,
+ };
+ struct reftable_iterator it = { 0 };
+ struct reftable_reader rd = { 0 };
+ struct reftable_block_source source = { 0 };
+ struct strbuf buf = STRBUF_INIT;
+ struct reftable_writer *w =
+ reftable_new_writer(&strbuf_add_void, &noop_flush, &buf, &opts);
+ const struct reftable_stats *stats = NULL;
+ char message[100] = { 0 };
+ int err, i, n;
+ struct reftable_log_record log = {
+ .refname = "refname",
+ .value_type = REFTABLE_LOG_UPDATE,
+ .value = {
+ .update = {
+ .new_hash = { 1 },
+ .old_hash = { 2 },
+ .name = "My Name",
+ .email = "myname@invalid",
+ .message = message,
+ },
+ },
+ };
+
+ for (i = 0; i < sizeof(message) - 1; i++)
+ message[i] = (uint8_t)(git_rand() % 64 + ' ');
+
+ reftable_writer_set_limits(w, 1, 1);
+
+ err = reftable_writer_add_log(w, &log);
+ EXPECT_ERR(err);
+
+ n = reftable_writer_close(w);
+ EXPECT(n == 0);
+
+ stats = reftable_writer_stats(w);
+ EXPECT(stats->log_stats.blocks > 0);
+ reftable_writer_free(w);
+ w = NULL;
+
+ /* corrupt the data. */
+ buf.buf[50] ^= 0x99;
+
+ block_source_from_strbuf(&source, &buf);
+
+ err = init_reader(&rd, &source, "file.log");
+ EXPECT_ERR(err);
+
+ err = reftable_reader_seek_log(&rd, &it, "refname");
+ EXPECT(err == REFTABLE_ZLIB_ERROR);
+
+ reftable_iterator_destroy(&it);
+
+ /* cleanup. */
+ strbuf_release(&buf);
+ reader_close(&rd);
+}
+
+static void test_table_read_write_sequential(void)
+{
+ char **names;
+ struct strbuf buf = STRBUF_INIT;
+ int N = 50;
+ struct reftable_iterator it = { NULL };
+ struct reftable_block_source source = { NULL };
+ struct reftable_reader rd = { NULL };
+ int err = 0;
+ int j = 0;
+
+ write_table(&names, &buf, N, 256, GIT_SHA1_FORMAT_ID);
+
+ block_source_from_strbuf(&source, &buf);
+
+ err = init_reader(&rd, &source, "file.ref");
+ EXPECT_ERR(err);
+
+ err = reftable_reader_seek_ref(&rd, &it, "");
+ EXPECT_ERR(err);
+
+ while (1) {
+ struct reftable_ref_record ref = { NULL };
+ int r = reftable_iterator_next_ref(&it, &ref);
+ EXPECT(r >= 0);
+ if (r > 0) {
+ break;
+ }
+ EXPECT(0 == strcmp(names[j], ref.refname));
+ EXPECT(update_index == ref.update_index);
+
+ j++;
+ reftable_ref_record_release(&ref);
+ }
+ EXPECT(j == N);
+ reftable_iterator_destroy(&it);
+ strbuf_release(&buf);
+ free_names(names);
+
+ reader_close(&rd);
+}
+
+static void test_table_write_small_table(void)
+{
+ char **names;
+ struct strbuf buf = STRBUF_INIT;
+ int N = 1;
+ write_table(&names, &buf, N, 4096, GIT_SHA1_FORMAT_ID);
+ EXPECT(buf.len < 200);
+ strbuf_release(&buf);
+ free_names(names);
+}
+
+static void test_table_read_api(void)
+{
+ char **names;
+ struct strbuf buf = STRBUF_INIT;
+ int N = 50;
+ struct reftable_reader rd = { NULL };
+ struct reftable_block_source source = { NULL };
+ int err;
+ int i;
+ struct reftable_log_record log = { NULL };
+ struct reftable_iterator it = { NULL };
+
+ write_table(&names, &buf, N, 256, GIT_SHA1_FORMAT_ID);
+
+ block_source_from_strbuf(&source, &buf);
+
+ err = init_reader(&rd, &source, "file.ref");
+ EXPECT_ERR(err);
+
+ err = reftable_reader_seek_ref(&rd, &it, names[0]);
+ EXPECT_ERR(err);
+
+ err = reftable_iterator_next_log(&it, &log);
+ EXPECT(err == REFTABLE_API_ERROR);
+
+ strbuf_release(&buf);
+ for (i = 0; i < N; i++) {
+ reftable_free(names[i]);
+ }
+ reftable_iterator_destroy(&it);
+ reftable_free(names);
+ reader_close(&rd);
+ strbuf_release(&buf);
+}
+
+static void test_table_read_write_seek(int index, int hash_id)
+{
+ char **names;
+ struct strbuf buf = STRBUF_INIT;
+ int N = 50;
+ struct reftable_reader rd = { NULL };
+ struct reftable_block_source source = { NULL };
+ int err;
+ int i = 0;
+
+ struct reftable_iterator it = { NULL };
+ struct strbuf pastLast = STRBUF_INIT;
+ struct reftable_ref_record ref = { NULL };
+
+ write_table(&names, &buf, N, 256, hash_id);
+
+ block_source_from_strbuf(&source, &buf);
+
+ err = init_reader(&rd, &source, "file.ref");
+ EXPECT_ERR(err);
+ EXPECT(hash_id == reftable_reader_hash_id(&rd));
+
+ if (!index) {
+ rd.ref_offsets.index_offset = 0;
+ } else {
+ EXPECT(rd.ref_offsets.index_offset > 0);
+ }
+
+ for (i = 1; i < N; i++) {
+ int err = reftable_reader_seek_ref(&rd, &it, names[i]);
+ EXPECT_ERR(err);
+ err = reftable_iterator_next_ref(&it, &ref);
+ EXPECT_ERR(err);
+ EXPECT(0 == strcmp(names[i], ref.refname));
+ EXPECT(REFTABLE_REF_VAL1 == ref.value_type);
+ EXPECT(i == ref.value.val1[0]);
+
+ reftable_ref_record_release(&ref);
+ reftable_iterator_destroy(&it);
+ }
+
+ strbuf_addstr(&pastLast, names[N - 1]);
+ strbuf_addstr(&pastLast, "/");
+
+ err = reftable_reader_seek_ref(&rd, &it, pastLast.buf);
+ if (err == 0) {
+ struct reftable_ref_record ref = { NULL };
+ int err = reftable_iterator_next_ref(&it, &ref);
+ EXPECT(err > 0);
+ } else {
+ EXPECT(err > 0);
+ }
+
+ strbuf_release(&pastLast);
+ reftable_iterator_destroy(&it);
+
+ strbuf_release(&buf);
+ for (i = 0; i < N; i++) {
+ reftable_free(names[i]);
+ }
+ reftable_free(names);
+ reader_close(&rd);
+}
+
+static void test_table_read_write_seek_linear(void)
+{
+ test_table_read_write_seek(0, GIT_SHA1_FORMAT_ID);
+}
+
+static void test_table_read_write_seek_linear_sha256(void)
+{
+ test_table_read_write_seek(0, GIT_SHA256_FORMAT_ID);
+}
+
+static void test_table_read_write_seek_index(void)
+{
+ test_table_read_write_seek(1, GIT_SHA1_FORMAT_ID);
+}
+
+static void test_table_refs_for(int indexed)
+{
+ int N = 50;
+ char **want_names = reftable_calloc(N + 1, sizeof(*want_names));
+ int want_names_len = 0;
+ uint8_t want_hash[GIT_SHA1_RAWSZ];
+
+ struct reftable_write_options opts = {
+ .block_size = 256,
+ };
+ struct reftable_ref_record ref = { NULL };
+ int i = 0;
+ int n;
+ int err;
+ struct reftable_reader rd;
+ struct reftable_block_source source = { NULL };
+
+ struct strbuf buf = STRBUF_INIT;
+ struct reftable_writer *w =
+ reftable_new_writer(&strbuf_add_void, &noop_flush, &buf, &opts);
+
+ struct reftable_iterator it = { NULL };
+ int j;
+
+ set_test_hash(want_hash, 4);
+
+ for (i = 0; i < N; i++) {
+ uint8_t hash[GIT_SHA1_RAWSZ];
+ char fill[51] = { 0 };
+ char name[100];
+ struct reftable_ref_record ref = { NULL };
+
+ memset(hash, i, sizeof(hash));
+ memset(fill, 'x', 50);
+		/* Put the variable part at the start. */
+ snprintf(name, sizeof(name), "br%02d%s", i, fill);
+ name[40] = 0;
+ ref.refname = name;
+
+ ref.value_type = REFTABLE_REF_VAL2;
+ set_test_hash(ref.value.val2.value, i / 4);
+ set_test_hash(ref.value.val2.target_value, 3 + i / 4);
+
+		/*
+		 * At roughly 80 bytes per entry and a block size of 256,
+		 * 3 entries fit per block, so 50 entries yield 17 blocks.
+		 */
+ n = reftable_writer_add_ref(w, &ref);
+ EXPECT(n == 0);
+
+ if (!memcmp(ref.value.val2.value, want_hash, GIT_SHA1_RAWSZ) ||
+ !memcmp(ref.value.val2.target_value, want_hash, GIT_SHA1_RAWSZ)) {
+ want_names[want_names_len++] = xstrdup(name);
+ }
+ }
+
+ n = reftable_writer_close(w);
+ EXPECT(n == 0);
+
+ reftable_writer_free(w);
+ w = NULL;
+
+ block_source_from_strbuf(&source, &buf);
+
+ err = init_reader(&rd, &source, "file.ref");
+ EXPECT_ERR(err);
+ if (!indexed) {
+ rd.obj_offsets.is_present = 0;
+ }
+
+ err = reftable_reader_seek_ref(&rd, &it, "");
+ EXPECT_ERR(err);
+ reftable_iterator_destroy(&it);
+
+ err = reftable_reader_refs_for(&rd, &it, want_hash);
+ EXPECT_ERR(err);
+
+ j = 0;
+ while (1) {
+ int err = reftable_iterator_next_ref(&it, &ref);
+ EXPECT(err >= 0);
+ if (err > 0) {
+ break;
+ }
+
+ EXPECT(j < want_names_len);
+ EXPECT(0 == strcmp(ref.refname, want_names[j]));
+ j++;
+ reftable_ref_record_release(&ref);
+ }
+ EXPECT(j == want_names_len);
+
+ strbuf_release(&buf);
+ free_names(want_names);
+ reftable_iterator_destroy(&it);
+ reader_close(&rd);
+}
+
+static void test_table_refs_for_no_index(void)
+{
+ test_table_refs_for(0);
+}
+
+static void test_table_refs_for_obj_index(void)
+{
+ test_table_refs_for(1);
+}
+
+static void test_write_empty_table(void)
+{
+ struct reftable_write_options opts = { 0 };
+ struct strbuf buf = STRBUF_INIT;
+ struct reftable_writer *w =
+ reftable_new_writer(&strbuf_add_void, &noop_flush, &buf, &opts);
+ struct reftable_block_source source = { NULL };
+ struct reftable_reader *rd = NULL;
+ struct reftable_ref_record rec = { NULL };
+ struct reftable_iterator it = { NULL };
+ int err;
+
+ reftable_writer_set_limits(w, 1, 1);
+
+ err = reftable_writer_close(w);
+ EXPECT(err == REFTABLE_EMPTY_TABLE_ERROR);
+ reftable_writer_free(w);
+
+ EXPECT(buf.len == header_size(1) + footer_size(1));
+
+ block_source_from_strbuf(&source, &buf);
+
+ err = reftable_new_reader(&rd, &source, "filename");
+ EXPECT_ERR(err);
+
+ err = reftable_reader_seek_ref(rd, &it, "");
+ EXPECT_ERR(err);
+
+ err = reftable_iterator_next_ref(&it, &rec);
+ EXPECT(err > 0);
+
+ reftable_iterator_destroy(&it);
+ reftable_reader_free(rd);
+ strbuf_release(&buf);
+}
+
+static void test_write_object_id_min_length(void)
+{
+ struct reftable_write_options opts = {
+ .block_size = 75,
+ };
+ struct strbuf buf = STRBUF_INIT;
+ struct reftable_writer *w =
+ reftable_new_writer(&strbuf_add_void, &noop_flush, &buf, &opts);
+ struct reftable_ref_record ref = {
+ .update_index = 1,
+ .value_type = REFTABLE_REF_VAL1,
+ .value.val1 = {42},
+ };
+ int err;
+ int i;
+
+ reftable_writer_set_limits(w, 1, 1);
+
+	/* Write the same hash in many refs. With only one distinct hash there
+	 * is nothing to disambiguate, so the object ID prefix stays at its
+	 * minimum length of 2. */
+ for (i = 0; i < 256; i++) {
+ char name[256];
+ snprintf(name, sizeof(name), "ref%05d", i);
+ ref.refname = name;
+ err = reftable_writer_add_ref(w, &ref);
+ EXPECT_ERR(err);
+ }
+
+ err = reftable_writer_close(w);
+ EXPECT_ERR(err);
+ EXPECT(reftable_writer_stats(w)->object_id_len == 2);
+ reftable_writer_free(w);
+ strbuf_release(&buf);
+}
+
+static void test_write_object_id_length(void)
+{
+ struct reftable_write_options opts = {
+ .block_size = 75,
+ };
+ struct strbuf buf = STRBUF_INIT;
+ struct reftable_writer *w =
+ reftable_new_writer(&strbuf_add_void, &noop_flush, &buf, &opts);
+ struct reftable_ref_record ref = {
+ .update_index = 1,
+ .value_type = REFTABLE_REF_VAL1,
+ .value.val1 = {42},
+ };
+ int err;
+ int i;
+
+ reftable_writer_set_limits(w, 1, 1);
+
+	/* Write hashes that all differ only in their 16th byte, so 16 bytes
+	 * of prefix are needed to disambiguate them. */
+ for (i = 0; i < 256; i++) {
+ char name[256];
+ snprintf(name, sizeof(name), "ref%05d", i);
+ ref.refname = name;
+ ref.value.val1[15] = i;
+ err = reftable_writer_add_ref(w, &ref);
+ EXPECT_ERR(err);
+ }
+
+ err = reftable_writer_close(w);
+ EXPECT_ERR(err);
+ EXPECT(reftable_writer_stats(w)->object_id_len == 16);
+ reftable_writer_free(w);
+ strbuf_release(&buf);
+}
+
+static void test_write_empty_key(void)
+{
+ struct reftable_write_options opts = { 0 };
+ struct strbuf buf = STRBUF_INIT;
+ struct reftable_writer *w =
+ reftable_new_writer(&strbuf_add_void, &noop_flush, &buf, &opts);
+ struct reftable_ref_record ref = {
+ .refname = "",
+ .update_index = 1,
+ .value_type = REFTABLE_REF_DELETION,
+ };
+ int err;
+
+ reftable_writer_set_limits(w, 1, 1);
+ err = reftable_writer_add_ref(w, &ref);
+ EXPECT(err == REFTABLE_API_ERROR);
+
+ err = reftable_writer_close(w);
+ EXPECT(err == REFTABLE_EMPTY_TABLE_ERROR);
+ reftable_writer_free(w);
+ strbuf_release(&buf);
+}
+
+static void test_write_key_order(void)
+{
+ struct reftable_write_options opts = { 0 };
+ struct strbuf buf = STRBUF_INIT;
+ struct reftable_writer *w =
+ reftable_new_writer(&strbuf_add_void, &noop_flush, &buf, &opts);
+ struct reftable_ref_record refs[2] = {
+ {
+ .refname = "b",
+ .update_index = 1,
+ .value_type = REFTABLE_REF_SYMREF,
+ .value = {
+ .symref = "target",
+ },
+ }, {
+ .refname = "a",
+ .update_index = 1,
+ .value_type = REFTABLE_REF_SYMREF,
+ .value = {
+ .symref = "target",
+ },
+ }
+ };
+ int err;
+
+ reftable_writer_set_limits(w, 1, 1);
+ err = reftable_writer_add_ref(w, &refs[0]);
+ EXPECT_ERR(err);
+ err = reftable_writer_add_ref(w, &refs[1]);
+ EXPECT(err == REFTABLE_API_ERROR);
+ reftable_writer_close(w);
+ reftable_writer_free(w);
+ strbuf_release(&buf);
+}
+
+static void test_write_multiple_indices(void)
+{
+ struct reftable_write_options opts = {
+ .block_size = 100,
+ };
+ struct strbuf writer_buf = STRBUF_INIT, buf = STRBUF_INIT;
+ struct reftable_block_source source = { 0 };
+ struct reftable_iterator it = { 0 };
+ const struct reftable_stats *stats;
+ struct reftable_writer *writer;
+ struct reftable_reader *reader;
+ int err, i;
+
+ writer = reftable_new_writer(&strbuf_add_void, &noop_flush, &writer_buf, &opts);
+ reftable_writer_set_limits(writer, 1, 1);
+ for (i = 0; i < 100; i++) {
+ struct reftable_ref_record ref = {
+ .update_index = 1,
+ .value_type = REFTABLE_REF_VAL1,
+ .value.val1 = {i},
+ };
+
+ strbuf_reset(&buf);
+ strbuf_addf(&buf, "refs/heads/%04d", i);
+		ref.refname = buf.buf;
+
+ err = reftable_writer_add_ref(writer, &ref);
+ EXPECT_ERR(err);
+ }
+
+ for (i = 0; i < 100; i++) {
+ struct reftable_log_record log = {
+ .update_index = 1,
+ .value_type = REFTABLE_LOG_UPDATE,
+ .value.update = {
+ .old_hash = { i },
+ .new_hash = { i },
+ },
+ };
+
+ strbuf_reset(&buf);
+ strbuf_addf(&buf, "refs/heads/%04d", i);
+		log.refname = buf.buf;
+
+ err = reftable_writer_add_log(writer, &log);
+ EXPECT_ERR(err);
+ }
+
+ reftable_writer_close(writer);
+
+ /*
+ * The written data should be sufficiently large to result in indices
+ * for each of the block types.
+ */
+ stats = reftable_writer_stats(writer);
+ EXPECT(stats->ref_stats.index_offset > 0);
+ EXPECT(stats->obj_stats.index_offset > 0);
+ EXPECT(stats->log_stats.index_offset > 0);
+
+ block_source_from_strbuf(&source, &writer_buf);
+ err = reftable_new_reader(&reader, &source, "filename");
+ EXPECT_ERR(err);
+
+	/*
+	 * Seeking through the log now uses the log index. If the indices were
+	 * mixed up in any way we would notice it here.
+	 */
+ err = reftable_reader_seek_log(reader, &it, "");
+ EXPECT_ERR(err);
+
+ reftable_iterator_destroy(&it);
+ reftable_writer_free(writer);
+ reftable_reader_free(reader);
+ strbuf_release(&writer_buf);
+ strbuf_release(&buf);
+}
+
+static void test_write_multi_level_index(void)
+{
+ struct reftable_write_options opts = {
+ .block_size = 100,
+ };
+ struct strbuf writer_buf = STRBUF_INIT, buf = STRBUF_INIT;
+ struct reftable_block_source source = { 0 };
+ struct reftable_iterator it = { 0 };
+ const struct reftable_stats *stats;
+ struct reftable_writer *writer;
+ struct reftable_reader *reader;
+ int err;
+
+ writer = reftable_new_writer(&strbuf_add_void, &noop_flush, &writer_buf, &opts);
+ reftable_writer_set_limits(writer, 1, 1);
+ for (size_t i = 0; i < 200; i++) {
+ struct reftable_ref_record ref = {
+ .update_index = 1,
+ .value_type = REFTABLE_REF_VAL1,
+ .value.val1 = {i},
+ };
+
+ strbuf_reset(&buf);
+ strbuf_addf(&buf, "refs/heads/%03" PRIuMAX, (uintmax_t)i);
+		ref.refname = buf.buf;
+
+ err = reftable_writer_add_ref(writer, &ref);
+ EXPECT_ERR(err);
+ }
+ reftable_writer_close(writer);
+
+ /*
+ * The written refs should be sufficiently large to result in a
+ * multi-level index.
+ */
+ stats = reftable_writer_stats(writer);
+ EXPECT(stats->ref_stats.max_index_level == 2);
+
+ block_source_from_strbuf(&source, &writer_buf);
+ err = reftable_new_reader(&reader, &source, "filename");
+ EXPECT_ERR(err);
+
+ /*
+ * Seeking the last ref should work as expected.
+ */
+ err = reftable_reader_seek_ref(reader, &it, "refs/heads/199");
+ EXPECT_ERR(err);
+
+ reftable_iterator_destroy(&it);
+ reftable_writer_free(writer);
+ reftable_reader_free(reader);
+ strbuf_release(&writer_buf);
+ strbuf_release(&buf);
+}
+
+static void test_corrupt_table_empty(void)
+{
+ struct strbuf buf = STRBUF_INIT;
+ struct reftable_block_source source = { NULL };
+ struct reftable_reader rd = { NULL };
+ int err;
+
+ block_source_from_strbuf(&source, &buf);
+ err = init_reader(&rd, &source, "file.log");
+ EXPECT(err == REFTABLE_FORMAT_ERROR);
+}
+
+static void test_corrupt_table(void)
+{
+ uint8_t zeros[1024] = { 0 };
+ struct strbuf buf = STRBUF_INIT;
+ struct reftable_block_source source = { NULL };
+ struct reftable_reader rd = { NULL };
+ int err;
+ strbuf_add(&buf, zeros, sizeof(zeros));
+
+ block_source_from_strbuf(&source, &buf);
+ err = init_reader(&rd, &source, "file.log");
+ EXPECT(err == REFTABLE_FORMAT_ERROR);
+ strbuf_release(&buf);
+}
+
+int readwrite_test_main(int argc, const char *argv[])
+{
+ RUN_TEST(test_log_zlib_corruption);
+ RUN_TEST(test_corrupt_table);
+ RUN_TEST(test_corrupt_table_empty);
+ RUN_TEST(test_log_write_read);
+ RUN_TEST(test_write_key_order);
+ RUN_TEST(test_table_read_write_seek_linear_sha256);
+ RUN_TEST(test_log_buffer_size);
+ RUN_TEST(test_table_write_small_table);
+ RUN_TEST(test_buffer);
+ RUN_TEST(test_table_read_api);
+ RUN_TEST(test_table_read_write_sequential);
+ RUN_TEST(test_table_read_write_seek_linear);
+ RUN_TEST(test_table_read_write_seek_index);
+ RUN_TEST(test_table_refs_for_no_index);
+ RUN_TEST(test_table_refs_for_obj_index);
+ RUN_TEST(test_write_empty_key);
+ RUN_TEST(test_write_empty_table);
+ RUN_TEST(test_log_overflow);
+ RUN_TEST(test_write_object_id_length);
+ RUN_TEST(test_write_object_id_min_length);
+ RUN_TEST(test_write_multiple_indices);
+ RUN_TEST(test_write_multi_level_index);
+ return 0;
+}
diff --git a/reftable/record.c b/reftable/record.c
new file mode 100644
index 0000000..5506f3e
--- /dev/null
+++ b/reftable/record.c
@@ -0,0 +1,1342 @@
+/*
+Copyright 2020 Google LLC
+
+Use of this source code is governed by a BSD-style
+license that can be found in the LICENSE file or at
+https://developers.google.com/open-source/licenses/bsd
+*/
+
+/* record.c - methods for different types of records. */
+
+#include "record.h"
+
+#include "system.h"
+#include "constants.h"
+#include "reftable-error.h"
+#include "basics.h"
+
+static struct reftable_record_vtable *
+reftable_record_vtable(struct reftable_record *rec);
+static void *reftable_record_data(struct reftable_record *rec);
+
+int get_var_int(uint64_t *dest, struct string_view *in)
+{
+ int ptr = 0;
+ uint64_t val;
+
+ if (in->len == 0)
+ return -1;
+ val = in->buf[ptr] & 0x7f;
+
+ while (in->buf[ptr] & 0x80) {
+ ptr++;
+		if (ptr >= in->len) {
+ return -1;
+ }
+ val = (val + 1) << 7 | (uint64_t)(in->buf[ptr] & 0x7f);
+ }
+
+ *dest = val;
+ return ptr + 1;
+}
+
+int put_var_int(struct string_view *dest, uint64_t val)
+{
+ uint8_t buf[10] = { 0 };
+ int i = 9;
+ int n = 0;
+ buf[i] = (uint8_t)(val & 0x7f);
+ i--;
+ while (1) {
+ val >>= 7;
+ if (!val) {
+ break;
+ }
+ val--;
+ buf[i] = 0x80 | (uint8_t)(val & 0x7f);
+ i--;
+ }
+
+ n = sizeof(buf) - i - 1;
+ if (dest->len < n)
+ return -1;
+ memcpy(dest->buf, &buf[i + 1], n);
+ return n;
+}
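+
+/*
+ * For illustration, a worked trace of the encoding above. The scheme chains
+ * 7-bit groups with a +1 bias on continuation, so it is not plain LEB128:
+ *
+ *   put_var_int(&s, 127) writes { 0x7f }
+ *   put_var_int(&s, 128) writes { 0x80, 0x00 }
+ *   put_var_int(&s, 200) writes { 0x80, 0x48 }
+ *
+ * Decoding { 0x80, 0x48 }: the first byte contributes 0 and has the
+ * continuation bit set, so the result is ((0 + 1) << 7) | 0x48 == 200.
+ */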
+
+int reftable_is_block_type(uint8_t typ)
+{
+ switch (typ) {
+ case BLOCK_TYPE_REF:
+ case BLOCK_TYPE_LOG:
+ case BLOCK_TYPE_OBJ:
+ case BLOCK_TYPE_INDEX:
+ return 1;
+ }
+ return 0;
+}
+
+const unsigned char *reftable_ref_record_val1(const struct reftable_ref_record *rec)
+{
+ switch (rec->value_type) {
+ case REFTABLE_REF_VAL1:
+ return rec->value.val1;
+ case REFTABLE_REF_VAL2:
+ return rec->value.val2.value;
+ default:
+ return NULL;
+ }
+}
+
+const unsigned char *reftable_ref_record_val2(const struct reftable_ref_record *rec)
+{
+ switch (rec->value_type) {
+ case REFTABLE_REF_VAL2:
+ return rec->value.val2.target_value;
+ default:
+ return NULL;
+ }
+}
+
+static int decode_string(struct strbuf *dest, struct string_view in)
+{
+ int start_len = in.len;
+ uint64_t tsize = 0;
+ int n = get_var_int(&tsize, &in);
+ if (n <= 0)
+ return -1;
+ string_view_consume(&in, n);
+ if (in.len < tsize)
+ return -1;
+
+ strbuf_reset(dest);
+ strbuf_add(dest, in.buf, tsize);
+ string_view_consume(&in, tsize);
+
+ return start_len - in.len;
+}
+
+static int encode_string(char *str, struct string_view s)
+{
+ struct string_view start = s;
+ int l = strlen(str);
+ int n = put_var_int(&s, l);
+ if (n < 0)
+ return -1;
+ string_view_consume(&s, n);
+ if (s.len < l)
+ return -1;
+ memcpy(s.buf, str, l);
+ string_view_consume(&s, l);
+
+ return start.len - s.len;
+}
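+
+/*
+ * For illustration: encode_string() stores a length varint followed by the
+ * raw bytes (no trailing NUL), so encode_string("ref", s) writes
+ * { 0x03, 'r', 'e', 'f' }; decode_string() reverses this.
+ */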
+
+int reftable_encode_key(int *restart, struct string_view dest,
+ struct strbuf prev_key, struct strbuf key,
+ uint8_t extra)
+{
+ struct string_view start = dest;
+ int prefix_len = common_prefix_size(&prev_key, &key);
+ uint64_t suffix_len = key.len - prefix_len;
+ int n = put_var_int(&dest, (uint64_t)prefix_len);
+ if (n < 0)
+ return -1;
+ string_view_consume(&dest, n);
+
+ *restart = (prefix_len == 0);
+
+ n = put_var_int(&dest, suffix_len << 3 | (uint64_t)extra);
+ if (n < 0)
+ return -1;
+ string_view_consume(&dest, n);
+
+ if (dest.len < suffix_len)
+ return -1;
+ memcpy(dest.buf, key.buf + prefix_len, suffix_len);
+ string_view_consume(&dest, suffix_len);
+
+ return start.len - dest.len;
+}
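+
+/*
+ * For illustration: with prev_key "refs/heads/main" and key
+ * "refs/heads/master" the common prefix is "refs/heads/ma" (13 bytes) and
+ * the suffix is "ster" (4 bytes). With extra == 1 this encodes as
+ *
+ *   0x0d 0x21 's' 't' 'e' 'r'
+ *
+ * where 0x21 == (4 << 3) | 1. As the prefix is non-empty, *restart is 0.
+ */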
+
+int reftable_decode_keylen(struct string_view in,
+ uint64_t *prefix_len,
+ uint64_t *suffix_len,
+ uint8_t *extra)
+{
+ size_t start_len = in.len;
+ int n;
+
+ n = get_var_int(prefix_len, &in);
+ if (n < 0)
+ return -1;
+ string_view_consume(&in, n);
+
+ n = get_var_int(suffix_len, &in);
+ if (n <= 0)
+ return -1;
+ string_view_consume(&in, n);
+
+ *extra = (uint8_t)(*suffix_len & 0x7);
+ *suffix_len >>= 3;
+
+ return start_len - in.len;
+}
+
+int reftable_decode_key(struct strbuf *last_key, uint8_t *extra,
+ struct string_view in)
+{
+ int start_len = in.len;
+ uint64_t prefix_len = 0;
+ uint64_t suffix_len = 0;
+ int n;
+
+ n = reftable_decode_keylen(in, &prefix_len, &suffix_len, extra);
+ if (n < 0)
+ return -1;
+ string_view_consume(&in, n);
+
+ if (in.len < suffix_len ||
+ prefix_len > last_key->len)
+ return -1;
+
+ strbuf_setlen(last_key, prefix_len);
+ strbuf_add(last_key, in.buf, suffix_len);
+ string_view_consume(&in, suffix_len);
+
+ return start_len - in.len;
+}
+
+static void reftable_ref_record_key(const void *r, struct strbuf *dest)
+{
+ const struct reftable_ref_record *rec =
+ (const struct reftable_ref_record *)r;
+ strbuf_reset(dest);
+ strbuf_addstr(dest, rec->refname);
+}
+
+static void reftable_ref_record_copy_from(void *rec, const void *src_rec,
+ int hash_size)
+{
+ struct reftable_ref_record *ref = rec;
+ const struct reftable_ref_record *src = src_rec;
+ char *refname = NULL;
+ size_t refname_cap = 0;
+
+ assert(hash_size > 0);
+
+ SWAP(refname, ref->refname);
+ SWAP(refname_cap, ref->refname_cap);
+ reftable_ref_record_release(ref);
+ SWAP(ref->refname, refname);
+ SWAP(ref->refname_cap, refname_cap);
+
+ if (src->refname) {
+ size_t refname_len = strlen(src->refname);
+
+ REFTABLE_ALLOC_GROW(ref->refname, refname_len + 1,
+ ref->refname_cap);
+ memcpy(ref->refname, src->refname, refname_len);
+ ref->refname[refname_len] = 0;
+ }
+
+ ref->update_index = src->update_index;
+ ref->value_type = src->value_type;
+ switch (src->value_type) {
+ case REFTABLE_REF_DELETION:
+ break;
+ case REFTABLE_REF_VAL1:
+ memcpy(ref->value.val1, src->value.val1, hash_size);
+ break;
+ case REFTABLE_REF_VAL2:
+ memcpy(ref->value.val2.value, src->value.val2.value, hash_size);
+ memcpy(ref->value.val2.target_value,
+ src->value.val2.target_value, hash_size);
+ break;
+ case REFTABLE_REF_SYMREF:
+ ref->value.symref = xstrdup(src->value.symref);
+ break;
+ }
+}
+
+static char hexdigit(int c)
+{
+ if (c <= 9)
+ return '0' + c;
+ return 'a' + (c - 10);
+}
+
+static void hex_format(char *dest, const unsigned char *src, int hash_size)
+{
+ assert(hash_size > 0);
+ if (src) {
+ int i = 0;
+ for (i = 0; i < hash_size; i++) {
+ dest[2 * i] = hexdigit(src[i] >> 4);
+ dest[2 * i + 1] = hexdigit(src[i] & 0xf);
+ }
+ dest[2 * hash_size] = 0;
+ }
+}
+
+static void reftable_ref_record_print_sz(const struct reftable_ref_record *ref,
+ int hash_size)
+{
+ char hex[GIT_MAX_HEXSZ + 1] = { 0 }; /* BUG */
+ printf("ref{%s(%" PRIu64 ") ", ref->refname, ref->update_index);
+ switch (ref->value_type) {
+ case REFTABLE_REF_SYMREF:
+ printf("=> %s", ref->value.symref);
+ break;
+ case REFTABLE_REF_VAL2:
+ hex_format(hex, ref->value.val2.value, hash_size);
+ printf("val 2 %s", hex);
+ hex_format(hex, ref->value.val2.target_value,
+ hash_size);
+ printf("(T %s)", hex);
+ break;
+ case REFTABLE_REF_VAL1:
+ hex_format(hex, ref->value.val1, hash_size);
+ printf("val 1 %s", hex);
+ break;
+ case REFTABLE_REF_DELETION:
+ printf("delete");
+ break;
+ }
+ printf("}\n");
+}
+
+void reftable_ref_record_print(const struct reftable_ref_record *ref,
+ uint32_t hash_id) {
+ reftable_ref_record_print_sz(ref, hash_size(hash_id));
+}
+
+static void reftable_ref_record_release_void(void *rec)
+{
+ reftable_ref_record_release(rec);
+}
+
+void reftable_ref_record_release(struct reftable_ref_record *ref)
+{
+ switch (ref->value_type) {
+ case REFTABLE_REF_SYMREF:
+ reftable_free(ref->value.symref);
+ break;
+ case REFTABLE_REF_VAL2:
+ break;
+ case REFTABLE_REF_VAL1:
+ break;
+ case REFTABLE_REF_DELETION:
+ break;
+ default:
+ abort();
+ }
+
+ reftable_free(ref->refname);
+ memset(ref, 0, sizeof(struct reftable_ref_record));
+}
+
+static uint8_t reftable_ref_record_val_type(const void *rec)
+{
+ const struct reftable_ref_record *r =
+ (const struct reftable_ref_record *)rec;
+ return r->value_type;
+}
+
+static int reftable_ref_record_encode(const void *rec, struct string_view s,
+ int hash_size)
+{
+ const struct reftable_ref_record *r =
+ (const struct reftable_ref_record *)rec;
+ struct string_view start = s;
+ int n = put_var_int(&s, r->update_index);
+ assert(hash_size > 0);
+ if (n < 0)
+ return -1;
+ string_view_consume(&s, n);
+
+ switch (r->value_type) {
+ case REFTABLE_REF_SYMREF:
+ n = encode_string(r->value.symref, s);
+ if (n < 0) {
+ return -1;
+ }
+ string_view_consume(&s, n);
+ break;
+ case REFTABLE_REF_VAL2:
+ if (s.len < 2 * hash_size) {
+ return -1;
+ }
+ memcpy(s.buf, r->value.val2.value, hash_size);
+ string_view_consume(&s, hash_size);
+ memcpy(s.buf, r->value.val2.target_value, hash_size);
+ string_view_consume(&s, hash_size);
+ break;
+ case REFTABLE_REF_VAL1:
+ if (s.len < hash_size) {
+ return -1;
+ }
+ memcpy(s.buf, r->value.val1, hash_size);
+ string_view_consume(&s, hash_size);
+ break;
+ case REFTABLE_REF_DELETION:
+ break;
+ default:
+ abort();
+ }
+
+ return start.len - s.len;
+}
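+
+/*
+ * For illustration: the encoded ref value starts with the update_index as a
+ * varint, followed by nothing (deletion), one hash (VAL1), two hashes
+ * (VAL2), or a length-prefixed target name (SYMREF), depending on the value
+ * type.
+ */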
+
+static int reftable_ref_record_decode(void *rec, struct strbuf key,
+ uint8_t val_type, struct string_view in,
+ int hash_size, struct strbuf *scratch)
+{
+ struct reftable_ref_record *r = rec;
+ struct string_view start = in;
+ uint64_t update_index = 0;
+ const char *refname = NULL;
+ size_t refname_cap = 0;
+ int n;
+
+ assert(hash_size > 0);
+
+ n = get_var_int(&update_index, &in);
+ if (n < 0)
+ return n;
+ string_view_consume(&in, n);
+
+ SWAP(refname, r->refname);
+ SWAP(refname_cap, r->refname_cap);
+ reftable_ref_record_release(r);
+ SWAP(r->refname, refname);
+ SWAP(r->refname_cap, refname_cap);
+
+ REFTABLE_ALLOC_GROW(r->refname, key.len + 1, r->refname_cap);
+ memcpy(r->refname, key.buf, key.len);
+ r->refname[key.len] = 0;
+
+ r->update_index = update_index;
+ r->value_type = val_type;
+ switch (val_type) {
+ case REFTABLE_REF_VAL1:
+ if (in.len < hash_size) {
+ return -1;
+ }
+
+ memcpy(r->value.val1, in.buf, hash_size);
+ string_view_consume(&in, hash_size);
+ break;
+
+ case REFTABLE_REF_VAL2:
+ if (in.len < 2 * hash_size) {
+ return -1;
+ }
+
+ memcpy(r->value.val2.value, in.buf, hash_size);
+ string_view_consume(&in, hash_size);
+
+ memcpy(r->value.val2.target_value, in.buf, hash_size);
+ string_view_consume(&in, hash_size);
+ break;
+
+ case REFTABLE_REF_SYMREF: {
+ int n = decode_string(scratch, in);
+ if (n < 0) {
+ return -1;
+ }
+ string_view_consume(&in, n);
+ r->value.symref = strbuf_detach(scratch, NULL);
+ } break;
+
+ case REFTABLE_REF_DELETION:
+ break;
+ default:
+ abort();
+ break;
+ }
+
+ return start.len - in.len;
+}
+
+static int reftable_ref_record_is_deletion_void(const void *p)
+{
+ return reftable_ref_record_is_deletion(
+ (const struct reftable_ref_record *)p);
+}
+
+static int reftable_ref_record_equal_void(const void *a,
+ const void *b, int hash_size)
+{
+ struct reftable_ref_record *ra = (struct reftable_ref_record *) a;
+ struct reftable_ref_record *rb = (struct reftable_ref_record *) b;
+ return reftable_ref_record_equal(ra, rb, hash_size);
+}
+
+static int reftable_ref_record_cmp_void(const void *_a, const void *_b)
+{
+ const struct reftable_ref_record *a = _a;
+ const struct reftable_ref_record *b = _b;
+ return strcmp(a->refname, b->refname);
+}
+
+static void reftable_ref_record_print_void(const void *rec,
+ int hash_size)
+{
+ reftable_ref_record_print_sz((struct reftable_ref_record *) rec, hash_size);
+}
+
+static struct reftable_record_vtable reftable_ref_record_vtable = {
+ .key = &reftable_ref_record_key,
+ .type = BLOCK_TYPE_REF,
+ .copy_from = &reftable_ref_record_copy_from,
+ .val_type = &reftable_ref_record_val_type,
+ .encode = &reftable_ref_record_encode,
+ .decode = &reftable_ref_record_decode,
+ .release = &reftable_ref_record_release_void,
+ .is_deletion = &reftable_ref_record_is_deletion_void,
+ .equal = &reftable_ref_record_equal_void,
+ .cmp = &reftable_ref_record_cmp_void,
+ .print = &reftable_ref_record_print_void,
+};
+
+static void reftable_obj_record_key(const void *r, struct strbuf *dest)
+{
+ const struct reftable_obj_record *rec =
+ (const struct reftable_obj_record *)r;
+ strbuf_reset(dest);
+ strbuf_add(dest, rec->hash_prefix, rec->hash_prefix_len);
+}
+
+static void reftable_obj_record_release(void *rec)
+{
+ struct reftable_obj_record *obj = rec;
+ FREE_AND_NULL(obj->hash_prefix);
+ FREE_AND_NULL(obj->offsets);
+ memset(obj, 0, sizeof(struct reftable_obj_record));
+}
+
+static void reftable_obj_record_print(const void *rec, int hash_size)
+{
+ const struct reftable_obj_record *obj = rec;
+ char hex[GIT_MAX_HEXSZ + 1] = { 0 };
+ struct strbuf offset_str = STRBUF_INIT;
+ int i;
+
+ for (i = 0; i < obj->offset_len; i++)
+ strbuf_addf(&offset_str, "%" PRIu64 " ", obj->offsets[i]);
+ hex_format(hex, obj->hash_prefix, obj->hash_prefix_len);
+ printf("prefix %s (len %d), offsets [%s]\n",
+ hex, obj->hash_prefix_len, offset_str.buf);
+ strbuf_release(&offset_str);
+}
+
+static void reftable_obj_record_copy_from(void *rec, const void *src_rec,
+ int hash_size)
+{
+ struct reftable_obj_record *obj = rec;
+ const struct reftable_obj_record *src =
+ (const struct reftable_obj_record *)src_rec;
+
+ reftable_obj_record_release(obj);
+
+ REFTABLE_ALLOC_ARRAY(obj->hash_prefix, src->hash_prefix_len);
+ obj->hash_prefix_len = src->hash_prefix_len;
+ if (src->hash_prefix_len)
+ memcpy(obj->hash_prefix, src->hash_prefix, obj->hash_prefix_len);
+
+ REFTABLE_ALLOC_ARRAY(obj->offsets, src->offset_len);
+ obj->offset_len = src->offset_len;
+ COPY_ARRAY(obj->offsets, src->offsets, src->offset_len);
+}
+
+static uint8_t reftable_obj_record_val_type(const void *rec)
+{
+ const struct reftable_obj_record *r = rec;
+ if (r->offset_len > 0 && r->offset_len < 8)
+ return r->offset_len;
+ return 0;
+}
+
+static int reftable_obj_record_encode(const void *rec, struct string_view s,
+ int hash_size)
+{
+ const struct reftable_obj_record *r = rec;
+ struct string_view start = s;
+ int i = 0;
+ int n = 0;
+ uint64_t last = 0;
+ if (r->offset_len == 0 || r->offset_len >= 8) {
+ n = put_var_int(&s, r->offset_len);
+ if (n < 0) {
+ return -1;
+ }
+ string_view_consume(&s, n);
+ }
+ if (r->offset_len == 0)
+ return start.len - s.len;
+ n = put_var_int(&s, r->offsets[0]);
+ if (n < 0)
+ return -1;
+ string_view_consume(&s, n);
+
+ last = r->offsets[0];
+ for (i = 1; i < r->offset_len; i++) {
+ int n = put_var_int(&s, r->offsets[i] - last);
+ if (n < 0) {
+ return -1;
+ }
+ string_view_consume(&s, n);
+ last = r->offsets[i];
+ }
+ return start.len - s.len;
+}
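+
+/*
+ * For illustration: offsets are delta-encoded. For offsets { 100, 250, 400 }
+ * the value type is 3 (the count, as 0 < 3 < 8) and the payload is
+ * varint(100), varint(150), varint(150). With zero or 8+ offsets the count
+ * is instead written as a leading varint and the value type is 0.
+ */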
+
+static int reftable_obj_record_decode(void *rec, struct strbuf key,
+ uint8_t val_type, struct string_view in,
+ int hash_size, struct strbuf *scratch UNUSED)
+{
+ struct string_view start = in;
+ struct reftable_obj_record *r = rec;
+ uint64_t count = val_type;
+ int n = 0;
+ uint64_t last;
+ int j;
+
+ reftable_obj_record_release(r);
+
+ REFTABLE_ALLOC_ARRAY(r->hash_prefix, key.len);
+ memcpy(r->hash_prefix, key.buf, key.len);
+ r->hash_prefix_len = key.len;
+
+ if (val_type == 0) {
+ n = get_var_int(&count, &in);
+ if (n < 0) {
+ return n;
+ }
+
+ string_view_consume(&in, n);
+ }
+
+ r->offsets = NULL;
+ r->offset_len = 0;
+ if (count == 0)
+ return start.len - in.len;
+
+ REFTABLE_ALLOC_ARRAY(r->offsets, count);
+ r->offset_len = count;
+
+ n = get_var_int(&r->offsets[0], &in);
+ if (n < 0)
+ return n;
+ string_view_consume(&in, n);
+
+ last = r->offsets[0];
+ j = 1;
+ while (j < count) {
+ uint64_t delta = 0;
+ int n = get_var_int(&delta, &in);
+ if (n < 0) {
+ return n;
+ }
+ string_view_consume(&in, n);
+
+ last = r->offsets[j] = (delta + last);
+ j++;
+ }
+ return start.len - in.len;
+}
+
+static int not_a_deletion(const void *p)
+{
+ return 0;
+}
+
+static int reftable_obj_record_equal_void(const void *a, const void *b, int hash_size)
+{
+ struct reftable_obj_record *ra = (struct reftable_obj_record *) a;
+ struct reftable_obj_record *rb = (struct reftable_obj_record *) b;
+
+ if (ra->hash_prefix_len != rb->hash_prefix_len
+ || ra->offset_len != rb->offset_len)
+ return 0;
+
+ if (ra->hash_prefix_len &&
+ memcmp(ra->hash_prefix, rb->hash_prefix, ra->hash_prefix_len))
+ return 0;
+ if (ra->offset_len &&
+ memcmp(ra->offsets, rb->offsets, ra->offset_len * sizeof(uint64_t)))
+ return 0;
+
+ return 1;
+}
+
+static int reftable_obj_record_cmp_void(const void *_a, const void *_b)
+{
+ const struct reftable_obj_record *a = _a;
+ const struct reftable_obj_record *b = _b;
+ int cmp;
+
+	/* Compare only the bytes that both prefixes have. */
+	cmp = memcmp(a->hash_prefix, b->hash_prefix,
+		     a->hash_prefix_len < b->hash_prefix_len ?
+		     a->hash_prefix_len : b->hash_prefix_len);
+ if (cmp)
+ return cmp;
+
+	/*
+	 * When the shared bytes are equal, the record with the longer prefix
+	 * is considered the greater one.
+	 */
+ return a->hash_prefix_len - b->hash_prefix_len;
+}
+
+static struct reftable_record_vtable reftable_obj_record_vtable = {
+ .key = &reftable_obj_record_key,
+ .type = BLOCK_TYPE_OBJ,
+ .copy_from = &reftable_obj_record_copy_from,
+ .val_type = &reftable_obj_record_val_type,
+ .encode = &reftable_obj_record_encode,
+ .decode = &reftable_obj_record_decode,
+ .release = &reftable_obj_record_release,
+ .is_deletion = &not_a_deletion,
+ .equal = &reftable_obj_record_equal_void,
+ .cmp = &reftable_obj_record_cmp_void,
+ .print = &reftable_obj_record_print,
+};
+
+static void reftable_log_record_print_sz(struct reftable_log_record *log,
+ int hash_size)
+{
+ char hex[GIT_MAX_HEXSZ + 1] = { 0 };
+
+ switch (log->value_type) {
+ case REFTABLE_LOG_DELETION:
+ printf("log{%s(%" PRIu64 ") delete\n", log->refname,
+ log->update_index);
+ break;
+ case REFTABLE_LOG_UPDATE:
+ printf("log{%s(%" PRIu64 ") %s <%s> %" PRIu64 " %04d\n",
+ log->refname, log->update_index,
+ log->value.update.name ? log->value.update.name : "",
+ log->value.update.email ? log->value.update.email : "",
+ log->value.update.time,
+ log->value.update.tz_offset);
+ hex_format(hex, log->value.update.old_hash, hash_size);
+ printf("%s => ", hex);
+ hex_format(hex, log->value.update.new_hash, hash_size);
+ printf("%s\n\n%s\n}\n", hex,
+ log->value.update.message ? log->value.update.message : "");
+ break;
+ }
+}
+
+void reftable_log_record_print(struct reftable_log_record *log,
+ uint32_t hash_id)
+{
+ reftable_log_record_print_sz(log, hash_size(hash_id));
+}
+
+static void reftable_log_record_key(const void *r, struct strbuf *dest)
+{
+ const struct reftable_log_record *rec =
+ (const struct reftable_log_record *)r;
+ int len = strlen(rec->refname);
+ uint8_t i64[8];
+ uint64_t ts = 0;
+ strbuf_reset(dest);
+ strbuf_add(dest, (uint8_t *)rec->refname, len + 1);
+
+ ts = (~ts) - rec->update_index;
+ put_be64(&i64[0], ts);
+ strbuf_add(dest, i64, sizeof(i64));
+}
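+
+/*
+ * For illustration: the log key is the refname including its trailing NUL,
+ * followed by the big-endian value of (~0 - update_index). For
+ * "refs/heads/master" with update_index 42 the trailing 8 bytes are
+ * ff ff ff ff ff ff ff d5. Inverting the index makes newer entries of the
+ * same ref sort first.
+ */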
+
+static void reftable_log_record_copy_from(void *rec, const void *src_rec,
+ int hash_size)
+{
+ struct reftable_log_record *dst = rec;
+ const struct reftable_log_record *src =
+ (const struct reftable_log_record *)src_rec;
+
+ reftable_log_record_release(dst);
+ *dst = *src;
+ if (dst->refname) {
+ dst->refname = xstrdup(dst->refname);
+ }
+ switch (dst->value_type) {
+ case REFTABLE_LOG_DELETION:
+ break;
+ case REFTABLE_LOG_UPDATE:
+ if (dst->value.update.email) {
+ dst->value.update.email =
+ xstrdup(dst->value.update.email);
+ }
+ if (dst->value.update.name) {
+ dst->value.update.name =
+ xstrdup(dst->value.update.name);
+ }
+ if (dst->value.update.message) {
+ dst->value.update.message =
+ xstrdup(dst->value.update.message);
+ }
+
+ memcpy(dst->value.update.new_hash,
+ src->value.update.new_hash, hash_size);
+ memcpy(dst->value.update.old_hash,
+ src->value.update.old_hash, hash_size);
+ break;
+ }
+}
+
+static void reftable_log_record_release_void(void *rec)
+{
+ struct reftable_log_record *r = rec;
+ reftable_log_record_release(r);
+}
+
+void reftable_log_record_release(struct reftable_log_record *r)
+{
+ reftable_free(r->refname);
+ switch (r->value_type) {
+ case REFTABLE_LOG_DELETION:
+ break;
+ case REFTABLE_LOG_UPDATE:
+ reftable_free(r->value.update.name);
+ reftable_free(r->value.update.email);
+ reftable_free(r->value.update.message);
+ break;
+ }
+ memset(r, 0, sizeof(struct reftable_log_record));
+}
+
+static uint8_t reftable_log_record_val_type(const void *rec)
+{
+ const struct reftable_log_record *log =
+ (const struct reftable_log_record *)rec;
+
+ return reftable_log_record_is_deletion(log) ? 0 : 1;
+}
+
+static int reftable_log_record_encode(const void *rec, struct string_view s,
+ int hash_size)
+{
+ const struct reftable_log_record *r = rec;
+ struct string_view start = s;
+ int n = 0;
+ if (reftable_log_record_is_deletion(r))
+ return 0;
+
+ if (s.len < 2 * hash_size)
+ return -1;
+
+ memcpy(s.buf, r->value.update.old_hash, hash_size);
+ memcpy(s.buf + hash_size, r->value.update.new_hash, hash_size);
+ string_view_consume(&s, 2 * hash_size);
+
+ n = encode_string(r->value.update.name ? r->value.update.name : "", s);
+ if (n < 0)
+ return -1;
+ string_view_consume(&s, n);
+
+ n = encode_string(r->value.update.email ? r->value.update.email : "",
+ s);
+ if (n < 0)
+ return -1;
+ string_view_consume(&s, n);
+
+ n = put_var_int(&s, r->value.update.time);
+ if (n < 0)
+ return -1;
+ string_view_consume(&s, n);
+
+ if (s.len < 2)
+ return -1;
+
+ put_be16(s.buf, r->value.update.tz_offset);
+ string_view_consume(&s, 2);
+
+ n = encode_string(
+ r->value.update.message ? r->value.update.message : "", s);
+ if (n < 0)
+ return -1;
+ string_view_consume(&s, n);
+
+ return start.len - s.len;
+}
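+
+/*
+ * For illustration, the layout of a non-deletion log value as written above:
+ *
+ *   old_hash new_hash name email time tz_offset message
+ *
+ * with the hashes as raw bytes, name/email/message as length-prefixed
+ * strings, time as a varint and tz_offset as a big-endian 16-bit value.
+ */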
+
+static int reftable_log_record_decode(void *rec, struct strbuf key,
+ uint8_t val_type, struct string_view in,
+ int hash_size, struct strbuf *scratch)
+{
+ struct string_view start = in;
+ struct reftable_log_record *r = rec;
+ uint64_t max = 0;
+ uint64_t ts = 0;
+ int n;
+
+ if (key.len <= 9 || key.buf[key.len - 9] != 0)
+ return REFTABLE_FORMAT_ERROR;
+
+ REFTABLE_ALLOC_GROW(r->refname, key.len - 8, r->refname_cap);
+ memcpy(r->refname, key.buf, key.len - 8);
+ ts = get_be64(key.buf + key.len - 8);
+
+ r->update_index = (~max) - ts;
+
+ if (val_type != r->value_type) {
+ switch (r->value_type) {
+ case REFTABLE_LOG_UPDATE:
+ FREE_AND_NULL(r->value.update.message);
+ r->value.update.message_cap = 0;
+ FREE_AND_NULL(r->value.update.email);
+ FREE_AND_NULL(r->value.update.name);
+ break;
+ case REFTABLE_LOG_DELETION:
+ break;
+ }
+ }
+
+ r->value_type = val_type;
+ if (val_type == REFTABLE_LOG_DELETION)
+ return 0;
+
+ if (in.len < 2 * hash_size)
+ return REFTABLE_FORMAT_ERROR;
+
+ memcpy(r->value.update.old_hash, in.buf, hash_size);
+ memcpy(r->value.update.new_hash, in.buf + hash_size, hash_size);
+
+ string_view_consume(&in, 2 * hash_size);
+
+ n = decode_string(scratch, in);
+ if (n < 0)
+ goto done;
+ string_view_consume(&in, n);
+
+	/*
+	 * In almost all cases the reflog name does not change between reflog
+	 * entries, as it is tied to the local identity rather than to the
+	 * target commits. As an optimization for this common case we can
+	 * skip copying the name if it is already accurate.
+	 */
+ if (!r->value.update.name ||
+ strcmp(r->value.update.name, scratch->buf)) {
+ r->value.update.name =
+ reftable_realloc(r->value.update.name, scratch->len + 1);
+ memcpy(r->value.update.name, scratch->buf, scratch->len);
+ r->value.update.name[scratch->len] = 0;
+ }
+
+ n = decode_string(scratch, in);
+ if (n < 0)
+ goto done;
+ string_view_consume(&in, n);
+
+ /* Same as above, but for the reflog email. */
+ if (!r->value.update.email ||
+ strcmp(r->value.update.email, scratch->buf)) {
+ r->value.update.email =
+ reftable_realloc(r->value.update.email, scratch->len + 1);
+ memcpy(r->value.update.email, scratch->buf, scratch->len);
+ r->value.update.email[scratch->len] = 0;
+ }
+
+ ts = 0;
+ n = get_var_int(&ts, &in);
+ if (n < 0)
+ goto done;
+ string_view_consume(&in, n);
+ r->value.update.time = ts;
+ if (in.len < 2)
+ goto done;
+
+ r->value.update.tz_offset = get_be16(in.buf);
+ string_view_consume(&in, 2);
+
+ n = decode_string(scratch, in);
+ if (n < 0)
+ goto done;
+ string_view_consume(&in, n);
+
+ REFTABLE_ALLOC_GROW(r->value.update.message, scratch->len + 1,
+ r->value.update.message_cap);
+ memcpy(r->value.update.message, scratch->buf, scratch->len);
+ r->value.update.message[scratch->len] = 0;
+
+ return start.len - in.len;
+
+done:
+ return REFTABLE_FORMAT_ERROR;
+}
+
+static int null_streq(char *a, char *b)
+{
+ char *empty = "";
+ if (!a)
+ a = empty;
+
+ if (!b)
+ b = empty;
+
+ return 0 == strcmp(a, b);
+}
+
+static int reftable_log_record_equal_void(const void *a,
+ const void *b, int hash_size)
+{
+ return reftable_log_record_equal((struct reftable_log_record *) a,
+ (struct reftable_log_record *) b,
+ hash_size);
+}
+
+static int reftable_log_record_cmp_void(const void *_a, const void *_b)
+{
+ const struct reftable_log_record *a = _a;
+ const struct reftable_log_record *b = _b;
+ int cmp = strcmp(a->refname, b->refname);
+ if (cmp)
+ return cmp;
+
+	/*
+	 * Note that the comparison here is reversed: the update index is
+	 * inverted when computing keys, see reftable_log_record_key(). Avoid
+	 * plain subtraction, as the 64-bit difference may not fit into the
+	 * int return value.
+	 */
+	if (a->update_index > b->update_index)
+		return -1;
+	return (a->update_index < b->update_index) ? 1 : 0;
+}
+
+int reftable_log_record_equal(const struct reftable_log_record *a,
+ const struct reftable_log_record *b, int hash_size)
+{
+ if (!(null_streq(a->refname, b->refname) &&
+ a->update_index == b->update_index &&
+ a->value_type == b->value_type))
+ return 0;
+
+ switch (a->value_type) {
+ case REFTABLE_LOG_DELETION:
+ return 1;
+ case REFTABLE_LOG_UPDATE:
+ return null_streq(a->value.update.name, b->value.update.name) &&
+ a->value.update.time == b->value.update.time &&
+ a->value.update.tz_offset == b->value.update.tz_offset &&
+ null_streq(a->value.update.email,
+ b->value.update.email) &&
+ null_streq(a->value.update.message,
+ b->value.update.message) &&
+ !memcmp(a->value.update.old_hash,
+ b->value.update.old_hash, hash_size) &&
+ !memcmp(a->value.update.new_hash,
+ b->value.update.new_hash, hash_size);
+ }
+
+ abort();
+}
+
+static int reftable_log_record_is_deletion_void(const void *p)
+{
+ return reftable_log_record_is_deletion(
+ (const struct reftable_log_record *)p);
+}
+
+static void reftable_log_record_print_void(const void *rec, int hash_size)
+{
+ reftable_log_record_print_sz((struct reftable_log_record*)rec, hash_size);
+}
+
+static struct reftable_record_vtable reftable_log_record_vtable = {
+ .key = &reftable_log_record_key,
+ .type = BLOCK_TYPE_LOG,
+ .copy_from = &reftable_log_record_copy_from,
+ .val_type = &reftable_log_record_val_type,
+ .encode = &reftable_log_record_encode,
+ .decode = &reftable_log_record_decode,
+ .release = &reftable_log_record_release_void,
+ .is_deletion = &reftable_log_record_is_deletion_void,
+ .equal = &reftable_log_record_equal_void,
+ .cmp = &reftable_log_record_cmp_void,
+ .print = &reftable_log_record_print_void,
+};
+
+static void reftable_index_record_key(const void *r, struct strbuf *dest)
+{
+ const struct reftable_index_record *rec = r;
+ strbuf_reset(dest);
+ strbuf_addbuf(dest, &rec->last_key);
+}
+
+static void reftable_index_record_copy_from(void *rec, const void *src_rec,
+ int hash_size)
+{
+ struct reftable_index_record *dst = rec;
+ const struct reftable_index_record *src = src_rec;
+
+ strbuf_reset(&dst->last_key);
+ strbuf_addbuf(&dst->last_key, &src->last_key);
+ dst->offset = src->offset;
+}
+
+static void reftable_index_record_release(void *rec)
+{
+ struct reftable_index_record *idx = rec;
+ strbuf_release(&idx->last_key);
+}
+
+static uint8_t reftable_index_record_val_type(const void *rec)
+{
+ return 0;
+}
+
+static int reftable_index_record_encode(const void *rec, struct string_view out,
+ int hash_size)
+{
+ const struct reftable_index_record *r =
+ (const struct reftable_index_record *)rec;
+ struct string_view start = out;
+
+ int n = put_var_int(&out, r->offset);
+ if (n < 0)
+ return n;
+
+ string_view_consume(&out, n);
+
+ return start.len - out.len;
+}
+
+static int reftable_index_record_decode(void *rec, struct strbuf key,
+ uint8_t val_type, struct string_view in,
+ int hash_size, struct strbuf *scratch UNUSED)
+{
+ struct string_view start = in;
+ struct reftable_index_record *r = rec;
+ int n = 0;
+
+ strbuf_reset(&r->last_key);
+ strbuf_addbuf(&r->last_key, &key);
+
+ n = get_var_int(&r->offset, &in);
+ if (n < 0)
+ return n;
+
+ string_view_consume(&in, n);
+ return start.len - in.len;
+}
+
+static int reftable_index_record_equal(const void *a, const void *b, int hash_size)
+{
+ struct reftable_index_record *ia = (struct reftable_index_record *) a;
+ struct reftable_index_record *ib = (struct reftable_index_record *) b;
+
+ return ia->offset == ib->offset && !strbuf_cmp(&ia->last_key, &ib->last_key);
+}
+
+static int reftable_index_record_cmp(const void *_a, const void *_b)
+{
+ const struct reftable_index_record *a = _a;
+ const struct reftable_index_record *b = _b;
+ return strbuf_cmp(&a->last_key, &b->last_key);
+}
+
+static void reftable_index_record_print(const void *rec, int hash_size)
+{
+ const struct reftable_index_record *idx = rec;
+ /* TODO: escape null chars? */
+ printf("\"%s\" %" PRIu64 "\n", idx->last_key.buf, idx->offset);
+}
+
+static struct reftable_record_vtable reftable_index_record_vtable = {
+ .key = &reftable_index_record_key,
+ .type = BLOCK_TYPE_INDEX,
+ .copy_from = &reftable_index_record_copy_from,
+ .val_type = &reftable_index_record_val_type,
+ .encode = &reftable_index_record_encode,
+ .decode = &reftable_index_record_decode,
+ .release = &reftable_index_record_release,
+ .is_deletion = &not_a_deletion,
+ .equal = &reftable_index_record_equal,
+ .cmp = &reftable_index_record_cmp,
+ .print = &reftable_index_record_print,
+};
+
+void reftable_record_key(struct reftable_record *rec, struct strbuf *dest)
+{
+ reftable_record_vtable(rec)->key(reftable_record_data(rec), dest);
+}
+
+int reftable_record_encode(struct reftable_record *rec, struct string_view dest,
+ int hash_size)
+{
+ return reftable_record_vtable(rec)->encode(reftable_record_data(rec),
+ dest, hash_size);
+}
+
+void reftable_record_copy_from(struct reftable_record *rec,
+ struct reftable_record *src, int hash_size)
+{
+ assert(src->type == rec->type);
+
+ reftable_record_vtable(rec)->copy_from(reftable_record_data(rec),
+ reftable_record_data(src),
+ hash_size);
+}
+
+uint8_t reftable_record_val_type(struct reftable_record *rec)
+{
+ return reftable_record_vtable(rec)->val_type(reftable_record_data(rec));
+}
+
+int reftable_record_decode(struct reftable_record *rec, struct strbuf key,
+ uint8_t extra, struct string_view src, int hash_size,
+ struct strbuf *scratch)
+{
+ return reftable_record_vtable(rec)->decode(reftable_record_data(rec),
+ key, extra, src, hash_size,
+ scratch);
+}
+
+void reftable_record_release(struct reftable_record *rec)
+{
+ reftable_record_vtable(rec)->release(reftable_record_data(rec));
+}
+
+int reftable_record_is_deletion(struct reftable_record *rec)
+{
+ return reftable_record_vtable(rec)->is_deletion(
+ reftable_record_data(rec));
+}
+
+int reftable_record_cmp(struct reftable_record *a, struct reftable_record *b)
+{
+ if (a->type != b->type)
+ BUG("cannot compare reftable records of different type");
+ return reftable_record_vtable(a)->cmp(
+ reftable_record_data(a), reftable_record_data(b));
+}
+
+int reftable_record_equal(struct reftable_record *a, struct reftable_record *b, int hash_size)
+{
+ if (a->type != b->type)
+ return 0;
+ return reftable_record_vtable(a)->equal(
+ reftable_record_data(a), reftable_record_data(b), hash_size);
+}
+
+static int hash_equal(const unsigned char *a, const unsigned char *b, int hash_size)
+{
+ if (a && b)
+ return !memcmp(a, b, hash_size);
+
+ return a == b;
+}
+
+int reftable_ref_record_equal(const struct reftable_ref_record *a,
+ const struct reftable_ref_record *b, int hash_size)
+{
+ assert(hash_size > 0);
+ if (!null_streq(a->refname, b->refname))
+ return 0;
+
+ if (a->update_index != b->update_index ||
+ a->value_type != b->value_type)
+ return 0;
+
+ switch (a->value_type) {
+ case REFTABLE_REF_SYMREF:
+ return !strcmp(a->value.symref, b->value.symref);
+ case REFTABLE_REF_VAL2:
+ return hash_equal(a->value.val2.value, b->value.val2.value,
+ hash_size) &&
+ hash_equal(a->value.val2.target_value,
+ b->value.val2.target_value, hash_size);
+ case REFTABLE_REF_VAL1:
+ return hash_equal(a->value.val1, b->value.val1, hash_size);
+ case REFTABLE_REF_DELETION:
+ return 1;
+ default:
+ abort();
+ }
+}
+
+int reftable_ref_record_compare_name(const void *a, const void *b)
+{
+ return strcmp(((struct reftable_ref_record *)a)->refname,
+ ((struct reftable_ref_record *)b)->refname);
+}
+
+int reftable_ref_record_is_deletion(const struct reftable_ref_record *ref)
+{
+ return ref->value_type == REFTABLE_REF_DELETION;
+}
+
+int reftable_log_record_compare_key(const void *a, const void *b)
+{
+ const struct reftable_log_record *la = a;
+ const struct reftable_log_record *lb = b;
+
+ int cmp = strcmp(la->refname, lb->refname);
+ if (cmp)
+ return cmp;
+ if (la->update_index > lb->update_index)
+ return -1;
+ return (la->update_index < lb->update_index) ? 1 : 0;
+}
+
+int reftable_log_record_is_deletion(const struct reftable_log_record *log)
+{
+ return (log->value_type == REFTABLE_LOG_DELETION);
+}
+
+static void *reftable_record_data(struct reftable_record *rec)
+{
+ switch (rec->type) {
+ case BLOCK_TYPE_REF:
+ return &rec->u.ref;
+ case BLOCK_TYPE_LOG:
+ return &rec->u.log;
+ case BLOCK_TYPE_INDEX:
+ return &rec->u.idx;
+ case BLOCK_TYPE_OBJ:
+ return &rec->u.obj;
+ }
+ abort();
+}
+
+static struct reftable_record_vtable *
+reftable_record_vtable(struct reftable_record *rec)
+{
+ switch (rec->type) {
+ case BLOCK_TYPE_REF:
+ return &reftable_ref_record_vtable;
+ case BLOCK_TYPE_LOG:
+ return &reftable_log_record_vtable;
+ case BLOCK_TYPE_INDEX:
+ return &reftable_index_record_vtable;
+ case BLOCK_TYPE_OBJ:
+ return &reftable_obj_record_vtable;
+ }
+ abort();
+}
+
+void reftable_record_init(struct reftable_record *rec, uint8_t typ)
+{
+ memset(rec, 0, sizeof(*rec));
+ rec->type = typ;
+
+ switch (typ) {
+ case BLOCK_TYPE_REF:
+ case BLOCK_TYPE_LOG:
+ case BLOCK_TYPE_OBJ:
+ return;
+ case BLOCK_TYPE_INDEX:
+ strbuf_init(&rec->u.idx.last_key, 0);
+ return;
+ default:
+ BUG("unhandled record type");
+ }
+}
+
+void reftable_record_print(struct reftable_record *rec, int hash_size)
+{
+ printf("'%c': ", rec->type);
+ reftable_record_vtable(rec)->print(reftable_record_data(rec), hash_size);
+}
diff --git a/reftable/record.h b/reftable/record.h
new file mode 100644
index 0000000..d778133
--- /dev/null
+++ b/reftable/record.h
@@ -0,0 +1,165 @@
+/*
+Copyright 2020 Google LLC
+
+Use of this source code is governed by a BSD-style
+license that can be found in the LICENSE file or at
+https://developers.google.com/open-source/licenses/bsd
+*/
+
+#ifndef RECORD_H
+#define RECORD_H
+
+#include "system.h"
+
+#include <stdint.h>
+
+#include "reftable-record.h"
+
+/*
+ * A substring of existing string data. This structure takes no responsibility
+ * for the lifetime of the data it points to.
+ */
+struct string_view {
+ uint8_t *buf;
+ size_t len;
+};
+
+/* Advance `s.buf` by `n`, and decrease length. */
+static inline void string_view_consume(struct string_view *s, int n)
+{
+ s->buf += n;
+ s->len -= n;
+}
+
+/* Utilities for de/encoding varints. Both functions return the number of
+ * bytes read or written, or -1 on error. */
+
+int get_var_int(uint64_t *dest, struct string_view *in);
+int put_var_int(struct string_view *dest, uint64_t val);
+
+/* Methods for records. */
+struct reftable_record_vtable {
+	/* encode the key of the record into a uint8_t strbuf. */
+ void (*key)(const void *rec, struct strbuf *dest);
+
+	/* The record type ('r' for ref). */
+ uint8_t type;
+
+ void (*copy_from)(void *dest, const void *src, int hash_size);
+
+	/* a value in [0..7], indicating the record subvariant (e.g. ref value
+	 * vs. symref vs. ref deletion). */
+ uint8_t (*val_type)(const void *rec);
+
+ /* encodes rec into dest, returning how much space was used. */
+ int (*encode)(const void *rec, struct string_view dest, int hash_size);
+
+ /* decode data from `src` into the record. */
+ int (*decode)(void *rec, struct strbuf key, uint8_t extra,
+ struct string_view src, int hash_size,
+ struct strbuf *scratch);
+
+ /* deallocate and null the record. */
+ void (*release)(void *rec);
+
+ /* is this a tombstone? */
+ int (*is_deletion)(const void *rec);
+
+ /* Are two records equal? This assumes they have the same type. Returns 0 for non-equal. */
+ int (*equal)(const void *a, const void *b, int hash_size);
+
+ /*
+ * Compare keys of two records with each other. The records must have
+ * the same type.
+ */
+ int (*cmp)(const void *a, const void *b);
+
+ /* Print on stdout, for debugging. */
+ void (*print)(const void *rec, int hash_size);
+};
+
+/* returns true for recognized block types. Blocks start with their block type. */
+int reftable_is_block_type(uint8_t typ);
+
+/* Encode `key` into `dest`. Sets `is_restart` to indicate a restart. Returns
+ * number of bytes written. */
+int reftable_encode_key(int *is_restart, struct string_view dest,
+ struct strbuf prev_key, struct strbuf key,
+ uint8_t extra);
+
+/* Decode a record's key lengths. */
+int reftable_decode_keylen(struct string_view in,
+ uint64_t *prefix_len,
+ uint64_t *suffix_len,
+ uint8_t *extra);
+
+/*
+ * Decode into `last_key` and `extra` from `in`. `last_key` is expected to
+ * contain the decoded key of the preceding record, if any.
+ */
+int reftable_decode_key(struct strbuf *last_key, uint8_t *extra,
+ struct string_view in);
+
+/* reftable_index_record is used internally to speed up lookups. */
+struct reftable_index_record {
+ uint64_t offset; /* Offset of block */
+ struct strbuf last_key; /* Last key of the block. */
+};
+
+/* reftable_obj_record stores an object ID => ref mapping. */
+struct reftable_obj_record {
+ uint8_t *hash_prefix; /* leading bytes of the object ID */
+ int hash_prefix_len; /* number of leading bytes. Constant
+ * across a single table. */
+ uint64_t *offsets; /* a vector of file offsets. */
+ int offset_len;
+};
+
+/* reftable_record is a generic wrapper for the different record types. It is
+ * normally created on the stack, or embedded within another struct. If the
+ * type is known, a fresh instance can be initialized explicitly. Otherwise,
+ * use `reftable_record_init()` to initialize it generically (the index record
+ * is not valid as a 0-initialized structure).
+ */
+struct reftable_record {
+ uint8_t type;
+ union {
+ struct reftable_ref_record ref;
+ struct reftable_log_record log;
+ struct reftable_obj_record obj;
+ struct reftable_index_record idx;
+ } u;
+};
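+
+/*
+ * A minimal usage sketch, for illustration only:
+ *
+ *   struct reftable_record rec;
+ *   reftable_record_init(&rec, BLOCK_TYPE_REF);
+ *   rec.u.ref.refname = xstrdup("refs/heads/master");
+ *   ...
+ *   reftable_record_release(&rec);
+ */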
+
+/* Initialize the reftable record for the given type */
+void reftable_record_init(struct reftable_record *rec, uint8_t typ);
+
+/* see struct record_vtable */
+int reftable_record_cmp(struct reftable_record *a, struct reftable_record *b);
+int reftable_record_equal(struct reftable_record *a, struct reftable_record *b, int hash_size);
+void reftable_record_print(struct reftable_record *rec, int hash_size);
+void reftable_record_key(struct reftable_record *rec, struct strbuf *dest);
+void reftable_record_copy_from(struct reftable_record *rec,
+ struct reftable_record *src, int hash_size);
+uint8_t reftable_record_val_type(struct reftable_record *rec);
+int reftable_record_encode(struct reftable_record *rec, struct string_view dest,
+ int hash_size);
+int reftable_record_decode(struct reftable_record *rec, struct strbuf key,
+ uint8_t extra, struct string_view src,
+ int hash_size, struct strbuf *scratch);
+int reftable_record_is_deletion(struct reftable_record *rec);
+
+static inline uint8_t reftable_record_type(struct reftable_record *rec)
+{
+ return rec->type;
+}
+
+/* frees and zeroes out the embedded record */
+void reftable_record_release(struct reftable_record *rec);
+
+/* for qsort. */
+int reftable_ref_record_compare_name(const void *a, const void *b);
+
+/* for qsort. */
+int reftable_log_record_compare_key(const void *a, const void *b);
+
+#endif
diff --git a/reftable/record_test.c b/reftable/record_test.c
new file mode 100644
index 0000000..c158ee7
--- /dev/null
+++ b/reftable/record_test.c
@@ -0,0 +1,419 @@
+/*
+ Copyright 2020 Google LLC
+
+ Use of this source code is governed by a BSD-style
+ license that can be found in the LICENSE file or at
+ https://developers.google.com/open-source/licenses/bsd
+*/
+
+#include "record.h"
+
+#include "system.h"
+#include "basics.h"
+#include "constants.h"
+#include "test_framework.h"
+#include "reftable-tests.h"
+
+static void test_copy(struct reftable_record *rec)
+{
+ struct reftable_record copy;
+ uint8_t typ;
+
+ typ = reftable_record_type(rec);
+ reftable_record_init(&copy, typ);
+ reftable_record_copy_from(&copy, rec, GIT_SHA1_RAWSZ);
+ /* do it twice to catch memory leaks */
+ reftable_record_copy_from(&copy, rec, GIT_SHA1_RAWSZ);
+ EXPECT(reftable_record_equal(rec, &copy, GIT_SHA1_RAWSZ));
+
+ puts("testing print coverage:\n");
+ reftable_record_print(&copy, GIT_SHA1_RAWSZ);
+
+ reftable_record_release(&copy);
+}
+
+static void test_varint_roundtrip(void)
+{
+ uint64_t inputs[] = { 0,
+ 1,
+ 27,
+ 127,
+ 128,
+ 257,
+ 4096,
+ ((uint64_t)1 << 63),
+ ((uint64_t)1 << 63) + ((uint64_t)1 << 63) - 1 };
+ int i = 0;
+ for (i = 0; i < ARRAY_SIZE(inputs); i++) {
+ uint8_t dest[10];
+
+ struct string_view out = {
+ .buf = dest,
+ .len = sizeof(dest),
+ };
+ uint64_t in = inputs[i];
+ int n = put_var_int(&out, in);
+ uint64_t got = 0;
+
+ EXPECT(n > 0);
+ out.len = n;
+ n = get_var_int(&got, &out);
+ EXPECT(n > 0);
+
+ EXPECT(got == in);
+ }
+}
+
+static void test_common_prefix(void)
+{
+ struct {
+ const char *a, *b;
+ int want;
+ } cases[] = {
+ { "abc", "ab", 2 },
+ { "", "abc", 0 },
+ { "abc", "abd", 2 },
+ { "abc", "pqr", 0 },
+ };
+
+ int i = 0;
+ for (i = 0; i < ARRAY_SIZE(cases); i++) {
+ struct strbuf a = STRBUF_INIT;
+ struct strbuf b = STRBUF_INIT;
+ strbuf_addstr(&a, cases[i].a);
+ strbuf_addstr(&b, cases[i].b);
+ EXPECT(common_prefix_size(&a, &b) == cases[i].want);
+
+ strbuf_release(&a);
+ strbuf_release(&b);
+ }
+}
+
+static void set_hash(uint8_t *h, int j)
+{
+ int i = 0;
+ for (i = 0; i < hash_size(GIT_SHA1_FORMAT_ID); i++) {
+ h[i] = (j >> i) & 0xff;
+ }
+}
+
+static void test_reftable_ref_record_roundtrip(void)
+{
+ struct strbuf scratch = STRBUF_INIT;
+ int i = 0;
+
+ for (i = REFTABLE_REF_DELETION; i < REFTABLE_NR_REF_VALUETYPES; i++) {
+ struct reftable_record in = {
+ .type = BLOCK_TYPE_REF,
+ };
+ struct reftable_record out = { .type = BLOCK_TYPE_REF };
+ struct strbuf key = STRBUF_INIT;
+ uint8_t buffer[1024] = { 0 };
+ struct string_view dest = {
+ .buf = buffer,
+ .len = sizeof(buffer),
+ };
+ int n, m;
+
+ in.u.ref.value_type = i;
+ switch (i) {
+ case REFTABLE_REF_DELETION:
+ break;
+ case REFTABLE_REF_VAL1:
+ set_hash(in.u.ref.value.val1, 1);
+ break;
+ case REFTABLE_REF_VAL2:
+ set_hash(in.u.ref.value.val2.value, 1);
+ set_hash(in.u.ref.value.val2.target_value, 2);
+ break;
+ case REFTABLE_REF_SYMREF:
+ in.u.ref.value.symref = xstrdup("target");
+ break;
+ }
+ in.u.ref.refname = xstrdup("refs/heads/master");
+
+ test_copy(&in);
+
+ EXPECT(reftable_record_val_type(&in) == i);
+
+ reftable_record_key(&in, &key);
+ n = reftable_record_encode(&in, dest, GIT_SHA1_RAWSZ);
+ EXPECT(n > 0);
+
+ /* decode into a non-zero reftable_record to test for leaks. */
+ m = reftable_record_decode(&out, key, i, dest, GIT_SHA1_RAWSZ, &scratch);
+ EXPECT(n == m);
+
+ EXPECT(reftable_ref_record_equal(&in.u.ref, &out.u.ref,
+ GIT_SHA1_RAWSZ));
+ reftable_record_release(&in);
+
+ strbuf_release(&key);
+ reftable_record_release(&out);
+ }
+
+ strbuf_release(&scratch);
+}
+
+static void test_reftable_log_record_equal(void)
+{
+ struct reftable_log_record in[2] = {
+ {
+ .refname = xstrdup("refs/heads/master"),
+ .update_index = 42,
+ },
+ {
+ .refname = xstrdup("refs/heads/master"),
+ .update_index = 22,
+ }
+ };
+
+ EXPECT(!reftable_log_record_equal(&in[0], &in[1], GIT_SHA1_RAWSZ));
+ in[1].update_index = in[0].update_index;
+ EXPECT(reftable_log_record_equal(&in[0], &in[1], GIT_SHA1_RAWSZ));
+ reftable_log_record_release(&in[0]);
+ reftable_log_record_release(&in[1]);
+}
+
+static void test_reftable_log_record_roundtrip(void)
+{
+ int i;
+ struct reftable_log_record in[] = {
+ {
+ .refname = xstrdup("refs/heads/master"),
+ .update_index = 42,
+ .value_type = REFTABLE_LOG_UPDATE,
+ .value = {
+ .update = {
+ .name = xstrdup("han-wen"),
+ .email = xstrdup("hanwen@google.com"),
+ .message = xstrdup("test"),
+ .time = 1577123507,
+ .tz_offset = 100,
+ },
+ }
+ },
+ {
+ .refname = xstrdup("refs/heads/master"),
+ .update_index = 22,
+ .value_type = REFTABLE_LOG_DELETION,
+ },
+ {
+ .refname = xstrdup("branch"),
+ .update_index = 33,
+ .value_type = REFTABLE_LOG_UPDATE,
+ }
+ };
+ struct strbuf scratch = STRBUF_INIT;
+
+ set_test_hash(in[0].value.update.new_hash, 1);
+ set_test_hash(in[0].value.update.old_hash, 2);
+ set_test_hash(in[2].value.update.new_hash, 3);
+ set_test_hash(in[2].value.update.old_hash, 4);
+ for (i = 0; i < ARRAY_SIZE(in); i++) {
+ struct reftable_record rec = { .type = BLOCK_TYPE_LOG };
+ struct strbuf key = STRBUF_INIT;
+ uint8_t buffer[1024] = { 0 };
+ struct string_view dest = {
+ .buf = buffer,
+ .len = sizeof(buffer),
+ };
+ /* populate out, to check for leaks. */
+ struct reftable_record out = {
+ .type = BLOCK_TYPE_LOG,
+ .u.log = {
+ .refname = xstrdup("old name"),
+ .value_type = REFTABLE_LOG_UPDATE,
+ .value = {
+ .update = {
+ .name = xstrdup("old name"),
+ .email = xstrdup("old@email"),
+ .message = xstrdup("old message"),
+ },
+ },
+ },
+ };
+ int n, m, valtype;
+
+ rec.u.log = in[i];
+
+ test_copy(&rec);
+
+ reftable_record_key(&rec, &key);
+
+ n = reftable_record_encode(&rec, dest, GIT_SHA1_RAWSZ);
+ EXPECT(n >= 0);
+ valtype = reftable_record_val_type(&rec);
+ m = reftable_record_decode(&out, key, valtype, dest,
+ GIT_SHA1_RAWSZ, &scratch);
+ EXPECT(n == m);
+
+ EXPECT(reftable_log_record_equal(&in[i], &out.u.log,
+ GIT_SHA1_RAWSZ));
+ reftable_log_record_release(&in[i]);
+ strbuf_release(&key);
+ reftable_record_release(&out);
+ }
+
+ strbuf_release(&scratch);
+}
+
+static void test_u24_roundtrip(void)
+{
+ uint32_t in = 0x112233;
+ uint8_t dest[3];
+ uint32_t out;
+ put_be24(dest, in);
+ out = get_be24(dest);
+ EXPECT(in == out);
+}
+
+static void test_key_roundtrip(void)
+{
+ uint8_t buffer[1024] = { 0 };
+ struct string_view dest = {
+ .buf = buffer,
+ .len = sizeof(buffer),
+ };
+ struct strbuf last_key = STRBUF_INIT;
+ struct strbuf key = STRBUF_INIT;
+ struct strbuf roundtrip = STRBUF_INIT;
+ int restart;
+ uint8_t extra;
+ int n, m;
+ uint8_t rt_extra;
+
+ strbuf_addstr(&last_key, "refs/heads/master");
+ strbuf_addstr(&key, "refs/tags/bla");
+ extra = 6;
+ n = reftable_encode_key(&restart, dest, last_key, key, extra);
+ EXPECT(!restart);
+ EXPECT(n > 0);
+
+ strbuf_addstr(&roundtrip, "refs/heads/master");
+ m = reftable_decode_key(&roundtrip, &rt_extra, dest);
+ EXPECT(n == m);
+ EXPECT(0 == strbuf_cmp(&key, &roundtrip));
+ EXPECT(rt_extra == extra);
+
+ strbuf_release(&last_key);
+ strbuf_release(&key);
+ strbuf_release(&roundtrip);
+}
+
+static void test_reftable_obj_record_roundtrip(void)
+{
+ uint8_t testHash1[GIT_SHA1_RAWSZ] = { 1, 2, 3, 4, 0 };
+ uint64_t till9[] = { 1, 2, 3, 4, 500, 600, 700, 800, 9000 };
+ struct reftable_obj_record recs[3] = {
+ {
+ .hash_prefix = testHash1,
+ .hash_prefix_len = 5,
+ .offsets = till9,
+ .offset_len = 3,
+ },
+ {
+ .hash_prefix = testHash1,
+ .hash_prefix_len = 5,
+ .offsets = till9,
+ .offset_len = 9,
+ },
+ {
+ .hash_prefix = testHash1,
+ .hash_prefix_len = 5,
+ },
+ };
+ struct strbuf scratch = STRBUF_INIT;
+ int i = 0;
+
+ for (i = 0; i < ARRAY_SIZE(recs); i++) {
+ uint8_t buffer[1024] = { 0 };
+ struct string_view dest = {
+ .buf = buffer,
+ .len = sizeof(buffer),
+ };
+ struct reftable_record in = {
+ .type = BLOCK_TYPE_OBJ,
+ .u = {
+ .obj = recs[i],
+ },
+ };
+ struct strbuf key = STRBUF_INIT;
+ struct reftable_record out = { .type = BLOCK_TYPE_OBJ };
+ int n, m;
+ uint8_t extra;
+
+ test_copy(&in);
+ reftable_record_key(&in, &key);
+ n = reftable_record_encode(&in, dest, GIT_SHA1_RAWSZ);
+ EXPECT(n > 0);
+ extra = reftable_record_val_type(&in);
+ m = reftable_record_decode(&out, key, extra, dest,
+ GIT_SHA1_RAWSZ, &scratch);
+ EXPECT(n == m);
+
+ EXPECT(reftable_record_equal(&in, &out, GIT_SHA1_RAWSZ));
+ strbuf_release(&key);
+ reftable_record_release(&out);
+ }
+
+ strbuf_release(&scratch);
+}
+
+static void test_reftable_index_record_roundtrip(void)
+{
+ struct reftable_record in = {
+ .type = BLOCK_TYPE_INDEX,
+ .u.idx = {
+ .offset = 42,
+ .last_key = STRBUF_INIT,
+ },
+ };
+ uint8_t buffer[1024] = { 0 };
+ struct string_view dest = {
+ .buf = buffer,
+ .len = sizeof(buffer),
+ };
+ struct strbuf scratch = STRBUF_INIT;
+ struct strbuf key = STRBUF_INIT;
+ struct reftable_record out = {
+ .type = BLOCK_TYPE_INDEX,
+ .u.idx = { .last_key = STRBUF_INIT },
+ };
+ int n, m;
+ uint8_t extra;
+
+ strbuf_addstr(&in.u.idx.last_key, "refs/heads/master");
+ reftable_record_key(&in, &key);
+ test_copy(&in);
+
+ EXPECT(0 == strbuf_cmp(&key, &in.u.idx.last_key));
+ n = reftable_record_encode(&in, dest, GIT_SHA1_RAWSZ);
+ EXPECT(n > 0);
+
+ extra = reftable_record_val_type(&in);
+ m = reftable_record_decode(&out, key, extra, dest, GIT_SHA1_RAWSZ,
+ &scratch);
+ EXPECT(m == n);
+
+ EXPECT(reftable_record_equal(&in, &out, GIT_SHA1_RAWSZ));
+
+ reftable_record_release(&out);
+ strbuf_release(&key);
+ strbuf_release(&scratch);
+ strbuf_release(&in.u.idx.last_key);
+}
+
+int record_test_main(int argc, const char *argv[])
+{
+ RUN_TEST(test_reftable_log_record_equal);
+ RUN_TEST(test_reftable_log_record_roundtrip);
+ RUN_TEST(test_reftable_ref_record_roundtrip);
+ RUN_TEST(test_varint_roundtrip);
+ RUN_TEST(test_key_roundtrip);
+ RUN_TEST(test_common_prefix);
+ RUN_TEST(test_reftable_obj_record_roundtrip);
+ RUN_TEST(test_reftable_index_record_roundtrip);
+ RUN_TEST(test_u24_roundtrip);
+ return 0;
+}
diff --git a/reftable/refname.c b/reftable/refname.c
new file mode 100644
index 0000000..bbfde15
--- /dev/null
+++ b/reftable/refname.c
@@ -0,0 +1,206 @@
+/*
+ Copyright 2020 Google LLC
+
+ Use of this source code is governed by a BSD-style
+ license that can be found in the LICENSE file or at
+ https://developers.google.com/open-source/licenses/bsd
+*/
+
+#include "system.h"
+#include "reftable-error.h"
+#include "basics.h"
+#include "refname.h"
+#include "reftable-iterator.h"
+
+struct refname_needle_lesseq_args {
+ char **haystack;
+ const char *needle;
+};
+
+static int refname_needle_lesseq(size_t k, void *_args)
+{
+ struct refname_needle_lesseq_args *args = _args;
+ return strcmp(args->needle, args->haystack[k]) <= 0;
+}
+
+static int modification_has_ref(struct modification *mod, const char *name)
+{
+ struct reftable_ref_record ref = { NULL };
+ int err = 0;
+
+ if (mod->add_len > 0) {
+ struct refname_needle_lesseq_args args = {
+ .haystack = mod->add,
+ .needle = name,
+ };
+ size_t idx = binsearch(mod->add_len, refname_needle_lesseq, &args);
+ if (idx < mod->add_len && !strcmp(mod->add[idx], name))
+ return 0;
+ }
+
+ if (mod->del_len > 0) {
+ struct refname_needle_lesseq_args args = {
+ .haystack = mod->del,
+ .needle = name,
+ };
+ size_t idx = binsearch(mod->del_len, refname_needle_lesseq, &args);
+ if (idx < mod->del_len && !strcmp(mod->del[idx], name))
+ return 1;
+ }
+
+ err = reftable_table_read_ref(&mod->tab, name, &ref);
+ reftable_ref_record_release(&ref);
+ return err;
+}
+
+static void modification_release(struct modification *mod)
+{
+ /* don't delete the strings themselves; they're owned by ref records.
+ */
+ FREE_AND_NULL(mod->add);
+ FREE_AND_NULL(mod->del);
+ mod->add_len = 0;
+ mod->del_len = 0;
+}
+
+static int modification_has_ref_with_prefix(struct modification *mod,
+ const char *prefix)
+{
+ struct reftable_iterator it = { NULL };
+ struct reftable_ref_record ref = { NULL };
+ int err = 0;
+
+ if (mod->add_len > 0) {
+ struct refname_needle_lesseq_args args = {
+ .haystack = mod->add,
+ .needle = prefix,
+ };
+ size_t idx = binsearch(mod->add_len, refname_needle_lesseq, &args);
+ if (idx < mod->add_len &&
+ !strncmp(prefix, mod->add[idx], strlen(prefix)))
+ goto done;
+ }
+ err = reftable_table_seek_ref(&mod->tab, &it, prefix);
+ if (err)
+ goto done;
+
+ while (1) {
+ err = reftable_iterator_next_ref(&it, &ref);
+ if (err)
+ goto done;
+
+ if (mod->del_len > 0) {
+ struct refname_needle_lesseq_args args = {
+ .haystack = mod->del,
+ .needle = ref.refname,
+ };
+ size_t idx = binsearch(mod->del_len, refname_needle_lesseq, &args);
+ if (idx < mod->del_len &&
+ !strcmp(ref.refname, mod->del[idx]))
+ continue;
+ }
+
+ if (strncmp(ref.refname, prefix, strlen(prefix))) {
+ err = 1;
+ goto done;
+ }
+ err = 0;
+ goto done;
+ }
+
+done:
+ reftable_ref_record_release(&ref);
+ reftable_iterator_destroy(&it);
+ return err;
+}
+
+static int validate_refname(const char *name)
+{
+ while (1) {
+ char *next = strchr(name, '/');
+ if (!*name) {
+ return REFTABLE_REFNAME_ERROR;
+ }
+ if (!next) {
+ return 0;
+ }
+ if (next - name == 0 || (next - name == 1 && *name == '.') ||
+ (next - name == 2 && name[0] == '.' && name[1] == '.'))
+ return REFTABLE_REFNAME_ERROR;
+ name = next + 1;
+ }
+ return 0;
+}
+
+int validate_ref_record_addition(struct reftable_table tab,
+ struct reftable_ref_record *recs, size_t sz)
+{
+ struct modification mod = {
+ .tab = tab,
+ .add = reftable_calloc(sz, sizeof(*mod.add)),
+ .del = reftable_calloc(sz, sizeof(*mod.del)),
+ };
+ int i = 0;
+ int err = 0;
+ for (; i < sz; i++) {
+ if (reftable_ref_record_is_deletion(&recs[i])) {
+ mod.del[mod.del_len++] = recs[i].refname;
+ } else {
+ mod.add[mod.add_len++] = recs[i].refname;
+ }
+ }
+
+ err = modification_validate(&mod);
+ modification_release(&mod);
+ return err;
+}
+
+static void strbuf_trim_component(struct strbuf *sl)
+{
+ while (sl->len > 0) {
+ int is_slash = (sl->buf[sl->len - 1] == '/');
+ strbuf_setlen(sl, sl->len - 1);
+ if (is_slash)
+ break;
+ }
+}
+
+int modification_validate(struct modification *mod)
+{
+ struct strbuf slashed = STRBUF_INIT;
+ int err = 0;
+ int i = 0;
+ for (; i < mod->add_len; i++) {
+ err = validate_refname(mod->add[i]);
+ if (err)
+ goto done;
+ strbuf_reset(&slashed);
+ strbuf_addstr(&slashed, mod->add[i]);
+ strbuf_addstr(&slashed, "/");
+
+ err = modification_has_ref_with_prefix(mod, slashed.buf);
+ if (err == 0) {
+ err = REFTABLE_NAME_CONFLICT;
+ goto done;
+ }
+ if (err < 0)
+ goto done;
+
+ strbuf_reset(&slashed);
+ strbuf_addstr(&slashed, mod->add[i]);
+ while (slashed.len) {
+ strbuf_trim_component(&slashed);
+ err = modification_has_ref(mod, slashed.buf);
+ if (err == 0) {
+ err = REFTABLE_NAME_CONFLICT;
+ goto done;
+ }
+ if (err < 0)
+ goto done;
+ }
+ }
+ err = 0;
+done:
+ strbuf_release(&slashed);
+ return err;
+}
diff --git a/reftable/refname.h b/reftable/refname.h
new file mode 100644
index 0000000..a24b40f
--- /dev/null
+++ b/reftable/refname.h
@@ -0,0 +1,29 @@
+/*
+ Copyright 2020 Google LLC
+
+ Use of this source code is governed by a BSD-style
+ license that can be found in the LICENSE file or at
+ https://developers.google.com/open-source/licenses/bsd
+*/
+#ifndef REFNAME_H
+#define REFNAME_H
+
+#include "reftable-record.h"
+#include "reftable-generic.h"
+
+struct modification {
+ struct reftable_table tab;
+
+ char **add;
+ size_t add_len;
+
+ char **del;
+ size_t del_len;
+};
+
+int validate_ref_record_addition(struct reftable_table tab,
+ struct reftable_ref_record *recs, size_t sz);
+
+int modification_validate(struct modification *mod);
+
+#endif
diff --git a/reftable/refname_test.c b/reftable/refname_test.c
new file mode 100644
index 0000000..b9cc625
--- /dev/null
+++ b/reftable/refname_test.c
@@ -0,0 +1,101 @@
+/*
+Copyright 2020 Google LLC
+
+Use of this source code is governed by a BSD-style
+license that can be found in the LICENSE file or at
+https://developers.google.com/open-source/licenses/bsd
+*/
+
+#include "basics.h"
+#include "block.h"
+#include "blocksource.h"
+#include "reader.h"
+#include "record.h"
+#include "refname.h"
+#include "reftable-error.h"
+#include "reftable-writer.h"
+#include "system.h"
+
+#include "test_framework.h"
+#include "reftable-tests.h"
+
+struct testcase {
+ char *add;
+ char *del;
+ int error_code;
+};
+
+static void test_conflict(void)
+{
+ struct reftable_write_options opts = { 0 };
+ struct strbuf buf = STRBUF_INIT;
+ struct reftable_writer *w =
+ reftable_new_writer(&strbuf_add_void, &noop_flush, &buf, &opts);
+ struct reftable_ref_record rec = {
+ .refname = "a/b",
+ .value_type = REFTABLE_REF_SYMREF,
+		.value.symref = "destination", /* the ref is a symref; its
+						  target is irrelevant for the
+						  name checks below. */
+ .update_index = 1,
+ };
+ int err;
+ int i;
+ struct reftable_block_source source = { NULL };
+ struct reftable_reader *rd = NULL;
+ struct reftable_table tab = { NULL };
+ struct testcase cases[] = {
+ { "a/b/c", NULL, REFTABLE_NAME_CONFLICT },
+ { "b", NULL, 0 },
+ { "a", NULL, REFTABLE_NAME_CONFLICT },
+ { "a", "a/b", 0 },
+
+ { "p/", NULL, REFTABLE_REFNAME_ERROR },
+ { "p//q", NULL, REFTABLE_REFNAME_ERROR },
+ { "p/./q", NULL, REFTABLE_REFNAME_ERROR },
+ { "p/../q", NULL, REFTABLE_REFNAME_ERROR },
+
+ { "a/b/c", "a/b", 0 },
+ { NULL, "a//b", 0 },
+ };
+ reftable_writer_set_limits(w, 1, 1);
+
+ err = reftable_writer_add_ref(w, &rec);
+ EXPECT_ERR(err);
+
+ err = reftable_writer_close(w);
+ EXPECT_ERR(err);
+ reftable_writer_free(w);
+
+ block_source_from_strbuf(&source, &buf);
+ err = reftable_new_reader(&rd, &source, "filename");
+ EXPECT_ERR(err);
+
+ reftable_table_from_reader(&tab, rd);
+
+ for (i = 0; i < ARRAY_SIZE(cases); i++) {
+ struct modification mod = {
+ .tab = tab,
+ };
+
+ if (cases[i].add) {
+ mod.add = &cases[i].add;
+ mod.add_len = 1;
+ }
+ if (cases[i].del) {
+ mod.del = &cases[i].del;
+ mod.del_len = 1;
+ }
+
+ err = modification_validate(&mod);
+ EXPECT(err == cases[i].error_code);
+ }
+
+ reftable_reader_free(rd);
+ strbuf_release(&buf);
+}
+
+int refname_test_main(int argc, const char *argv[])
+{
+ RUN_TEST(test_conflict);
+ return 0;
+}
diff --git a/reftable/reftable-blocksource.h b/reftable/reftable-blocksource.h
new file mode 100644
index 0000000..5aa3990
--- /dev/null
+++ b/reftable/reftable-blocksource.h
@@ -0,0 +1,49 @@
+/*
+Copyright 2020 Google LLC
+
+Use of this source code is governed by a BSD-style
+license that can be found in the LICENSE file or at
+https://developers.google.com/open-source/licenses/bsd
+*/
+
+#ifndef REFTABLE_BLOCKSOURCE_H
+#define REFTABLE_BLOCKSOURCE_H
+
+#include <stdint.h>
+
+/* block_source is a generic wrapper for a seekable readable file.
+ */
+struct reftable_block_source {
+ struct reftable_block_source_vtable *ops;
+ void *arg;
+};
+
+/* a contiguous segment of bytes. It keeps track of its generating block_source
+ * so it can return itself into the pool. */
+struct reftable_block {
+ uint8_t *data;
+ int len;
+ struct reftable_block_source source;
+};
+
+/* block_source_vtable are the operations that make up block_source */
+struct reftable_block_source_vtable {
+ /* returns the size of a block source */
+ uint64_t (*size)(void *source);
+
+ /* reads a segment from the block source. It is an error to read
+ beyond the end of the block */
+ int (*read_block)(void *source, struct reftable_block *dest,
+ uint64_t off, uint32_t size);
+ /* mark the block as read; may return the data back to malloc */
+ void (*return_block)(void *source, struct reftable_block *blockp);
+
+ /* release all resources associated with the block source */
+ void (*close)(void *source);
+};
+
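+/* A minimal sketch of a custom block source: a read-only view over an
+   in-memory buffer. The mem_source type and the mem_* functions are
+   illustrations only, not part of this API:
+
+   struct mem_source { uint8_t *buf; uint64_t len; };
+
+   static uint64_t mem_size(void *arg)
+   {
+	   return ((struct mem_source *)arg)->len;
+   }
+
+   static int mem_read_block(void *arg, struct reftable_block *dest,
+			     uint64_t off, uint32_t size)
+   {
+	   struct mem_source *m = arg;
+	   dest->data = m->buf + off;
+	   dest->len = size;
+	   return size;
+   }
+
+   static void mem_return_block(void *arg, struct reftable_block *blockp) { }
+   static void mem_close(void *arg) { }
+
+   static struct reftable_block_source_vtable mem_vtable = {
+	   .size = mem_size,
+	   .read_block = mem_read_block,
+	   .return_block = mem_return_block,
+	   .close = mem_close,
+   };
+
+   Wiring it up then amounts to:
+
+   block_source.ops = &mem_vtable;
+   block_source.arg = &my_mem_source;
+*/
+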
+/* opens a file on the file system as a block_source */
+int reftable_block_source_from_file(struct reftable_block_source *block_src,
+ const char *name);
+
+#endif
diff --git a/reftable/reftable-error.h b/reftable/reftable-error.h
new file mode 100644
index 0000000..e9b07c9
--- /dev/null
+++ b/reftable/reftable-error.h
@@ -0,0 +1,69 @@
+/*
+Copyright 2020 Google LLC
+
+Use of this source code is governed by a BSD-style
+license that can be found in the LICENSE file or at
+https://developers.google.com/open-source/licenses/bsd
+*/
+
+#ifndef REFTABLE_ERROR_H
+#define REFTABLE_ERROR_H
+
+/*
+ * Errors in reftable calls are signaled with negative integer return values. 0
+ * means success.
+ */
+enum reftable_error {
+ /* Unexpected file system behavior */
+ REFTABLE_IO_ERROR = -2,
+
+ /* Format inconsistency on reading data */
+ REFTABLE_FORMAT_ERROR = -3,
+
+ /* File does not exist. Returned from block_source_from_file(), because
+ * it needs special handling in stack.
+ */
+ REFTABLE_NOT_EXIST_ERROR = -4,
+
+ /* Trying to access locked data. */
+ REFTABLE_LOCK_ERROR = -5,
+
+ /* Misuse of the API:
+ * - on writing a record with NULL refname.
+ * - on writing a reftable_ref_record outside the table limits
+ * - on writing a ref or log record before the stack's
+	 *   next_update_index
+ * - on writing a log record with multiline message with
+ * exact_log_message unset
+ * - on reading a reftable_ref_record from log iterator, or vice versa.
+ *
+ * When a call misuses the API, the internal state of the library is
+ * kept unchanged.
+ */
+ REFTABLE_API_ERROR = -6,
+
+ /* Decompression error */
+ REFTABLE_ZLIB_ERROR = -7,
+
+ /* Wrote a table without blocks. */
+ REFTABLE_EMPTY_TABLE_ERROR = -8,
+
+ /* Dir/file conflict. */
+ REFTABLE_NAME_CONFLICT = -9,
+
+ /* Invalid ref name. */
+ REFTABLE_REFNAME_ERROR = -10,
+
+ /* Entry does not fit. This can happen when writing outsize reflog
+ messages. */
+ REFTABLE_ENTRY_TOO_BIG_ERROR = -11,
+
+ /* Trying to write out-of-date data. */
+ REFTABLE_OUTDATED_ERROR = -12,
+};
+
+/* convert the numeric error code to a string. The string should not be
+ * deallocated. */
+const char *reftable_error_str(int err);
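+
+/* example (sketch): a caller would typically report failures along these
+   lines; the surrounding stack handling is assumed, not part of this header:
+
+   int err = reftable_stack_reload(st);
+   if (err < 0)
+	   fprintf(stderr, "reftable: %s\n", reftable_error_str(err));
+*/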
+
+#endif
diff --git a/reftable/reftable-generic.h b/reftable/reftable-generic.h
new file mode 100644
index 0000000..d239751
--- /dev/null
+++ b/reftable/reftable-generic.h
@@ -0,0 +1,47 @@
+/*
+Copyright 2020 Google LLC
+
+Use of this source code is governed by a BSD-style
+license that can be found in the LICENSE file or at
+https://developers.google.com/open-source/licenses/bsd
+*/
+
+#ifndef REFTABLE_GENERIC_H
+#define REFTABLE_GENERIC_H
+
+#include "reftable-iterator.h"
+
+struct reftable_table_vtable;
+
+/*
+ * Provides a unified API for reading tables, either merged tables, or single
+ * readers. */
+struct reftable_table {
+ struct reftable_table_vtable *ops;
+ void *table_arg;
+};
+
+int reftable_table_seek_log(struct reftable_table *tab,
+ struct reftable_iterator *it, const char *name);
+
+int reftable_table_seek_ref(struct reftable_table *tab,
+ struct reftable_iterator *it, const char *name);
+
+/* returns the hash ID from a generic reftable_table */
+uint32_t reftable_table_hash_id(struct reftable_table *tab);
+
+/* returns the max update_index covered by this table. */
+uint64_t reftable_table_max_update_index(struct reftable_table *tab);
+
+/* returns the min update_index covered by this table. */
+uint64_t reftable_table_min_update_index(struct reftable_table *tab);
+
+/* convenience function to read a single ref. Returns < 0 for error, 0
+ for success, and 1 if ref not found. */
+int reftable_table_read_ref(struct reftable_table *tab, const char *name,
+ struct reftable_ref_record *ref);
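+
+/* example (sketch): a point lookup through the generic interface, assuming
+   `tab` was initialized via reftable_table_from_reader() or
+   reftable_table_from_merged_table():
+
+   struct reftable_ref_record ref = { NULL };
+   int err = reftable_table_read_ref(&tab, "refs/heads/master", &ref);
+   if (err < 0) {
+	   ..error handling..
+   } else if (err > 0) {
+	   ..ref not found..
+   } else {
+	   ..inspect ref.value_type and ref.value..
+   }
+   reftable_ref_record_release(&ref);
+*/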
+
+/* dump table contents onto stdout for debugging */
+int reftable_table_print(struct reftable_table *tab);
+
+#endif
diff --git a/reftable/reftable-iterator.h b/reftable/reftable-iterator.h
new file mode 100644
index 0000000..d3eee7a
--- /dev/null
+++ b/reftable/reftable-iterator.h
@@ -0,0 +1,39 @@
+/*
+Copyright 2020 Google LLC
+
+Use of this source code is governed by a BSD-style
+license that can be found in the LICENSE file or at
+https://developers.google.com/open-source/licenses/bsd
+*/
+
+#ifndef REFTABLE_ITERATOR_H
+#define REFTABLE_ITERATOR_H
+
+#include "reftable-record.h"
+
+struct reftable_iterator_vtable;
+
+/* iterator is the generic interface for walking over data stored in a
+ * reftable.
+ */
+struct reftable_iterator {
+ struct reftable_iterator_vtable *ops;
+ void *iter_arg;
+};
+
+/* reads the next reftable_ref_record. Returns < 0 for error, 0 for OK and > 0:
+ * end of iteration.
+ */
+int reftable_iterator_next_ref(struct reftable_iterator *it,
+ struct reftable_ref_record *ref);
+
+/* reads the next reftable_log_record. Returns < 0 for error, 0 for OK and > 0:
+ * end of iteration.
+ */
+int reftable_iterator_next_log(struct reftable_iterator *it,
+ struct reftable_log_record *log);
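+
+/* example (sketch): draining a log iterator; `it` is assumed to come from one
+   of the seek_log functions:
+
+   struct reftable_log_record log = { NULL };
+   while (1) {
+	   int err = reftable_iterator_next_log(&it, &log);
+	   if (err > 0)
+		   break;
+	   if (err < 0) {
+		   ..error handling..
+	   }
+	   ..use log..
+   }
+   reftable_log_record_release(&log);
+   reftable_iterator_destroy(&it);
+*/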
+
+/* releases resources associated with an iterator. */
+void reftable_iterator_destroy(struct reftable_iterator *it);
+
+#endif
diff --git a/reftable/reftable-malloc.h b/reftable/reftable-malloc.h
new file mode 100644
index 0000000..5f2185f
--- /dev/null
+++ b/reftable/reftable-malloc.h
@@ -0,0 +1,18 @@
+/*
+Copyright 2020 Google LLC
+
+Use of this source code is governed by a BSD-style
+license that can be found in the LICENSE file or at
+https://developers.google.com/open-source/licenses/bsd
+*/
+
+#ifndef REFTABLE_H
+#define REFTABLE_H
+
+#include <stddef.h>
+
+/* Overrides the functions to use for memory management. */
+void reftable_set_alloc(void *(*malloc)(size_t),
+ void *(*realloc)(void *, size_t), void (*free)(void *));
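+
+/* example (sketch): routing all reftable allocations through the standard
+   allocator explicitly; any trio of functions with matching signatures works:
+
+   reftable_set_alloc(malloc, realloc, free);
+*/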
+
+#endif
diff --git a/reftable/reftable-merged.h b/reftable/reftable-merged.h
new file mode 100644
index 0000000..c91a2d8
--- /dev/null
+++ b/reftable/reftable-merged.h
@@ -0,0 +1,72 @@
+/*
+Copyright 2020 Google LLC
+
+Use of this source code is governed by a BSD-style
+license that can be found in the LICENSE file or at
+https://developers.google.com/open-source/licenses/bsd
+*/
+
+#ifndef REFTABLE_MERGED_H
+#define REFTABLE_MERGED_H
+
+#include "reftable-iterator.h"
+
+/*
+ * Merged tables
+ *
+ * A ref database kept in a sequence of table files. The merged_table presents a
+ * unified view for reading (seeking, iterating) a sequence of immutable tables.
+ *
+ * The merged tables are kept disconnected from their actual storage (eg. files
+ * on disk) on purpose, because it is useful to merge tables that aren't files.
+ * For example, the per-workspace and global ref namespace can be implemented
+ * as a merged table of two stacks of file-backed reftables.
+ */
+
+/* A merged table implements seeking/iterating over a stack of tables. */
+struct reftable_merged_table;
+
+/* A generic reftable; see below. */
+struct reftable_table;
+
+/* reftable_new_merged_table creates a new merged table. It takes ownership of
+ the stack array.
+*/
+int reftable_new_merged_table(struct reftable_merged_table **dest,
+ struct reftable_table *stack, size_t n,
+ uint32_t hash_id);
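+
+/* example (sketch): merging two single-file readers into one view. rd1 and rd2
+   are assumed to be open reftable_reader handles; the table array is heap
+   allocated (default allocator assumed) because the merged table takes
+   ownership of it:
+
+   struct reftable_table *tabs = calloc(2, sizeof(*tabs));
+   struct reftable_merged_table *mt = NULL;
+   int err;
+
+   reftable_table_from_reader(&tabs[0], rd1);
+   reftable_table_from_reader(&tabs[1], rd2);
+   err = reftable_new_merged_table(&mt, tabs, 2, GIT_SHA1_FORMAT_ID);
+   if (err < 0) {
+	   ..error handling..
+   }
+   ..seek and iterate via reftable_merged_table_seek_ref()..
+   reftable_merged_table_free(mt);
+*/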
+
+/* returns an iterator positioned just before 'name' */
+int reftable_merged_table_seek_ref(struct reftable_merged_table *mt,
+ struct reftable_iterator *it,
+ const char *name);
+
+/* returns an iterator for log entry, at given update_index */
+int reftable_merged_table_seek_log_at(struct reftable_merged_table *mt,
+ struct reftable_iterator *it,
+ const char *name, uint64_t update_index);
+
+/* like reftable_merged_table_seek_log_at but look for the newest entry. */
+int reftable_merged_table_seek_log(struct reftable_merged_table *mt,
+ struct reftable_iterator *it,
+ const char *name);
+
+/* returns the max update_index covered by this merged table. */
+uint64_t
+reftable_merged_table_max_update_index(struct reftable_merged_table *mt);
+
+/* returns the min update_index covered by this merged table. */
+uint64_t
+reftable_merged_table_min_update_index(struct reftable_merged_table *mt);
+
+/* releases memory for the merged_table */
+void reftable_merged_table_free(struct reftable_merged_table *m);
+
+/* return the hash ID of the merged table. */
+uint32_t reftable_merged_table_hash_id(struct reftable_merged_table *m);
+
+/* create a generic table from reftable_merged_table */
+void reftable_table_from_merged_table(struct reftable_table *tab,
+ struct reftable_merged_table *table);
+
+#endif
diff --git a/reftable/reftable-reader.h b/reftable/reftable-reader.h
new file mode 100644
index 0000000..4a4bc2f
--- /dev/null
+++ b/reftable/reftable-reader.h
@@ -0,0 +1,101 @@
+/*
+ Copyright 2020 Google LLC
+
+ Use of this source code is governed by a BSD-style
+ license that can be found in the LICENSE file or at
+ https://developers.google.com/open-source/licenses/bsd
+*/
+
+#ifndef REFTABLE_READER_H
+#define REFTABLE_READER_H
+
+#include "reftable-iterator.h"
+#include "reftable-blocksource.h"
+
+/*
+ * Reading single tables
+ *
+ * The following routines are for reading single files. For an
+ * application-level interface, skip ahead to struct
+ * reftable_merged_table and struct reftable_stack.
+ */
+
+/* The reader struct is a handle to an open reftable file. */
+struct reftable_reader;
+
+/* Generic table. */
+struct reftable_table;
+
+/* reftable_new_reader opens a reftable for reading. If successful,
+ * returns 0 and sets pp. The name is used for creating a
+ * stack. Typically, it is the basename of the file. The block source
+ * `src` is owned by the reader, and is closed on calling
+ * reftable_reader_destroy(). On error, the block source `src` is
+ * closed as well.
+ */
+int reftable_new_reader(struct reftable_reader **pp,
+ struct reftable_block_source *src, const char *name);
+
+/* reftable_reader_seek_ref returns an iterator where 'name' would be inserted
+ in the table. To seek to the start of the table, use name = "".
+
+ example:
+
+ struct reftable_reader *r = NULL;
+ int err = reftable_new_reader(&r, &src, "filename");
+ if (err < 0) { ... }
+ struct reftable_iterator it = {0};
+ err = reftable_reader_seek_ref(r, &it, "refs/heads/master");
+ if (err < 0) { ... }
+ struct reftable_ref_record ref = {0};
+ while (1) {
+ err = reftable_iterator_next_ref(&it, &ref);
+ if (err > 0) {
+ break;
+ }
+ if (err < 0) {
+ ..error handling..
+ }
+ ..found..
+ }
+ reftable_iterator_destroy(&it);
+ reftable_ref_record_release(&ref);
+*/
+int reftable_reader_seek_ref(struct reftable_reader *r,
+ struct reftable_iterator *it, const char *name);
+
+/* returns the hash ID used in this table. */
+uint32_t reftable_reader_hash_id(struct reftable_reader *r);
+
+/* seek to logs for the given name, older than update_index. To seek to the
+ start of the table, use name = "".
+*/
+int reftable_reader_seek_log_at(struct reftable_reader *r,
+ struct reftable_iterator *it, const char *name,
+ uint64_t update_index);
+
+/* seek to newest log entry for given name. */
+int reftable_reader_seek_log(struct reftable_reader *r,
+ struct reftable_iterator *it, const char *name);
+
+/* closes and deallocates a reader. */
+void reftable_reader_free(struct reftable_reader *);
+
+/* return an iterator for the refs pointing to `oid`. */
+int reftable_reader_refs_for(struct reftable_reader *r,
+ struct reftable_iterator *it, uint8_t *oid);
+
+/* return the max_update_index for a table */
+uint64_t reftable_reader_max_update_index(struct reftable_reader *r);
+
+/* return the min_update_index for a table */
+uint64_t reftable_reader_min_update_index(struct reftable_reader *r);
+
+/* creates a generic table from a file reader. */
+void reftable_table_from_reader(struct reftable_table *tab,
+ struct reftable_reader *reader);
+
+/* print table onto stdout for debugging. */
+int reftable_reader_print_file(const char *tablename);
+
+#endif
diff --git a/reftable/reftable-record.h b/reftable/reftable-record.h
new file mode 100644
index 0000000..2a2943c
--- /dev/null
+++ b/reftable/reftable-record.h
@@ -0,0 +1,118 @@
+/*
+Copyright 2020 Google LLC
+
+Use of this source code is governed by a BSD-style
+license that can be found in the LICENSE file or at
+https://developers.google.com/open-source/licenses/bsd
+*/
+
+#ifndef REFTABLE_RECORD_H
+#define REFTABLE_RECORD_H
+
+#include "hash-ll.h"
+#include <stdint.h>
+
+/*
+ * Basic data types
+ *
+ * Reftables store the state of each ref in struct reftable_ref_record, and they
+ * store a sequence of reflog updates in struct reftable_log_record.
+ */
+
+/* reftable_ref_record holds a single ref database entry */
+struct reftable_ref_record {
+ char *refname; /* Name of the ref, malloced. */
+ size_t refname_cap;
+ uint64_t update_index; /* Logical timestamp at which this value is
+ * written */
+
+ enum {
+ /* tombstone to hide deletions from earlier tables */
+ REFTABLE_REF_DELETION = 0x0,
+
+ /* a simple ref */
+ REFTABLE_REF_VAL1 = 0x1,
+ /* a tag, plus its peeled hash */
+ REFTABLE_REF_VAL2 = 0x2,
+
+ /* a symbolic reference */
+ REFTABLE_REF_SYMREF = 0x3,
+#define REFTABLE_NR_REF_VALUETYPES 4
+ } value_type;
+ union {
+ unsigned char val1[GIT_MAX_RAWSZ];
+ struct {
+ unsigned char value[GIT_MAX_RAWSZ]; /* first hash */
+ unsigned char target_value[GIT_MAX_RAWSZ]; /* second hash */
+ } val2;
+ char *symref; /* referent, malloced 0-terminated string */
+ } value;
+};
+
+/* Returns the first hash, or NULL if `rec` is not of type
+ * REFTABLE_REF_VAL1 or REFTABLE_REF_VAL2. */
+const unsigned char *reftable_ref_record_val1(const struct reftable_ref_record *rec);
+
+/* Returns the second hash, or NULL if `rec` is not of type
+ * REFTABLE_REF_VAL2. */
+const unsigned char *reftable_ref_record_val2(const struct reftable_ref_record *rec);
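+
+/* example (sketch): inspecting a record returned by an iterator; the cases
+   mirror the value_type enum above:
+
+   switch (ref.value_type) {
+   case REFTABLE_REF_VAL1:
+   case REFTABLE_REF_VAL2:
+	   ..reftable_ref_record_val1(&ref) points at hash_size bytes..
+	   break;
+   case REFTABLE_REF_SYMREF:
+	   ..ref.value.symref is the target refname..
+	   break;
+   case REFTABLE_REF_DELETION:
+	   ..the ref was deleted in this table..
+	   break;
+   }
+*/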
+
+/* returns whether 'ref' represents a deletion */
+int reftable_ref_record_is_deletion(const struct reftable_ref_record *ref);
+
+/* prints a reftable_ref_record onto stdout. Useful for debugging. */
+void reftable_ref_record_print(const struct reftable_ref_record *ref,
+ uint32_t hash_id);
+
+/* frees and nulls all pointer values inside `ref`. */
+void reftable_ref_record_release(struct reftable_ref_record *ref);
+
+/* returns whether two reftable_ref_records are the same. Useful for testing. */
+int reftable_ref_record_equal(const struct reftable_ref_record *a,
+ const struct reftable_ref_record *b, int hash_size);
+
+/* reftable_log_record holds a reflog entry */
+struct reftable_log_record {
+ char *refname;
+ size_t refname_cap;
+ uint64_t update_index; /* logical timestamp of a transactional update.
+ */
+
+ enum {
+ /* tombstone to hide deletions from earlier tables */
+ REFTABLE_LOG_DELETION = 0x0,
+
+ /* a simple update */
+ REFTABLE_LOG_UPDATE = 0x1,
+#define REFTABLE_NR_LOG_VALUETYPES 2
+ } value_type;
+
+ union {
+ struct {
+ unsigned char new_hash[GIT_MAX_RAWSZ];
+ unsigned char old_hash[GIT_MAX_RAWSZ];
+ char *name;
+ char *email;
+ uint64_t time;
+ int16_t tz_offset;
+ char *message;
+ size_t message_cap;
+ } update;
+ } value;
+};
+
+/* returns whether 'ref' represents the deletion of a log record. */
+int reftable_log_record_is_deletion(const struct reftable_log_record *log);
+
+/* frees and nulls all pointer values. */
+void reftable_log_record_release(struct reftable_log_record *log);
+
+/* returns whether two records are equal. Useful for testing. */
+int reftable_log_record_equal(const struct reftable_log_record *a,
+ const struct reftable_log_record *b, int hash_size);
+
+/* dumps a reftable_log_record on stdout, for debugging/testing. */
+void reftable_log_record_print(struct reftable_log_record *log,
+ uint32_t hash_id);
+
+#endif
diff --git a/reftable/reftable-stack.h b/reftable/reftable-stack.h
new file mode 100644
index 0000000..1b602dd
--- /dev/null
+++ b/reftable/reftable-stack.h
@@ -0,0 +1,128 @@
+/*
+Copyright 2020 Google LLC
+
+Use of this source code is governed by a BSD-style
+license that can be found in the LICENSE file or at
+https://developers.google.com/open-source/licenses/bsd
+*/
+
+#ifndef REFTABLE_STACK_H
+#define REFTABLE_STACK_H
+
+#include "reftable-writer.h"
+
+/*
+ * The stack presents an interface to a mutable sequence of reftables.
+ *
+ * A stack can be mutated by pushing a table to the top of the stack.
+ *
+ * The reftable_stack automatically compacts files on disk to ensure good
+ * amortized performance.
+ *
+ * For Windows and other platforms that cannot have open files as rename
+ * destinations, concurrent access from multiple processes needs the rand()
+ * random seed to be randomized.
+ */
+struct reftable_stack;
+
+/* open a new reftable stack. The tables along with the table list will be
+ * stored in 'dir'. Typically, this should be .git/reftables.
+ */
+int reftable_new_stack(struct reftable_stack **dest, const char *dir,
+ struct reftable_write_options config);
+
+/* returns the update_index at which a next table should be written. */
+uint64_t reftable_stack_next_update_index(struct reftable_stack *st);
+
+/* holds a transaction to add tables at the top of a stack. */
+struct reftable_addition;
+
+/*
+ * returns a new transaction to add reftables to the given stack. As a side
+ * effect, the ref database is locked.
+ */
+int reftable_stack_new_addition(struct reftable_addition **dest,
+ struct reftable_stack *st);
+
+/* Adds a reftable to transaction. */
+int reftable_addition_add(struct reftable_addition *add,
+ int (*write_table)(struct reftable_writer *wr,
+ void *arg),
+ void *arg);
+
+/* Commits the transaction, releasing the lock. After calling this,
+ * reftable_addition_destroy should still be called.
+ */
+int reftable_addition_commit(struct reftable_addition *add);
+
+/* Release all non-committed data from the transaction, and deallocate the
+ * transaction. Releases the lock if held. */
+void reftable_addition_destroy(struct reftable_addition *add);
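+
+/* example (sketch): a manual transaction. write_refs is a hypothetical
+   callback with the same contract as the write_table argument of
+   reftable_stack_add() below:
+
+   struct reftable_addition *add = NULL;
+   int err = reftable_stack_new_addition(&add, st);
+   if (!err)
+	   err = reftable_addition_add(add, &write_refs, arg);
+   if (!err)
+	   err = reftable_addition_commit(add);
+   reftable_addition_destroy(add);
+*/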
+
+/* add a new table to the stack. The write_table function must call
+ * reftable_writer_set_limits, add refs and return an error value. */
+int reftable_stack_add(struct reftable_stack *st,
+ int (*write_table)(struct reftable_writer *wr,
+ void *write_arg),
+ void *write_arg);
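+
+/* example (sketch): adding a single symref with reftable_stack_add(). The
+   write_one_ref callback and the chosen refnames are illustrations only:
+
+   static int write_one_ref(struct reftable_writer *wr, void *arg)
+   {
+	   struct reftable_ref_record *ref = arg;
+	   reftable_writer_set_limits(wr, ref->update_index,
+				      ref->update_index);
+	   return reftable_writer_add_ref(wr, ref);
+   }
+
+   ...
+
+   struct reftable_ref_record ref = {
+	   .refname = "refs/heads/topic",
+	   .value_type = REFTABLE_REF_SYMREF,
+	   .value.symref = "refs/heads/master",
+	   .update_index = reftable_stack_next_update_index(st),
+   };
+   int err = reftable_stack_add(st, &write_one_ref, &ref);
+*/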
+
+/* returns the merged_table for seeking. This table is valid until the
+ * next write or reload, and should not be closed or deleted.
+ */
+struct reftable_merged_table *
+reftable_stack_merged_table(struct reftable_stack *st);
+
+/* frees all resources associated with the stack. */
+void reftable_stack_destroy(struct reftable_stack *st);
+
+/* Reloads the stack if necessary. This is very cheap to run if the stack was up
+ * to date */
+int reftable_stack_reload(struct reftable_stack *st);
+
+/* Policy for expiring reflog entries. */
+struct reftable_log_expiry_config {
+ /* Drop entries older than this timestamp */
+ uint64_t time;
+
+	/* Drop entries whose update_index is lower than this value */
+ uint64_t min_update_index;
+};
+
+/* compacts all reftables into a giant table. Expire reflog entries if config is
+ * non-NULL */
+int reftable_stack_compact_all(struct reftable_stack *st,
+ struct reftable_log_expiry_config *config);
+
+/* heuristically compact unbalanced table stack. */
+int reftable_stack_auto_compact(struct reftable_stack *st);
+
+/* delete stale .ref tables. */
+int reftable_stack_clean(struct reftable_stack *st);
+
+/* convenience function to read a single ref. Returns < 0 for error, 0 for
+ * success, and 1 if ref not found. */
+int reftable_stack_read_ref(struct reftable_stack *st, const char *refname,
+ struct reftable_ref_record *ref);
+
+/* convenience function to read a single log. Returns < 0 for error, 0 for
+ * success, and 1 if ref not found. */
+int reftable_stack_read_log(struct reftable_stack *st, const char *refname,
+ struct reftable_log_record *log);
+
+/* statistics on past compactions. */
+struct reftable_compaction_stats {
+ uint64_t bytes; /* total number of bytes written */
+ uint64_t entries_written; /* total number of entries written, including
+ failures. */
+ int attempts; /* how often we tried to compact */
+ int failures; /* failures happen on concurrent updates */
+};
+
+/* return statistics for compaction up till now. */
+struct reftable_compaction_stats *
+reftable_stack_compaction_stats(struct reftable_stack *st);
+
+/* print the entire stack represented by the directory */
+int reftable_stack_print_directory(const char *stackdir, uint32_t hash_id);
+
+#endif
diff --git a/reftable/reftable-tests.h b/reftable/reftable-tests.h
new file mode 100644
index 0000000..0019cbc
--- /dev/null
+++ b/reftable/reftable-tests.h
@@ -0,0 +1,23 @@
+/*
+Copyright 2020 Google LLC
+
+Use of this source code is governed by a BSD-style
+license that can be found in the LICENSE file or at
+https://developers.google.com/open-source/licenses/bsd
+*/
+
+#ifndef REFTABLE_TESTS_H
+#define REFTABLE_TESTS_H
+
+int basics_test_main(int argc, const char **argv);
+int block_test_main(int argc, const char **argv);
+int merged_test_main(int argc, const char **argv);
+int pq_test_main(int argc, const char **argv);
+int record_test_main(int argc, const char **argv);
+int refname_test_main(int argc, const char **argv);
+int readwrite_test_main(int argc, const char **argv);
+int stack_test_main(int argc, const char **argv);
+int tree_test_main(int argc, const char **argv);
+int reftable_dump_main(int argc, char *const *argv);
+
+#endif
diff --git a/reftable/reftable-writer.h b/reftable/reftable-writer.h
new file mode 100644
index 0000000..155bf0b
--- /dev/null
+++ b/reftable/reftable-writer.h
@@ -0,0 +1,155 @@
+/*
+Copyright 2020 Google LLC
+
+Use of this source code is governed by a BSD-style
+license that can be found in the LICENSE file or at
+https://developers.google.com/open-source/licenses/bsd
+*/
+
+#ifndef REFTABLE_WRITER_H
+#define REFTABLE_WRITER_H
+
+#include "reftable-record.h"
+
+#include <stdint.h>
+#include <unistd.h> /* ssize_t */
+
+/* Writing single reftables */
+
+/* reftable_write_options sets options for writing a single reftable. */
+struct reftable_write_options {
+ /* boolean: do not pad out blocks to block size. */
+ unsigned unpadded : 1;
+
+ /* the blocksize. Should be less than 2^24. */
+ uint32_t block_size;
+
+ /* boolean: do not generate a SHA1 => ref index. */
+ unsigned skip_index_objects : 1;
+
+ /* how often to write complete keys in each block. */
+ int restart_interval;
+
+ /* 4-byte identifier ("sha1", "s256") of the hash.
+ * Defaults to SHA1 if unset
+ */
+ uint32_t hash_id;
+
+ /* Default mode for creating files. If unset, use 0666 (+umask) */
+ unsigned int default_permissions;
+
+ /* boolean: do not check ref names for validity or dir/file conflicts.
+ */
+ unsigned skip_name_check : 1;
+
+ /* boolean: copy log messages exactly. If unset, check that the message
+ * is a single line, and add '\n' if missing.
+ */
+ unsigned exact_log_message : 1;
+
+ /* boolean: Prevent auto-compaction of tables. */
+ unsigned disable_auto_compact : 1;
+};
+
+/* reftable_block_stats holds statistics for a single block type */
+struct reftable_block_stats {
+ /* total number of entries written */
+ int entries;
+ /* total number of key restarts */
+ int restarts;
+ /* total number of blocks */
+ int blocks;
+ /* total number of index blocks */
+ int index_blocks;
+ /* depth of the index */
+ int max_index_level;
+
+ /* offset of the first block for this type */
+ uint64_t offset;
+ /* offset of the top level index block for this type, or 0 if not
+ * present */
+ uint64_t index_offset;
+};
+
+/* stats holds overall statistics for a single reftable */
+struct reftable_stats {
+ /* total number of blocks written. */
+ int blocks;
+ /* stats for ref data */
+ struct reftable_block_stats ref_stats;
+ /* stats for the SHA1 to ref map. */
+ struct reftable_block_stats obj_stats;
+ /* stats for index blocks */
+ struct reftable_block_stats idx_stats;
+ /* stats for log blocks */
+ struct reftable_block_stats log_stats;
+
+ /* disambiguation length of shortened object IDs. */
+ int object_id_len;
+};
+
+/* reftable_new_writer creates a new writer */
+struct reftable_writer *
+reftable_new_writer(ssize_t (*writer_func)(void *, const void *, size_t),
+ int (*flush_func)(void *),
+ void *writer_arg, struct reftable_write_options *opts);
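+
+/* example (sketch): writing a one-record table to a file descriptor. my_write
+   and my_flush are hypothetical callbacks wrapping write(2) and fsync(2), and
+   the record contents are illustrations only:
+
+   struct reftable_write_options opts = { 0 };
+   struct reftable_writer *wr =
+	   reftable_new_writer(&my_write, &my_flush, &fd, &opts);
+   struct reftable_ref_record ref = {
+	   .refname = "HEAD",
+	   .value_type = REFTABLE_REF_SYMREF,
+	   .value.symref = "refs/heads/master",
+	   .update_index = 1,
+   };
+   int err;
+
+   reftable_writer_set_limits(wr, 1, 1);
+   err = reftable_writer_add_ref(wr, &ref);
+   if (!err)
+	   err = reftable_writer_close(wr);
+   reftable_writer_free(wr);
+*/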
+
+/* Set the range of update indices for the records we will add. When writing a
+ table into a stack, the min should be at least
+ reftable_stack_next_update_index(), or REFTABLE_API_ERROR is returned.
+
+ For transactional updates to a stack, typically min==max, and the
+   update_index can be obtained by inspecting the stack. When converting an
+ existing ref database into a single reftable, this would be a range of
+ update-index timestamps.
+ */
+void reftable_writer_set_limits(struct reftable_writer *w, uint64_t min,
+ uint64_t max);
+
+/*
+ Add a reftable_ref_record. The record should have names that come after
+ already added records.
+
+ The update_index must be within the limits set by
+  reftable_writer_set_limits(), or REFTABLE_API_ERROR is returned. It is also
+  a REFTABLE_API_ERROR to write a ref record after a log record.
+*/
+int reftable_writer_add_ref(struct reftable_writer *w,
+ struct reftable_ref_record *ref);
+
+/*
+ Convenience function to add multiple reftable_ref_records; the function sorts
+ the records before adding them, reordering the records array passed in.
+*/
+int reftable_writer_add_refs(struct reftable_writer *w,
+ struct reftable_ref_record *refs, int n);
+
+/*
+ adds reftable_log_records. Log records are keyed by (refname, decreasing
+ update_index). The key for the record added must come after the already added
+ log records.
+*/
+int reftable_writer_add_log(struct reftable_writer *w,
+ struct reftable_log_record *log);
+
+/*
+ Convenience function to add multiple reftable_log_records; the function sorts
+  the records before adding them, reordering the records array passed in.
+*/
+int reftable_writer_add_logs(struct reftable_writer *w,
+ struct reftable_log_record *logs, int n);
+
+/* reftable_writer_close finalizes the reftable. The writer is retained so
+ * statistics can be inspected. */
+int reftable_writer_close(struct reftable_writer *w);
+
+/* writer_stats returns the statistics on the reftable being written.
+
+ This struct becomes invalid when the writer is freed.
+ */
+const struct reftable_stats *reftable_writer_stats(struct reftable_writer *w);
+
+/* reftable_writer_free deallocates memory for the writer */
+void reftable_writer_free(struct reftable_writer *w);
+
+#endif
diff --git a/reftable/stack.c b/reftable/stack.c
new file mode 100644
index 0000000..80266bc
--- /dev/null
+++ b/reftable/stack.c
@@ -0,0 +1,1518 @@
+/*
+Copyright 2020 Google LLC
+
+Use of this source code is governed by a BSD-style
+license that can be found in the LICENSE file or at
+https://developers.google.com/open-source/licenses/bsd
+*/
+
+#include "stack.h"
+
+#include "../write-or-die.h"
+#include "system.h"
+#include "merged.h"
+#include "reader.h"
+#include "refname.h"
+#include "reftable-error.h"
+#include "reftable-record.h"
+#include "reftable-merged.h"
+#include "writer.h"
+#include "tempfile.h"
+
+static int stack_try_add(struct reftable_stack *st,
+ int (*write_table)(struct reftable_writer *wr,
+ void *arg),
+ void *arg);
+static int stack_write_compact(struct reftable_stack *st,
+ struct reftable_writer *wr,
+ size_t first, size_t last,
+ struct reftable_log_expiry_config *config);
+static int stack_check_addition(struct reftable_stack *st,
+ const char *new_tab_name);
+static void reftable_addition_close(struct reftable_addition *add);
+static int reftable_stack_reload_maybe_reuse(struct reftable_stack *st,
+ int reuse_open);
+
+static void stack_filename(struct strbuf *dest, struct reftable_stack *st,
+ const char *name)
+{
+ strbuf_reset(dest);
+ strbuf_addstr(dest, st->reftable_dir);
+ strbuf_addstr(dest, "/");
+ strbuf_addstr(dest, name);
+}
+
+static ssize_t reftable_fd_write(void *arg, const void *data, size_t sz)
+{
+ int *fdp = (int *)arg;
+ return write_in_full(*fdp, data, sz);
+}
+
+static int reftable_fd_flush(void *arg)
+{
+ int *fdp = (int *)arg;
+
+ return fsync_component(FSYNC_COMPONENT_REFERENCE, *fdp);
+}
+
+int reftable_new_stack(struct reftable_stack **dest, const char *dir,
+ struct reftable_write_options config)
+{
+ struct reftable_stack *p = reftable_calloc(1, sizeof(*p));
+ struct strbuf list_file_name = STRBUF_INIT;
+ int err = 0;
+
+ if (config.hash_id == 0) {
+ config.hash_id = GIT_SHA1_FORMAT_ID;
+ }
+
+ *dest = NULL;
+
+ strbuf_reset(&list_file_name);
+ strbuf_addstr(&list_file_name, dir);
+ strbuf_addstr(&list_file_name, "/tables.list");
+
+ p->list_file = strbuf_detach(&list_file_name, NULL);
+ p->list_fd = -1;
+ p->reftable_dir = xstrdup(dir);
+ p->config = config;
+
+ err = reftable_stack_reload_maybe_reuse(p, 1);
+ if (err < 0) {
+ reftable_stack_destroy(p);
+ } else {
+ *dest = p;
+ }
+ return err;
+}
+
+static int fd_read_lines(int fd, char ***namesp)
+{
+ off_t size = lseek(fd, 0, SEEK_END);
+ char *buf = NULL;
+ int err = 0;
+ if (size < 0) {
+ err = REFTABLE_IO_ERROR;
+ goto done;
+ }
+ err = lseek(fd, 0, SEEK_SET);
+ if (err < 0) {
+ err = REFTABLE_IO_ERROR;
+ goto done;
+ }
+
+ REFTABLE_ALLOC_ARRAY(buf, size + 1);
+ if (read_in_full(fd, buf, size) != size) {
+ err = REFTABLE_IO_ERROR;
+ goto done;
+ }
+ buf[size] = 0;
+
+ parse_names(buf, size, namesp);
+
+done:
+ reftable_free(buf);
+ return err;
+}
+
+int read_lines(const char *filename, char ***namesp)
+{
+ int fd = open(filename, O_RDONLY);
+ int err = 0;
+ if (fd < 0) {
+ if (errno == ENOENT) {
+ REFTABLE_CALLOC_ARRAY(*namesp, 1);
+ return 0;
+ }
+
+ return REFTABLE_IO_ERROR;
+ }
+ err = fd_read_lines(fd, namesp);
+ close(fd);
+ return err;
+}
+
+struct reftable_merged_table *
+reftable_stack_merged_table(struct reftable_stack *st)
+{
+ return st->merged;
+}
+
+static int has_name(char **names, const char *name)
+{
+ while (*names) {
+ if (!strcmp(*names, name))
+ return 1;
+ names++;
+ }
+ return 0;
+}
+
+/* Close and free the stack */
+void reftable_stack_destroy(struct reftable_stack *st)
+{
+ char **names = NULL;
+ int err = 0;
+ if (st->merged) {
+ reftable_merged_table_free(st->merged);
+ st->merged = NULL;
+ }
+
+ err = read_lines(st->list_file, &names);
+ if (err < 0) {
+ FREE_AND_NULL(names);
+ }
+
+ if (st->readers) {
+ int i = 0;
+ struct strbuf filename = STRBUF_INIT;
+ for (i = 0; i < st->readers_len; i++) {
+ const char *name = reader_name(st->readers[i]);
+ strbuf_reset(&filename);
+ if (names && !has_name(names, name)) {
+ stack_filename(&filename, st, name);
+ }
+ reftable_reader_free(st->readers[i]);
+
+ if (filename.len) {
+ /* On Windows, can only unlink after closing. */
+ unlink(filename.buf);
+ }
+ }
+ strbuf_release(&filename);
+ st->readers_len = 0;
+ FREE_AND_NULL(st->readers);
+ }
+
+ if (st->list_fd >= 0) {
+ close(st->list_fd);
+ st->list_fd = -1;
+ }
+
+ FREE_AND_NULL(st->list_file);
+ FREE_AND_NULL(st->reftable_dir);
+ reftable_free(st);
+ free_names(names);
+}
+
+static struct reftable_reader **stack_copy_readers(struct reftable_stack *st,
+ int cur_len)
+{
+ struct reftable_reader **cur = reftable_calloc(cur_len, sizeof(*cur));
+ int i = 0;
+ for (i = 0; i < cur_len; i++) {
+ cur[i] = st->readers[i];
+ }
+ return cur;
+}
+
+static int reftable_stack_reload_once(struct reftable_stack *st, char **names,
+ int reuse_open)
+{
+ size_t cur_len = !st->merged ? 0 : st->merged->stack_len;
+ struct reftable_reader **cur = stack_copy_readers(st, cur_len);
+ size_t names_len = names_length(names);
+ struct reftable_reader **new_readers =
+ reftable_calloc(names_len, sizeof(*new_readers));
+ struct reftable_table *new_tables =
+ reftable_calloc(names_len, sizeof(*new_tables));
+ size_t new_readers_len = 0;
+ struct reftable_merged_table *new_merged = NULL;
+ struct strbuf table_path = STRBUF_INIT;
+ int err = 0;
+ size_t i;
+
+ while (*names) {
+ struct reftable_reader *rd = NULL;
+ char *name = *names++;
+
+ /* this is linear; we assume compaction keeps the number of
+ tables under control so this is not quadratic. */
+ for (i = 0; reuse_open && i < cur_len; i++) {
+ if (cur[i] && 0 == strcmp(cur[i]->name, name)) {
+ rd = cur[i];
+ cur[i] = NULL;
+ break;
+ }
+ }
+
+ if (!rd) {
+ struct reftable_block_source src = { NULL };
+ stack_filename(&table_path, st, name);
+
+ err = reftable_block_source_from_file(&src,
+ table_path.buf);
+ if (err < 0)
+ goto done;
+
+ err = reftable_new_reader(&rd, &src, name);
+ if (err < 0)
+ goto done;
+ }
+
+ new_readers[new_readers_len] = rd;
+ reftable_table_from_reader(&new_tables[new_readers_len], rd);
+ new_readers_len++;
+ }
+
+ /* success! */
+ err = reftable_new_merged_table(&new_merged, new_tables,
+ new_readers_len, st->config.hash_id);
+ if (err < 0)
+ goto done;
+
+ new_tables = NULL;
+ st->readers_len = new_readers_len;
+ if (st->merged) {
+ merged_table_release(st->merged);
+ reftable_merged_table_free(st->merged);
+ }
+ if (st->readers) {
+ reftable_free(st->readers);
+ }
+ st->readers = new_readers;
+ new_readers = NULL;
+ new_readers_len = 0;
+
+ new_merged->suppress_deletions = 1;
+ st->merged = new_merged;
+ for (i = 0; i < cur_len; i++) {
+ if (cur[i]) {
+ const char *name = reader_name(cur[i]);
+ stack_filename(&table_path, st, name);
+
+ reader_close(cur[i]);
+ reftable_reader_free(cur[i]);
+
+ /* On Windows, can only unlink after closing. */
+ unlink(table_path.buf);
+ }
+ }
+
+done:
+ for (i = 0; i < new_readers_len; i++) {
+ reader_close(new_readers[i]);
+ reftable_reader_free(new_readers[i]);
+ }
+ reftable_free(new_readers);
+ reftable_free(new_tables);
+ reftable_free(cur);
+ strbuf_release(&table_path);
+ return err;
+}
+
+/* return negative if a before b. */
+static int tv_cmp(struct timeval *a, struct timeval *b)
+{
+ time_t diff = a->tv_sec - b->tv_sec;
+ int udiff = a->tv_usec - b->tv_usec;
+
+ if (diff != 0)
+ return diff;
+
+ return udiff;
+}
+
+static int reftable_stack_reload_maybe_reuse(struct reftable_stack *st,
+ int reuse_open)
+{
+ char **names = NULL, **names_after = NULL;
+ struct timeval deadline;
+ int64_t delay = 0;
+ int tries = 0, err;
+ int fd = -1;
+
+ err = gettimeofday(&deadline, NULL);
+ if (err < 0)
+ goto out;
+ deadline.tv_sec += 3;
+
+ while (1) {
+ struct timeval now;
+
+ err = gettimeofday(&now, NULL);
+ if (err < 0)
+ goto out;
+
+ /*
+ * Only look at deadlines after the first few times. This
+ * simplifies debugging in GDB.
+ */
+ tries++;
+ if (tries > 3 && tv_cmp(&now, &deadline) >= 0)
+ goto out;
+
+ fd = open(st->list_file, O_RDONLY);
+ if (fd < 0) {
+ if (errno != ENOENT) {
+ err = REFTABLE_IO_ERROR;
+ goto out;
+ }
+
+ REFTABLE_CALLOC_ARRAY(names, 1);
+ } else {
+ err = fd_read_lines(fd, &names);
+ if (err < 0)
+ goto out;
+ }
+
+ err = reftable_stack_reload_once(st, names, reuse_open);
+ if (!err)
+ break;
+ if (err != REFTABLE_NOT_EXIST_ERROR)
+ goto out;
+
+ /*
+ * REFTABLE_NOT_EXIST_ERROR can be caused by a concurrent
+ * writer. Check if there was one by checking if the name list
+ * changed.
+ */
+ err = read_lines(st->list_file, &names_after);
+ if (err < 0)
+ goto out;
+ if (names_equal(names_after, names)) {
+ err = REFTABLE_NOT_EXIST_ERROR;
+ goto out;
+ }
+
+ free_names(names);
+ names = NULL;
+ free_names(names_after);
+ names_after = NULL;
+ close(fd);
+ fd = -1;
+
+ delay = delay + (delay * rand()) / RAND_MAX + 1;
+ sleep_millisec(delay);
+ }
+
+out:
+ /*
+ * Invalidate the stat cache. It is sufficient to only close the file
+ * descriptor and keep the cached stat info because we never use the
+ * latter when the former is negative.
+ */
+ if (st->list_fd >= 0) {
+ close(st->list_fd);
+ st->list_fd = -1;
+ }
+
+ /*
+ * Cache stat information in case it provides a useful signal to us.
+ * According to POSIX, "The st_ino and st_dev fields taken together
+ * uniquely identify the file within the system." That being said,
+ * Windows is not POSIX compliant and we do not have these fields
+ * available. So the information we have there is insufficient to
+ * determine whether two file descriptors point to the same file.
+ *
+ * While we could fall back to using other signals like the file's
+ * mtime, those are not sufficient to avoid races. We thus refrain from
+ * using the stat cache on such systems and fall back to the secondary
+ * caching mechanism, which is to check whether contents of the file
+ * have changed.
+ *
+ * On other systems which are POSIX compliant we must keep the file
+ * descriptor open. This is to avoid a race condition where two
+ * processes access the reftable stack at the same point in time:
+ *
+ * 1. A reads the reftable stack and caches its stat info.
+ *
+ * 2. B updates the stack, appending a new table to "tables.list".
+ * This will both use a new inode and result in a different file
+ * size, thus invalidating A's cache in theory.
+ *
+ * 3. B decides to auto-compact the stack and merges two tables. The
+ * file size now matches what A has cached again. Furthermore, the
+ * filesystem may decide to recycle the inode number of the file
+ * we have replaced in (2) because it is not in use anymore.
+ *
+ * 4. A reloads the reftable stack. Neither the inode number nor the
+ * file size changed. If the timestamps did not change either then
+ * we think the cached copy of our stack is up-to-date.
+ *
+ * By keeping the file descriptor open the inode number cannot be
+ * recycled, mitigating the race.
+ */
+ if (!err && fd >= 0 && !fstat(fd, &st->list_st) &&
+ st->list_st.st_dev && st->list_st.st_ino) {
+ st->list_fd = fd;
+ fd = -1;
+ }
+
+ if (fd >= 0)
+ close(fd);
+ free_names(names);
+ free_names(names_after);
+ return err;
+}
+
+/* -1 = error
+ 0 = up to date
+ 1 = changed. */
+static int stack_uptodate(struct reftable_stack *st)
+{
+ char **names = NULL;
+ int err;
+ int i = 0;
+
+ /*
+ * When we have cached stat information available then we use it to
+ * verify whether the file has been rewritten.
+ *
+ * Note that we explicitly do not want to use `stat_validity_check()`
+ * and friends here because they may end up not comparing the `st_dev`
+ * and `st_ino` fields. These functions thus cannot guarantee that we
+ * indeed still have the same file.
+ */
+ if (st->list_fd >= 0) {
+ struct stat list_st;
+
+ if (stat(st->list_file, &list_st) < 0) {
+ /*
+ * It's fine for "tables.list" to not exist. In that
+ * case, we have to refresh when the loaded stack has
+ * any readers.
+ */
+ if (errno == ENOENT)
+ return !!st->readers_len;
+ return REFTABLE_IO_ERROR;
+ }
+
+ /*
+ * When "tables.list" refers to the same file we can assume
+ * that it didn't change. This is because we always use
+ * rename(3P) to update the file and never write to it
+ * directly.
+ */
+ if (st->list_st.st_dev == list_st.st_dev &&
+ st->list_st.st_ino == list_st.st_ino)
+ return 0;
+ }
+
+ err = read_lines(st->list_file, &names);
+ if (err < 0)
+ return err;
+
+ for (i = 0; i < st->readers_len; i++) {
+ if (!names[i]) {
+ err = 1;
+ goto done;
+ }
+
+ if (strcmp(st->readers[i]->name, names[i])) {
+ err = 1;
+ goto done;
+ }
+ }
+
+ if (names[st->merged->stack_len]) {
+ err = 1;
+ goto done;
+ }
+
+done:
+ free_names(names);
+ return err;
+}
+
+int reftable_stack_reload(struct reftable_stack *st)
+{
+ int err = stack_uptodate(st);
+ if (err > 0)
+ return reftable_stack_reload_maybe_reuse(st, 1);
+ return err;
+}
+
+int reftable_stack_add(struct reftable_stack *st,
+ int (*write)(struct reftable_writer *wr, void *arg),
+ void *arg)
+{
+ int err = stack_try_add(st, write, arg);
+ if (err < 0) {
+ if (err == REFTABLE_OUTDATED_ERROR) {
+ /* Ignore error return, we want to propagate
+ REFTABLE_OUTDATED_ERROR.
+ */
+ reftable_stack_reload(st);
+ }
+ return err;
+ }
+
+ return 0;
+}
+
+static void format_name(struct strbuf *dest, uint64_t min, uint64_t max)
+{
+ char buf[100];
+ uint32_t rnd = (uint32_t)git_rand();
+ snprintf(buf, sizeof(buf), "0x%012" PRIx64 "-0x%012" PRIx64 "-%08x",
+ min, max, rnd);
+ strbuf_reset(dest);
+ strbuf_addstr(dest, buf);
+}
+
+struct reftable_addition {
+ struct tempfile *lock_file;
+ struct reftable_stack *stack;
+
+ char **new_tables;
+ size_t new_tables_len, new_tables_cap;
+ uint64_t next_update_index;
+};
+
+#define REFTABLE_ADDITION_INIT {0}
+
+static int reftable_stack_init_addition(struct reftable_addition *add,
+ struct reftable_stack *st)
+{
+ struct strbuf lock_file_name = STRBUF_INIT;
+ int err = 0;
+ add->stack = st;
+
+ strbuf_addf(&lock_file_name, "%s.lock", st->list_file);
+
+ add->lock_file = create_tempfile(lock_file_name.buf);
+ if (!add->lock_file) {
+ if (errno == EEXIST) {
+ err = REFTABLE_LOCK_ERROR;
+ } else {
+ err = REFTABLE_IO_ERROR;
+ }
+ goto done;
+ }
+ if (st->config.default_permissions) {
+ if (chmod(add->lock_file->filename.buf, st->config.default_permissions) < 0) {
+ err = REFTABLE_IO_ERROR;
+ goto done;
+ }
+ }
+
+ err = stack_uptodate(st);
+ if (err < 0)
+ goto done;
+ if (err > 0) {
+ err = REFTABLE_OUTDATED_ERROR;
+ goto done;
+ }
+
+ add->next_update_index = reftable_stack_next_update_index(st);
+done:
+ if (err) {
+ reftable_addition_close(add);
+ }
+ strbuf_release(&lock_file_name);
+ return err;
+}
+
+static void reftable_addition_close(struct reftable_addition *add)
+{
+ struct strbuf nm = STRBUF_INIT;
+ size_t i;
+
+ for (i = 0; i < add->new_tables_len; i++) {
+ stack_filename(&nm, add->stack, add->new_tables[i]);
+ unlink(nm.buf);
+ reftable_free(add->new_tables[i]);
+ add->new_tables[i] = NULL;
+ }
+ reftable_free(add->new_tables);
+ add->new_tables = NULL;
+ add->new_tables_len = 0;
+ add->new_tables_cap = 0;
+
+ delete_tempfile(&add->lock_file);
+ strbuf_release(&nm);
+}
+
+void reftable_addition_destroy(struct reftable_addition *add)
+{
+ if (!add) {
+ return;
+ }
+ reftable_addition_close(add);
+ reftable_free(add);
+}
+
+int reftable_addition_commit(struct reftable_addition *add)
+{
+ struct strbuf table_list = STRBUF_INIT;
+ int lock_file_fd = get_tempfile_fd(add->lock_file);
+ int err = 0;
+ size_t i;
+
+ if (add->new_tables_len == 0)
+ goto done;
+
+ for (i = 0; i < add->stack->merged->stack_len; i++) {
+ strbuf_addstr(&table_list, add->stack->readers[i]->name);
+ strbuf_addstr(&table_list, "\n");
+ }
+ for (i = 0; i < add->new_tables_len; i++) {
+ strbuf_addstr(&table_list, add->new_tables[i]);
+ strbuf_addstr(&table_list, "\n");
+ }
+
+ err = write_in_full(lock_file_fd, table_list.buf, table_list.len);
+ strbuf_release(&table_list);
+ if (err < 0) {
+ err = REFTABLE_IO_ERROR;
+ goto done;
+ }
+
+ fsync_component_or_die(FSYNC_COMPONENT_REFERENCE, lock_file_fd,
+ get_tempfile_path(add->lock_file));
+
+ err = rename_tempfile(&add->lock_file, add->stack->list_file);
+ if (err < 0) {
+ err = REFTABLE_IO_ERROR;
+ goto done;
+ }
+
+ /* success, no more state to clean up. */
+ for (i = 0; i < add->new_tables_len; i++)
+ reftable_free(add->new_tables[i]);
+ reftable_free(add->new_tables);
+ add->new_tables = NULL;
+ add->new_tables_len = 0;
+ add->new_tables_cap = 0;
+
+ err = reftable_stack_reload_maybe_reuse(add->stack, 1);
+ if (err)
+ goto done;
+
+ if (!add->stack->config.disable_auto_compact) {
+ /*
+ * Auto-compact the stack to keep the number of tables under
+ * control. It is possible that a concurrent writer is already
+ * trying to compact parts of the stack, which would lead to a
+ * `REFTABLE_LOCK_ERROR` because parts of the stack are locked
+ * already. This is a benign error though, so we ignore it.
+ */
+ err = reftable_stack_auto_compact(add->stack);
+ if (err < 0 && err != REFTABLE_LOCK_ERROR)
+ goto done;
+ err = 0;
+ }
+
+done:
+ reftable_addition_close(add);
+ return err;
+}
+
+int reftable_stack_new_addition(struct reftable_addition **dest,
+ struct reftable_stack *st)
+{
+ int err = 0;
+ struct reftable_addition empty = REFTABLE_ADDITION_INIT;
+ REFTABLE_CALLOC_ARRAY(*dest, 1);
+ **dest = empty;
+ err = reftable_stack_init_addition(*dest, st);
+ if (err) {
+ reftable_free(*dest);
+ *dest = NULL;
+ }
+ return err;
+}
+
+static int stack_try_add(struct reftable_stack *st,
+ int (*write_table)(struct reftable_writer *wr,
+ void *arg),
+ void *arg)
+{
+ struct reftable_addition add = REFTABLE_ADDITION_INIT;
+ int err = reftable_stack_init_addition(&add, st);
+ if (err < 0)
+ goto done;
+
+ err = reftable_addition_add(&add, write_table, arg);
+ if (err < 0)
+ goto done;
+
+ err = reftable_addition_commit(&add);
+done:
+ reftable_addition_close(&add);
+ return err;
+}
+
+int reftable_addition_add(struct reftable_addition *add,
+ int (*write_table)(struct reftable_writer *wr,
+ void *arg),
+ void *arg)
+{
+ struct strbuf temp_tab_file_name = STRBUF_INIT;
+ struct strbuf tab_file_name = STRBUF_INIT;
+ struct strbuf next_name = STRBUF_INIT;
+ struct reftable_writer *wr = NULL;
+ struct tempfile *tab_file = NULL;
+ int err = 0;
+ int tab_fd;
+
+ strbuf_reset(&next_name);
+ format_name(&next_name, add->next_update_index, add->next_update_index);
+
+ stack_filename(&temp_tab_file_name, add->stack, next_name.buf);
+ strbuf_addstr(&temp_tab_file_name, ".temp.XXXXXX");
+
+ tab_file = mks_tempfile(temp_tab_file_name.buf);
+ if (!tab_file) {
+ err = REFTABLE_IO_ERROR;
+ goto done;
+ }
+ if (add->stack->config.default_permissions) {
+ if (chmod(get_tempfile_path(tab_file),
+ add->stack->config.default_permissions)) {
+ err = REFTABLE_IO_ERROR;
+ goto done;
+ }
+ }
+ tab_fd = get_tempfile_fd(tab_file);
+
+ wr = reftable_new_writer(reftable_fd_write, reftable_fd_flush, &tab_fd,
+ &add->stack->config);
+ err = write_table(wr, arg);
+ if (err < 0)
+ goto done;
+
+ err = reftable_writer_close(wr);
+ if (err == REFTABLE_EMPTY_TABLE_ERROR) {
+ err = 0;
+ goto done;
+ }
+ if (err < 0)
+ goto done;
+
+ err = close_tempfile_gently(tab_file);
+ if (err < 0) {
+ err = REFTABLE_IO_ERROR;
+ goto done;
+ }
+
+ err = stack_check_addition(add->stack, get_tempfile_path(tab_file));
+ if (err < 0)
+ goto done;
+
+ if (wr->min_update_index < add->next_update_index) {
+ err = REFTABLE_API_ERROR;
+ goto done;
+ }
+
+ format_name(&next_name, wr->min_update_index, wr->max_update_index);
+ strbuf_addstr(&next_name, ".ref");
+ stack_filename(&tab_file_name, add->stack, next_name.buf);
+
+ /*
+ * On Windows, this relies on rand() picking a unique destination name.
+ * Maybe we should add a retry loop here as well?
+ */
+ err = rename_tempfile(&tab_file, tab_file_name.buf);
+ if (err < 0) {
+ err = REFTABLE_IO_ERROR;
+ goto done;
+ }
+
+ REFTABLE_ALLOC_GROW(add->new_tables, add->new_tables_len + 1,
+ add->new_tables_cap);
+ add->new_tables[add->new_tables_len++] = strbuf_detach(&next_name, NULL);
+done:
+ delete_tempfile(&tab_file);
+ strbuf_release(&temp_tab_file_name);
+ strbuf_release(&tab_file_name);
+ strbuf_release(&next_name);
+ reftable_writer_free(wr);
+ return err;
+}
+
+uint64_t reftable_stack_next_update_index(struct reftable_stack *st)
+{
+ int sz = st->merged->stack_len;
+ if (sz > 0)
+ return reftable_reader_max_update_index(st->readers[sz - 1]) +
+ 1;
+ return 1;
+}
+
+static int stack_compact_locked(struct reftable_stack *st,
+ size_t first, size_t last,
+ struct reftable_log_expiry_config *config,
+ struct tempfile **tab_file_out)
+{
+ struct strbuf next_name = STRBUF_INIT;
+ struct strbuf tab_file_path = STRBUF_INIT;
+ struct reftable_writer *wr = NULL;
+ struct tempfile *tab_file;
+ int tab_fd, err = 0;
+
+ format_name(&next_name,
+ reftable_reader_min_update_index(st->readers[first]),
+ reftable_reader_max_update_index(st->readers[last]));
+ stack_filename(&tab_file_path, st, next_name.buf);
+ strbuf_addstr(&tab_file_path, ".temp.XXXXXX");
+
+ tab_file = mks_tempfile(tab_file_path.buf);
+ if (!tab_file) {
+ err = REFTABLE_IO_ERROR;
+ goto done;
+ }
+ tab_fd = get_tempfile_fd(tab_file);
+
+ if (st->config.default_permissions &&
+ chmod(get_tempfile_path(tab_file), st->config.default_permissions) < 0) {
+ err = REFTABLE_IO_ERROR;
+ goto done;
+ }
+
+ wr = reftable_new_writer(reftable_fd_write, reftable_fd_flush,
+ &tab_fd, &st->config);
+ err = stack_write_compact(st, wr, first, last, config);
+ if (err < 0)
+ goto done;
+
+ err = reftable_writer_close(wr);
+ if (err < 0)
+ goto done;
+
+ err = close_tempfile_gently(tab_file);
+ if (err < 0)
+ goto done;
+
+ *tab_file_out = tab_file;
+ tab_file = NULL;
+
+done:
+ delete_tempfile(&tab_file);
+ reftable_writer_free(wr);
+ strbuf_release(&next_name);
+ strbuf_release(&tab_file_path);
+ return err;
+}
+
+static int stack_write_compact(struct reftable_stack *st,
+ struct reftable_writer *wr,
+ size_t first, size_t last,
+ struct reftable_log_expiry_config *config)
+{
+ size_t subtabs_len = last - first + 1;
+ struct reftable_table *subtabs = reftable_calloc(
+ last - first + 1, sizeof(*subtabs));
+ struct reftable_merged_table *mt = NULL;
+ struct reftable_iterator it = { NULL };
+ struct reftable_ref_record ref = { NULL };
+ struct reftable_log_record log = { NULL };
+ uint64_t entries = 0;
+ int err = 0;
+
+ for (size_t i = first, j = 0; i <= last; i++) {
+ struct reftable_reader *t = st->readers[i];
+ reftable_table_from_reader(&subtabs[j++], t);
+ st->stats.bytes += t->size;
+ }
+ reftable_writer_set_limits(wr, st->readers[first]->min_update_index,
+ st->readers[last]->max_update_index);
+
+ err = reftable_new_merged_table(&mt, subtabs, subtabs_len,
+ st->config.hash_id);
+ if (err < 0) {
+ reftable_free(subtabs);
+ goto done;
+ }
+
+ err = reftable_merged_table_seek_ref(mt, &it, "");
+ if (err < 0)
+ goto done;
+
+ while (1) {
+ err = reftable_iterator_next_ref(&it, &ref);
+ if (err > 0) {
+ err = 0;
+ break;
+ }
+ if (err < 0)
+ goto done;
+
+ if (first == 0 && reftable_ref_record_is_deletion(&ref)) {
+ continue;
+ }
+
+ err = reftable_writer_add_ref(wr, &ref);
+ if (err < 0)
+ goto done;
+ entries++;
+ }
+ reftable_iterator_destroy(&it);
+
+ err = reftable_merged_table_seek_log(mt, &it, "");
+ if (err < 0)
+ goto done;
+
+ while (1) {
+ err = reftable_iterator_next_log(&it, &log);
+ if (err > 0) {
+ err = 0;
+ break;
+ }
+ if (err < 0)
+ goto done;
+ if (first == 0 && reftable_log_record_is_deletion(&log)) {
+ continue;
+ }
+
+ if (config && config->min_update_index > 0 &&
+ log.update_index < config->min_update_index) {
+ continue;
+ }
+
+ if (config && config->time > 0 &&
+ log.value.update.time < config->time) {
+ continue;
+ }
+
+ err = reftable_writer_add_log(wr, &log);
+ if (err < 0)
+ goto done;
+ entries++;
+ }
+
+done:
+ reftable_iterator_destroy(&it);
+ if (mt) {
+ merged_table_release(mt);
+ reftable_merged_table_free(mt);
+ }
+ reftable_ref_record_release(&ref);
+ reftable_log_record_release(&log);
+ st->stats.entries_written += entries;
+ return err;
+}
+
+/*
+ * Compact all tables in the inclusive range `[first, last]` into a single
+ * new table.
+ *
+ * This function returns `0` on success or a code `< 0` on failure. When the
+ * stack or any of the tables in the specified range are already locked then
+ * this function returns `REFTABLE_LOCK_ERROR`. This is a benign error that
+ * callers can either ignore or retry compaction after some delay.
+ */
+static int stack_compact_range(struct reftable_stack *st,
+ size_t first, size_t last,
+ struct reftable_log_expiry_config *expiry)
+{
+ struct strbuf tables_list_buf = STRBUF_INIT;
+ struct strbuf new_table_name = STRBUF_INIT;
+ struct strbuf new_table_path = STRBUF_INIT;
+ struct strbuf table_name = STRBUF_INIT;
+ struct lock_file tables_list_lock = LOCK_INIT;
+ struct lock_file *table_locks = NULL;
+ struct tempfile *new_table = NULL;
+ int is_empty_table = 0, err = 0;
+ size_t i;
+
+ if (first > last || (!expiry && first == last)) {
+ err = 0;
+ goto done;
+ }
+
+ st->stats.attempts++;
+
+ /*
+ * Hold the lock so that we can read "tables.list" and lock all tables
+ * which are part of the user-specified range.
+ */
+ err = hold_lock_file_for_update(&tables_list_lock, st->list_file,
+ LOCK_NO_DEREF);
+ if (err < 0) {
+ if (errno == EEXIST)
+ err = REFTABLE_LOCK_ERROR;
+ else
+ err = REFTABLE_IO_ERROR;
+ goto done;
+ }
+
+ err = stack_uptodate(st);
+ if (err)
+ goto done;
+
+ /*
+ * Lock all tables in the user-provided range. This is the slice of our
+ * stack which we'll compact.
+ */
+ REFTABLE_CALLOC_ARRAY(table_locks, last - first + 1);
+ for (i = first; i <= last; i++) {
+ stack_filename(&table_name, st, reader_name(st->readers[i]));
+
+ err = hold_lock_file_for_update(&table_locks[i - first],
+ table_name.buf, LOCK_NO_DEREF);
+ if (err < 0) {
+ if (errno == EEXIST)
+ err = REFTABLE_LOCK_ERROR;
+ else
+ err = REFTABLE_IO_ERROR;
+ goto done;
+ }
+
+ /*
+ * We need to close the lockfiles as we might otherwise easily
+ * run into file descriptor exhaustion when we compact a lot
+ * of tables.
+ */
+ err = close_lock_file_gently(&table_locks[i - first]);
+ if (err < 0) {
+ err = REFTABLE_IO_ERROR;
+ goto done;
+ }
+ }
+
+ /*
+ * We have locked all tables in our range and can thus release the
+ * "tables.list" lock while compacting the locked tables. This allows
+ * concurrent updates to the stack to proceed.
+ */
+ err = rollback_lock_file(&tables_list_lock);
+ if (err < 0) {
+ err = REFTABLE_IO_ERROR;
+ goto done;
+ }
+
+ /*
+ * Compact the now-locked tables into a new table. Note that compacting
+ * these tables may result in an empty new table in case tombstones
+ * cancel out all refs in that range.
+ */
+ err = stack_compact_locked(st, first, last, expiry, &new_table);
+ if (err < 0) {
+ if (err != REFTABLE_EMPTY_TABLE_ERROR)
+ goto done;
+ is_empty_table = 1;
+ }
+
+ /*
+ * Now that we have written the new, compacted table we need to re-lock
+ * "tables.list". We'll then replace the compacted range of tables with
+ * the new table.
+ */
+ err = hold_lock_file_for_update(&tables_list_lock, st->list_file,
+ LOCK_NO_DEREF);
+ if (err < 0) {
+ if (errno == EEXIST)
+ err = REFTABLE_LOCK_ERROR;
+ else
+ err = REFTABLE_IO_ERROR;
+ goto done;
+ }
+
+ if (st->config.default_permissions) {
+ if (chmod(get_lock_file_path(&tables_list_lock),
+ st->config.default_permissions) < 0) {
+ err = REFTABLE_IO_ERROR;
+ goto done;
+ }
+ }
+
+ /*
+ * If the resulting compacted table is not empty, then we need to move
+ * it into place now.
+ */
+ if (!is_empty_table) {
+ format_name(&new_table_name, st->readers[first]->min_update_index,
+ st->readers[last]->max_update_index);
+ strbuf_addstr(&new_table_name, ".ref");
+ stack_filename(&new_table_path, st, new_table_name.buf);
+
+ err = rename_tempfile(&new_table, new_table_path.buf);
+ if (err < 0) {
+ err = REFTABLE_IO_ERROR;
+ goto done;
+ }
+ }
+
+ /*
+ * Write the new "tables.list" contents with the compacted table we
+ * have just written. In case the compacted table became empty we
+ * simply skip writing it.
+ */
+ for (i = 0; i < first; i++)
+ strbuf_addf(&tables_list_buf, "%s\n", st->readers[i]->name);
+ if (!is_empty_table)
+ strbuf_addf(&tables_list_buf, "%s\n", new_table_name.buf);
+ for (i = last + 1; i < st->merged->stack_len; i++)
+ strbuf_addf(&tables_list_buf, "%s\n", st->readers[i]->name);
+
+ err = write_in_full(get_lock_file_fd(&tables_list_lock),
+ tables_list_buf.buf, tables_list_buf.len);
+ if (err < 0) {
+ err = REFTABLE_IO_ERROR;
+ unlink(new_table_path.buf);
+ goto done;
+ }
+
+ err = fsync_component(FSYNC_COMPONENT_REFERENCE, get_lock_file_fd(&tables_list_lock));
+ if (err < 0) {
+ err = REFTABLE_IO_ERROR;
+ unlink(new_table_path.buf);
+ goto done;
+ }
+
+ err = commit_lock_file(&tables_list_lock);
+ if (err < 0) {
+ err = REFTABLE_IO_ERROR;
+ unlink(new_table_path.buf);
+ goto done;
+ }
+
+ /*
+ * Reload the stack before deleting the compacted tables. On Windows,
+ * files can only be deleted after they have been closed, so the
+ * reload needs to happen first.
+ */
+ err = reftable_stack_reload_maybe_reuse(st, first < last);
+ if (err < 0)
+ goto done;
+
+ /*
+ * Delete the old tables. They may still be in use by concurrent
+ * readers, so it is expected that unlinking tables may fail.
+ */
+ for (i = first; i <= last; i++) {
+ struct lock_file *table_lock = &table_locks[i - first];
+ char *table_path = get_locked_file_path(table_lock);
+ unlink(table_path);
+ free(table_path);
+ }
+
+done:
+ rollback_lock_file(&tables_list_lock);
+ for (i = first; table_locks && i <= last; i++)
+ rollback_lock_file(&table_locks[i - first]);
+ reftable_free(table_locks);
+
+ delete_tempfile(&new_table);
+ strbuf_release(&new_table_name);
+ strbuf_release(&new_table_path);
+
+ strbuf_release(&tables_list_buf);
+ strbuf_release(&table_name);
+ return err;
+}
+
+int reftable_stack_compact_all(struct reftable_stack *st,
+ struct reftable_log_expiry_config *config)
+{
+ return stack_compact_range(st, 0, st->merged->stack_len ?
+ st->merged->stack_len - 1 : 0, config);
+}
+
+static int stack_compact_range_stats(struct reftable_stack *st,
+ size_t first, size_t last,
+ struct reftable_log_expiry_config *config)
+{
+ int err = stack_compact_range(st, first, last, config);
+ if (err == REFTABLE_LOCK_ERROR)
+ st->stats.failures++;
+ return err;
+}
+
+static int segment_size(struct segment *s)
+{
+ return s->end - s->start;
+}
+
+struct segment suggest_compaction_segment(uint64_t *sizes, size_t n)
+{
+ struct segment seg = { 0 };
+ uint64_t bytes;
+ size_t i;
+
+ /*
+ * If there are no tables or only a single one then we don't have to
+ * compact anything. The sequence is geometric by definition already.
+ */
+ if (n <= 1)
+ return seg;
+
+ /*
+ * Find the ending table of the compaction segment needed to restore the
+ * geometric sequence. Note that the segment end is exclusive.
+ *
+ * To do so, we iterate backwards starting from the most recent table
+ * until a valid segment end is found. If the preceding table is smaller
+ * than the current table multiplied by the geometric factor (2), the
+ * compaction segment end has been identified.
+ *
+ * Tables after the ending point are not added to the byte count because
+ * they are already valid members of the geometric sequence. Due to the
+ * properties of a geometric sequence, it is not possible for the sum of
+ * these tables to exceed the value of the ending point table.
+ *
+ * Example table size sequence requiring no compaction:
+ * 64, 32, 16, 8, 4, 2, 1
+ *
+ * Example table size sequence where compaction segment end is set to
+ * the last table. Since the segment end is exclusive, the last table is
+ * excluded during subsequent compaction and the table with size 3 is
+ * the final table included:
+ * 64, 32, 16, 8, 4, 3, 1
+ */
+ for (i = n - 1; i > 0; i--) {
+ if (sizes[i - 1] < sizes[i] * 2) {
+ seg.end = i + 1;
+ bytes = sizes[i];
+ break;
+ }
+ }
+
+ /*
+ * Find the starting table of the compaction segment by iterating
+ * through the remaining tables and keeping track of the accumulated
+ * size of all tables seen from the segment end table. The previous
+ * table is compared to the accumulated size because the tables from the
+ * segment end are merged backwards recursively.
+ *
+ * Note that we keep iterating even after we have found the first
+ * starting point. This is because there may be tables in the stack
+ * preceding that first starting point which violate the geometric
+ * sequence.
+ *
+ * Example compaction segment start set to table with size 32:
+ * 128, 32, 16, 8, 4, 3, 1
+ */
+ for (; i > 0; i--) {
+ uint64_t curr = bytes;
+ bytes += sizes[i - 1];
+
+ if (sizes[i - 1] < curr * 2) {
+ seg.start = i - 1;
+ seg.bytes = bytes;
+ }
+ }
+
+ return seg;
+}
+
+static uint64_t *stack_table_sizes_for_compaction(struct reftable_stack *st)
+{
+ uint64_t *sizes =
+ reftable_calloc(st->merged->stack_len, sizeof(*sizes));
+ int version = (st->config.hash_id == GIT_SHA1_FORMAT_ID) ? 1 : 2;
+ int overhead = header_size(version) - 1;
+ int i = 0;
+ for (i = 0; i < st->merged->stack_len; i++) {
+ sizes[i] = st->readers[i]->size - overhead;
+ }
+ return sizes;
+}
+
+int reftable_stack_auto_compact(struct reftable_stack *st)
+{
+ uint64_t *sizes = stack_table_sizes_for_compaction(st);
+ struct segment seg =
+ suggest_compaction_segment(sizes, st->merged->stack_len);
+ reftable_free(sizes);
+ if (segment_size(&seg) > 0)
+ return stack_compact_range_stats(st, seg.start, seg.end - 1,
+ NULL);
+
+ return 0;
+}
+
+struct reftable_compaction_stats *
+reftable_stack_compaction_stats(struct reftable_stack *st)
+{
+ return &st->stats;
+}
+
+int reftable_stack_read_ref(struct reftable_stack *st, const char *refname,
+ struct reftable_ref_record *ref)
+{
+ struct reftable_table tab = { NULL };
+ reftable_table_from_merged_table(&tab, reftable_stack_merged_table(st));
+ return reftable_table_read_ref(&tab, refname, ref);
+}
+
+int reftable_stack_read_log(struct reftable_stack *st, const char *refname,
+ struct reftable_log_record *log)
+{
+ struct reftable_iterator it = { NULL };
+ struct reftable_merged_table *mt = reftable_stack_merged_table(st);
+ int err = reftable_merged_table_seek_log(mt, &it, refname);
+ if (err)
+ goto done;
+
+ err = reftable_iterator_next_log(&it, log);
+ if (err)
+ goto done;
+
+ if (strcmp(log->refname, refname) ||
+ reftable_log_record_is_deletion(log)) {
+ err = 1;
+ goto done;
+ }
+
+done:
+ if (err) {
+ reftable_log_record_release(log);
+ }
+ reftable_iterator_destroy(&it);
+ return err;
+}
+
+static int stack_check_addition(struct reftable_stack *st,
+ const char *new_tab_name)
+{
+ int err = 0;
+ struct reftable_block_source src = { NULL };
+ struct reftable_reader *rd = NULL;
+ struct reftable_table tab = { NULL };
+ struct reftable_ref_record *refs = NULL;
+ struct reftable_iterator it = { NULL };
+ int cap = 0;
+ int len = 0;
+ int i = 0;
+
+ if (st->config.skip_name_check)
+ return 0;
+
+ err = reftable_block_source_from_file(&src, new_tab_name);
+ if (err < 0)
+ goto done;
+
+ err = reftable_new_reader(&rd, &src, new_tab_name);
+ if (err < 0)
+ goto done;
+
+ err = reftable_reader_seek_ref(rd, &it, "");
+ if (err > 0) {
+ err = 0;
+ goto done;
+ }
+ if (err < 0)
+ goto done;
+
+ while (1) {
+ struct reftable_ref_record ref = { NULL };
+ err = reftable_iterator_next_ref(&it, &ref);
+ if (err > 0)
+ break;
+ if (err < 0)
+ goto done;
+
+ REFTABLE_ALLOC_GROW(refs, len + 1, cap);
+ refs[len++] = ref;
+ }
+
+ reftable_table_from_merged_table(&tab, reftable_stack_merged_table(st));
+
+ err = validate_ref_record_addition(tab, refs, len);
+
+done:
+ for (i = 0; i < len; i++) {
+ reftable_ref_record_release(&refs[i]);
+ }
+
+ free(refs);
+ reftable_iterator_destroy(&it);
+ reftable_reader_free(rd);
+ return err;
+}
+
+static int is_table_name(const char *s)
+{
+ const char *dot = strrchr(s, '.');
+ return dot && !strcmp(dot, ".ref");
+}
+
+static void remove_maybe_stale_table(struct reftable_stack *st, uint64_t max,
+ const char *name)
+{
+ int err = 0;
+ uint64_t update_idx = 0;
+ struct reftable_block_source src = { NULL };
+ struct reftable_reader *rd = NULL;
+ struct strbuf table_path = STRBUF_INIT;
+ stack_filename(&table_path, st, name);
+
+ err = reftable_block_source_from_file(&src, table_path.buf);
+ if (err < 0)
+ goto done;
+
+ err = reftable_new_reader(&rd, &src, name);
+ if (err < 0)
+ goto done;
+
+ update_idx = reftable_reader_max_update_index(rd);
+ reftable_reader_free(rd);
+
+ if (update_idx <= max) {
+ unlink(table_path.buf);
+ }
+done:
+ strbuf_release(&table_path);
+}
+
+static int reftable_stack_clean_locked(struct reftable_stack *st)
+{
+ uint64_t max = reftable_merged_table_max_update_index(
+ reftable_stack_merged_table(st));
+ DIR *dir = opendir(st->reftable_dir);
+ struct dirent *d = NULL;
+ if (!dir) {
+ return REFTABLE_IO_ERROR;
+ }
+
+ while ((d = readdir(dir))) {
+ int i = 0;
+ int found = 0;
+ if (!is_table_name(d->d_name))
+ continue;
+
+ for (i = 0; !found && i < st->readers_len; i++) {
+ found = !strcmp(reader_name(st->readers[i]), d->d_name);
+ }
+ if (found)
+ continue;
+
+ remove_maybe_stale_table(st, max, d->d_name);
+ }
+
+ closedir(dir);
+ return 0;
+}
+
+int reftable_stack_clean(struct reftable_stack *st)
+{
+ struct reftable_addition *add = NULL;
+ int err = reftable_stack_new_addition(&add, st);
+ if (err < 0) {
+ goto done;
+ }
+
+ err = reftable_stack_reload(st);
+ if (err < 0) {
+ goto done;
+ }
+
+ err = reftable_stack_clean_locked(st);
+
+done:
+ reftable_addition_destroy(add);
+ return err;
+}
+
+int reftable_stack_print_directory(const char *stackdir, uint32_t hash_id)
+{
+ struct reftable_stack *stack = NULL;
+ struct reftable_write_options cfg = { .hash_id = hash_id };
+ struct reftable_merged_table *merged = NULL;
+ struct reftable_table table = { NULL };
+
+ int err = reftable_new_stack(&stack, stackdir, cfg);
+ if (err < 0)
+ goto done;
+
+ merged = reftable_stack_merged_table(stack);
+ reftable_table_from_merged_table(&table, merged);
+ err = reftable_table_print(&table);
+done:
+ if (stack)
+ reftable_stack_destroy(stack);
+ return err;
+}
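The addition functions above form a small transactional API: open an addition
(which takes the "tables.list" lock), write one or more new tables through a
callback, then commit. Below is a minimal caller sketch mirroring how the
tests later in this patch drive the API; the helper names write_one_ref() and
add_ref_transactionally() are illustrative only.

    static int write_one_ref(struct reftable_writer *wr, void *arg)
    {
        struct reftable_ref_record *ref = arg;
        /* The writer needs its update-index limits set before adding records. */
        reftable_writer_set_limits(wr, ref->update_index, ref->update_index);
        return reftable_writer_add_ref(wr, ref);
    }

    static int add_ref_transactionally(struct reftable_stack *st,
                                       struct reftable_ref_record *ref)
    {
        struct reftable_addition *add = NULL;
        /* Fails with REFTABLE_LOCK_ERROR if "tables.list" is already locked,
         * or REFTABLE_OUTDATED_ERROR if the stack needs a reload first. */
        int err = reftable_stack_new_addition(&add, st);
        if (err < 0)
            return err;

        err = reftable_addition_add(add, write_one_ref, ref);
        if (!err)
            err = reftable_addition_commit(add);

        /* Also safe after a successful commit; it only cleans up leftovers. */
        reftable_addition_destroy(add);
        return err;
    }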
diff --git a/reftable/stack.h b/reftable/stack.h
new file mode 100644
index 0000000..d43efa4
--- /dev/null
+++ b/reftable/stack.h
@@ -0,0 +1,40 @@
+/*
+Copyright 2020 Google LLC
+
+Use of this source code is governed by a BSD-style
+license that can be found in the LICENSE file or at
+https://developers.google.com/open-source/licenses/bsd
+*/
+
+#ifndef STACK_H
+#define STACK_H
+
+#include "system.h"
+#include "reftable-writer.h"
+#include "reftable-stack.h"
+
+struct reftable_stack {
+ struct stat list_st;
+ char *list_file;
+ int list_fd;
+
+ char *reftable_dir;
+
+ struct reftable_write_options config;
+
+ struct reftable_reader **readers;
+ size_t readers_len;
+ struct reftable_merged_table *merged;
+ struct reftable_compaction_stats stats;
+};
+
+int read_lines(const char *filename, char ***lines);
+
+struct segment {
+ size_t start, end;
+ uint64_t bytes;
+};
+
+struct segment suggest_compaction_segment(uint64_t *sizes, size_t n);
+
+#endif
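To make the geometric heuristic behind suggest_compaction_segment() concrete,
here is a hand-trace over the same table sizes that
test_suggest_compaction_segment() below exercises:

    uint64_t sizes[] = { 512, 64, 17, 16, 9, 9, 9, 16, 2, 16 };
    struct segment seg = suggest_compaction_segment(sizes, 10);
    /*
     * The second-to-last table (size 2) is smaller than twice the last one
     * (2 < 2 * 16), so the exclusive segment end lands just past the last
     * table: seg.end == 10. Walking backwards while accumulating bytes,
     * every table down to index 1 stays below twice the running total, so
     * seg.start == 1; only the table of size 512 at index 0 is left out of
     * the compaction.
     */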
diff --git a/reftable/stack_test.c b/reftable/stack_test.c
new file mode 100644
index 0000000..1df3ffc
--- /dev/null
+++ b/reftable/stack_test.c
@@ -0,0 +1,1112 @@
+/*
+Copyright 2020 Google LLC
+
+Use of this source code is governed by a BSD-style
+license that can be found in the LICENSE file or at
+https://developers.google.com/open-source/licenses/bsd
+*/
+
+#include "stack.h"
+
+#include "system.h"
+
+#include "reftable-reader.h"
+#include "merged.h"
+#include "basics.h"
+#include "record.h"
+#include "test_framework.h"
+#include "reftable-tests.h"
+#include "reader.h"
+
+#include <sys/types.h>
+#include <dirent.h>
+
+static void clear_dir(const char *dirname)
+{
+ struct strbuf path = STRBUF_INIT;
+ strbuf_addstr(&path, dirname);
+ remove_dir_recursively(&path, 0);
+ strbuf_release(&path);
+}
+
+static int count_dir_entries(const char *dirname)
+{
+ DIR *dir = opendir(dirname);
+ int len = 0;
+ struct dirent *d;
+ if (!dir)
+ return 0;
+
+ while ((d = readdir(dir))) {
+ /*
+ * Besides skipping over "." and "..", we also need to
+ * skip over other files that have a leading ".". This
+ * is due to the behaviour of NFS, which renames files
+ * to ".nfs*" to emulate delete-on-last-close.
+ *
+ * In any case this should be fine as the reftable
+ * library will never write files with leading dots
+ * anyway.
+ */
+ if (starts_with(d->d_name, "."))
+ continue;
+ len++;
+ }
+ closedir(dir);
+ return len;
+}
+
+/*
+ * Work the line number into the tempdir name, so we can see which tests
+ * forget to clean up.
+ */
+static char *get_tmp_template(int linenumber)
+{
+ const char *tmp = getenv("TMPDIR");
+ static char template[1024];
+ snprintf(template, sizeof(template) - 1, "%s/stack_test-%d.XXXXXX",
+ tmp ? tmp : "/tmp", linenumber);
+ return template;
+}
+
+static char *get_tmp_dir(int linenumber)
+{
+ char *dir = get_tmp_template(linenumber);
+ EXPECT(mkdtemp(dir));
+ return dir;
+}
+
+static void test_read_file(void)
+{
+ char *fn = get_tmp_template(__LINE__);
+ int fd = mkstemp(fn);
+ char out[1024] = "line1\n\nline2\nline3";
+ int n, err;
+ char **names = NULL;
+ char *want[] = { "line1", "line2", "line3" };
+ int i = 0;
+
+ EXPECT(fd > 0);
+ n = write_in_full(fd, out, strlen(out));
+ EXPECT(n == strlen(out));
+ err = close(fd);
+ EXPECT(err >= 0);
+
+ err = read_lines(fn, &names);
+ EXPECT_ERR(err);
+
+ for (i = 0; names[i]; i++) {
+ EXPECT(0 == strcmp(want[i], names[i]));
+ }
+ free_names(names);
+ (void) remove(fn);
+}
+
+static void test_parse_names(void)
+{
+ char buf[] = "line\n";
+ char **names = NULL;
+ parse_names(buf, strlen(buf), &names);
+
+ EXPECT(NULL != names[0]);
+ EXPECT(0 == strcmp(names[0], "line"));
+ EXPECT(NULL == names[1]);
+ free_names(names);
+}
+
+static void test_names_equal(void)
+{
+ char *a[] = { "a", "b", "c", NULL };
+ char *b[] = { "a", "b", "d", NULL };
+ char *c[] = { "a", "b", NULL };
+
+ EXPECT(names_equal(a, a));
+ EXPECT(!names_equal(a, b));
+ EXPECT(!names_equal(a, c));
+}
+
+static int write_test_ref(struct reftable_writer *wr, void *arg)
+{
+ struct reftable_ref_record *ref = arg;
+ reftable_writer_set_limits(wr, ref->update_index, ref->update_index);
+ return reftable_writer_add_ref(wr, ref);
+}
+
+struct write_log_arg {
+ struct reftable_log_record *log;
+ uint64_t update_index;
+};
+
+static int write_test_log(struct reftable_writer *wr, void *arg)
+{
+ struct write_log_arg *wla = arg;
+
+ reftable_writer_set_limits(wr, wla->update_index, wla->update_index);
+ return reftable_writer_add_log(wr, wla->log);
+}
+
+static void test_reftable_stack_add_one(void)
+{
+ char *dir = get_tmp_dir(__LINE__);
+ struct strbuf scratch = STRBUF_INIT;
+ int mask = umask(002);
+ struct reftable_write_options cfg = {
+ .default_permissions = 0660,
+ };
+ struct reftable_stack *st = NULL;
+ int err;
+ struct reftable_ref_record ref = {
+ .refname = "HEAD",
+ .update_index = 1,
+ .value_type = REFTABLE_REF_SYMREF,
+ .value.symref = "master",
+ };
+ struct reftable_ref_record dest = { NULL };
+ struct stat stat_result = { 0 };
+ err = reftable_new_stack(&st, dir, cfg);
+ EXPECT_ERR(err);
+
+ err = reftable_stack_add(st, &write_test_ref, &ref);
+ EXPECT_ERR(err);
+
+ err = reftable_stack_read_ref(st, ref.refname, &dest);
+ EXPECT_ERR(err);
+ EXPECT(0 == strcmp("master", dest.value.symref));
+ EXPECT(st->readers_len > 0);
+
+ printf("testing print functionality:\n");
+ err = reftable_stack_print_directory(dir, GIT_SHA1_FORMAT_ID);
+ EXPECT_ERR(err);
+
+ err = reftable_stack_print_directory(dir, GIT_SHA256_FORMAT_ID);
+ EXPECT(err == REFTABLE_FORMAT_ERROR);
+
+#ifndef GIT_WINDOWS_NATIVE
+ strbuf_addstr(&scratch, dir);
+ strbuf_addstr(&scratch, "/tables.list");
+ err = stat(scratch.buf, &stat_result);
+ EXPECT(!err);
+ EXPECT((stat_result.st_mode & 0777) == cfg.default_permissions);
+
+ strbuf_reset(&scratch);
+ strbuf_addstr(&scratch, dir);
+ strbuf_addstr(&scratch, "/");
+ /* do not try at home; not an external API for reftable. */
+ strbuf_addstr(&scratch, st->readers[0]->name);
+ err = stat(scratch.buf, &stat_result);
+ EXPECT(!err);
+ EXPECT((stat_result.st_mode & 0777) == cfg.default_permissions);
+#else
+ (void) stat_result;
+#endif
+
+ reftable_ref_record_release(&dest);
+ reftable_stack_destroy(st);
+ strbuf_release(&scratch);
+ clear_dir(dir);
+ umask(mask);
+}
+
+static void test_reftable_stack_uptodate(void)
+{
+ struct reftable_write_options cfg = { 0 };
+ struct reftable_stack *st1 = NULL;
+ struct reftable_stack *st2 = NULL;
+ char *dir = get_tmp_dir(__LINE__);
+
+ int err;
+ struct reftable_ref_record ref1 = {
+ .refname = "HEAD",
+ .update_index = 1,
+ .value_type = REFTABLE_REF_SYMREF,
+ .value.symref = "master",
+ };
+ struct reftable_ref_record ref2 = {
+ .refname = "branch2",
+ .update_index = 2,
+ .value_type = REFTABLE_REF_SYMREF,
+ .value.symref = "master",
+ };
+
+
+ /* simulate multi-process access to the same stack
+ by creating two stacks for the same directory.
+ */
+ err = reftable_new_stack(&st1, dir, cfg);
+ EXPECT_ERR(err);
+
+ err = reftable_new_stack(&st2, dir, cfg);
+ EXPECT_ERR(err);
+
+ err = reftable_stack_add(st1, &write_test_ref, &ref1);
+ EXPECT_ERR(err);
+
+ err = reftable_stack_add(st2, &write_test_ref, &ref2);
+ EXPECT(err == REFTABLE_OUTDATED_ERROR);
+
+ err = reftable_stack_reload(st2);
+ EXPECT_ERR(err);
+
+ err = reftable_stack_add(st2, &write_test_ref, &ref2);
+ EXPECT_ERR(err);
+ reftable_stack_destroy(st1);
+ reftable_stack_destroy(st2);
+ clear_dir(dir);
+}
+
+static void test_reftable_stack_transaction_api(void)
+{
+ char *dir = get_tmp_dir(__LINE__);
+
+ struct reftable_write_options cfg = { 0 };
+ struct reftable_stack *st = NULL;
+ int err;
+ struct reftable_addition *add = NULL;
+
+ struct reftable_ref_record ref = {
+ .refname = "HEAD",
+ .update_index = 1,
+ .value_type = REFTABLE_REF_SYMREF,
+ .value.symref = "master",
+ };
+ struct reftable_ref_record dest = { NULL };
+
+
+ err = reftable_new_stack(&st, dir, cfg);
+ EXPECT_ERR(err);
+
+ reftable_addition_destroy(add);
+
+ err = reftable_stack_new_addition(&add, st);
+ EXPECT_ERR(err);
+
+ err = reftable_addition_add(add, &write_test_ref, &ref);
+ EXPECT_ERR(err);
+
+ err = reftable_addition_commit(add);
+ EXPECT_ERR(err);
+
+ reftable_addition_destroy(add);
+
+ err = reftable_stack_read_ref(st, ref.refname, &dest);
+ EXPECT_ERR(err);
+ EXPECT(REFTABLE_REF_SYMREF == dest.value_type);
+ EXPECT(0 == strcmp("master", dest.value.symref));
+
+ reftable_ref_record_release(&dest);
+ reftable_stack_destroy(st);
+ clear_dir(dir);
+}
+
+static void test_reftable_stack_transaction_api_performs_auto_compaction(void)
+{
+ char *dir = get_tmp_dir(__LINE__);
+ struct reftable_write_options cfg = {0};
+ struct reftable_addition *add = NULL;
+ struct reftable_stack *st = NULL;
+ int i, n = 20, err;
+
+ err = reftable_new_stack(&st, dir, cfg);
+ EXPECT_ERR(err);
+
+ for (i = 0; i <= n; i++) {
+ struct reftable_ref_record ref = {
+ .update_index = reftable_stack_next_update_index(st),
+ .value_type = REFTABLE_REF_SYMREF,
+ .value.symref = "master",
+ };
+ char name[100];
+
+ snprintf(name, sizeof(name), "branch%04d", i);
+ ref.refname = name;
+
+ /*
+ * Disable auto-compaction for all but the last run. This way
+ * we can ensure that we indeed honor this setting and have
+ * better control over when exactly auto compaction runs.
+ */
+ st->config.disable_auto_compact = i != n;
+
+ err = reftable_stack_new_addition(&add, st);
+ EXPECT_ERR(err);
+
+ err = reftable_addition_add(add, &write_test_ref, &ref);
+ EXPECT_ERR(err);
+
+ err = reftable_addition_commit(add);
+ EXPECT_ERR(err);
+
+ reftable_addition_destroy(add);
+
+ /*
+ * The stack length should grow continuously for all runs where
+ * auto compaction is disabled. When enabled, we should merge
+ * all tables in the stack.
+ */
+ if (i != n)
+ EXPECT(st->merged->stack_len == i + 1);
+ else
+ EXPECT(st->merged->stack_len == 1);
+ }
+
+ reftable_stack_destroy(st);
+ clear_dir(dir);
+}
+
+static void test_reftable_stack_auto_compaction_fails_gracefully(void)
+{
+ struct reftable_ref_record ref = {
+ .refname = "refs/heads/master",
+ .update_index = 1,
+ .value_type = REFTABLE_REF_VAL1,
+ .value.val1 = {0x01},
+ };
+ struct reftable_write_options cfg = {0};
+ struct reftable_stack *st;
+ struct strbuf table_path = STRBUF_INIT;
+ char *dir = get_tmp_dir(__LINE__);
+ int err;
+
+ err = reftable_new_stack(&st, dir, cfg);
+ EXPECT_ERR(err);
+
+ err = reftable_stack_add(st, write_test_ref, &ref);
+ EXPECT_ERR(err);
+ EXPECT(st->merged->stack_len == 1);
+ EXPECT(st->stats.attempts == 0);
+ EXPECT(st->stats.failures == 0);
+
+ /*
+ * Lock the newly written table such that it cannot be compacted.
+ * Adding a new table to the stack should not be impacted by this, even
+ * though auto-compaction will now fail.
+ */
+ strbuf_addf(&table_path, "%s/%s.lock", dir, st->readers[0]->name);
+ write_file_buf(table_path.buf, "", 0);
+
+ ref.update_index = 2;
+ err = reftable_stack_add(st, write_test_ref, &ref);
+ EXPECT_ERR(err);
+ EXPECT(st->merged->stack_len == 2);
+ EXPECT(st->stats.attempts == 1);
+ EXPECT(st->stats.failures == 1);
+
+ reftable_stack_destroy(st);
+ strbuf_release(&table_path);
+ clear_dir(dir);
+}
+
+static void test_reftable_stack_validate_refname(void)
+{
+ struct reftable_write_options cfg = { 0 };
+ struct reftable_stack *st = NULL;
+ int err;
+ char *dir = get_tmp_dir(__LINE__);
+
+ int i;
+ struct reftable_ref_record ref = {
+ .refname = "a/b",
+ .update_index = 1,
+ .value_type = REFTABLE_REF_SYMREF,
+ .value.symref = "master",
+ };
+ char *additions[] = { "a", "a/b/c" };
+
+ err = reftable_new_stack(&st, dir, cfg);
+ EXPECT_ERR(err);
+
+ err = reftable_stack_add(st, &write_test_ref, &ref);
+ EXPECT_ERR(err);
+
+ for (i = 0; i < ARRAY_SIZE(additions); i++) {
+ struct reftable_ref_record ref = {
+ .refname = additions[i],
+ .update_index = 1,
+ .value_type = REFTABLE_REF_SYMREF,
+ .value.symref = "master",
+ };
+
+ err = reftable_stack_add(st, &write_test_ref, &ref);
+ EXPECT(err == REFTABLE_NAME_CONFLICT);
+ }
+
+ reftable_stack_destroy(st);
+ clear_dir(dir);
+}
+
+static int write_error(struct reftable_writer *wr, void *arg)
+{
+ return *((int *)arg);
+}
+
+static void test_reftable_stack_update_index_check(void)
+{
+ char *dir = get_tmp_dir(__LINE__);
+
+ struct reftable_write_options cfg = { 0 };
+ struct reftable_stack *st = NULL;
+ int err;
+ struct reftable_ref_record ref1 = {
+ .refname = "name1",
+ .update_index = 1,
+ .value_type = REFTABLE_REF_SYMREF,
+ .value.symref = "master",
+ };
+ struct reftable_ref_record ref2 = {
+ .refname = "name2",
+ .update_index = 1,
+ .value_type = REFTABLE_REF_SYMREF,
+ .value.symref = "master",
+ };
+
+ err = reftable_new_stack(&st, dir, cfg);
+ EXPECT_ERR(err);
+
+ err = reftable_stack_add(st, &write_test_ref, &ref1);
+ EXPECT_ERR(err);
+
+ err = reftable_stack_add(st, &write_test_ref, &ref2);
+ EXPECT(err == REFTABLE_API_ERROR);
+ reftable_stack_destroy(st);
+ clear_dir(dir);
+}
+
+static void test_reftable_stack_lock_failure(void)
+{
+ char *dir = get_tmp_dir(__LINE__);
+
+ struct reftable_write_options cfg = { 0 };
+ struct reftable_stack *st = NULL;
+ int err, i;
+
+ err = reftable_new_stack(&st, dir, cfg);
+ EXPECT_ERR(err);
+ for (i = -1; i != REFTABLE_EMPTY_TABLE_ERROR; i--) {
+ err = reftable_stack_add(st, &write_error, &i);
+ EXPECT(err == i);
+ }
+
+ reftable_stack_destroy(st);
+ clear_dir(dir);
+}
+
+static void test_reftable_stack_add(void)
+{
+ int i = 0;
+ int err = 0;
+ struct reftable_write_options cfg = {
+ .exact_log_message = 1,
+ .default_permissions = 0660,
+ .disable_auto_compact = 1,
+ };
+ struct reftable_stack *st = NULL;
+ char *dir = get_tmp_dir(__LINE__);
+ struct reftable_ref_record refs[2] = { { NULL } };
+ struct reftable_log_record logs[2] = { { NULL } };
+ struct strbuf path = STRBUF_INIT;
+ struct stat stat_result;
+ int N = ARRAY_SIZE(refs);
+
+ err = reftable_new_stack(&st, dir, cfg);
+ EXPECT_ERR(err);
+
+ for (i = 0; i < N; i++) {
+ char buf[256];
+ snprintf(buf, sizeof(buf), "branch%02d", i);
+ refs[i].refname = xstrdup(buf);
+ refs[i].update_index = i + 1;
+ refs[i].value_type = REFTABLE_REF_VAL1;
+ set_test_hash(refs[i].value.val1, i);
+
+ logs[i].refname = xstrdup(buf);
+ logs[i].update_index = N + i + 1;
+ logs[i].value_type = REFTABLE_LOG_UPDATE;
+ logs[i].value.update.email = xstrdup("identity@invalid");
+ set_test_hash(logs[i].value.update.new_hash, i);
+ }
+
+ for (i = 0; i < N; i++) {
+ int err = reftable_stack_add(st, &write_test_ref, &refs[i]);
+ EXPECT_ERR(err);
+ }
+
+ for (i = 0; i < N; i++) {
+ struct write_log_arg arg = {
+ .log = &logs[i],
+ .update_index = reftable_stack_next_update_index(st),
+ };
+ int err = reftable_stack_add(st, &write_test_log, &arg);
+ EXPECT_ERR(err);
+ }
+
+ err = reftable_stack_compact_all(st, NULL);
+ EXPECT_ERR(err);
+
+ for (i = 0; i < N; i++) {
+ struct reftable_ref_record dest = { NULL };
+
+ int err = reftable_stack_read_ref(st, refs[i].refname, &dest);
+ EXPECT_ERR(err);
+ EXPECT(reftable_ref_record_equal(&dest, refs + i,
+ GIT_SHA1_RAWSZ));
+ reftable_ref_record_release(&dest);
+ }
+
+ for (i = 0; i < N; i++) {
+ struct reftable_log_record dest = { NULL };
+ int err = reftable_stack_read_log(st, refs[i].refname, &dest);
+ EXPECT_ERR(err);
+ EXPECT(reftable_log_record_equal(&dest, logs + i,
+ GIT_SHA1_RAWSZ));
+ reftable_log_record_release(&dest);
+ }
+
+#ifndef GIT_WINDOWS_NATIVE
+ strbuf_addstr(&path, dir);
+ strbuf_addstr(&path, "/tables.list");
+ err = stat(path.buf, &stat_result);
+ EXPECT(!err);
+ EXPECT((stat_result.st_mode & 0777) == cfg.default_permissions);
+
+ strbuf_reset(&path);
+ strbuf_addstr(&path, dir);
+ strbuf_addstr(&path, "/");
+ /* do not try at home; not an external API for reftable. */
+ strbuf_addstr(&path, st->readers[0]->name);
+ err = stat(path.buf, &stat_result);
+ EXPECT(!err);
+ EXPECT((stat_result.st_mode & 0777) == cfg.default_permissions);
+#else
+ (void) stat_result;
+#endif
+
+ /* cleanup */
+ reftable_stack_destroy(st);
+ for (i = 0; i < N; i++) {
+ reftable_ref_record_release(&refs[i]);
+ reftable_log_record_release(&logs[i]);
+ }
+ strbuf_release(&path);
+ clear_dir(dir);
+}
+
+static void test_reftable_stack_log_normalize(void)
+{
+ int err = 0;
+ struct reftable_write_options cfg = {
+ 0,
+ };
+ struct reftable_stack *st = NULL;
+ char *dir = get_tmp_dir(__LINE__);
+ struct reftable_log_record input = {
+ .refname = "branch",
+ .update_index = 1,
+ .value_type = REFTABLE_LOG_UPDATE,
+ .value = {
+ .update = {
+ .new_hash = { 1 },
+ .old_hash = { 2 },
+ },
+ },
+ };
+ struct reftable_log_record dest = {
+ .update_index = 0,
+ };
+ struct write_log_arg arg = {
+ .log = &input,
+ .update_index = 1,
+ };
+
+ err = reftable_new_stack(&st, dir, cfg);
+ EXPECT_ERR(err);
+
+ input.value.update.message = "one\ntwo";
+ err = reftable_stack_add(st, &write_test_log, &arg);
+ EXPECT(err == REFTABLE_API_ERROR);
+
+ input.value.update.message = "one";
+ err = reftable_stack_add(st, &write_test_log, &arg);
+ EXPECT_ERR(err);
+
+ err = reftable_stack_read_log(st, input.refname, &dest);
+ EXPECT_ERR(err);
+ EXPECT(0 == strcmp(dest.value.update.message, "one\n"));
+
+ input.value.update.message = "two\n";
+ arg.update_index = 2;
+ err = reftable_stack_add(st, &write_test_log, &arg);
+ EXPECT_ERR(err);
+ err = reftable_stack_read_log(st, input.refname, &dest);
+ EXPECT_ERR(err);
+ EXPECT(0 == strcmp(dest.value.update.message, "two\n"));
+
+ /* cleanup */
+ reftable_stack_destroy(st);
+ reftable_log_record_release(&dest);
+ clear_dir(dir);
+}
+
+static void test_reftable_stack_tombstone(void)
+{
+ int i = 0;
+ char *dir = get_tmp_dir(__LINE__);
+
+ struct reftable_write_options cfg = { 0 };
+ struct reftable_stack *st = NULL;
+ int err;
+ struct reftable_ref_record refs[2] = { { NULL } };
+ struct reftable_log_record logs[2] = { { NULL } };
+ int N = ARRAY_SIZE(refs);
+ struct reftable_ref_record dest = { NULL };
+ struct reftable_log_record log_dest = { NULL };
+
+
+ err = reftable_new_stack(&st, dir, cfg);
+ EXPECT_ERR(err);
+
+ /* even entries add the refs, odd entries delete them. */
+ for (i = 0; i < N; i++) {
+ const char *buf = "branch";
+ refs[i].refname = xstrdup(buf);
+ refs[i].update_index = i + 1;
+ if (i % 2 == 0) {
+ refs[i].value_type = REFTABLE_REF_VAL1;
+ set_test_hash(refs[i].value.val1, i);
+ }
+
+ logs[i].refname = xstrdup(buf);
+ /* update_index is part of the key. */
+ logs[i].update_index = 42;
+ if (i % 2 == 0) {
+ logs[i].value_type = REFTABLE_LOG_UPDATE;
+ set_test_hash(logs[i].value.update.new_hash, i);
+ logs[i].value.update.email =
+ xstrdup("identity@invalid");
+ }
+ }
+ for (i = 0; i < N; i++) {
+ int err = reftable_stack_add(st, &write_test_ref, &refs[i]);
+ EXPECT_ERR(err);
+ }
+
+ for (i = 0; i < N; i++) {
+ struct write_log_arg arg = {
+ .log = &logs[i],
+ .update_index = reftable_stack_next_update_index(st),
+ };
+ int err = reftable_stack_add(st, &write_test_log, &arg);
+ EXPECT_ERR(err);
+ }
+
+ err = reftable_stack_read_ref(st, "branch", &dest);
+ EXPECT(err == 1);
+ reftable_ref_record_release(&dest);
+
+ err = reftable_stack_read_log(st, "branch", &log_dest);
+ EXPECT(err == 1);
+ reftable_log_record_release(&log_dest);
+
+ err = reftable_stack_compact_all(st, NULL);
+ EXPECT_ERR(err);
+
+ err = reftable_stack_read_ref(st, "branch", &dest);
+ EXPECT(err == 1);
+
+ err = reftable_stack_read_log(st, "branch", &log_dest);
+ EXPECT(err == 1);
+ reftable_ref_record_release(&dest);
+ reftable_log_record_release(&log_dest);
+
+ /* cleanup */
+ reftable_stack_destroy(st);
+ for (i = 0; i < N; i++) {
+ reftable_ref_record_release(&refs[i]);
+ reftable_log_record_release(&logs[i]);
+ }
+ clear_dir(dir);
+}
+
+static void test_reftable_stack_hash_id(void)
+{
+ char *dir = get_tmp_dir(__LINE__);
+
+ struct reftable_write_options cfg = { 0 };
+ struct reftable_stack *st = NULL;
+ int err;
+
+ struct reftable_ref_record ref = {
+ .refname = "master",
+ .value_type = REFTABLE_REF_SYMREF,
+ .value.symref = "target",
+ .update_index = 1,
+ };
+ struct reftable_write_options cfg32 = { .hash_id = GIT_SHA256_FORMAT_ID };
+ struct reftable_stack *st32 = NULL;
+ struct reftable_write_options cfg_default = { 0 };
+ struct reftable_stack *st_default = NULL;
+ struct reftable_ref_record dest = { NULL };
+
+ err = reftable_new_stack(&st, dir, cfg);
+ EXPECT_ERR(err);
+
+ err = reftable_stack_add(st, &write_test_ref, &ref);
+ EXPECT_ERR(err);
+
+ /* can't read it with the wrong hash ID. */
+ err = reftable_new_stack(&st32, dir, cfg32);
+ EXPECT(err == REFTABLE_FORMAT_ERROR);
+
+ /* check that we can read it back with default config too. */
+ err = reftable_new_stack(&st_default, dir, cfg_default);
+ EXPECT_ERR(err);
+
+ err = reftable_stack_read_ref(st_default, "master", &dest);
+ EXPECT_ERR(err);
+
+ EXPECT(reftable_ref_record_equal(&ref, &dest, GIT_SHA1_RAWSZ));
+ reftable_ref_record_release(&dest);
+ reftable_stack_destroy(st);
+ reftable_stack_destroy(st_default);
+ clear_dir(dir);
+}
+
+static void test_suggest_compaction_segment(void)
+{
+ uint64_t sizes[] = { 512, 64, 17, 16, 9, 9, 9, 16, 2, 16 };
+ struct segment min =
+ suggest_compaction_segment(sizes, ARRAY_SIZE(sizes));
+ EXPECT(min.start == 1);
+ EXPECT(min.end == 10);
+}
+
+static void test_suggest_compaction_segment_nothing(void)
+{
+ uint64_t sizes[] = { 64, 32, 16, 8, 4, 2 };
+ struct segment result =
+ suggest_compaction_segment(sizes, ARRAY_SIZE(sizes));
+ EXPECT(result.start == result.end);
+}
+
+static void test_reflog_expire(void)
+{
+ char *dir = get_tmp_dir(__LINE__);
+
+ struct reftable_write_options cfg = { 0 };
+ struct reftable_stack *st = NULL;
+ struct reftable_log_record logs[20] = { { NULL } };
+ int N = ARRAY_SIZE(logs) - 1;
+ int i = 0;
+ int err;
+ struct reftable_log_expiry_config expiry = {
+ .time = 10,
+ };
+ struct reftable_log_record log = { NULL };
+
+
+ err = reftable_new_stack(&st, dir, cfg);
+ EXPECT_ERR(err);
+
+ for (i = 1; i <= N; i++) {
+ char buf[256];
+ snprintf(buf, sizeof(buf), "branch%02d", i);
+
+ logs[i].refname = xstrdup(buf);
+ logs[i].update_index = i;
+ logs[i].value_type = REFTABLE_LOG_UPDATE;
+ logs[i].value.update.time = i;
+ logs[i].value.update.email = xstrdup("identity@invalid");
+ set_test_hash(logs[i].value.update.new_hash, i);
+ }
+
+ for (i = 1; i <= N; i++) {
+ struct write_log_arg arg = {
+ .log = &logs[i],
+ .update_index = reftable_stack_next_update_index(st),
+ };
+ int err = reftable_stack_add(st, &write_test_log, &arg);
+ EXPECT_ERR(err);
+ }
+
+ err = reftable_stack_compact_all(st, NULL);
+ EXPECT_ERR(err);
+
+ err = reftable_stack_compact_all(st, &expiry);
+ EXPECT_ERR(err);
+
+ err = reftable_stack_read_log(st, logs[9].refname, &log);
+ EXPECT(err == 1);
+
+ err = reftable_stack_read_log(st, logs[11].refname, &log);
+ EXPECT_ERR(err);
+
+ expiry.min_update_index = 15;
+ err = reftable_stack_compact_all(st, &expiry);
+ EXPECT_ERR(err);
+
+ err = reftable_stack_read_log(st, logs[14].refname, &log);
+ EXPECT(err == 1);
+
+ err = reftable_stack_read_log(st, logs[16].refname, &log);
+ EXPECT_ERR(err);
+
+ /* cleanup */
+ reftable_stack_destroy(st);
+ for (i = 0; i <= N; i++) {
+ reftable_log_record_release(&logs[i]);
+ }
+ clear_dir(dir);
+ reftable_log_record_release(&log);
+}
+
+static int write_nothing(struct reftable_writer *wr, void *arg)
+{
+ reftable_writer_set_limits(wr, 1, 1);
+ return 0;
+}
+
+static void test_empty_add(void)
+{
+ struct reftable_write_options cfg = { 0 };
+ struct reftable_stack *st = NULL;
+ int err;
+ char *dir = get_tmp_dir(__LINE__);
+
+ struct reftable_stack *st2 = NULL;
+
+
+ err = reftable_new_stack(&st, dir, cfg);
+ EXPECT_ERR(err);
+
+ err = reftable_stack_add(st, &write_nothing, NULL);
+ EXPECT_ERR(err);
+
+ err = reftable_new_stack(&st2, dir, cfg);
+ EXPECT_ERR(err);
+ clear_dir(dir);
+ reftable_stack_destroy(st);
+ reftable_stack_destroy(st2);
+}
+
+static int fastlog2(uint64_t sz)
+{
+ int l = 0;
+ if (sz == 0)
+ return 0;
+ for (; sz; sz /= 2)
+ l++;
+ return l - 1;
+}
+
+static void test_reftable_stack_auto_compaction(void)
+{
+ struct reftable_write_options cfg = {
+ .disable_auto_compact = 1,
+ };
+ struct reftable_stack *st = NULL;
+ char *dir = get_tmp_dir(__LINE__);
+
+ int err, i;
+ int N = 100;
+
+ err = reftable_new_stack(&st, dir, cfg);
+ EXPECT_ERR(err);
+
+ for (i = 0; i < N; i++) {
+ char name[100];
+ struct reftable_ref_record ref = {
+ .refname = name,
+ .update_index = reftable_stack_next_update_index(st),
+ .value_type = REFTABLE_REF_SYMREF,
+ .value.symref = "master",
+ };
+ snprintf(name, sizeof(name), "branch%04d", i);
+
+ err = reftable_stack_add(st, &write_test_ref, &ref);
+ EXPECT_ERR(err);
+
+ err = reftable_stack_auto_compact(st);
+ EXPECT_ERR(err);
+ EXPECT(i < 3 || st->merged->stack_len < 2 * fastlog2(i));
+ }
+
+ EXPECT(reftable_stack_compaction_stats(st)->entries_written <
+ (uint64_t)(N * fastlog2(N)));
+
+ reftable_stack_destroy(st);
+ clear_dir(dir);
+}
+
+static void test_reftable_stack_add_performs_auto_compaction(void)
+{
+ struct reftable_write_options cfg = { 0 };
+ struct reftable_stack *st = NULL;
+ struct strbuf refname = STRBUF_INIT;
+ char *dir = get_tmp_dir(__LINE__);
+ int err, i, n = 20;
+
+ err = reftable_new_stack(&st, dir, cfg);
+ EXPECT_ERR(err);
+
+ for (i = 0; i <= n; i++) {
+ struct reftable_ref_record ref = {
+ .update_index = reftable_stack_next_update_index(st),
+ .value_type = REFTABLE_REF_SYMREF,
+ .value.symref = "master",
+ };
+
+ /*
+ * Disable auto-compaction for all but the last run. This way
+ * we can ensure that we indeed honor this setting and have
+ * better control over when exactly auto compaction runs.
+ */
+ st->config.disable_auto_compact = i != n;
+
+ strbuf_reset(&refname);
+ strbuf_addf(&refname, "branch-%04d", i);
+ ref.refname = refname.buf;
+
+ err = reftable_stack_add(st, &write_test_ref, &ref);
+ EXPECT_ERR(err);
+
+ /*
+ * The stack length should grow continuously for all runs where
+ * auto compaction is disabled. When enabled, we should merge
+ * all tables in the stack.
+ */
+ if (i != n)
+ EXPECT(st->merged->stack_len == i + 1);
+ else
+ EXPECT(st->merged->stack_len == 1);
+ }
+
+ reftable_stack_destroy(st);
+ strbuf_release(&refname);
+ clear_dir(dir);
+}
+
+static void test_reftable_stack_compaction_concurrent(void)
+{
+ struct reftable_write_options cfg = { 0 };
+ struct reftable_stack *st1 = NULL, *st2 = NULL;
+ char *dir = get_tmp_dir(__LINE__);
+
+ int err, i;
+ int N = 3;
+
+ err = reftable_new_stack(&st1, dir, cfg);
+ EXPECT_ERR(err);
+
+ for (i = 0; i < N; i++) {
+ char name[100];
+ struct reftable_ref_record ref = {
+ .refname = name,
+ .update_index = reftable_stack_next_update_index(st1),
+ .value_type = REFTABLE_REF_SYMREF,
+ .value.symref = "master",
+ };
+ snprintf(name, sizeof(name), "branch%04d", i);
+
+ err = reftable_stack_add(st1, &write_test_ref, &ref);
+ EXPECT_ERR(err);
+ }
+
+ err = reftable_new_stack(&st2, dir, cfg);
+ EXPECT_ERR(err);
+
+ err = reftable_stack_compact_all(st1, NULL);
+ EXPECT_ERR(err);
+
+ reftable_stack_destroy(st1);
+ reftable_stack_destroy(st2);
+
+ EXPECT(count_dir_entries(dir) == 2);
+ clear_dir(dir);
+}
+
+static void unclean_stack_close(struct reftable_stack *st)
+{
+ /* break abstraction boundary to simulate unclean shutdown. */
+ int i = 0;
+ for (; i < st->readers_len; i++) {
+ reftable_reader_free(st->readers[i]);
+ }
+ st->readers_len = 0;
+ FREE_AND_NULL(st->readers);
+}
+
+static void test_reftable_stack_compaction_concurrent_clean(void)
+{
+ struct reftable_write_options cfg = { 0 };
+ struct reftable_stack *st1 = NULL, *st2 = NULL, *st3 = NULL;
+ char *dir = get_tmp_dir(__LINE__);
+
+ int err, i;
+ int N = 3;
+
+ err = reftable_new_stack(&st1, dir, cfg);
+ EXPECT_ERR(err);
+
+ for (i = 0; i < N; i++) {
+ char name[100];
+ struct reftable_ref_record ref = {
+ .refname = name,
+ .update_index = reftable_stack_next_update_index(st1),
+ .value_type = REFTABLE_REF_SYMREF,
+ .value.symref = "master",
+ };
+ snprintf(name, sizeof(name), "branch%04d", i);
+
+ err = reftable_stack_add(st1, &write_test_ref, &ref);
+ EXPECT_ERR(err);
+ }
+
+ err = reftable_new_stack(&st2, dir, cfg);
+ EXPECT_ERR(err);
+
+ err = reftable_stack_compact_all(st1, NULL);
+ EXPECT_ERR(err);
+
+ unclean_stack_close(st1);
+ unclean_stack_close(st2);
+
+ err = reftable_new_stack(&st3, dir, cfg);
+ EXPECT_ERR(err);
+
+ err = reftable_stack_clean(st3);
+ EXPECT_ERR(err);
+ EXPECT(count_dir_entries(dir) == 2);
+
+ reftable_stack_destroy(st1);
+ reftable_stack_destroy(st2);
+ reftable_stack_destroy(st3);
+
+ clear_dir(dir);
+}
+
+int stack_test_main(int argc, const char *argv[])
+{
+ RUN_TEST(test_empty_add);
+ RUN_TEST(test_names_equal);
+ RUN_TEST(test_parse_names);
+ RUN_TEST(test_read_file);
+ RUN_TEST(test_reflog_expire);
+ RUN_TEST(test_reftable_stack_add);
+ RUN_TEST(test_reftable_stack_add_one);
+ RUN_TEST(test_reftable_stack_auto_compaction);
+ RUN_TEST(test_reftable_stack_add_performs_auto_compaction);
+ RUN_TEST(test_reftable_stack_compaction_concurrent);
+ RUN_TEST(test_reftable_stack_compaction_concurrent_clean);
+ RUN_TEST(test_reftable_stack_hash_id);
+ RUN_TEST(test_reftable_stack_lock_failure);
+ RUN_TEST(test_reftable_stack_log_normalize);
+ RUN_TEST(test_reftable_stack_tombstone);
+ RUN_TEST(test_reftable_stack_transaction_api);
+ RUN_TEST(test_reftable_stack_transaction_api_performs_auto_compaction);
+ RUN_TEST(test_reftable_stack_auto_compaction_fails_gracefully);
+ RUN_TEST(test_reftable_stack_update_index_check);
+ RUN_TEST(test_reftable_stack_uptodate);
+ RUN_TEST(test_reftable_stack_validate_refname);
+ RUN_TEST(test_suggest_compaction_segment);
+ RUN_TEST(test_suggest_compaction_segment_nothing);
+ return 0;
+}
diff --git a/reftable/system.h b/reftable/system.h
new file mode 100644
index 0000000..5d8b6de
--- /dev/null
+++ b/reftable/system.h
@@ -0,0 +1,23 @@
+/*
+Copyright 2020 Google LLC
+
+Use of this source code is governed by a BSD-style
+license that can be found in the LICENSE file or at
+https://developers.google.com/open-source/licenses/bsd
+*/
+
+#ifndef SYSTEM_H
+#define SYSTEM_H
+
+/* This header glues the reftable library to the rest of Git */
+
+#include "git-compat-util.h"
+#include "lockfile.h"
+#include "strbuf.h"
+#include "tempfile.h"
+#include "hash-ll.h" /* hash ID, sizes.*/
+#include "dir.h" /* remove_dir_recursively, for tests.*/
+
+int hash_size(uint32_t id);
+
+#endif
diff --git a/reftable/test_framework.c b/reftable/test_framework.c
new file mode 100644
index 0000000..4066924
--- /dev/null
+++ b/reftable/test_framework.c
@@ -0,0 +1,27 @@
+/*
+Copyright 2020 Google LLC
+
+Use of this source code is governed by a BSD-style
+license that can be found in the LICENSE file or at
+https://developers.google.com/open-source/licenses/bsd
+*/
+
+#include "system.h"
+#include "test_framework.h"
+
+
+void set_test_hash(uint8_t *p, int i)
+{
+ memset(p, (uint8_t)i, hash_size(GIT_SHA1_FORMAT_ID));
+}
+
+ssize_t strbuf_add_void(void *b, const void *data, size_t sz)
+{
+ strbuf_add(b, data, sz);
+ return sz;
+}
+
+int noop_flush(void *arg)
+{
+ return 0;
+}
diff --git a/reftable/test_framework.h b/reftable/test_framework.h
new file mode 100644
index 0000000..687390f
--- /dev/null
+++ b/reftable/test_framework.h
@@ -0,0 +1,61 @@
+/*
+Copyright 2020 Google LLC
+
+Use of this source code is governed by a BSD-style
+license that can be found in the LICENSE file or at
+https://developers.google.com/open-source/licenses/bsd
+*/
+
+#ifndef TEST_FRAMEWORK_H
+#define TEST_FRAMEWORK_H
+
+#include "system.h"
+#include "reftable-error.h"
+
+#define EXPECT_ERR(c) \
+ do { \
+ if (c != 0) { \
+ fflush(stderr); \
+ fflush(stdout); \
+ fprintf(stderr, "%s: %d: error == %d (%s), want 0\n", \
+ __FILE__, __LINE__, c, reftable_error_str(c)); \
+ abort(); \
+ } \
+ } while (0)
+
+#define EXPECT_STREQ(a, b) \
+ do { \
+ if (strcmp(a, b)) { \
+ fflush(stderr); \
+ fflush(stdout); \
+ fprintf(stderr, "%s:%d: %s (%s) != %s (%s)\n", __FILE__, \
+ __LINE__, #a, a, #b, b); \
+ abort(); \
+ } \
+ } while (0)
+
+#define EXPECT(c) \
+ do { \
+ if (!(c)) { \
+ fflush(stderr); \
+ fflush(stdout); \
+ fprintf(stderr, "%s: %d: failed assertion %s\n", __FILE__, \
+ __LINE__, #c); \
+ abort(); \
+ } \
+ } while (0)
+
+#define RUN_TEST(f) \
+ fprintf(stderr, "running %s\n", #f); \
+ fflush(stderr); \
+ f();
+
+void set_test_hash(uint8_t *p, int i);
+
+/* Like strbuf_add, but suitable for passing to reftable_new_writer. */
+ssize_t strbuf_add_void(void *b, const void *data, size_t sz);
+
+int noop_flush(void *);
+
+#endif
diff --git a/reftable/tree.c b/reftable/tree.c
new file mode 100644
index 0000000..528f33a
--- /dev/null
+++ b/reftable/tree.c
@@ -0,0 +1,63 @@
+/*
+Copyright 2020 Google LLC
+
+Use of this source code is governed by a BSD-style
+license that can be found in the LICENSE file or at
+https://developers.google.com/open-source/licenses/bsd
+*/
+
+#include "system.h"
+#include "tree.h"
+
+#include "basics.h"
+
+struct tree_node *tree_search(void *key, struct tree_node **rootp,
+ int (*compare)(const void *, const void *),
+ int insert)
+{
+ int res;
+ if (!*rootp) {
+ if (!insert) {
+ return NULL;
+ } else {
+ struct tree_node *n;
+ REFTABLE_CALLOC_ARRAY(n, 1);
+ n->key = key;
+ *rootp = n;
+ return *rootp;
+ }
+ }
+
+ res = compare(key, (*rootp)->key);
+ if (res < 0)
+ return tree_search(key, &(*rootp)->left, compare, insert);
+ else if (res > 0)
+ return tree_search(key, &(*rootp)->right, compare, insert);
+ return *rootp;
+}
+
+void infix_walk(struct tree_node *t, void (*action)(void *arg, void *key),
+ void *arg)
+{
+ if (t->left) {
+ infix_walk(t->left, action, arg);
+ }
+ action(arg, t->key);
+ if (t->right) {
+ infix_walk(t->right, action, arg);
+ }
+}
+
+void tree_free(struct tree_node *t)
+{
+ if (!t) {
+ return;
+ }
+ if (t->left) {
+ tree_free(t->left);
+ }
+ if (t->right) {
+ tree_free(t->right);
+ }
+ reftable_free(t);
+}
diff --git a/reftable/tree.h b/reftable/tree.h
new file mode 100644
index 0000000..fbdd002
--- /dev/null
+++ b/reftable/tree.h
@@ -0,0 +1,34 @@
+/*
+Copyright 2020 Google LLC
+
+Use of this source code is governed by a BSD-style
+license that can be found in the LICENSE file or at
+https://developers.google.com/open-source/licenses/bsd
+*/
+
+#ifndef TREE_H
+#define TREE_H
+
+/* tree_node is a generic binary search tree. */
+struct tree_node {
+ void *key;
+ struct tree_node *left, *right;
+};
+
+/* Looks for `key` in the tree rooted at `rootp`, using `compare` as the
+ * comparison function. If `insert` is set, the key is inserted when not
+ * found; otherwise, NULL is returned for a missing key.
+ */
+struct tree_node *tree_search(void *key, struct tree_node **rootp,
+ int (*compare)(const void *, const void *),
+ int insert);
+
+/* performs an infix walk of the tree. */
+void infix_walk(struct tree_node *t, void (*action)(void *arg, void *key),
+ void *arg);
+
+/*
+ * Deallocates the tree nodes recursively. Keys should be deallocated
+ * separately by walking over the tree.
+ */
+void tree_free(struct tree_node *t);
+
+#endif
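A short usage sketch of this tree API; the key type and comparison function
are purely illustrative, and tree_test.c below exercises the same calls:

    static int cmp_long(const void *a, const void *b)
    {
        const long *x = a, *y = b;
        return (*x > *y) - (*x < *y);
    }

    static void insert_and_walk(long *keys, size_t n,
                                void (*visit)(void *arg, void *key), void *arg)
    {
        struct tree_node *root = NULL;
        size_t i;

        /* Insert every key; passing insert=1 creates missing nodes. */
        for (i = 0; i < n; i++)
            tree_search(&keys[i], &root, cmp_long, 1);

        /* Visits keys in ascending order according to cmp_long. */
        if (root)
            infix_walk(root, visit, arg);

        /* Frees only the nodes; the keys remain owned by the caller. */
        tree_free(root);
    }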
diff --git a/reftable/tree_test.c b/reftable/tree_test.c
new file mode 100644
index 0000000..6961a65
--- /dev/null
+++ b/reftable/tree_test.c
@@ -0,0 +1,60 @@
+/*
+Copyright 2020 Google LLC
+
+Use of this source code is governed by a BSD-style
+license that can be found in the LICENSE file or at
+https://developers.google.com/open-source/licenses/bsd
+*/
+
+#include "system.h"
+#include "tree.h"
+
+#include "test_framework.h"
+#include "reftable-tests.h"
+
+static int test_compare(const void *a, const void *b)
+{
+ return (char *)a - (char *)b;
+}
+
+struct curry {
+ void *last;
+};
+
+static void check_increasing(void *arg, void *key)
+{
+ struct curry *c = arg;
+ if (c->last) {
+ EXPECT(test_compare(c->last, key) < 0);
+ }
+ c->last = key;
+}
+
+static void test_tree(void)
+{
+ struct tree_node *root = NULL;
+
+ void *values[11] = { NULL };
+ struct tree_node *nodes[11] = { NULL };
+ int i = 1;
+ struct curry c = { NULL };
+ do {
+ nodes[i] = tree_search(values + i, &root, &test_compare, 1);
+ i = (i * 7) % 11;
+ } while (i != 1);
+
+ for (i = 1; i < ARRAY_SIZE(nodes); i++) {
+ EXPECT(values + i == nodes[i]->key);
+ EXPECT(nodes[i] ==
+ tree_search(values + i, &root, &test_compare, 0));
+ }
+
+ infix_walk(root, check_increasing, &c);
+ tree_free(root);
+}
+
+int tree_test_main(int argc, const char *argv[])
+{
+ RUN_TEST(test_tree);
+ return 0;
+}
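
One detail worth calling out in test_tree() above: the update step `i = (i * 7) % 11` visits every index 1..10 exactly once before cycling back to 1, because 7 generates the multiplicative group modulo 11, so the nodes are inserted in a scrambled rather than sorted order. A tiny standalone check of that claim (editorial sketch, not part of this diff):

#include <stdio.h>

int main(void)
{
	int seen = 0, steps = 0, i = 1;
	do {
		seen |= 1 << i;	/* mark index i as visited */
		i = (i * 7) % 11;
		steps++;
	} while (i != 1);
	/* prints steps=10 seen=0x7fe: every index 1..10 was visited once */
	printf("steps=%d seen=%#x\n", steps, seen);
	return 0;
}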
diff --git a/reftable/writer.c b/reftable/writer.c
new file mode 100644
index 0000000..1d9ff0f
--- /dev/null
+++ b/reftable/writer.c
@@ -0,0 +1,713 @@
+/*
+Copyright 2020 Google LLC
+
+Use of this source code is governed by a BSD-style
+license that can be found in the LICENSE file or at
+https://developers.google.com/open-source/licenses/bsd
+*/
+
+#include "writer.h"
+
+#include "system.h"
+
+#include "block.h"
+#include "constants.h"
+#include "record.h"
+#include "tree.h"
+#include "reftable-error.h"
+
+/* finishes a block, and writes it to storage */
+static int writer_flush_block(struct reftable_writer *w);
+
+/* deallocates memory related to the index */
+static void writer_clear_index(struct reftable_writer *w);
+
+/* finishes writing an 'r' (refs) or 'g' (reflogs) section */
+static int writer_finish_public_section(struct reftable_writer *w);
+
+static struct reftable_block_stats *
+writer_reftable_block_stats(struct reftable_writer *w, uint8_t typ)
+{
+ switch (typ) {
+ case 'r':
+ return &w->stats.ref_stats;
+ case 'o':
+ return &w->stats.obj_stats;
+ case 'i':
+ return &w->stats.idx_stats;
+ case 'g':
+ return &w->stats.log_stats;
+ }
+ abort();
+ return NULL;
+}
+
+/* write data, queuing the padding for the next write. Returns negative for
+ * error. */
+static int padded_write(struct reftable_writer *w, uint8_t *data, size_t len,
+ int padding)
+{
+ int n = 0;
+ if (w->pending_padding > 0) {
+ uint8_t *zeroed = reftable_calloc(w->pending_padding, sizeof(*zeroed));
+ int n = w->write(w->write_arg, zeroed, w->pending_padding);
+ if (n < 0)
+ return n;
+
+ w->pending_padding = 0;
+ reftable_free(zeroed);
+ }
+
+ w->pending_padding = padding;
+ n = w->write(w->write_arg, data, len);
+ if (n < 0)
+ return n;
+ n += padding;
+ return 0;
+}
+
+static void options_set_defaults(struct reftable_write_options *opts)
+{
+ if (opts->restart_interval == 0) {
+ opts->restart_interval = 16;
+ }
+
+ if (opts->hash_id == 0) {
+ opts->hash_id = GIT_SHA1_FORMAT_ID;
+ }
+ if (opts->block_size == 0) {
+ opts->block_size = DEFAULT_BLOCK_SIZE;
+ }
+}
+
+static int writer_version(struct reftable_writer *w)
+{
+ return (w->opts.hash_id == 0 || w->opts.hash_id == GIT_SHA1_FORMAT_ID) ?
+ 1 :
+ 2;
+}
+
+static int writer_write_header(struct reftable_writer *w, uint8_t *dest)
+{
+ memcpy(dest, "REFT", 4);
+
+ dest[4] = writer_version(w);
+
+ put_be24(dest + 5, w->opts.block_size);
+ put_be64(dest + 8, w->min_update_index);
+ put_be64(dest + 16, w->max_update_index);
+ if (writer_version(w) == 2) {
+ put_be32(dest + 24, w->opts.hash_id);
+ }
+ return header_size(writer_version(w));
+}
+
+static void writer_reinit_block_writer(struct reftable_writer *w, uint8_t typ)
+{
+ int block_start = 0;
+ if (w->next == 0) {
+ block_start = header_size(writer_version(w));
+ }
+
+ strbuf_release(&w->last_key);
+ block_writer_init(&w->block_writer_data, typ, w->block,
+ w->opts.block_size, block_start,
+ hash_size(w->opts.hash_id));
+ w->block_writer = &w->block_writer_data;
+ w->block_writer->restart_interval = w->opts.restart_interval;
+}
+
+static struct strbuf reftable_empty_strbuf = STRBUF_INIT;
+
+struct reftable_writer *
+reftable_new_writer(ssize_t (*writer_func)(void *, const void *, size_t),
+ int (*flush_func)(void *),
+ void *writer_arg, struct reftable_write_options *opts)
+{
+ struct reftable_writer *wp = reftable_calloc(1, sizeof(*wp));
+ strbuf_init(&wp->block_writer_data.last_key, 0);
+ options_set_defaults(opts);
+ if (opts->block_size >= (1 << 24)) {
+ /* TODO - error return? */
+ abort();
+ }
+ wp->last_key = reftable_empty_strbuf;
+ REFTABLE_CALLOC_ARRAY(wp->block, opts->block_size);
+ wp->write = writer_func;
+ wp->write_arg = writer_arg;
+ wp->opts = *opts;
+ wp->flush = flush_func;
+ writer_reinit_block_writer(wp, BLOCK_TYPE_REF);
+
+ return wp;
+}
+
+void reftable_writer_set_limits(struct reftable_writer *w, uint64_t min,
+ uint64_t max)
+{
+ w->min_update_index = min;
+ w->max_update_index = max;
+}
+
+void reftable_writer_free(struct reftable_writer *w)
+{
+ if (!w)
+ return;
+ reftable_free(w->block);
+ reftable_free(w);
+}
+
+struct obj_index_tree_node {
+ struct strbuf hash;
+ uint64_t *offsets;
+ size_t offset_len;
+ size_t offset_cap;
+};
+
+#define OBJ_INDEX_TREE_NODE_INIT \
+ { \
+ .hash = STRBUF_INIT \
+ }
+
+static int obj_index_tree_node_compare(const void *a, const void *b)
+{
+ return strbuf_cmp(&((const struct obj_index_tree_node *)a)->hash,
+ &((const struct obj_index_tree_node *)b)->hash);
+}
+
+static void writer_index_hash(struct reftable_writer *w, struct strbuf *hash)
+{
+ uint64_t off = w->next;
+
+ struct obj_index_tree_node want = { .hash = *hash };
+
+ struct tree_node *node = tree_search(&want, &w->obj_index_tree,
+ &obj_index_tree_node_compare, 0);
+ struct obj_index_tree_node *key = NULL;
+ if (!node) {
+ struct obj_index_tree_node empty = OBJ_INDEX_TREE_NODE_INIT;
+ key = reftable_malloc(sizeof(struct obj_index_tree_node));
+ *key = empty;
+
+ strbuf_reset(&key->hash);
+ strbuf_addbuf(&key->hash, hash);
+ tree_search((void *)key, &w->obj_index_tree,
+ &obj_index_tree_node_compare, 1);
+ } else {
+ key = node->key;
+ }
+
+ if (key->offset_len > 0 && key->offsets[key->offset_len - 1] == off) {
+ return;
+ }
+
+ REFTABLE_ALLOC_GROW(key->offsets, key->offset_len + 1, key->offset_cap);
+ key->offsets[key->offset_len++] = off;
+}
+
+static int writer_add_record(struct reftable_writer *w,
+ struct reftable_record *rec)
+{
+ struct strbuf key = STRBUF_INIT;
+ int err = -1;
+ reftable_record_key(rec, &key);
+ if (strbuf_cmp(&w->last_key, &key) >= 0) {
+ err = REFTABLE_API_ERROR;
+ goto done;
+ }
+
+ strbuf_reset(&w->last_key);
+ strbuf_addbuf(&w->last_key, &key);
+ if (!w->block_writer) {
+ writer_reinit_block_writer(w, reftable_record_type(rec));
+ }
+
+ assert(block_writer_type(w->block_writer) == reftable_record_type(rec));
+
+ if (block_writer_add(w->block_writer, rec) == 0) {
+ err = 0;
+ goto done;
+ }
+
+ err = writer_flush_block(w);
+ if (err < 0) {
+ goto done;
+ }
+
+ writer_reinit_block_writer(w, reftable_record_type(rec));
+ err = block_writer_add(w->block_writer, rec);
+ if (err == -1) {
+ /* we are writing into memory, so an error can only mean it
+ * doesn't fit. */
+ err = REFTABLE_ENTRY_TOO_BIG_ERROR;
+ goto done;
+ }
+
+done:
+ strbuf_release(&key);
+ return err;
+}
+
+int reftable_writer_add_ref(struct reftable_writer *w,
+ struct reftable_ref_record *ref)
+{
+ struct reftable_record rec = {
+ .type = BLOCK_TYPE_REF,
+ .u = {
+ .ref = *ref
+ },
+ };
+ int err = 0;
+
+ if (!ref->refname)
+ return REFTABLE_API_ERROR;
+ if (ref->update_index < w->min_update_index ||
+ ref->update_index > w->max_update_index)
+ return REFTABLE_API_ERROR;
+
+ rec.u.ref.update_index -= w->min_update_index;
+
+ err = writer_add_record(w, &rec);
+ if (err < 0)
+ return err;
+
+ if (!w->opts.skip_index_objects && reftable_ref_record_val1(ref)) {
+ struct strbuf h = STRBUF_INIT;
+ strbuf_add(&h, (char *)reftable_ref_record_val1(ref),
+ hash_size(w->opts.hash_id));
+ writer_index_hash(w, &h);
+ strbuf_release(&h);
+ }
+
+ if (!w->opts.skip_index_objects && reftable_ref_record_val2(ref)) {
+ struct strbuf h = STRBUF_INIT;
+ strbuf_add(&h, reftable_ref_record_val2(ref),
+ hash_size(w->opts.hash_id));
+ writer_index_hash(w, &h);
+ strbuf_release(&h);
+ }
+ return 0;
+}
+
+int reftable_writer_add_refs(struct reftable_writer *w,
+ struct reftable_ref_record *refs, int n)
+{
+ int err = 0;
+ int i = 0;
+ QSORT(refs, n, reftable_ref_record_compare_name);
+ for (i = 0; err == 0 && i < n; i++) {
+ err = reftable_writer_add_ref(w, &refs[i]);
+ }
+ return err;
+}
+
+static int reftable_writer_add_log_verbatim(struct reftable_writer *w,
+ struct reftable_log_record *log)
+{
+ struct reftable_record rec = {
+ .type = BLOCK_TYPE_LOG,
+ .u = {
+ .log = *log,
+ },
+ };
+ if (w->block_writer &&
+ block_writer_type(w->block_writer) == BLOCK_TYPE_REF) {
+ int err = writer_finish_public_section(w);
+ if (err < 0)
+ return err;
+ }
+
+ w->next -= w->pending_padding;
+ w->pending_padding = 0;
+ return writer_add_record(w, &rec);
+}
+
+int reftable_writer_add_log(struct reftable_writer *w,
+ struct reftable_log_record *log)
+{
+ char *input_log_message = NULL;
+ struct strbuf cleaned_message = STRBUF_INIT;
+ int err = 0;
+
+ if (log->value_type == REFTABLE_LOG_DELETION)
+ return reftable_writer_add_log_verbatim(w, log);
+
+ if (!log->refname)
+ return REFTABLE_API_ERROR;
+
+ input_log_message = log->value.update.message;
+ if (!w->opts.exact_log_message && log->value.update.message) {
+ strbuf_addstr(&cleaned_message, log->value.update.message);
+ while (cleaned_message.len &&
+ cleaned_message.buf[cleaned_message.len - 1] == '\n')
+ strbuf_setlen(&cleaned_message,
+ cleaned_message.len - 1);
+ if (strchr(cleaned_message.buf, '\n')) {
+ /* multiple lines not allowed. */
+ err = REFTABLE_API_ERROR;
+ goto done;
+ }
+ strbuf_addstr(&cleaned_message, "\n");
+ log->value.update.message = cleaned_message.buf;
+ }
+
+ err = reftable_writer_add_log_verbatim(w, log);
+ log->value.update.message = input_log_message;
+done:
+ strbuf_release(&cleaned_message);
+ return err;
+}
+
+int reftable_writer_add_logs(struct reftable_writer *w,
+ struct reftable_log_record *logs, int n)
+{
+ int err = 0;
+ int i = 0;
+ QSORT(logs, n, reftable_log_record_compare_key);
+
+ for (i = 0; err == 0 && i < n; i++) {
+ err = reftable_writer_add_log(w, &logs[i]);
+ }
+ return err;
+}
+
+static int writer_finish_section(struct reftable_writer *w)
+{
+ struct reftable_block_stats *bstats = NULL;
+ uint8_t typ = block_writer_type(w->block_writer);
+ uint64_t index_start = 0;
+ int max_level = 0;
+ size_t threshold = w->opts.unpadded ? 1 : 3;
+ int before_blocks = w->stats.idx_stats.blocks;
+ int err;
+
+ err = writer_flush_block(w);
+ if (err < 0)
+ return err;
+
+ /*
+ * When the section we are about to index has a lot of blocks then the
+ * index itself may span across multiple blocks, as well. This would
+ * require a linear scan over index blocks only to find the desired
+ * indexed block, which is inefficient. Instead, we write a multi-level
+ * index where index records of level N+1 will refer to index blocks of
+ * level N. This isn't constant time, either, but at least logarithmic.
+ *
+ * This loop handles writing this multi-level index. Note that we write
+ * the lowest-level index pointing to the indexed blocks first. We then
+ * continue writing additional index levels until the current level has
+	 * fewer blocks than the threshold so that the highest level will be at
+ * the end of the index section.
+ *
+ * Readers are thus required to start reading the index section from
+ * its end, which is why we set `index_start` to the beginning of the
+ * last index section.
+ */
+ while (w->index_len > threshold) {
+ struct reftable_index_record *idx = NULL;
+ size_t i, idx_len;
+
+ max_level++;
+ index_start = w->next;
+ writer_reinit_block_writer(w, BLOCK_TYPE_INDEX);
+
+ idx = w->index;
+ idx_len = w->index_len;
+
+ w->index = NULL;
+ w->index_len = 0;
+ w->index_cap = 0;
+ for (i = 0; i < idx_len; i++) {
+ struct reftable_record rec = {
+ .type = BLOCK_TYPE_INDEX,
+ .u = {
+ .idx = idx[i],
+ },
+ };
+
+ err = writer_add_record(w, &rec);
+ if (err < 0)
+ return err;
+ }
+
+ err = writer_flush_block(w);
+ if (err < 0)
+ return err;
+
+ for (i = 0; i < idx_len; i++)
+ strbuf_release(&idx[i].last_key);
+ reftable_free(idx);
+ }
+
+ /*
+	 * The index may still contain a number of index records lower than the
+ * threshold. Clear it so that these entries don't leak into the next
+ * index section.
+ */
+ writer_clear_index(w);
+
+ bstats = writer_reftable_block_stats(w, typ);
+ bstats->index_blocks = w->stats.idx_stats.blocks - before_blocks;
+ bstats->index_offset = index_start;
+ bstats->max_index_level = max_level;
+
+	/* Reinit last_key, as the next section can start with any key. */
+ w->last_key.len = 0;
+
+ return 0;
+}
+
+struct common_prefix_arg {
+ struct strbuf *last;
+ int max;
+};
+
+static void update_common(void *void_arg, void *key)
+{
+ struct common_prefix_arg *arg = void_arg;
+ struct obj_index_tree_node *entry = key;
+ if (arg->last) {
+ int n = common_prefix_size(&entry->hash, arg->last);
+ if (n > arg->max) {
+ arg->max = n;
+ }
+ }
+ arg->last = &entry->hash;
+}
+
+struct write_record_arg {
+ struct reftable_writer *w;
+ int err;
+};
+
+static void write_object_record(void *void_arg, void *key)
+{
+ struct write_record_arg *arg = void_arg;
+ struct obj_index_tree_node *entry = key;
+ struct reftable_record
+ rec = { .type = BLOCK_TYPE_OBJ,
+ .u.obj = {
+ .hash_prefix = (uint8_t *)entry->hash.buf,
+ .hash_prefix_len = arg->w->stats.object_id_len,
+ .offsets = entry->offsets,
+ .offset_len = entry->offset_len,
+ } };
+ if (arg->err < 0)
+ goto done;
+
+ arg->err = block_writer_add(arg->w->block_writer, &rec);
+ if (arg->err == 0)
+ goto done;
+
+ arg->err = writer_flush_block(arg->w);
+ if (arg->err < 0)
+ goto done;
+
+ writer_reinit_block_writer(arg->w, BLOCK_TYPE_OBJ);
+ arg->err = block_writer_add(arg->w->block_writer, &rec);
+ if (arg->err == 0)
+ goto done;
+
+ rec.u.obj.offset_len = 0;
+ arg->err = block_writer_add(arg->w->block_writer, &rec);
+
+ /* Should be able to write into a fresh block. */
+ assert(arg->err == 0);
+
+done:;
+}
+
+static void object_record_free(void *void_arg, void *key)
+{
+ struct obj_index_tree_node *entry = key;
+
+ FREE_AND_NULL(entry->offsets);
+ strbuf_release(&entry->hash);
+ reftable_free(entry);
+}
+
+static int writer_dump_object_index(struct reftable_writer *w)
+{
+ struct write_record_arg closure = { .w = w };
+ struct common_prefix_arg common = {
+ .max = 1, /* obj_id_len should be >= 2. */
+ };
+ if (w->obj_index_tree) {
+ infix_walk(w->obj_index_tree, &update_common, &common);
+ }
+ w->stats.object_id_len = common.max + 1;
+
+ writer_reinit_block_writer(w, BLOCK_TYPE_OBJ);
+
+ if (w->obj_index_tree) {
+ infix_walk(w->obj_index_tree, &write_object_record, &closure);
+ }
+
+ if (closure.err < 0)
+ return closure.err;
+ return writer_finish_section(w);
+}
+
+static int writer_finish_public_section(struct reftable_writer *w)
+{
+ uint8_t typ = 0;
+ int err = 0;
+
+ if (!w->block_writer)
+ return 0;
+
+ typ = block_writer_type(w->block_writer);
+ err = writer_finish_section(w);
+ if (err < 0)
+ return err;
+ if (typ == BLOCK_TYPE_REF && !w->opts.skip_index_objects &&
+ w->stats.ref_stats.index_blocks > 0) {
+ err = writer_dump_object_index(w);
+ if (err < 0)
+ return err;
+ }
+
+ if (w->obj_index_tree) {
+ infix_walk(w->obj_index_tree, &object_record_free, NULL);
+ tree_free(w->obj_index_tree);
+ w->obj_index_tree = NULL;
+ }
+
+ w->block_writer = NULL;
+ return 0;
+}
+
+int reftable_writer_close(struct reftable_writer *w)
+{
+ uint8_t footer[72];
+ uint8_t *p = footer;
+ int err = writer_finish_public_section(w);
+ int empty_table = w->next == 0;
+ if (err != 0)
+ goto done;
+ w->pending_padding = 0;
+ if (empty_table) {
+ /* Empty tables need a header anyway. */
+ uint8_t header[28];
+ int n = writer_write_header(w, header);
+ err = padded_write(w, header, n, 0);
+ if (err < 0)
+ goto done;
+ }
+
+ p += writer_write_header(w, footer);
+ put_be64(p, w->stats.ref_stats.index_offset);
+ p += 8;
+ put_be64(p, (w->stats.obj_stats.offset) << 5 | w->stats.object_id_len);
+ p += 8;
+ put_be64(p, w->stats.obj_stats.index_offset);
+ p += 8;
+
+ put_be64(p, w->stats.log_stats.offset);
+ p += 8;
+ put_be64(p, w->stats.log_stats.index_offset);
+ p += 8;
+
+ put_be32(p, crc32(0, footer, p - footer));
+ p += 4;
+
+ err = w->flush(w->write_arg);
+ if (err < 0) {
+ err = REFTABLE_IO_ERROR;
+ goto done;
+ }
+
+ err = padded_write(w, footer, footer_size(writer_version(w)), 0);
+ if (err < 0)
+ goto done;
+
+ if (empty_table) {
+ err = REFTABLE_EMPTY_TABLE_ERROR;
+ goto done;
+ }
+
+done:
+ /* free up memory. */
+ block_writer_release(&w->block_writer_data);
+ writer_clear_index(w);
+ strbuf_release(&w->last_key);
+ return err;
+}
+
+static void writer_clear_index(struct reftable_writer *w)
+{
+ for (size_t i = 0; i < w->index_len; i++)
+ strbuf_release(&w->index[i].last_key);
+ FREE_AND_NULL(w->index);
+ w->index_len = 0;
+ w->index_cap = 0;
+}
+
+static const int debug = 0;
+
+static int writer_flush_nonempty_block(struct reftable_writer *w)
+{
+ uint8_t typ = block_writer_type(w->block_writer);
+ struct reftable_block_stats *bstats =
+ writer_reftable_block_stats(w, typ);
+ uint64_t block_typ_off = (bstats->blocks == 0) ? w->next : 0;
+ int raw_bytes = block_writer_finish(w->block_writer);
+ int padding = 0;
+ int err = 0;
+ struct reftable_index_record ir = { .last_key = STRBUF_INIT };
+ if (raw_bytes < 0)
+ return raw_bytes;
+
+ if (!w->opts.unpadded && typ != BLOCK_TYPE_LOG) {
+ padding = w->opts.block_size - raw_bytes;
+ }
+
+ if (block_typ_off > 0) {
+ bstats->offset = block_typ_off;
+ }
+
+ bstats->entries += w->block_writer->entries;
+ bstats->restarts += w->block_writer->restart_len;
+ bstats->blocks++;
+ w->stats.blocks++;
+
+ if (debug) {
+ fprintf(stderr, "block %c off %" PRIu64 " sz %d (%d)\n", typ,
+ w->next, raw_bytes,
+ get_be24(w->block + w->block_writer->header_off + 1));
+ }
+
+ if (w->next == 0) {
+ writer_write_header(w, w->block);
+ }
+
+ err = padded_write(w, w->block, raw_bytes, padding);
+ if (err < 0)
+ return err;
+
+ REFTABLE_ALLOC_GROW(w->index, w->index_len + 1, w->index_cap);
+
+ ir.offset = w->next;
+ strbuf_reset(&ir.last_key);
+ strbuf_addbuf(&ir.last_key, &w->block_writer->last_key);
+ w->index[w->index_len] = ir;
+
+ w->index_len++;
+ w->next += padding + raw_bytes;
+ w->block_writer = NULL;
+ return 0;
+}
+
+static int writer_flush_block(struct reftable_writer *w)
+{
+ if (!w->block_writer)
+ return 0;
+ if (w->block_writer->entries == 0)
+ return 0;
+ return writer_flush_nonempty_block(w);
+}
+
+const struct reftable_stats *reftable_writer_stats(struct reftable_writer *w)
+{
+ return &w->stats;
+}
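
To tie the pieces above together: a writer is constructed over an arbitrary byte sink, given an update-index range, fed records, and closed. A minimal sketch (illustrative, not part of this diff) against an in-memory strbuf, using the strbuf_add_void()/noop_flush() helpers declared in test_framework.h; write_empty_sketch is a hypothetical name, and since no records are added the close reports REFTABLE_EMPTY_TABLE_ERROR while still emitting a header and footer:

#include "system.h"
#include "reftable-writer.h"
#include "reftable-error.h"
#include "test_framework.h"

static void write_empty_sketch(void)
{
	struct strbuf buf = STRBUF_INIT;
	/* zeroed options pick up the defaults from options_set_defaults() */
	struct reftable_write_options opts = { 0 };
	struct reftable_writer *w =
		reftable_new_writer(&strbuf_add_void, &noop_flush, &buf, &opts);

	/* Any refs/logs added later must carry update indices in this range. */
	reftable_writer_set_limits(w, 1, 1);

	/* No records were added: close still writes a header and footer,
	 * but reports the table as empty. */
	if (reftable_writer_close(w) == REFTABLE_EMPTY_TABLE_ERROR)
		fprintf(stderr, "empty table written (%u bytes)\n",
			(unsigned)buf.len);

	reftable_writer_free(w);
	strbuf_release(&buf);
}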
diff --git a/reftable/writer.h b/reftable/writer.h
new file mode 100644
index 0000000..8d0df9c
--- /dev/null
+++ b/reftable/writer.h
@@ -0,0 +1,51 @@
+/*
+Copyright 2020 Google LLC
+
+Use of this source code is governed by a BSD-style
+license that can be found in the LICENSE file or at
+https://developers.google.com/open-source/licenses/bsd
+*/
+
+#ifndef WRITER_H
+#define WRITER_H
+
+#include "basics.h"
+#include "block.h"
+#include "tree.h"
+#include "reftable-writer.h"
+
+struct reftable_writer {
+ ssize_t (*write)(void *, const void *, size_t);
+ int (*flush)(void *);
+ void *write_arg;
+ int pending_padding;
+ struct strbuf last_key;
+
+ /* offset of next block to write. */
+ uint64_t next;
+ uint64_t min_update_index, max_update_index;
+ struct reftable_write_options opts;
+
+ /* memory buffer for writing */
+ uint8_t *block;
+
+ /* writer for the current section. NULL or points to
+ * block_writer_data */
+ struct block_writer *block_writer;
+
+ struct block_writer block_writer_data;
+
+ /* pending index records for the current section */
+ struct reftable_index_record *index;
+ size_t index_len;
+ size_t index_cap;
+
+	/*
+	 * tree of obj_index_tree_node (managed via tree.h's tree_search);
+	 * used to populate the 'o' inverse OID map */
+ struct tree_node *obj_index_tree;
+
+ struct reftable_stats stats;
+};
+
+#endif