/*
 * alloc.c  - specialized allocator for internal objects
 *
 * Copyright (C) 2006 Linus Torvalds
 *
 * The standard malloc/free wastes too much space for objects, partly because
 * it maintains all the allocation infrastructure, but even more because it ends
 * up with maximal alignment because it doesn't know what the object alignment
 * for the new allocation is.
 */
#include "cache.h"
#include "object.h"
#include "blob.h"
#include "tree.h"
#include "commit.h"
#include "tag.h"
#include "alloc.h"
 
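/* Number of nodes handed out from each slab allocation. */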
#define BLOCKING 1024
 
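/*
 * Big enough to hold any of the object types; alloc_object_node() uses
 * its size so an object allocated as OBJ_NONE can later be converted
 * in place to any concrete type.
 */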
union any_object {
	struct object object;
	struct blob blob;
	struct tree tree;
	struct commit commit;
	struct tag tag;
};
 
struct alloc_state {
	int nr;    /* number of nodes left in current allocation */
	void *p;   /* first free node in current allocation */
 
	/* bookkeeping of allocations */
	void **slabs;
	int slab_nr, slab_alloc;
};
 
struct alloc_state *allocate_alloc_state(void)
{
	return xcalloc(1, sizeof(struct alloc_state));
}
 
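/*
 * Free every slab at once, plus the bookkeeping array. Individual
 * nodes handed out by alloc_node() are never freed one by one.
 */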
void clear_alloc_state(struct alloc_state *s)
{
	while (s->slab_nr > 0) {
		s->slab_nr--;
		free(s->slabs[s->slab_nr]);
	}
 
	FREE_AND_NULL(s->slabs);
}
 
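/*
 * Hand out the next zeroed node from the current slab; when the slab
 * is exhausted, allocate a fresh slab of BLOCKING nodes and record it
 * so clear_alloc_state() can free it later.
 */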
static inline void *alloc_node(struct alloc_state *s, size_t node_size)
{
	void *ret;
 
	if (!s->nr) {
		s->nr = BLOCKING;
		s->p = xmalloc(BLOCKING * node_size);
 
		ALLOC_GROW(s->slabs, s->slab_nr + 1, s->slab_alloc);
		s->slabs[s->slab_nr++] = s->p;
	}
	s->nr--;
	ret = s->p;
	s->p = (char *)s->p + node_size;
	memset(ret, 0, node_size);
 
	return ret;
}
 
void *alloc_blob_node(struct repository *r)
{
	struct blob *b = alloc_node(r->parsed_objects->blob_state, sizeof(struct blob));
	b->object.type = OBJ_BLOB;
	return b;
}
 
void *alloc_tree_node(struct repository *r)
{
	struct tree *t = alloc_node(r->parsed_objects->tree_state, sizeof(struct tree));
	t->object.type = OBJ_TREE;
	return t;
}
 
void *alloc_tag_node(struct repository *r)
{
	struct tag *t = alloc_node(r->parsed_objects->tag_state, sizeof(struct tag));
	t->object.type = OBJ_TAG;
	return t;
}
 
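/*
 * Unlike the typed allocators above, this reserves room for the
 * largest object type (union any_object), since an OBJ_NONE object
 * may later turn out to be a blob, tree, commit or tag.
 */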
void *alloc_object_node(struct repository *r)
{
	struct object *obj = alloc_node(r->parsed_objects->object_state, sizeof(union any_object));
	obj->type = OBJ_NONE;
	return obj;
}
 
/*
 * The returned count is to be used as an index into commit slabs,
 * that are *NOT* maintained per repository, and that is why a single
 * global counter is used.
 */
static unsigned int alloc_commit_index(void)
{
	static unsigned int parsed_commits_count;
	return parsed_commits_count++;
}
 
void init_commit_node(struct commit *c)
{
	c->object.type = OBJ_COMMIT;
	c->index = alloc_commit_index();
}
 
void *alloc_commit_node(struct repository *r)
{
	struct commit *c = alloc_node(r->parsed_objects->commit_state, sizeof(struct commit));
	init_commit_node(c);
	return c;
}
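
/*
 * A minimal usage sketch (hypothetical caller, not part of this file):
 * lookup code asks the repository's pool for a typed node instead of
 * calling malloc() per object, e.g.
 *
 *	struct commit *c = alloc_commit_node(r);
 *
 * Nodes live until the pool itself is torn down; there is no per-node
 * free, only clear_alloc_state() on each alloc_state.
 */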