author    Elijah Newren <newren@gmail.com>      2020-11-02 18:55:05 (GMT)
committer Junio C Hamano <gitster@pobox.com>    2020-11-02 20:15:50 (GMT)
commit    6da1a258142ac2422c8c57c54b92eaed3c86226e
tree      480a35cb1c8754d02f21233d2efd8e5d28ff24d0 /hashmap.c
parent    33f20d82177871225e17d9dd44169a52a36c9f1d
hashmap: provide deallocation function names
hashmap_free(), hashmap_free_entries(), and hashmap_free_() have existed for a while, but aren't necessarily the clearest names, especially with hashmap_partial_clear() being added to the mix and lazy-initialization now being supported. Peff suggested we adopt the following names[1]:

- hashmap_clear() - remove all entries and de-allocate any hashmap-specific data, but be ready for reuse
- hashmap_clear_and_free() - ditto, but free the entries themselves
- hashmap_partial_clear() - remove all entries but don't deallocate table
- hashmap_partial_clear_and_free() - ditto, but free the entries

This patch provides the new names and converts all existing callers over to the new naming scheme.

[1] https://lore.kernel.org/git/20201030125059.GA3277724@coredump.intra.peff.net/

Signed-off-by: Elijah Newren <newren@gmail.com>
Signed-off-by: Junio C Hamano <gitster@pobox.com>
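For callers, the rename is mostly mechanical. A minimal sketch of the two full-clear variants, assuming git's hashmap.h as of this commit and the (map, type, member) macro arguments the old hashmap_free_entries() took; struct my_entry, its fields, and reset_map() are hypothetical:

#include "git-compat-util.h"
#include "hashmap.h"

/* hypothetical caller-side type embedding a hashmap_entry */
struct my_entry {
	struct hashmap_entry ent;	/* embedded entry linked into the map */
	char *name;			/* separately allocated fields still need their own cleanup */
};

static void reset_map(struct hashmap *map, int free_entries)
{
	if (free_entries)
		/*
		 * Remove everything, free the table, and free each
		 * containing struct my_entry (formerly hashmap_free_entries()).
		 */
		hashmap_clear_and_free(map, struct my_entry, ent);
	else
		/*
		 * Remove everything and free the table, but leave the map
		 * ready for reuse (formerly hashmap_free()).
		 */
		hashmap_clear(map);
}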
Diffstat (limited to 'hashmap.c')
-rw-r--r--	hashmap.c	6
1 file changed, 3 insertions, 3 deletions
diff --git a/hashmap.c b/hashmap.c
index 922ed07..5009471 100644
--- a/hashmap.c
+++ b/hashmap.c
@@ -183,7 +183,7 @@ static void free_individual_entries(struct hashmap *map, ssize_t entry_offset)
 	while ((e = hashmap_iter_next(&iter)))
 		/*
 		 * like container_of, but using caller-calculated
-		 * offset (caller being hashmap_free_entries)
+		 * offset (caller being hashmap_clear_and_free)
 		 */
 		free((char *)e - entry_offset);
 }
@@ -199,11 +199,11 @@ void hashmap_partial_clear_(struct hashmap *map, ssize_t entry_offset)
 	map->private_size = 0;
 }
 
-void hashmap_free_(struct hashmap *map, ssize_t entry_offset)
+void hashmap_clear_(struct hashmap *map, ssize_t entry_offset)
 {
 	if (!map || !map->table)
 		return;
-	if (entry_offset >= 0) /* called by hashmap_free_entries */
+	if (entry_offset >= 0) /* called by hashmap_clear_and_free */
 		free_individual_entries(map, entry_offset);
 	free(map->table);
 	memset(map, 0, sizeof(*map));
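The entry_offset arithmetic above is the usual container_of trick: the iterator hands back a pointer to the embedded hashmap_entry, and subtracting that member's byte offset recovers the start of the containing allocation, which is what free() needs. A self-contained sketch of that pointer math, independent of git's hashmap and using made-up struct names:

#include <stddef.h>
#include <stdio.h>
#include <stdlib.h>

/* stand-in for a hashmap_entry embedded inside a caller-defined struct */
struct inner { unsigned int hash; };

struct outer {
	char label[16];
	struct inner ent;	/* embedded member, deliberately not at offset 0 */
};

int main(void)
{
	struct outer *o = calloc(1, sizeof(*o));
	struct inner *e = &o->ent;	/* what an iterator would hand back */
	size_t entry_offset = offsetof(struct outer, ent);

	/* subtract the member offset to recover the containing struct */
	struct outer *recovered = (struct outer *)((char *)e - entry_offset);
	printf("recovered == o: %s\n", recovered == o ? "yes" : "no");

	/* freeing the recovered pointer releases the whole allocation */
	free(recovered);
	return 0;
}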