/* alloc-slab.c - Slab debugging allocator
 * Copyright 2005 srvx Development Team
 *
 * This file is part of srvx.
 *
 * srvx is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
#if defined(HAVE_SYS_MMAN_H)
# include <sys/mman.h>

#if !defined(HAVE_MMAP)
# error The slab allocator requires that your system have the mmap() system call.
#define ALLOC_MAGIC 0x1acf
#define FREE_MAGIC  0xfc1d

    unsigned int file_id : 8;
    unsigned int size    : 24;
    unsigned int line    : 16;
    unsigned int magic   : 16;
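
    /* Note: the four bit-fields above total 64 bits, so on common ABIs the
     * debug header adds an eight-byte prefix to every allocation; the 24-bit
     * size field also caps a single allocation at just under 16 MiB, which
     * matches the assert(size < 1 << 24) in slab_malloc() below. */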
static const char *file_ids[256];
static struct file_id_entry {
unsigned int file_ids_used;
file_id_cmp(const void *a_, const void *b_)
    return strcmp(*(const char**)a_, *(const char**)b_);

get_file_id(const char *fname)
    struct file_id_entry *entry;

    entry = bsearch(&fname, file_id_map, file_ids_used, sizeof(file_id_map[0]), file_id_cmp);
    entry = file_id_map + file_ids_used;
    file_ids[file_ids_used] = fname;
    entry->id = file_ids_used;
    qsort(file_id_map, ++file_ids_used, sizeof(file_id_map[0]), file_id_cmp);
    return file_ids_used - 1;
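
/* Source files are interned into file_id_map, which is kept sorted by name so
 * get_file_id() can use bsearch(); a miss appends the new name and re-sorts
 * with qsort().  With file_ids[256] and the 8-bit file_id field in the debug
 * header, at most 256 distinct source files can be tracked. */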
typedef struct alloc_header alloc_header_t;

typedef size_t alloc_header_t;

    struct slabset *parent;

    size_t items_per_slab;
#define SLAB_MIN     (2 * sizeof(void*))
#define SLAB_GRAIN   sizeof(void*)
#define SLAB_ALIGN   SLAB_GRAIN
#define SMALL_CUTOFF 576
/* Element size < SMALL_CUTOFF -> use small slabs.
 * Larger elements are allocated directly using mmap().  The largest
 * regularly allocated struct in srvx 1.x is smaller than
 * SMALL_CUTOFF, so there is not much point in coding support for
static struct slabset little_slabs[SMALL_CUTOFF / SLAB_GRAIN];
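
/* Worked example (assuming a 64-bit build where SLAB_GRAIN is 8 bytes): a
 * 20-byte element size rounds up to 24 and is served from little_slabs[3];
 * keeping SMALL_CUTOFF a multiple of SLAB_GRAIN guarantees that every rounded
 * size below the cutoff indexes inside little_slabs[]. */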
static struct slab *free_slabs;
unsigned long big_alloc_count;
unsigned long big_alloc_size;
unsigned long slab_count;
unsigned long slab_alloc_count;
unsigned long slab_alloc_size;
#if defined(MAP_ANON)
#elif defined(MAP_ANONYMOUS)
# define MAP_ANON MAP_ANONYMOUS
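
/* Where the platform offers neither MAP_ANON nor MAP_ANONYMOUS, slab_map()
 * below falls back to mapping /dev/zero to obtain zero-filled pages. */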
static size_t pagesize;

#if defined(HAVE_GETPAGESIZE)
        || (pagesize = getpagesize())
#if defined(HAVE_SYSCONF) && defined(_SC_PAGESIZE)
        || (pagesize = sysconf(_SC_PAGESIZE))
#if defined(HAVE_SYSCONF) && defined(_SC_PAGE_SIZE)
        || (pagesize = sysconf(_SC_PAGE_SIZE))
    assert(0 && "unable to find system page size");
    return pagesize = 4096;
slab_round_up(size_t size)
    return (size + slab_pagesize() - 1) & ~(slab_pagesize() - 1);
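
/* For example, with a 4096-byte page slab_round_up(100) yields 4096 and
 * slab_round_up(4097) yields 8192: mmap'ed lengths are padded to whole pages. */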
slab_map(size_t length)
    static int mmap_fd = -1;

        mmap_fd = open("/dev/zero", 0);
            log_module(MAIN_LOG, LOG_FATAL, "Unable to open /dev/zero for mmap: %s", strerror(errno));
    res = mmap(0, length, PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANON, mmap_fd, 0);
    if (res == MAP_FAILED)
        log_module(MAIN_LOG, LOG_FATAL, "Unable to mmap %lu bytes (%s).", (unsigned long)length, strerror(errno));
static void *slab_alloc(struct slabset *sset);
static void slab_unalloc(void *ptr, size_t size);
static struct slabset *
slabset_create(size_t size)
    size = (size < SLAB_MIN) ? SLAB_MIN : (size + SLAB_GRAIN - 1) & ~(SLAB_GRAIN - 1);
    idx = size / SLAB_GRAIN;
    assert(idx < ArrayLength(little_slabs));
    if (!little_slabs[idx].size) {
        little_slabs[idx].size = size;
        little_slabs[idx].items_per_slab = (slab_pagesize() - sizeof(struct slab)) / ((size + SLAB_ALIGN - 1) & ~(SLAB_ALIGN - 1));
    return &little_slabs[idx];
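
/* Rough numbers (assuming 4096-byte pages, 8-byte grain, and a struct slab of
 * about 40 bytes): slabset_create(24) records items_per_slab near 169, i.e.
 * one page holds roughly 169 24-byte items plus the struct slab bookkeeping
 * that lives at the end of the page. */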
slab_alloc(struct slabset *sset)

    if (!sset->child || !sset->child->free) {
        unsigned int ii, step;

        /* Allocate new slab. */
            free_slabs = slab->next;
            item = slab_map(slab_pagesize());
            slab = (struct slab*)((char*)item + slab_pagesize() - sizeof(*slab));

        /* Populate free list. */
        step = (sset->size + SLAB_ALIGN - 1) & ~(SLAB_ALIGN - 1);
        for (ii = 1, item = slab->free = slab->base;
             ii < sset->items_per_slab;
             ++ii, item = (*item = (char*)item + step));
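
        /* The loop above threads the free list through the items themselves:
         * each free item's first pointer-sized word holds the address of the
         * item `step' bytes after it, and slab->free points at the head, so a
         * freshly mapped (zero-filled) page ends the chain with a NULL link. */
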
        /* Link to parent slabset. */
        slab->prev = sset->child;
            slab->next = slab->prev->next;
            slab->prev->next = slab;
                slab->next->prev = slab;
        assert(!slab->next || slab == slab->next->prev);
        assert(!slab->prev || slab == slab->prev->next);

        /* log_module(MAIN_LOG, LOG_DEBUG, "Allocated new %u-slab %p.", sset->size, slab); */
    assert(((unsigned long)item & (slab_pagesize() - 1))
           <= (slab_pagesize() - sizeof(*slab) - sset->size));
    if (++slab->used == sset->items_per_slab) {
        /* log_module(MAIN_LOG, LOG_DEBUG, "%u-slab %p is full.", sset->size, item); */
        if (sset->child != slab) {
            /* Unlink slab and reinsert before sset->child. */
                slab->prev->next = slab->next;
                slab->next->prev = slab->prev;
            if ((slab->prev = sset->child->prev))
                slab->prev->next = slab;
            if ((slab->next = sset->child))
                slab->next->prev = slab;
            assert(!slab->next || slab == slab->next->prev);
            assert(!slab->prev || slab == slab->prev->next);
        } else if (slab->next) {
            /* Advance sset->child to the next slab. */
            sset->child = slab->next;

    memset(item, 0, sset->size);
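
    /* The item handed back was zeroed just above, so slab allocations behave
     * like calloc() from the caller's point of view. */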
slab_unalloc(void *ptr, size_t size)
    struct slab *slab, *new_next;

    assert(size < SMALL_CUTOFF);
    slab = (struct slab*)((((unsigned long)ptr | (slab_pagesize() - 1)) + 1) - sizeof(*slab));
    memset(item + 1, 0xde, size - sizeof(*item));
    slab->parent->nallocs--;

    if (slab->used-- == slab->parent->items_per_slab
        && slab->parent->child != slab) {
        /* Unlink from current position, relink as parent's first child. */
        new_next = slab->parent->child;
        assert(new_next != NULL);
            slab->prev->next = slab->next;
            slab->next->prev = slab->prev;
        if ((slab->prev = new_next->prev))
            slab->prev->next = slab;
        slab->next = new_next;
        new_next->prev = slab;
        slab->parent->child = slab;
        assert(!slab->next || slab == slab->next->prev);
        assert(!slab->prev || slab == slab->prev->next);
        /* log_module(MAIN_LOG, LOG_DEBUG, "%u-slab %p became partial.", slab->parent->size, slab); */
    } else if (!slab->used) {
        /* log_module(MAIN_LOG, LOG_DEBUG, "%u-slab %p became empty.", slab->parent->size, slab); */
        /* Unlink slab from its parent. */
        slab->parent->nslabs--;
            slab->prev->next = slab->next;
            slab->next->prev = slab->prev;
        new_next = slab->next ? slab->next : slab->prev;
        if (slab == slab->parent->child)
            slab->parent->child = new_next;
            assert(!new_next->next || new_next == new_next->next->prev);
            assert(!new_next->prev || new_next == new_next->prev->next);

        /* Link to list of free slabs. */
        slab->next = free_slabs;
slab_malloc(const char *file, unsigned int line, size_t size)

    assert(size < 1 << 24);
    real = (size + sizeof(*res) + SLAB_GRAIN - 1) & ~(SLAB_GRAIN - 1);
    if (real < SMALL_CUTOFF) {
        res = slab_alloc(slabset_create(real));
        slab_alloc_size += size;
        res = slab_map(slab_round_up(real));
        big_alloc_size += size;

    res->file_id = get_file_id(file);
    res->magic = ALLOC_MAGIC;
    (void)file; (void)line;
slab_realloc(const char *file, unsigned int line, void *ptr, size_t size)
    alloc_header_t *orig;

        return slab_malloc(file, line, size);

    orig = (alloc_header_t*)ptr - 1;
    newblock = slab_malloc(file, line, size);
    memcpy(newblock, ptr, osize);
slab_strdup(const char *file, unsigned int line, const char *src)

    len = strlen(src) + 1;
    target = slab_malloc(file, line, len);
    memcpy(target, src, len);
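
/* Hypothetical usage sketch (not part of this file): assuming callers reach
 * these functions through wrapper macros that pass __FILE__ and __LINE__, so
 * the owning source location ends up in the debug header, a direct call would
 * look like:
 *
 *     char *copy = slab_strdup(__FILE__, __LINE__, "example");
 *     slab_free(__FILE__, __LINE__, copy);
 *
 * The variable name is illustrative only. */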
slab_free(UNUSED_ARG(const char *file), UNUSED_ARG(unsigned int line), void *ptr)

    hdr = (alloc_header_t*)ptr - 1;
    hdr->magic = FREE_MAGIC;
    real = hdr->size + sizeof(*hdr);
    real = *hdr + sizeof(*hdr);
    real = (real + SLAB_GRAIN - 1) & ~(SLAB_GRAIN - 1);
    if (real < SMALL_CUTOFF) {
        slab_unalloc(hdr, real);
        slab_alloc_size -= real - sizeof(*hdr);
        munmap(hdr, slab_round_up(real));
        big_alloc_size -= real - sizeof(*hdr);
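
/* Freed blocks are deliberately clobbered: slab_free() stamps FREE_MAGIC into
 * the header and slab_unalloc() fills small items with 0xde bytes, so a stale
 * pointer is likely to fail the checks in verify() below. */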
verify(const void *ptr)

    hdr = (alloc_header_t*)ptr - 1;
    real = hdr->size + sizeof(*hdr);
    assert(hdr->file_id < file_ids_used);
    assert(hdr->magic == ALLOC_MAGIC);
    real = *hdr + sizeof(*hdr);
    real = (real + SLAB_GRAIN - 1) & ~(SLAB_GRAIN - 1);
    if (real >= SMALL_CUTOFF)
        assert(((unsigned long)ptr & (slab_pagesize() - 1)) == sizeof(*hdr));
        expected = (real + SLAB_GRAIN - 1) & ~(SLAB_GRAIN - 1);
        slab = (struct slab*)((((unsigned long)ptr | (slab_pagesize() - 1)) + 1) - sizeof(*slab));
        assert(slab->parent->size == expected);
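
/* Illustrative check (hypothetical caller): any pointer obtained from
 * slab_malloc() can be passed to verify() while it is live, e.g.
 *
 *     void *obj = slab_malloc(__FILE__, __LINE__, 64);
 *     verify(obj);
 *
 * Large blocks are checked for page alignment; small blocks are checked
 * against the size recorded in their owning slabset. */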