1 /* alloc-slab.c - Slab debugging allocator
2 * Copyright 2005 srvx Development Team
4 * This file is part of srvx.
6 * srvx is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; either version 2 of the License, or
9 * (at your option) any later version.
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
20 #if defined(HAVE_SYS_MMAN_H)
21 # include <sys/mman.h>
24 #if !defined(HAVE_MMAP)
25 # error The slab allocator requires that your system have the mmap() system call.
28 #if !defined(SLAB_DEBUG)
32 #if !defined(SLAB_RESERVE)
33 # define SLAB_RESERVE 0
36 #if !defined(MAX_SLAB_FREE)
37 # define MAX_SLAB_FREE 1024
/* Magic bytes stamped into each allocation header so slab_free() and
 * verify() can detect double frees and corruption: ALLOC_MAGIC marks a
 * live block, FREE_MAGIC a freed one. */
42 #define ALLOC_MAGIC 0x1a
43 #define FREE_MAGIC 0xcf
/* Bit-fields of the debug allocation header: 24-bit user size, 8-bit
 * magic byte, 8-bit id of the allocating file and 16-bit line number.
 * NOTE(review): the enclosing "struct alloc_header {" line is missing
 * from this paste -- confirm against the full source. */
46 unsigned int size : 24;
47 unsigned int magic : 8;
48 unsigned int file_id : 8;
49 unsigned int line : 16;
/* Maps small integer ids back to file-name strings, indexed by id. */
52 static const char *file_ids[256];
/* Name-sorted map used by get_file_id() with bsearch()/qsort();
 * NOTE(review): the member lines and the "} file_id_map[256];" closer
 * are elided in this paste. */
53 static struct file_id_entry {
/* Number of entries currently used in file_ids[]/file_id_map[]. */
57 unsigned int file_ids_used;
/* Comparison callback for qsort()/bsearch() over file_id_map[]: both
 * arguments point at (const char *) name fields, so dereference once
 * and compare the file-name strings.
 * Returns <0, 0 or >0 in the usual strcmp() order.
 * (Restores the storage-class line and braces elided in the paste.) */
static int
file_id_cmp(const void *a_, const void *b_)
{
    return strcmp(*(const char**)a_, *(const char**)b_);
}
66 get_file_id(const char *fname)
68 struct file_id_entry *entry;
70 entry = bsearch(&fname, file_id_map, file_ids_used, sizeof(file_id_map[0]), file_id_cmp);
73 entry = file_id_map + file_ids_used;
74 file_ids[file_ids_used] = fname;
76 entry->id = file_ids_used;
77 qsort(file_id_map, ++file_ids_used, sizeof(file_id_map[0]), file_id_cmp);
78 return file_ids_used - 1;
/* alloc_header_t is a full struct in SLAB_DEBUG builds and a bare
 * size_t otherwise.  NOTE(review): the #if/#else/#endif lines around
 * these two alternative typedefs are elided in this paste. */
81 typedef struct alloc_header alloc_header_t;
85 typedef size_t alloc_header_t;
/* Fragments of struct slab (back-pointer to the owning slabset) and
 * struct slabset (object count per page); the surrounding member lines
 * are elided here -- confirm against the full source. */
90 struct slabset *parent;
103 size_t items_per_slab;
/* Sizing knobs: smallest object, size-rounding grain, alignment, and
 * the threshold above which allocations bypass slabs entirely. */
106 #define SLAB_MIN (2 * sizeof(void*))
107 #define SLAB_GRAIN sizeof(void*)
108 #define SLAB_ALIGN SLAB_GRAIN
109 #define SMALL_CUTOFF 576
110 /* Element size < SMALL_CUTOFF -> use small slabs.
111 * Larger elements are allocated directly using mmap(). The largest
112 * regularly allocated struct in srvx 1.x is smaller than
113 * SMALL_CUTOFF, so there is not much point in coding support for
/* One slabset per distinct rounded size class below SMALL_CUTOFF. */
117 static struct slabset little_slabs[SMALL_CUTOFF / SLAB_GRAIN];
/* Doubly-linked list of retired-but-still-mapped slab pages. */
118 static struct slab *free_slab_head;
119 static struct slab *free_slab_tail;
/* Allocator statistics; non-static so reporting code can read them. */
120 unsigned long free_slab_count;
121 unsigned long big_alloc_count;
122 unsigned long big_alloc_size;
123 unsigned long slab_count;
124 unsigned long slab_alloc_count;
125 unsigned long slab_alloc_size;
/* Portability shim: some platforms only spell MAP_ANON as
 * MAP_ANONYMOUS. */
127 #if defined(MAP_ANON)
128 #elif defined(MAP_ANONYMOUS)
129 # define MAP_ANON MAP_ANONYMOUS
/* Return the system page size, probing it once and caching the result
 * in a function-local static.  Tries getpagesize() and both sysconf()
 * spellings, as available; if every probe fails, asserts and falls
 * back to 4096.  (Restores the signature, "if (pagesize", "#endif" and
 * closing lines elided in the paste.) */
static size_t
slab_pagesize(void)
{
    static size_t pagesize;
    if (pagesize
#if defined(HAVE_GETPAGESIZE)
        || (pagesize = getpagesize())
#endif
#if defined(HAVE_SYSCONF) && defined(_SC_PAGESIZE)
        || (pagesize = sysconf(_SC_PAGESIZE))
#endif
#if defined(HAVE_SYSCONF) && defined(_SC_PAGE_SIZE)
        || (pagesize = sysconf(_SC_PAGE_SIZE))
#endif
        )
        return pagesize;
    assert(0 && "unable to find system page size");
    return pagesize = 4096;
}
/* Round "size" up to the next multiple of the system page size.
 * Relies on slab_pagesize() returning a power of two so the mask
 * trick is valid.  (Restores the declarator and brace lines elided in
 * the paste.) */
static size_t
slab_round_up(size_t size)
{
    return (size + slab_pagesize() - 1) & ~(slab_pagesize() - 1);
}
160 slab_map(size_t length)
162 static int mmap_fd = -1;
167 mmap_fd = open("/dev/zero", 0);
169 log_module(MAIN_LOG, LOG_FATAL, "Unable to open /dev/zero for mmap: %s", strerror(errno()));
172 res = mmap(0, length, PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANON, mmap_fd, 0);
173 if (res == MAP_FAILED)
174 log_module(MAIN_LOG, LOG_FATAL, "Unable to mmap %lu bytes (%s).", (unsigned long)length, strerror(errno));
/* Forward declarations: slab_alloc()/slab_unalloc() implement the
 * small-object (below SMALL_CUTOFF) allocation and release paths. */
178 static void *slab_alloc(struct slabset *sset);
179 static void slab_unalloc(void *ptr, size_t size);
181 static struct slabset *
182 slabset_create(size_t size)
186 size = (size < SLAB_MIN) ? SLAB_MIN : (size + SLAB_GRAIN - 1) & ~(SLAB_GRAIN - 1);
187 idx = size / SLAB_GRAIN;
188 assert(idx < ArrayLength(little_slabs));
189 if (!little_slabs[idx].size) {
190 little_slabs[idx].size = size;
191 little_slabs[idx].items_per_slab = (slab_pagesize() - sizeof(struct slab)) / ((size + SLAB_ALIGN - 1) & ~(SLAB_ALIGN - 1));
193 return &little_slabs[idx];
/* Pop one object from slabset "sset", growing the slabset with a new
 * page when no slab with free items is available.  The returned object
 * is zeroed to sset->size bytes.  NOTE(review): several structural
 * lines (declarations, braces, "else" arms, the return) are elided in
 * this paste; the comments below annotate only the visible code. */
197 slab_alloc(struct slabset *sset)
/* No current slab, or the current one has no free items: get a page. */
202 if (!sset->child || !sset->child->free) {
203 unsigned int ii, step;
205 /* Allocate new slab. */
/* Prefer recycling a page from the free-slab reserve list. */
206 if (free_slab_head) {
207 slab = free_slab_head;
208 if (!(free_slab_head = slab->next))
209 free_slab_tail = NULL;
/* Fresh page: the struct slab header lives at the *end* of the page,
 * objects fill the front. */
211 item = slab_map(slab_pagesize());
212 slab = (struct slab*)((char*)item + slab_pagesize() - sizeof(*slab));
217 /* Populate free list. */
/* step = object size rounded to alignment; the free list threads the
 * next-item pointer through the first word of each free object. */
218 step = (sset->size + SLAB_ALIGN - 1) & ~(SLAB_ALIGN - 1);
219 for (ii = 1, item = slab->free = slab->base;
220 ii < sset->items_per_slab;
221 ++ii, item = (*item = (char*)item + step));
224 /* Link to parent slabset. */
226 slab->prev = sset->child;
228 slab->next = slab->prev->next;
229 slab->prev->next = slab;
231 slab->next->prev = slab;
/* Doubly-linked list invariants. */
234 assert(!slab->next || slab == slab->next->prev);
235 assert(!slab->prev || slab == slab->prev->next);
/* The popped item must lie within the page, before the slab header. */
242 assert(((unsigned long)item & (slab_pagesize() - 1))
243 <= (slab_pagesize() - sizeof(*slab) - sset->size));
/* Slab just became full: move it away from the allocation front. */
245 if (++slab->used == sset->items_per_slab) {
246 if (sset->child != slab) {
247 /* Unlink slab and reinsert before sset->child. */
249 slab->prev->next = slab->next;
251 slab->next->prev = slab->prev;
252 if ((slab->prev = sset->child->prev))
253 slab->prev->next = slab;
254 if ((slab->next = sset->child))
255 slab->next->prev = slab;
256 assert(!slab->next || slab == slab->next->prev);
257 assert(!slab->prev || slab == slab->prev->next);
258 } else if (slab->next) {
259 /* Advance sset->child to next pointer. */
260 sset->child = slab->next;
/* Hand the object back zero-initialized. */
264 memset(item, 0, sset->size);
/* Return the object at "ptr" (rounded size "size") to its slab, and
 * retire or unmap wholly-free slab pages according to SLAB_RESERVE and
 * MAX_SLAB_FREE.  NOTE(review): many structural lines (declarations,
 * braces, counter updates) are elided in this paste. */
269 slab_unalloc(void *ptr, size_t size)
271 struct slab *slab, *new_next;
273 assert(size < SMALL_CUTOFF);
/* The slab header sits at the end of the page containing ptr. */
274 slab = (struct slab*)((((unsigned long)ptr | (slab_pagesize() - 1)) + 1) - sizeof(*slab));
/* Push the freed object onto the slab's intrusive free list. */
275 *(void**)ptr = slab->free;
277 slab->parent->nallocs--;
/* Slab was full and is not the parent's first child: move it to the
 * front so the next allocation finds its newly freed item. */
279 if (slab->used-- == slab->parent->items_per_slab
280 && slab->parent->child != slab) {
281 /* Unlink from current position, relink as parent's first child. */
282 new_next = slab->parent->child;
283 assert(new_next != NULL);
285 slab->prev->next = slab->next;
287 slab->next->prev = slab->prev;
288 if ((slab->prev = new_next->prev))
289 slab->prev->next = slab;
290 slab->next = new_next;
291 new_next->prev = slab;
292 slab->parent->child = slab;
293 assert(!slab->next || slab == slab->next->prev);
294 assert(!slab->prev || slab == slab->prev->next);
295 } else if (!slab->used) {
296 /* Unlink slab from its parent. */
297 slab->parent->nslabs--;
299 slab->prev->next = slab->next;
301 slab->next->prev = slab->prev;
302 new_next = slab->next ? slab->next : slab->prev;
303 if (slab == slab->parent->child)
304 slab->parent->child = new_next;
306 assert(!new_next->next || new_next == new_next->next->prev);
307 assert(!new_next->prev || new_next == new_next->prev->next);
311 /* Make sure we have enough free slab pages. */
/* Top up the reserve of pre-mapped pages (SLAB_RESERVE may be 0). */
312 while (free_slab_count < SLAB_RESERVE) {
316 item = slab_map(slab_pagesize());
317 tslab = (struct slab*)((char*)item + slab_pagesize() - sizeof(*slab));
/* Append the new reserve page at the tail of the free-slab list. */
319 tslab->prev = free_slab_tail;
320 free_slab_tail = tslab;
322 free_slab_head = tslab;
324 tslab->prev->next = tslab;
330 /* Link to list of free slabs. */
333 slab->prev = free_slab_tail;
334 free_slab_tail = slab;
336 slab->prev->next = slab;
338 free_slab_head = slab;
341 #if MAX_SLAB_FREE >= 0
342 /* Unlink and unmap old slabs, so accesses to stale-enough
343 * pointers will fault. */
344 while (free_slab_count > MAX_SLAB_FREE) {
347 tslab = free_slab_tail;
348 free_slab_tail = tslab->prev;
350 tslab->prev->next = NULL;
352 free_slab_head = NULL;
/* NOTE(review): this loop evicts "tslab", so unmapping "slab->base"
 * looks wrong -- it would unmap the same page on every iteration (and
 * possibly a page still linked in the list).  Expected:
 * munmap(tslab->base, slab_pagesize()); -- confirm against upstream. */
355 munmap(slab->base, slab_pagesize());
/* Debug malloc(): allocate "size" bytes on behalf of file:line.
 * Small requests go through the slab allocator; large ones are
 * mmap()ed directly.  A header recording size/owner/magic precedes the
 * returned pointer.  NOTE(review): the declarations, "else" arm and
 * #if SLAB_DEBUG structure are elided in this paste. */
363 slab_malloc(const char *file, unsigned int line, size_t size)
/* The debug header stores the user size in a 24-bit field. */
368 assert(size < 1 << 24);
/* real = user size + header, rounded up to the allocation grain. */
369 real = (size + sizeof(*res) + SLAB_GRAIN - 1) & ~(SLAB_GRAIN - 1);
370 if (real < SMALL_CUTOFF) {
371 res = slab_alloc(slabset_create(real));
373 slab_alloc_size += size;
/* Large allocation: map whole pages directly. */
375 res = slab_map(slab_round_up(real));
377 big_alloc_size += size;
/* SLAB_DEBUG bookkeeping: record the allocation site and mark live. */
380 res->file_id = get_file_id(file);
383 res->magic = ALLOC_MAGIC;
/* Non-debug build: file/line are unused. */
386 (void)file; (void)line;
/* Debug realloc(): resize "ptr" to "size" bytes via malloc + copy +
 * free.  NOTE(review): the NULL/zero-size handling, the read of the
 * old size "osize" from the header, and the early returns are on lines
 * elided in this paste. */
392 slab_realloc(const char *file, unsigned int line, void *ptr, size_t size)
394 alloc_header_t *orig;
/* NULL ptr behaves like a plain allocation. */
399 return slab_malloc(file, line, size);
/* The allocation header immediately precedes the user pointer. */
402 orig = (alloc_header_t*)ptr - 1;
/* Allocate, copy the old contents, release the old block. */
410 newblock = slab_malloc(file, line, size);
411 memcpy(newblock, ptr, osize);
412 slab_free(file, line, ptr);
/* strdup() replacement that allocates through slab_malloc() so the
 * copy is tracked by the slab debugging allocator; "file"/"line"
 * identify the caller for attribution.  Copies the terminating NUL.
 * (Restores the return-type, brace, declaration and return lines
 * elided in the paste.) */
char *
slab_strdup(const char *file, unsigned int line, const char *src)
{
    char *target;
    size_t len;

    len = strlen(src) + 1;
    target = slab_malloc(file, line, len);
    memcpy(target, src, len);
    return target;
}
/* Debug free(): release "ptr" previously returned by slab_malloc().
 * In debug builds the header is re-stamped with the freeing site and
 * FREE_MAGIC so double frees can be diagnosed, and the payload is
 * poisoned.  NOTE(review): the verify() call, declarations, and the
 * read of the original request size "user" from the header are on
 * lines elided in this paste. */
429 slab_free(const char *file, unsigned int line, void *ptr)
437 hdr = (alloc_header_t*)ptr - 1;
/* Record who freed the block and mark it dead. */
439 hdr->file_id = get_file_id(file);
441 hdr->magic = FREE_MAGIC;
445 (void)file; (void)line;
/* Recompute the rounded block size exactly as slab_malloc() did. */
447 real = (user + sizeof(*hdr) + SLAB_GRAIN - 1) & ~(SLAB_GRAIN - 1);
448 if (real < SMALL_CUTOFF) {
/* Poison the payload to catch use-after-free, then return the block
 * (header included) to its slab. */
449 memset(hdr + 1, 0xde, real - sizeof(*hdr));
450 slab_unalloc(hdr, real);
452 slab_alloc_size -= user;
/* Large allocation: give the pages back to the kernel. */
454 munmap(hdr, slab_round_up(real));
456 big_alloc_size -= user;
460 /* Undefine the verify macro in case we're not debugging. */
/* Sanity-check a pointer previously returned by slab_malloc(): header
 * magic and file id, size rounding, page alignment for large blocks,
 * and that a small block's owning slab serves the expected size class.
 * NOTE(review): this function continues past the end of this paste;
 * only the visible checks are annotated below. */
463 verify(const void *ptr)
471 hdr = (alloc_header_t*)ptr - 1;
/* SLAB_DEBUG header: size plus identity checks. */
473 real = hdr->size + sizeof(*hdr);
474 assert(hdr->file_id < file_ids_used);
475 assert(hdr->magic == ALLOC_MAGIC);
/* Non-debug header is just the raw stored size. */
477 real = *hdr + sizeof(*hdr);
479 real = (real + SLAB_GRAIN - 1) & ~(SLAB_GRAIN - 1);
/* Large blocks are mmap()ed whole pages, so the user pointer must sit
 * just past the header at the start of a page. */
481 if (real >= SMALL_CUTOFF)
482 assert(((unsigned long)ptr & (slab_pagesize() - 1)) == sizeof(*hdr));
487 expected = (real + SLAB_GRAIN - 1) & ~(SLAB_GRAIN - 1);
/* Locate the slab header at the end of ptr's page and check that its
 * slabset serves blocks of the expected rounded size. */
488 slab = (struct slab*)((((unsigned long)ptr | (slab_pagesize() - 1)) + 1) - sizeof(*slab));
489 assert(slab->parent->size == expected);