1 /* alloc-slab.c - Slab debugging allocator
2 * Copyright 2005 srvx Development Team
4 * This file is part of srvx.
6 * srvx is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; either version 2 of the License, or
9 * (at your option) any later version.
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
/* NOTE(review): this listing is elided -- interior line numbers jump, so
 * the matching #endif lines for these conditionals are not visible here. */
20 #if defined(HAVE_SYS_MMAN_H)
21 # include <sys/mman.h>
24 #if !defined(HAVE_MMAP)
25 # error The slab allocator requires that your system have the mmap() system call.
/* Number of unused slab pages retained before extras are unmapped
 * (consumed/replenished in slab_unalloc). */
29 #define SLAB_RESERVE 1024
/* Guard values stored in alloc_header.magic to detect double frees and
 * stray writes: ALLOC_MAGIC while live, FREE_MAGIC after slab_free. */
33 #define ALLOC_MAGIC 0x1acf
34 #define FREE_MAGIC 0xfc1d
/* Per-allocation debug header fields.  NOTE(review): the enclosing
 * `struct alloc_header {` line is elided from this listing. */
/* index into file_ids[] of the allocating source file (max 256 files) */
37 unsigned int file_id : 8;
/* requested size in bytes; 24 bits wide, hence slab_malloc's
 * assert(size < 1 << 24) */
38 unsigned int size : 24;
/* source line number of the allocating call */
39 unsigned int line : 16;
/* ALLOC_MAGIC while allocated, FREE_MAGIC once freed */
40 unsigned int magic : 16;
43 static const char *file_ids[256];
44 static struct file_id_entry {
48 unsigned int file_ids_used;
/* qsort()/bsearch() comparator for file_id_map: each entry begins with
 * a const char *name, so compare the pointed-to strings with strcmp. */
51 file_id_cmp(const void *a_, const void *b_)
53 return strcmp(*(const char**)a_, *(const char**)b_);
/* Map a source-file name to its small integer id, registering the name
 * on first sight.  FNAME's pointer is stored, not copied, so it is
 * presumably a __FILE__ string literal with static lifetime.
 * (Interior lines of this function are elided from this listing.) */
57 get_file_id(const char *fname)
59 struct file_id_entry *entry;
/* fast path: name already registered (bsearch over the sorted map) */
61 entry = bsearch(&fname, file_id_map, file_ids_used, sizeof(file_id_map[0]), file_id_cmp);
/* not found: append a new entry at the end of the map ... */
64 entry = file_id_map + file_ids_used;
65 file_ids[file_ids_used] = fname;
67 entry->id = file_ids_used;
/* ... then re-sort so future bsearch lookups keep working */
68 qsort(file_id_map, ++file_ids_used, sizeof(file_id_map[0]), file_id_cmp);
/* the id just assigned (file_ids_used was post-incremented above) */
69 return file_ids_used - 1;
/* With slab debugging enabled the per-allocation header is the bitfield
 * struct above; otherwise it degenerates to a bare size_t holding the
 * size.  NOTE(review): the #if/#else lines selecting between the two
 * typedefs are elided from this listing. */
72 typedef struct alloc_header alloc_header_t;
76 typedef size_t alloc_header_t;
/* Visible fields of struct slab / struct slabset; the remaining
 * member declarations are elided from this listing. */
/* owning slabset of a slab page */
81 struct slabset *parent;
/* number of elements that fit on one slab page */
94 size_t items_per_slab;
/* Smallest slab element: must hold a free-list link pointer plus slack. */
97 #define SLAB_MIN (2 * sizeof(void*))
/* Element sizes are rounded up in SLAB_GRAIN steps and aligned to it. */
98 #define SLAB_GRAIN sizeof(void*)
99 #define SLAB_ALIGN SLAB_GRAIN
100 #define SMALL_CUTOFF 582
101 /* Element size < SMALL_CUTOFF -> use small slabs.
102 * Larger elements are allocated directly using mmap(). The largest
103 * regularly allocated struct in srvx 1.x is smaller than
104 * SMALL_CUTOFF, so there is not much point in coding support for
/* One slabset per possible rounded element size below the cutoff. */
108 static struct slabset little_slabs[SMALL_CUTOFF / SLAB_GRAIN];
/* Doubly-linked list of retained-but-unused slab pages (kept up to
 * SLAB_RESERVE pages; see slab_unalloc). */
109 static struct slab *free_slab_head;
110 static struct slab *free_slab_tail;
/* Allocation statistics; non-static, so presumably read by reporting
 * code elsewhere in the project -- confirm before changing linkage. */
111 unsigned long free_slab_count;
112 unsigned long big_alloc_count;
113 unsigned long big_alloc_size;
114 unsigned long slab_count;
115 unsigned long slab_alloc_count;
116 unsigned long slab_alloc_size;
/* Portability shim: some systems only spell MAP_ANON as MAP_ANONYMOUS.
 * (The closing #endif is elided from this listing.) */
118 #if defined(MAP_ANON)
119 #elif defined(MAP_ANONYMOUS)
120 # define MAP_ANON MAP_ANONYMOUS
/* Return the system page size, cached in `pagesize` after the first
 * call.  NOTE(review): the enclosing function header and the start of
 * the `if (pagesize ...)` chain are elided from this listing; each
 * `|| (pagesize = ...)` arm below is tried until one yields nonzero. */
128 static size_t pagesize;
130 #if defined(HAVE_GETPAGESIZE)
131 || (pagesize = getpagesize())
133 #if defined(HAVE_SYSCONF) && defined(_SC_PAGESIZE)
134 || (pagesize = sysconf(_SC_PAGESIZE))
136 #if defined(HAVE_SYSCONF) && defined(_SC_PAGE_SIZE)
137 || (pagesize = sysconf(_SC_PAGE_SIZE))
/* last resort: trip the assert in debug builds, assume 4 KiB pages */
140 assert(0 && "unable to find system page size");
141 return pagesize = 4096;
/* Round SIZE up to a whole number of pages.  Relies on the page size
 * being a power of two so the mask trick is valid. */
145 slab_round_up(size_t size)
147 return (size + slab_pagesize() - 1) & ~(slab_pagesize() - 1);
151 slab_map(size_t length)
153 static int mmap_fd = -1;
158 mmap_fd = open("/dev/zero", 0);
160 log_module(MAIN_LOG, LOG_FATAL, "Unable to open /dev/zero for mmap: %s", strerror(errno()));
163 res = mmap(0, length, PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANON, mmap_fd, 0);
164 if (res == MAP_FAILED)
165 log_module(MAIN_LOG, LOG_FATAL, "Unable to mmap %lu bytes (%s).", (unsigned long)length, strerror(errno));
/* Forward declarations for the small-slab allocate/release helpers. */
169 static void *slab_alloc(struct slabset *sset);
170 static void slab_unalloc(void *ptr, size_t size);
/* Return the slabset that serves allocations of SIZE bytes, lazily
 * initializing it on first use.  SIZE is rounded up to a SLAB_GRAIN
 * multiple (minimum SLAB_MIN), so several requested sizes can share a
 * slabset.  (Interior lines are elided from this listing.) */
172 static struct slabset *
173 slabset_create(size_t size)
177 size = (size < SLAB_MIN) ? SLAB_MIN : (size + SLAB_GRAIN - 1) & ~(SLAB_GRAIN - 1);
178 idx = size / SLAB_GRAIN;
179 assert(idx < ArrayLength(little_slabs));
180 if (!little_slabs[idx].size) {
181 little_slabs[idx].size = size;
/* a page holds the struct slab header at its end plus this many
 * SLAB_ALIGN-rounded elements */
182 little_slabs[idx].items_per_slab = (slab_pagesize() - sizeof(struct slab)) / ((size + SLAB_ALIGN - 1) & ~(SLAB_ALIGN - 1));
184 return &little_slabs[idx];
/* Pop one zeroed element from slabset SSET, creating a new slab page
 * when no partially-used slab has free elements.  NOTE(review): many
 * interior lines of this function (declarations, braces, the actual
 * pop from slab->free) are elided from this listing. */
188 slab_alloc(struct slabset *sset)
193 if (!sset->child || !sset->child->free) {
194 unsigned int ii, step;
196 /* Allocate new slab. */
/* reuse a retained page from the free-slab list if possible, else
 * mmap a fresh one; the slab header lives at the END of the page */
197 if (free_slab_head) {
198 slab = free_slab_head;
199 if (!(free_slab_head = slab->next))
200 free_slab_tail = NULL;
202 item = slab_map(slab_pagesize());
203 slab = (struct slab*)((char*)item + slab_pagesize() - sizeof(*slab));
208 /* Populate free list. */
/* intrusive free list: each free element's first word points at the
 * next free element */
209 step = (sset->size + SLAB_ALIGN - 1) & ~(SLAB_ALIGN - 1);
210 for (ii = 1, item = slab->free = slab->base;
211 ii < sset->items_per_slab;
212 ++ii, item = (*item = (char*)item + step));
215 /* Link to parent slabset. */
217 slab->prev = sset->child;
219 slab->next = slab->prev->next;
220 slab->prev->next = slab;
222 slab->next->prev = slab;
225 assert(!slab->next || slab == slab->next->prev);
226 assert(!slab->prev || slab == slab->prev->next);
229 /* log_module(MAIN_LOG, LOG_DEBUG, "Allocated new %u-slab %p.", sset->size, slab); */
/* sanity: the returned element must lie before the page-end header */
234 assert(((unsigned long)item & (slab_pagesize() - 1))
235 <= (slab_pagesize() - sizeof(*slab) - sset->size));
/* slab just became exhausted: demote it so slabs with free elements
 * are found first on subsequent allocations */
237 if (++slab->used == sset->items_per_slab) {
238 /* log_module(MAIN_LOG, LOG_DEBUG, "%u-slab %p is empty.", sset->size, item); */
239 if (sset->child != slab) {
240 /* Unlink slab and reinsert before sset->child. */
242 slab->prev->next = slab->next;
244 slab->next->prev = slab->prev;
245 if ((slab->prev = sset->child->prev))
246 slab->prev->next = slab;
247 if ((slab->next = sset->child))
248 slab->next->prev = slab;
249 assert(!slab->next || slab == slab->next->prev);
250 assert(!slab->prev || slab == slab->prev->next);
251 } else if (slab->next) {
252 /* Advance sset->child to next pointer. */
253 sset->child = slab->next;
/* contract: memory handed out by slab_alloc is always zeroed */
257 memset(item, 0, sset->size);
/* Return element PTR (rounded size SIZE) to its slab.  A slab whose
 * last element is freed is unlinked and either parked on the free-slab
 * list or unmapped.  NOTE(review): interior lines (declarations,
 * braces, the push onto slab->free) are elided from this listing. */
262 slab_unalloc(void *ptr, size_t size)
265 struct slab *slab, *new_next;
268 assert(size < SMALL_CUTOFF);
/* the owning slab header sits at the end of PTR's page */
269 slab = (struct slab*)((((unsigned long)ptr | (slab_pagesize() - 1)) + 1) - sizeof(*slab));
/* poison the freed element past its free-list link word (0xde) to make
 * use-after-free visible */
271 memset(item + 1, 0xde, size - sizeof(*item));
273 slab->parent->nallocs--;
/* slab was completely used and now has one free element: promote it to
 * the front of the parent's child list so it is allocated from first */
275 if (slab->used-- == slab->parent->items_per_slab
276 && slab->parent->child != slab) {
277 /* Unlink from current position, relink as parent's first child. */
278 new_next = slab->parent->child;
279 assert(new_next != NULL);
281 slab->prev->next = slab->next;
283 slab->next->prev = slab->prev;
284 if ((slab->prev = new_next->prev))
285 slab->prev->next = slab;
286 slab->next = new_next;
287 new_next->prev = slab;
288 slab->parent->child = slab;
289 assert(!slab->next || slab == slab->next->prev);
290 assert(!slab->prev || slab == slab->prev->next);
291 /* log_module(MAIN_LOG, LOG_DEBUG, "%u-slab %p became partial.", slab->parent->size, slab); */
292 } else if (!slab->used) {
293 /* log_module(MAIN_LOG, LOG_DEBUG, "%u-slab %p became full.", slab->parent->size, slab); */
294 /* Unlink slab from its parent. */
295 slab->parent->nslabs--;
298 slab->prev->next = slab->next;
300 slab->next->prev = slab->prev;
301 new_next = slab->next ? slab->next : slab->prev;
302 if (slab == slab->parent->child)
303 slab->parent->child = new_next;
305 assert(!new_next->next || new_next == new_next->next->prev);
306 assert(!new_next->prev || new_next == new_next->prev->next);
310 /* Make sure we have enough free slab pages. */
311 while (free_slab_count < SLAB_RESERVE) {
313 item = slab_map(slab_pagesize());
/* NOTE(review): sizeof(*slab) is used to place tslab's header; both are
 * struct slab*, so the value is identical, but sizeof(*tslab) would
 * read more clearly. */
314 tslab = (struct slab*)((char*)item + slab_pagesize() - sizeof(*slab));
316 tslab->prev = free_slab_tail;
317 free_slab_tail = tslab;
319 free_slab_head = tslab;
324 /* Unmap old slab, so accesses to stale pointers will fault. */
325 munmap(slab->base, slab_pagesize());
328 /* Link to list of free slabs. */
330 slab->prev = free_slab_tail;
332 free_slab_tail = slab;
334 free_slab_head = slab;
/* Debugging malloc(): allocate SIZE usable bytes on behalf of
 * FILE:LINE, prefixing the block with an alloc_header_t that records
 * the caller.  Small requests come from a slabset; large ones are
 * mmap()ed directly.  (Interior lines -- declarations, the else
 * branches, header line/size stores, and the return -- are elided.) */
341 slab_malloc(const char *file, unsigned int line, size_t size)
/* the requested size must fit the header's 24-bit size field */
346 assert(size < 1 << 24);
/* total block = header + payload, rounded up to a SLAB_GRAIN multiple */
347 real = (size + sizeof(*res) + SLAB_GRAIN - 1) & ~(SLAB_GRAIN - 1);
348 if (real < SMALL_CUTOFF) {
349 res = slab_alloc(slabset_create(real));
351 slab_alloc_size += size;
/* large allocation: page-aligned mmap, header at page start */
353 res = slab_map(slab_round_up(real));
355 big_alloc_size += size;
/* record the allocation site in the debug header */
358 res->file_id = get_file_id(file);
361 res->magic = ALLOC_MAGIC;
/* non-debug build: allocation site is deliberately unused */
364 (void)file; (void)line;
/* Debugging realloc(): resize PTR to SIZE bytes, copying the existing
 * contents into a fresh block.  NOTE(review): interior lines are
 * elided -- the NULL-pointer guard, the computation of `osize` from
 * the old header, and the free/return of the old block are not
 * visible in this listing. */
370 slab_realloc(const char *file, unsigned int line, void *ptr, size_t size)
372 alloc_header_t *orig;
/* realloc(NULL, n) degenerates to malloc(n) */
377 return slab_malloc(file, line, size);
/* the debug header immediately precedes the user pointer */
380 orig = (alloc_header_t*)ptr - 1;
388 newblock = slab_malloc(file, line, size);
389 memcpy(newblock, ptr, osize);
/* Debugging strdup(): copy SRC, including its NUL terminator, into a
 * block obtained from slab_malloc on behalf of FILE:LINE.  (The
 * declarations and return statement are elided from this listing.) */
394 slab_strdup(const char *file, unsigned int line, const char *src)
399 len = strlen(src) + 1;
400 target = slab_malloc(file, line, len);
401 memcpy(target, src, len);
/* Debugging free(): mark the header as freed and return the block to
 * its slab, or munmap() it if it was a large direct mapping.
 * NOTE(review): interior lines are elided -- the NULL-pointer guard
 * and the #if branches selecting debug vs. plain headers are not
 * fully visible in this listing. */
406 slab_free(UNUSED_ARG(const char *file), UNUSED_ARG(unsigned int line), void *ptr)
414 hdr = (alloc_header_t*)ptr - 1;
/* stamp FREE_MAGIC so a later verify()/double free trips an assert */
416 hdr->magic = FREE_MAGIC;
417 real = hdr->size + sizeof(*hdr);
/* non-debug header is just the stored size_t */
419 real = *hdr + sizeof(*hdr);
/* recompute the rounded block size chosen at allocation time */
421 real = (real + SLAB_GRAIN - 1) & ~(SLAB_GRAIN - 1);
422 if (real < SMALL_CUTOFF) {
423 slab_unalloc(hdr, real);
425 slab_alloc_size -= real - sizeof(*hdr);
/* large allocation: release the whole mapping back to the kernel */
427 munmap(hdr, slab_round_up(real));
429 big_alloc_size -= real - sizeof(*hdr);
/* Consistency check for an allocated pointer: validate the debug
 * header and, for small allocations, that the owning slab's element
 * size matches the header's size.  NOTE(review): this function runs
 * past the end of the listing and interior lines are elided. */
434 verify(const void *ptr)
442 hdr = (alloc_header_t*)ptr - 1;
444 real = hdr->size + sizeof(*hdr);
/* header must name a registered file and carry the live magic value */
445 assert(hdr->file_id < file_ids_used);
446 assert(hdr->magic == ALLOC_MAGIC);
448 real = *hdr + sizeof(*hdr);
450 real = (real + SLAB_GRAIN - 1) & ~(SLAB_GRAIN - 1);
/* large allocations are page-aligned mappings with the header at the
 * start of the page, so the user pointer sits sizeof(*hdr) in */
452 if (real >= SMALL_CUTOFF)
453 assert(((unsigned long)ptr & (slab_pagesize() - 1)) == sizeof(*hdr));
458 expected = (real + SLAB_GRAIN - 1) & ~(SLAB_GRAIN - 1);
/* locate the slab header at the end of PTR's page */
459 slab = (struct slab*)((((unsigned long)ptr | (slab_pagesize() - 1)) + 1) - sizeof(*slab));
460 assert(slab->parent->size == expected);