1 /* alloc-slab.c - Slab debugging allocator
2 * Copyright 2005 srvx Development Team
4 * This file is part of srvx.
6 * srvx is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; either version 2 of the License, or
9 * (at your option) any later version.
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
20 #if defined(HAVE_SYS_MMAN_H)
21 # include <sys/mman.h>
24 #if !defined(HAVE_MMAP)
25 # error The slab allocator requires that your system have the mmap() system call.
/* Magic values stamped into each allocation's debug header so that
 * slab_free()/verify() can detect double-frees and writes through
 * stale pointers.  ALLOC_MAGIC marks a live block, FREE_MAGIC a
 * released one. */
32 #define ALLOC_MAGIC 0x1acf
33 #define FREE_MAGIC 0xfc1d
/* Bitfield members of the per-allocation debug header (the enclosing
 * struct declaration is elided from this listing — note the gaps in
 * the embedded line numbers):
 *   file_id - index into file_ids[] naming the allocating source file
 *   size    - requested allocation size; 24 bits wide, which is why
 *             slab_malloc() asserts size < 1 << 24
 *   line    - source line of the allocation call
 *   magic   - ALLOC_MAGIC while live, FREE_MAGIC after free */
36 unsigned int file_id : 8;
37 unsigned int size : 24;
38 unsigned int line : 16;
39 unsigned int magic : 16;
/* Reverse map: file id -> file name.  256 entries matches the 8-bit
 * file_id bitfield in the allocation header. */
42 static const char *file_ids[256];
/* Forward map: file name -> id, kept sorted by name for bsearch().
 * NOTE(review): the struct's members and the array declaration that
 * follows are elided from this listing; presumably it holds a name
 * pointer and an id — confirm against the full source. */
43 static struct file_id_entry {
/* Number of entries currently used in both maps. */
47 unsigned int file_ids_used;
/* qsort()/bsearch() comparator for file_id_map entries: compares the
 * file-name string each entry starts with.  (The return type line and
 * braces are elided from this listing.) */
50 file_id_cmp(const void *a_, const void *b_)
52 return strcmp(*(const char**)a_, *(const char**)b_);
/* Return the small integer id for source file name `fname`, assigning
 * a new id on first sight.  Looks up the (sorted) file_id_map first;
 * on a miss, appends a new entry and re-sorts so future bsearch()
 * calls keep working.  NOTE(review): several lines (the early-return
 * on a bsearch hit, entry->name assignment, braces) are elided from
 * this listing. */
56 get_file_id(const char *fname)
58 struct file_id_entry *entry;
/* Sorted lookup by name; comparator dereferences the pointers, so we
 * pass the address of fname. */
60 entry = bsearch(&fname, file_id_map, file_ids_used, sizeof(file_id_map[0]), file_id_cmp);
/* Miss: claim the next free slot in both maps. */
63 entry = file_id_map + file_ids_used;
64 file_ids[file_ids_used] = fname;
66 entry->id = file_ids_used;
/* Bump the count and restore sorted order for the next bsearch(). */
67 qsort(file_id_map, ++file_ids_used, sizeof(file_id_map[0]), file_id_cmp);
68 return file_ids_used - 1;
/* With slab debugging enabled, the per-allocation header is the
 * bitfield struct above ... */
71 typedef struct alloc_header alloc_header_t;
/* ... otherwise it degenerates to a bare size_t holding the size.
 * (The #if/#else directives selecting between these are elided from
 * this listing.) */
75 typedef size_t alloc_header_t;
/* Member of struct slab (declaration elided): back-pointer to the
 * owning slabset. */
80 struct slabset *parent;
/* Member of struct slabset (declaration elided): how many objects fit
 * in one page-sized slab. */
92 size_t items_per_slab;
/* Minimum object size: room for two pointers (the free list threads a
 * next-pointer through each free object). */
95 #define SLAB_MIN (2 * sizeof(void*))
/* Size classes are spaced one pointer-size apart, and objects are
 * aligned to the same grain. */
96 #define SLAB_GRAIN sizeof(void*)
97 #define SLAB_ALIGN SLAB_GRAIN
98 #define SMALL_CUTOFF 512
99 /* Element size < SMALL_CUTOFF -> use small slabs.
100 * Larger elements are allocated directly using mmap(). The largest
101 * regularly allocated struct in srvx 1.x is smaller than
102 * SMALL_CUTOFF, so there is not much point in coding support for
/* One slabset per size class below the cutoff, created lazily by
 * slabset_create(). */
106 static struct slabset *little_slabs[SMALL_CUTOFF / SLAB_GRAIN];
/* Bootstrap slabset used to allocate the slabset structs themselves. */
107 static struct slabset slabset_slabs;
/* Global accounting: live allocation count and total payload bytes. */
108 unsigned long alloc_count;
109 unsigned long alloc_size;
/* Portability shim: some systems spell MAP_ANON as MAP_ANONYMOUS. */
111 #if defined(MAP_ANON)
112 #elif defined(MAP_ANONYMOUS)
113 # define MAP_ANON MAP_ANONYMOUS
/* Cached system page size; computed once on first call. */
121 static size_t pagesize;
/* Body of slab_pagesize() (signature and the `pagesize ||` prefix of
 * this short-circuit chain are elided from this listing): try each
 * available API in turn, caching the first nonzero answer. */
123 #if defined(HAVE_GETPAGESIZE)
124 || (pagesize = getpagesize())
126 #if defined(HAVE_SYSCONF) && defined(_SC_PAGESIZE)
127 || (pagesize = sysconf(_SC_PAGESIZE))
129 #if defined(HAVE_SYSCONF) && defined(_SC_PAGE_SIZE)
130 || (pagesize = sysconf(_SC_PAGE_SIZE))
/* No API worked: complain in debug builds, fall back to 4 KiB. */
133 assert(0 && "unable to find system page size");
134 return pagesize = 4096;
/* Round `size` up to a whole number of pages.  Relies on the page
 * size being a power of two. */
138 slab_round_up(size_t size)
140 return (size + slab_pagesize() - 1) & ~(slab_pagesize() - 1);
/* Obtain `length` bytes of zeroed, page-aligned memory from the OS
 * via mmap().  On systems without MAP_ANON support this falls back to
 * mapping /dev/zero (the #ifdef lines selecting the fallback are
 * elided from this listing).  Both failure paths log fatally, so this
 * never returns NULL to the caller. */
144 slab_map(size_t length)
/* File descriptor for the /dev/zero fallback, opened once. */
146 static int mmap_fd = -1;
/* 0 == O_RDONLY; spelled numerically in the original. */
151 mmap_fd = open("/dev/zero", 0);
/* BUG FIX: was strerror(errno()) — errno is a macro/lvalue, not a
 * function, so the call form cannot compile; use errno directly as
 * the mmap failure path below already does. */
153 log_module(MAIN_LOG, LOG_FATAL, "Unable to open /dev/zero for mmap: %s", strerror(errno));
156 res = mmap(0, length, PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANON, mmap_fd, 0);
157 if (res == MAP_FAILED)
158 log_module(MAIN_LOG, LOG_FATAL, "Unable to mmap %lu bytes (%s).", (unsigned long)length, strerror(errno));
/* Forward declarations: slabset_create() and slab_malloc()/slab_free()
 * below call into the allocator core defined afterwards. */
162 static void *slab_alloc(struct slabset *sset);
163 static void slab_unalloc(void *ptr, size_t size);
/* Return the slabset serving objects of `size` bytes, creating it on
 * first use.  Slabset structs are themselves slab-allocated from the
 * static bootstrap slabset `slabset_slabs`, which is initialized
 * inline the first time through.  NOTE(review): braces and some
 * statements are elided from this listing. */
165 static struct slabset *
166 slabset_create(size_t size)
/* Normalize to the size-class grain, with a floor of SLAB_MIN so the
 * free list's embedded next-pointer always fits. */
170 size = (size < SLAB_MIN) ? SLAB_MIN : (size + SLAB_GRAIN - 1) & ~(SLAB_GRAIN - 1);
171 idx = size / SLAB_GRAIN;
172 assert(idx < ArrayLength(little_slabs));
173 if (!little_slabs[idx]) {
/* Bootstrap: a zero size means slabset_slabs itself has not been set
 * up yet. */
174 if (!slabset_slabs.size) {
175 unsigned int idx2 = (sizeof(struct slabset) + SLAB_GRAIN - 1) / SLAB_GRAIN;
176 slabset_slabs.size = sizeof(struct slabset);
/* Items per page: usable bytes (page minus the struct slab footer)
 * divided by the aligned object size. */
177 slabset_slabs.items_per_slab = (slab_pagesize() - sizeof(struct slab)) / ((sizeof(struct slabset) + SLAB_ALIGN - 1) & ~(SLAB_ALIGN - 1));
/* Register the bootstrap slabset under its own size class so a later
 * request for sizeof(struct slabset) finds it. */
178 little_slabs[idx2] = &slabset_slabs;
/* NOTE(review): presumably guarded by `if (idx == idx2)` or similar
 * in the elided lines — confirm against the full source. */
180 return &slabset_slabs;
/* Normal path: allocate and initialize a new slabset for this class. */
182 little_slabs[idx] = slab_alloc(&slabset_slabs);
183 little_slabs[idx]->size = size;
184 little_slabs[idx]->items_per_slab = (slab_pagesize() - sizeof(struct slab)) / ((size + SLAB_ALIGN - 1) & ~(SLAB_ALIGN - 1));
186 return little_slabs[idx];
/* Pop one object from `sset`, zeroed.  Each slab is a single mmap'd
 * page: objects grow from the bottom, and the struct slab bookkeeping
 * footer lives at the top of the page.  Free objects form an
 * intrusive linked list threaded through their own first word.
 * NOTE(review): braces, declarations and several statements are
 * elided from this listing (note the gaps in the embedded line
 * numbers). */
190 slab_alloc(struct slabset *sset)
/* No slab, or the head slab is full: map a fresh page. */
195 if (!sset->child || !sset->child->free) {
196 unsigned int ii, step;
198 /* Allocate new slab. */
199 item = slab_map(slab_pagesize());
/* Bookkeeping footer sits at the end of the page. */
200 slab = (struct slab*)((char*)item + slab_pagesize() - sizeof(*slab));
203 /* Populate free list. */
/* step = aligned object size; thread each object's first word to
 * point at the next object. */
204 step = (sset->size + SLAB_ALIGN - 1) & ~(SLAB_ALIGN - 1);
205 for (ii = 1, item = slab->free = slab->base;
206 ii < sset->items_per_slab;
207 ++ii, item = (*item = (char*)item + step));
209 /* Link to parent slabset. */
211 if ((slab->prev = sset->child)) {
212 slab->next = slab->prev->next;
213 slab->prev->next = slab;
215 slab->next->prev = slab;
/* Allocation proper happens in elided lines: pop sset->child->free. */
/* NOTE(review): post-increment compares the PRE-increment count
 * against capacity, which looks one allocation late (an exactly-full
 * slab would not be detected until the next alloc) — but the elided
 * context may account for this; confirm against the full source
 * before "fixing". */
223 if (slab->used++ == sset->items_per_slab) {
224 if (sset->child != slab) {
225 /* Unlink slab and reinsert before sset->child. */
227 slab->prev->next = slab->next;
229 slab->next->prev = slab->prev;
230 if ((slab->prev = sset->child->prev))
231 slab->prev->next = slab;
232 if ((slab->next = sset->child))
233 slab->next->prev = slab;
234 } else if (slab->next) {
235 /* Advance sset->child to next pointer. */
236 sset->child = slab->next;
/* Hand back a zeroed object. */
239 memset(item, 0, sset->size);
/* Return the object at `ptr` (of size-class `size`) to its slab, and
 * re-sort the slab within its slabset's list so fuller slabs stay
 * near the head.  NOTE(review): braces and several statements are
 * elided from this listing. */
244 slab_unalloc(void *ptr, size_t size)
247 struct slab *slab, *new_next;
250 assert(size < SMALL_CUTOFF);
/* Recover the slab footer: round ptr up to the end of its page, then
 * back off by the footer size. */
251 slab = (struct slab*)((((unsigned long)ptr | (slab_pagesize() - 1)) + 1) - sizeof(*slab));
/* Poison the freed payload (past the embedded free-list pointer) to
 * catch use-after-free. */
253 memset(item + 1, 0xde, size - sizeof(*item));
/* Slab was full and is not already at the head: move it to the front
 * so it is preferred for the next allocation. */
256 if (slab->used-- == slab->parent->items_per_slab
257 && slab->parent->child != slab) {
258 new_next = slab->parent->child;
259 slab->parent->child = slab;
/* Slab is now empty: find the insertion point after the last
 * in-use slab, so empty slabs collect at the tail. */
260 } else if (!slab->used) {
261 for (new_next = slab;
262 new_next->next && new_next->next->used;
263 new_next = new_next->next) ;
264 new_next = new_next->next;
/* Splice the slab out of its current position and relink it next to
 * new_next (some surrounding statements elided). */
270 slab->prev->next = slab->next;
272 slab->next->prev = slab->prev;
273 if ((slab->prev = new_next->prev))
274 slab->prev->next = slab;
275 if ((slab->next = new_next->next))
276 slab->next->prev = slab;
/* Allocate `size` bytes on behalf of file:line.  A header
 * (alloc_header_t) is prepended to every allocation; small requests
 * come from a slabset, large ones straight from mmap().
 * NOTE(review): braces, declarations and the accounting/return lines
 * are elided from this listing. */
281 slab_malloc(const char *file, unsigned int line, size_t size)
/* The debug header's size bitfield is 24 bits wide. */
286 assert(size < 1 << 24);
/* Total footprint: payload + header, rounded to the size grain. */
287 real = (size + sizeof(*res) + SLAB_GRAIN - 1) & ~(SLAB_GRAIN - 1);
288 if (real < SMALL_CUTOFF)
289 res = slab_alloc(slabset_create(real));
/* Large allocation: whole pages direct from the OS. */
291 res = slab_map(slab_round_up(real));
/* Debug-build header stamping (the #if around these is elided). */
293 res->file_id = get_file_id(file);
296 res->magic = ALLOC_MAGIC;
/* Non-debug build: header is just a size_t; silence unused-arg
 * warnings. */
299 (void)file; (void)line;
/* Reallocate `ptr` to `size` bytes, preserving contents.
 * NOTE(review): the lines computing the original block's size (and an
 * early return when it already suffices) are elided from this
 * listing. */
307 slab_realloc(const char *file, unsigned int line, void *ptr, size_t size)
309 alloc_header_t *orig;
/* realloc(NULL, n) behaves like malloc(n). */
314 return slab_malloc(file, line, size);
/* Step back from the payload to the prepended header. */
317 orig = (alloc_header_t*)ptr - 1;
325 newblock = slab_malloc(file, line, size);
/* NOTE(review): this copies `size` (the NEW size) bytes out of the
 * OLD block — if the old block is smaller, that reads past its end.
 * The elided lines presumably compute the old size; the copy length
 * should be min(old, new).  Confirm against the full source. */
326 memcpy(newblock, ptr, size);
/* strdup() replacement that routes through slab_malloc() so the copy
 * is tracked against file:line.  (Braces, declarations and the return
 * statement are elided from this listing.) */
331 slab_strdup(const char *file, unsigned int line, const char *src)
/* +1 for the NUL terminator, which memcpy below carries over. */
336 len = strlen(src) + 1;
337 target = slab_malloc(file, line, len);
338 memcpy(target, src, len);
/* Free an allocation made by slab_malloc(): recover the prepended
 * header, recompute the rounded footprint, and return the memory to
 * its slab (small) or unmap it (large).  NOTE(review): braces,
 * NULL-check, magic-value assertion and the #if/#else around the two
 * header layouts are elided from this listing. */
343 slab_free(UNUSED_ARG(const char *file), UNUSED_ARG(unsigned int line), void *ptr)
351 hdr = (alloc_header_t*)ptr - 1;
/* Mark the header freed so a double-free trips the magic check. */
353 hdr->magic = FREE_MAGIC;
/* Debug build: size lives in the header bitfield... */
354 real = hdr->size + sizeof(*hdr);
/* ...non-debug build: header is a bare size_t. */
356 real = *hdr + sizeof(*hdr);
/* Must mirror slab_malloc()'s rounding to hit the same size class. */
358 real = (real + SLAB_GRAIN - 1) & ~(SLAB_GRAIN - 1);
359 if (real < SMALL_CUTOFF)
360 slab_unalloc(hdr, real);
362 munmap(hdr, slab_round_up(real));
/* Global accounting: subtract the payload (header excluded). */
364 alloc_size -= real - sizeof(*hdr);
/* Sanity-check a pointer returned by slab_malloc(): validate the
 * header magic and file id, then check alignment/size-class
 * invariants.  NOTE(review): this function is truncated in this
 * listing (it continues past the last visible line) and has elided
 * interior lines, including the #if/#else selecting the header
 * layout. */
368 verify(const void *ptr)
376 hdr = (alloc_header_t*)ptr - 1;
/* Debug-header layout: recompute footprint and check the stamps. */
378 real = hdr->size + sizeof(*hdr);
379 assert(hdr->file_id < file_ids_used);
380 assert(hdr->magic == ALLOC_MAGIC);
/* Non-debug layout: header is a bare size_t. */
382 real = *hdr + sizeof(*hdr);
384 real = (real + SLAB_GRAIN - 1) & ~(SLAB_GRAIN - 1);
/* Large allocations are page-aligned mappings, so the payload must
 * sit exactly one header past a page boundary. */
386 if (real >= SMALL_CUTOFF)
387 assert(((unsigned long)ptr & (slab_pagesize() - 1)) == sizeof(*hdr));
/* Small allocations: the owning slab's size class must match. */
392 expected = (real + SLAB_GRAIN - 1) & ~(SLAB_GRAIN - 1);
393 slab = (struct slab*)((((unsigned long)ptr | (slab_pagesize() - 1)) + 1) - sizeof(*slab));
394 assert(slab->parent->size == expected);