1 /* alloc-slab.c - Slab debugging allocator
2 * Copyright 2005 srvx Development Team
4 * This file is part of srvx.
6 * srvx is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; either version 2 of the License, or
9 * (at your option) any later version.
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
20 #if defined(HAVE_SYS_MMAN_H)
21 # include <sys/mman.h>
24 #if !defined(HAVE_MMAP)
25 # error The slab allocator requires that your system have the mmap() system call.
/* Fragments of the allocator's bookkeeping structures.  NOTE(review):
 * the enclosing struct declarations are truncated in this excerpt --
 * `parent` appears to be a member of struct slab pointing back at the
 * owning slabset, and `items_per_slab` a member of struct slabset
 * giving how many objects fit in one page; confirm against the full file. */
struct slabset *parent;
size_t items_per_slab;
/* Sizing parameters for the small-object slabs. */
#define SLAB_MIN (2 * sizeof(void*))   /* smallest allocation a slab serves */
#define SLAB_GRAIN sizeof(void*)       /* size classes step by one pointer */
#define SLAB_ALIGN SLAB_GRAIN          /* object alignment within a slab */
#define SMALL_CUTOFF 512
/* Element size < SMALL_CUTOFF -> use small slabs.
 * Larger elements are allocated directly using mmap(). The largest
 * regularly allocated struct in srvx 1.x is smaller than
 * SMALL_CUTOFF, so there is not much point in coding support for
 * anything bigger. */
/* One slabset per small size class, plus a dedicated slabset that the
 * allocator uses to allocate struct slabset objects themselves. */
static struct slabset *little_slabs[SMALL_CUTOFF / SLAB_GRAIN];
static struct slabset slabset_slabs;
/* Running allocation statistics; non-static, so presumably read by
 * other modules -- TODO confirm. */
unsigned long alloc_count;
unsigned long alloc_size;
/* Portability shim: some platforms spell MAP_ANON as MAP_ANONYMOUS. */
#elif defined(MAP_ANONYMOUS)
# define MAP_ANON MAP_ANONYMOUS

/* Cached system page size.  NOTE(review): in the full file this is
 * presumably a static local inside a slab_pagesize() function whose
 * header line is missing from this excerpt -- confirm. */
static size_t pagesize;
/* Try each available page-size API in turn until one yields nonzero. */
#if defined(HAVE_GETPAGESIZE)
    || (pagesize = getpagesize())
#if defined(HAVE_SYSCONF) && defined(_SC_PAGESIZE)
    || (pagesize = sysconf(_SC_PAGESIZE))
#if defined(HAVE_SYSCONF) && defined(_SC_PAGE_SIZE)
    || (pagesize = sysconf(_SC_PAGE_SIZE))
/* No API worked: complain in debug builds, then fall back to 4 KiB. */
assert(0 && "unable to find system page size");
return pagesize = 4096;
/* Round `size` up to a whole multiple of the system page size.
 * Relies on the page size being a power of two (mask arithmetic).
 * NOTE(review): the excerpt dropped the storage class/return type and
 * braces; reconstructed here to match the file's other helpers. */
static size_t
slab_round_up(size_t size)
{
    return (size + slab_pagesize() - 1) & ~(slab_pagesize() - 1);
}
93 slab_map(size_t length)
95 static int mmap_fd = -1;
100 mmap_fd = open("/dev/zero", 0);
102 log_module(MAIN_LOG, LOG_FATAL, "Unable to open /dev/zero for mmap: %s", strerror(errno()));
105 res = mmap(0, length, PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANON, mmap_fd, 0);
106 if (res == MAP_FAILED)
107 log_module(MAIN_LOG, LOG_FATAL, "Unable to mmap %lu bytes (%s).", (unsigned long)length, strerror(errno));
/* Internal workhorses: take one object from a slabset / give one back. */
static void *slab_alloc(struct slabset *sset);
static void slab_unalloc(void *ptr, size_t size);
/* Find or create the slabset serving allocations of `size` bytes.
 * Rounds `size` up to the slab grain, then lazily initializes the
 * bootstrap slabset (slabset_slabs, which allocates slabsets
 * themselves) and the per-size-class entry in little_slabs[].
 * NOTE(review): local declarations and several closing braces are
 * missing from this excerpt. */
static struct slabset *
slabset_create(size_t size)
/* Clamp to the minimum and round up to the next grain boundary. */
size = (size < SLAB_MIN) ? SLAB_MIN : (size + SLAB_GRAIN - 1) & ~(SLAB_GRAIN - 1);
idx = size / SLAB_GRAIN;
assert(idx < ArrayLength(little_slabs));
if (!little_slabs[idx]) {
    /* Bootstrap: the slabset that holds struct slabset objects must
     * exist before any other slabset can be allocated. */
    if (!slabset_slabs.size) {
        unsigned int idx2 = (sizeof(struct slabset) + SLAB_GRAIN - 1) / SLAB_GRAIN;
        slabset_slabs.size = sizeof(struct slabset);
        slabset_slabs.items_per_slab = (slab_pagesize() - sizeof(struct slab)) / ((sizeof(struct slabset) + SLAB_ALIGN - 1) & ~(SLAB_ALIGN - 1));
        little_slabs[idx2] = &slabset_slabs;
    /* Request was for the bootstrap size class itself. */
    return &slabset_slabs;
/* Allocate and initialize the slabset for this size class. */
little_slabs[idx] = slab_alloc(&slabset_slabs);
little_slabs[idx]->size = size;
little_slabs[idx]->items_per_slab = (slab_pagesize() - sizeof(struct slab)) / ((size + SLAB_ALIGN - 1) & ~(SLAB_ALIGN - 1));
return little_slabs[idx];
/* Pop one zero-filled object from `sset`, mapping a fresh page when no
 * slab has a free object.  NOTE(review): the return-type line, local
 * declarations, the free-list pop, and several braces/guards are
 * missing from this excerpt; comments describe only what is visible. */
slab_alloc(struct slabset *sset)
if (!sset->child || !sset->child->free) {
    unsigned int ii, step;

    /* Allocate new slab. */
    item = slab_map(slab_pagesize());
    /* The struct slab header lives at the tail end of the page. */
    slab = (struct slab*)((char*)item + slab_pagesize() - sizeof(*slab));

    /* Populate free list. */
    /* Each free object stores a pointer to the next free object. */
    step = (sset->size + SLAB_ALIGN - 1) & ~(SLAB_ALIGN - 1);
    for (ii = 1, item = slab->free = slab->base;
         ii < sset->items_per_slab;
         ++ii, item = (*item = (char*)item + step));

    /* Link to parent slabset. */
    if ((slab->prev = sset->child)) {
        slab->next = slab->prev->next;
        slab->prev->next = slab;
        /* NOTE(review): unguarded deref; an `if (slab->next)` line was
         * presumably dropped from this excerpt -- confirm. */
        slab->next->prev = slab;
/* Slab just became full: move it away from the front position so
 * allocations keep hitting slabs with free objects. */
if (slab->used++ == sset->items_per_slab) {
    if (sset->child != slab) {
        /* Unlink slab and reinsert before sset->child. */
        /* NOTE(review): `if (slab->prev)` / `if (slab->next)` guards
         * appear to be missing before these two lines -- confirm. */
        slab->prev->next = slab->next;
        slab->next->prev = slab->prev;
        if ((slab->prev = sset->child->prev))
            slab->prev->next = slab;
        if ((slab->next = sset->child))
            slab->next->prev = slab;
    } else if (slab->next) {
        /* Advance sset->child to next pointer. */
        sset->child = slab->next;
/* Hand the object back zeroed. */
memset(item, 0, sset->size);
/* Return `ptr` (rounded size `size`) to its slab's free list and
 * reposition the slab within its slabset's doubly linked list.
 * NOTE(review): the free-list push, several guards, and closing braces
 * are missing from this excerpt; comments describe only what is visible. */
slab_unalloc(void *ptr, size_t size)
struct slab *slab, *new_next;

assert(size < SMALL_CUTOFF);
/* The slab header sits at the tail of the page containing ptr. */
slab = (struct slab*)((((unsigned long)ptr | (slab_pagesize() - 1)) + 1) - sizeof(*slab));

if (slab->used-- == slab->parent->items_per_slab
    && slab->parent->child != slab) {
    /* Slab went from full to having one free object: make it the
     * slabset's preferred (front) slab. */
    new_next = slab->parent->child;
    slab->parent->child = slab;
} else if (!slab->used) {
    /* Slab is now completely empty: walk past every still-used slab
     * so it is reinserted at the back. */
    for (new_next = slab;
         new_next->next && new_next->next->used;
         new_next = new_next->next) ;
    new_next = new_next->next;
/* Splice the slab out of its old list position...
 * NOTE(review): null guards on prev/next appear to be missing here. */
slab->prev->next = slab->next;
slab->next->prev = slab->prev;
/* ...and relink it adjacent to new_next. */
if ((slab->prev = new_next->prev))
    slab->prev->next = slab;
if ((slab->next = new_next->next))
    slab->next->prev = slab;
/* Allocate `size` bytes, reserving a size_t header in front of the
 * user block so slab_free()/slab_realloc() can recover the size.
 * Small totals come from a slab; large ones straight from mmap.
 * NOTE(review): the lines storing the size header and returning the
 * user pointer are missing from this excerpt. */
slab_malloc(UNUSED_ARG(const char *file), UNUSED_ARG(unsigned int line), size_t size)
/* Total footprint includes the hidden size header. */
real = size + sizeof(size_t);
if (real < SMALL_CUTOFF)
    res = slab_alloc(slabset_create(real));
    /* NOTE(review): an `else` line presumably preceded this one. */
    res = slab_map(slab_round_up(real));
/* Reallocate `ptr` to `size` bytes, copying the old contents into a
 * freshly allocated block.  NOTE(review): the NULL/shrink early-out
 * condition, the free of the old block, and the return statement are
 * missing from this excerpt. */
slab_realloc(const char *file, unsigned int line, void *ptr, size_t size)
size_t orig, *newblock;

/* (guard condition truncated) realloc of NULL acts like malloc. */
    return slab_malloc(file, line, size);

/* Recover the stored size of the existing block from its header. */
orig = ((size_t*)ptr)[-1];

newblock = slab_malloc(file, line, size);
memcpy(newblock, ptr, orig);
/* Duplicate `src` (including its terminating NUL) into memory obtained
 * from slab_malloc().  NOTE(review): the return statement is missing
 * from this excerpt. */
slab_strdup(const char *file, unsigned int line, const char *src)
len = strlen(src) + 1;
target = slab_malloc(file, line, len);
memcpy(target, src, len);
/* Release a block from slab_malloc(): step back to the hidden size
 * header, then return it to its slab or munmap it depending on size.
 * NOTE(review): a NULL-pointer guard and statistics updates appear to
 * be missing from this excerpt. */
slab_free(UNUSED_ARG(const char *file), UNUSED_ARG(unsigned int line), void *ptr)
/* The size_t immediately before ptr holds the user-requested size. */
size = (size_t*)ptr - 1;
real = *size + sizeof(size_t);
if (real < SMALL_CUTOFF)
    slab_unalloc(size, real);
    /* NOTE(review): an `else` line presumably preceded this one. */
    munmap(size, slab_round_up(real));
/* Debug check that `ptr` looks like a live block from slab_malloc().
 * NOTE(review): the return-type line, local declarations, an initial
 * branch, and possibly trailing lines are missing from this excerpt
 * (it may continue past the end of the visible region). */
verify(const void *ptr)
/* Large blocks are mmap()ed page-aligned, so the user pointer sits
 * exactly one size_t past a page boundary. */
else if ((size = ((size_t*)ptr)[-1] + sizeof(size_t)) >= SMALL_CUTOFF)
    assert(((unsigned long)ptr & (slab_pagesize() - 1)) == sizeof(size_t));

/* Small blocks: the owning slabset's rounded size class must match. */
expected = (size + SLAB_GRAIN - 1) & ~(SLAB_GRAIN - 1);
slab = (struct slab*)((((unsigned long)ptr | (slab_pagesize() - 1)) + 1) - sizeof(*slab));
assert(slab->parent->size == expected);