/* alloc-slab.c - Slab debugging allocator
- * Copyright 2005 srvx Development Team
+ * Copyright 2005,2007 srvx Development Team
*
* This file is part of srvx.
*
# error The slab allocator requires that your system have the mmap() system call.
#endif
-#define SLAB_DEBUG 0
+#define SLAB_DEBUG_HEADER 1
+#define SLAB_DEBUG_LOG 2
+#define SLAB_DEBUG_PERMS 4
-#if SLAB_DEBUG
+#if !defined(SLAB_DEBUG)
+# define SLAB_DEBUG 0
+#endif
+
+#if !defined(SLAB_RESERVE)
+# define SLAB_RESERVE 0
+#endif
+
+#if !defined(MAX_SLAB_FREE)
+# define MAX_SLAB_FREE 1024
+#endif
-#define ALLOC_MAGIC 0x1acf
-#define FREE_MAGIC 0xfc1d
+#if SLAB_DEBUG & SLAB_DEBUG_HEADER
+
+#define ALLOC_MAGIC 0x1a
+#define FREE_MAGIC 0xcf
struct alloc_header {
- unsigned int file_id : 8;
unsigned int size : 24;
+ unsigned int magic : 8;
+ unsigned int file_id : 8;
unsigned int line : 16;
- unsigned int magic : 16;
};
static const char *file_ids[256];
#define SLAB_MIN (2 * sizeof(void*))
#define SLAB_GRAIN sizeof(void*)
#define SLAB_ALIGN SLAB_GRAIN
-#define SMALL_CUTOFF 582
+#define SMALL_CUTOFF 576
/* Element size < SMALL_CUTOFF -> use small slabs.
* Larger elements are allocated directly using mmap(). The largest
* regularly allocated struct in srvx 1.x is smaller than
*/
static struct slabset little_slabs[SMALL_CUTOFF / SLAB_GRAIN];
-static struct slab *free_slabs;
+static struct slab *free_slab_head;
+static struct slab *free_slab_tail;
+unsigned long free_slab_count;
unsigned long big_alloc_count;
unsigned long big_alloc_size;
unsigned long slab_count;
# define MAP_ANON 0
#endif
+#if SLAB_DEBUG & SLAB_DEBUG_LOG
+
+/* Binary log of allocator events; opened lazily on first use.
+ * Stays NULL if the log file could not be opened.
+ */
+FILE *slab_log;
+
+/* One log record: size > 0 records an allocation, size < 0 a free,
+ * and size == 0 an munmap() of the slab's page.
+ */
+struct slab_log_entry
+{
+    struct timeval tv;
+    void *slab;
+    ssize_t size;
+};
+
+/* atexit() handler: close the log if it was successfully opened. */
+static void
+close_slab_log(void)
+{
+    if (slab_log)
+        fclose(slab_log);
+}
+
+/* Open the slab log on first call, named by $SLAB_LOG_FILE or
+ * defaulting to "slab.log".  Returns non-zero if the stream is
+ * usable.  Only one open is attempted; a failed fopen() disables
+ * logging rather than letting fwrite(..., NULL) crash later.
+ */
+static int
+open_slab_log(void)
+{
+    static int tried_open;
+
+    if (!slab_log && !tried_open)
+    {
+        const char *fname;
+
+        tried_open = 1;
+        fname = getenv("SLAB_LOG_FILE");
+        if (!fname)
+            fname = "slab.log";
+        slab_log = fopen(fname, "w");
+        if (slab_log)
+            atexit(close_slab_log);
+    }
+    return slab_log != NULL;
+}
+
+/* Record an allocation of SIZE bytes based at SLAB. */
+static void
+slab_log_alloc(void *slab, size_t size)
+{
+    struct slab_log_entry sle;
+
+    if (!open_slab_log())
+        return;
+    gettimeofday(&sle.tv, NULL);
+    sle.slab = slab;
+    sle.size = (ssize_t)size;
+    fwrite(&sle, sizeof(sle), 1, slab_log);
+}
+
+/* Record a free of SIZE bytes based at SLAB. */
+static void
+slab_log_free(void *slab, size_t size)
+{
+    struct slab_log_entry sle;
+
+    if (!open_slab_log())
+        return;
+    gettimeofday(&sle.tv, NULL);
+    sle.slab = slab;
+    sle.size = -(ssize_t)size;
+    fwrite(&sle, sizeof(sle), 1, slab_log);
+}
+
+/* Record that SLAB's page is being returned to the kernel. */
+static void
+slab_log_unmap(void *slab)
+{
+    struct slab_log_entry sle;
+
+    if (!open_slab_log())
+        return;
+    gettimeofday(&sle.tv, NULL);
+    sle.slab = slab;
+    sle.size = 0;
+    fwrite(&sle, sizeof(sle), 1, slab_log);
+}
+
+#else
+# define slab_log_alloc(SLAB, SIZE)
+# define slab_log_free(SLAB, SIZE)
+# define slab_log_unmap(SLAB)
+#endif
+
+#if (SLAB_DEBUG & SLAB_DEBUG_PERMS) && defined(HAVE_MPROTECT)
+
+/* Revoke all access to SLAB's page so stale pointers into a freed
+ * slab fault immediately.  mprotect() requires a page-aligned
+ * address, so the region must start at slab->base (the start of the
+ * page) rather than at the struct slab header, which sits near the
+ * end of the page; passing the header pointer makes mprotect() fail
+ * with EINVAL and silently disables the protection.
+ */
+static void
+slab_protect(struct slab *slab)
+{
+    mprotect(slab->base, (char*)(slab + 1) - (char*)slab->base, PROT_NONE);
+}
+
+/* Restore read/write access to SLAB's page before the allocator
+ * touches its header or free list again.
+ */
+static void
+slab_unprotect(struct slab *slab)
+{
+    mprotect(slab->base, (char*)(slab + 1) - (char*)slab->base, PROT_READ | PROT_WRITE);
+}
+
+#else
+# define slab_protect(SLAB) (void)(SLAB)
+# define slab_unprotect(SLAB) (void)(SLAB)
+#endif
+
static size_t
slab_pagesize(void)
{
unsigned int ii, step;
/* Allocate new slab. */
- if (free_slabs) {
- slab = free_slabs;
- free_slabs = slab->next;
+ if (free_slab_head) {
+ slab = free_slab_head;
+ slab_unprotect(slab);
+ if (!(free_slab_head = slab->next))
+ free_slab_tail = NULL;
} else {
item = slab_map(slab_pagesize());
slab = (struct slab*)((char*)item + slab_pagesize() - sizeof(*slab));
slab->base = item;
+ slab_count++;
}
+ slab_log_alloc(slab, sset->size);
/* Populate free list. */
step = (sset->size + SLAB_ALIGN - 1) & ~(SLAB_ALIGN - 1);
assert(!slab->prev || slab == slab->prev->next);
sset->child = slab;
sset->nslabs++;
- slab_count++;
- /* log_module(MAIN_LOG, LOG_DEBUG, "Allocated new %u-slab %p.", sset->size, slab); */
}
slab = sset->child;
<= (slab_pagesize() - sizeof(*slab) - sset->size));
slab->free = *item;
if (++slab->used == sset->items_per_slab) {
- /* log_module(MAIN_LOG, LOG_DEBUG, "%u-slab %p is empty.", sset->size, item); */
if (sset->child != slab) {
/* Unlink slab and reinsert before sset->child. */
if (slab->prev)
static void
slab_unalloc(void *ptr, size_t size)
{
- void **item;
struct slab *slab, *new_next;
- item = ptr;
assert(size < SMALL_CUTOFF);
slab = (struct slab*)((((unsigned long)ptr | (slab_pagesize() - 1)) + 1) - sizeof(*slab));
- *item = slab->free;
- memset(item + 1, 0xde, size - sizeof(*item));
- slab->free = item;
+ *(void**)ptr = slab->free;
+ slab->free = ptr;
slab->parent->nallocs--;
if (slab->used-- == slab->parent->items_per_slab
slab->parent->child = slab;
assert(!slab->next || slab == slab->next->prev);
assert(!slab->prev || slab == slab->prev->next);
- /* log_module(MAIN_LOG, LOG_DEBUG, "%u-slab %p became partial.", slab->parent->size, slab); */
} else if (!slab->used) {
- /* log_module(MAIN_LOG, LOG_DEBUG, "%u-slab %p became full.", slab->parent->size, slab); */
+ slab_log_free(slab, size);
+
/* Unlink slab from its parent. */
slab->parent->nslabs--;
- slab_count--;
if (slab->prev)
slab->prev->next = slab->next;
if (slab->next)
assert(!new_next->prev || new_next == new_next->prev->next);
}
+#if SLAB_RESERVE
+    /* Top up the reserve of free slab pages so later allocations can
+     * be satisfied without another mmap(). */
+    while (free_slab_count < SLAB_RESERVE) {
+        struct slab *tslab;
+        void *item;
+
+        item = slab_map(slab_pagesize());
+        /* The slab header lives at the end of its own page.  Use
+         * sizeof(*tslab), not the unrelated outer variable "slab". */
+        tslab = (struct slab*)((char*)item + slab_pagesize() - sizeof(*tslab));
+        tslab->base = item;
+        /* Append to the tail of the free-slab list. */
+        tslab->prev = free_slab_tail;
+        free_slab_tail = tslab;
+        if (!free_slab_head)
+            free_slab_head = tslab;
+        else {
+            slab_unprotect(tslab->prev);
+            tslab->prev->next = tslab;
+            slab_protect(tslab->prev);
+        }
+        free_slab_count++;
+        slab_count++;
+    }
+#endif
+
/* Link to list of free slabs. */
- slab->prev = NULL;
slab->parent = NULL;
- slab->next = free_slabs;
- free_slabs = slab;
+ slab->next = NULL;
+ slab->prev = free_slab_tail;
+ if (slab->prev) {
+ slab_unprotect(slab->prev);
+ slab->prev->next = slab;
+ slab_protect(slab->prev);
+ } else
+ free_slab_head = slab;
+ slab_protect(slab);
+ free_slab_tail = slab;
+ free_slab_count++;
+
+#if MAX_SLAB_FREE >= 0
+    /* Unlink and unmap old slabs, so accesses to stale-enough
+     * pointers will fault. */
+    while (free_slab_count > MAX_SLAB_FREE) {
+        struct slab *tslab;
+
+        tslab = free_slab_tail;
+        slab_unprotect(tslab);
+        free_slab_tail = tslab->prev;
+        if (tslab->prev) {
+            slab_unprotect(tslab->prev);
+            tslab->prev->next = NULL;
+            slab_protect(tslab->prev);
+        } else
+            free_slab_head = NULL;
+        free_slab_count--;
+        slab_count--;
+        /* Log and unmap the slab just unlinked (tslab), not the
+         * caller's "slab": after the first iteration they differ,
+         * and reusing "slab" would munmap the same page repeatedly
+         * while leaking the pages actually removed from the list. */
+        slab_log_unmap(tslab);
+        munmap(tslab->base, slab_pagesize());
+    }
+#endif
}
+ (void)size;
}
void *
res = slab_map(slab_round_up(real));
big_alloc_count++;
big_alloc_size += size;
+ slab_log_alloc(res, size);
}
-#if SLAB_DEBUG
+#if SLAB_DEBUG & SLAB_DEBUG_HEADER
res->file_id = get_file_id(file);
res->size = size;
res->line = line;
verify(ptr);
orig = (alloc_header_t*)ptr - 1;
-#if SLAB_DEBUG
+#if SLAB_DEBUG & SLAB_DEBUG_HEADER
osize = orig->size;
#else
osize = *orig;
return ptr;
newblock = slab_malloc(file, line, size);
memcpy(newblock, ptr, osize);
+ slab_free(file, line, ptr);
return newblock;
}
}
void
-slab_free(UNUSED_ARG(const char *file), UNUSED_ARG(unsigned int line), void *ptr)
+slab_free(const char *file, unsigned int line, void *ptr)
{
alloc_header_t *hdr;
- size_t real;
+ size_t user, real;
if (!ptr)
return;
verify(ptr);
hdr = (alloc_header_t*)ptr - 1;
-#if SLAB_DEBUG
+#if SLAB_DEBUG & SLAB_DEBUG_HEADER
+ hdr->file_id = get_file_id(file);
+ hdr->line = line;
hdr->magic = FREE_MAGIC;
- real = hdr->size + sizeof(*hdr);
+ user = hdr->size;
#else
- real = *hdr + sizeof(*hdr);
+ user = *hdr;
+ (void)file; (void)line;
#endif
- real = (real + SLAB_GRAIN - 1) & ~(SLAB_GRAIN - 1);
+ real = (user + sizeof(*hdr) + SLAB_GRAIN - 1) & ~(SLAB_GRAIN - 1);
if (real < SMALL_CUTOFF) {
+ memset(hdr + 1, 0xde, real - sizeof(*hdr));
slab_unalloc(hdr, real);
slab_alloc_count--;
- slab_alloc_size -= real - sizeof(*hdr);
+ slab_alloc_size -= user;
} else {
munmap(hdr, slab_round_up(real));
big_alloc_count--;
- big_alloc_size -= real - sizeof(*hdr);
+ big_alloc_size -= user;
+ slab_log_unmap(hdr);
}
}
+/* Undefine the verify macro in case we're not debugging. */
+#undef verify
void
verify(const void *ptr)
{
return;
hdr = (alloc_header_t*)ptr - 1;
-#if SLAB_DEBUG
+#if SLAB_DEBUG & SLAB_DEBUG_HEADER
real = hdr->size + sizeof(*hdr);
assert(hdr->file_id < file_ids_used);
assert(hdr->magic == ALLOC_MAGIC);