# error The slab allocator requires that your system have the mmap() system call.
#endif
-#define SLAB_DEBUG 1
+#define SLAB_DEBUG_HEADER 1
+#define SLAB_DEBUG_LOG 2
+#define SLAB_DEBUG_PERMS 4
-#if SLAB_DEBUG
+#if !defined(SLAB_DEBUG)
+# define SLAB_DEBUG 0
+#endif
+
+#if !defined(SLAB_RESERVE)
+# define SLAB_RESERVE 0
+#endif
+
+#if !defined(MAX_SLAB_FREE)
+# define MAX_SLAB_FREE 1024
+#endif
-#define ALLOC_MAGIC 0x1acf
-#define FREE_MAGIC 0xfc1d
+#if SLAB_DEBUG & SLAB_DEBUG_HEADER
+
+#define ALLOC_MAGIC 0x1a
+#define FREE_MAGIC 0xcf
+/* Debug header prepended to every allocation when SLAB_DEBUG_HEADER is
+ * enabled.  Records the requested size (slab_malloc asserts
+ * size < 1<<24), a one-byte magic tag (ALLOC_MAGIC / FREE_MAGIC), and
+ * the allocating file (index into file_ids[]) and line. */
struct alloc_header {
- unsigned int file_id : 8;
unsigned int size : 24;
+ unsigned int magic : 8;
+ unsigned int file_id : 8;
unsigned int line : 16;
- unsigned int magic : 16;
};
static const char *file_ids[256];
};
+/* One size class of small allocations: a list of page-sized slabs plus
+ * bookkeeping for that class. */
struct slabset {
- struct slabset *next;
+ /* Head of this set's doubly linked list of slabs. */
struct slab *child;
+ /* Counters: slab pages owned by this set, and live allocations. */
+ size_t nslabs;
+ size_t nallocs;
+ /* Rounded element size and how many elements fit in one slab page. */
size_t size;
size_t items_per_slab;
};
#define SLAB_MIN (2 * sizeof(void*))
#define SLAB_GRAIN sizeof(void*)
#define SLAB_ALIGN SLAB_GRAIN
-#define SMALL_CUTOFF 512
+#define SMALL_CUTOFF 576
/* Element size < SMALL_CUTOFF -> use small slabs.
* Larger elements are allocated directly using mmap(). The largest
* regularly allocated struct in srvx 1.x is smaller than
* larger slabs.
*/
-static struct slabset *little_slabs[SMALL_CUTOFF / SLAB_GRAIN];
-static struct slabset slabset_slabs;
-unsigned long alloc_count;
-unsigned long alloc_size;
+static struct slabset little_slabs[SMALL_CUTOFF / SLAB_GRAIN];
+static struct slab *free_slab_head;
+static struct slab *free_slab_tail;
+unsigned long free_slab_count;
+unsigned long big_alloc_count;
+unsigned long big_alloc_size;
+unsigned long slab_count;
+unsigned long slab_alloc_count;
+unsigned long slab_alloc_size;
#if defined(MAP_ANON)
#elif defined(MAP_ANONYMOUS)
# define MAP_ANON 0
#endif
+#if SLAB_DEBUG & SLAB_DEBUG_LOG
+
+FILE *slab_log;
+
+/* Binary record written to the slab log.  Sign of `size` encodes the
+ * event: positive = slab put into use (slab_log_alloc), negative =
+ * slab emptied (slab_log_free), zero = page unmapped (slab_log_unmap). */
+struct slab_log_entry
+{
+ struct timeval tv;
+ void *slab;
+ ssize_t size;
+};
+
+static void
+close_slab_log(void)
+{
+    /* atexit() handler: flush and close the slab log.  Guard against
+     * the fopen() in slab_log_alloc() having failed -- passing a null
+     * stream to fclose() is undefined behavior. */
+    if (slab_log)
+        fclose(slab_log);
+}
+
+static void
+slab_log_alloc(void *slab, size_t size)
+{
+    /* Append an "allocated" record (positive size) for this slab to the
+     * log file, opening it lazily on first use.  The file name comes
+     * from $SLAB_LOG_FILE, defaulting to "slab.log". */
+    struct slab_log_entry sle;
+
+    gettimeofday(&sle.tv, NULL);
+    sle.slab = slab;
+    sle.size = (ssize_t)size;
+
+    if (!slab_log)
+    {
+        const char *fname;
+        fname = getenv("SLAB_LOG_FILE");
+        if (!fname)
+            fname = "slab.log";
+        slab_log = fopen(fname, "w");
+        /* If the log cannot be opened, drop the record rather than
+         * write through a null stream (undefined behavior); do not
+         * register the atexit handler for a stream we never got. */
+        if (!slab_log)
+            return;
+        atexit(close_slab_log);
+    }
+
+    fwrite(&sle, sizeof(sle), 1, slab_log);
+}
+
+static void
+slab_log_free(void *slab, size_t size)
+{
+    /* Append a "freed" record (negative size) for this slab.  Skip
+     * quietly if the log was never (successfully) opened, since
+     * fwrite() on a null stream is undefined behavior. */
+    struct slab_log_entry sle;
+
+    if (!slab_log)
+        return;
+
+    gettimeofday(&sle.tv, NULL);
+    sle.slab = slab;
+    sle.size = -(ssize_t)size;
+    fwrite(&sle, sizeof(sle), 1, slab_log);
+}
+
+static void
+slab_log_unmap(void *slab)
+{
+    /* Append an "unmapped" record (size == 0) for this slab.  Skip
+     * quietly if the log was never (successfully) opened, since
+     * fwrite() on a null stream is undefined behavior. */
+    struct slab_log_entry sle;
+
+    if (!slab_log)
+        return;
+
+    gettimeofday(&sle.tv, NULL);
+    sle.slab = slab;
+    sle.size = 0;
+    fwrite(&sle, sizeof(sle), 1, slab_log);
+}
+
+#else
+# define slab_log_alloc(SLAB, SIZE)
+# define slab_log_free(SLAB, SIZE)
+# define slab_log_unmap(SLAB)
+#endif
+
+#if (SLAB_DEBUG & SLAB_DEBUG_PERMS) && defined(HAVE_MPROTECT)
+
+static void
+slab_protect(struct slab *slab)
+{
+    /* Revoke all access to the slab's page (base up to and including
+     * the trailing struct slab) so stale pointers into it fault.
+     * mprotect() requires a page-aligned address: pass slab->base (the
+     * mmap()ed page start), not the struct slab pointer, which sits
+     * near the end of the page and would make the call fail EINVAL
+     * silently, leaving the page unprotected. */
+    mprotect(slab->base, (char*)(slab + 1) - (char*)slab->base, PROT_NONE);
+}
+
+static void
+slab_unprotect(struct slab *slab)
+{
+    /* Restore read/write access to the slab's page before reuse.
+     * As in slab_protect(), mprotect() needs the page-aligned
+     * slab->base, not the struct slab pointer near the page's end. */
+    mprotect(slab->base, (char*)(slab + 1) - (char*)slab->base, PROT_READ | PROT_WRITE);
+}
+
+#else
+# define slab_protect(SLAB) (void)(SLAB)
+# define slab_unprotect(SLAB) (void)(SLAB)
+#endif
+
static size_t
slab_pagesize(void)
{
size = (size < SLAB_MIN) ? SLAB_MIN : (size + SLAB_GRAIN - 1) & ~(SLAB_GRAIN - 1);
idx = size / SLAB_GRAIN;
assert(idx < ArrayLength(little_slabs));
- if (!little_slabs[idx]) {
- if (!slabset_slabs.size) {
- unsigned int idx2 = (sizeof(struct slabset) + SLAB_GRAIN - 1) / SLAB_GRAIN;
- slabset_slabs.size = sizeof(struct slabset);
- slabset_slabs.items_per_slab = (slab_pagesize() - sizeof(struct slab)) / ((sizeof(struct slabset) + SLAB_ALIGN - 1) & ~(SLAB_ALIGN - 1));
- little_slabs[idx2] = &slabset_slabs;
- if (idx == idx2)
- return &slabset_slabs;
- }
- little_slabs[idx] = slab_alloc(&slabset_slabs);
- little_slabs[idx]->size = size;
- little_slabs[idx]->items_per_slab = (slab_pagesize() - sizeof(struct slab)) / ((size + SLAB_ALIGN - 1) & ~(SLAB_ALIGN - 1));
+ if (!little_slabs[idx].size) {
+ little_slabs[idx].size = size;
+ little_slabs[idx].items_per_slab = (slab_pagesize() - sizeof(struct slab)) / ((size + SLAB_ALIGN - 1) & ~(SLAB_ALIGN - 1));
}
- return little_slabs[idx];
+ return &little_slabs[idx];
}
static void *
unsigned int ii, step;
/* Allocate new slab. */
- item = slab_map(slab_pagesize());
- slab = (struct slab*)((char*)item + slab_pagesize() - sizeof(*slab));
- slab->base = item;
+ if (free_slab_head) {
+ slab = free_slab_head;
+ slab_unprotect(slab);
+ if (!(free_slab_head = slab->next))
+ free_slab_tail = NULL;
+ } else {
+ item = slab_map(slab_pagesize());
+ slab = (struct slab*)((char*)item + slab_pagesize() - sizeof(*slab));
+ slab->base = item;
+ slab_count++;
+ }
+ slab_log_alloc(slab, sset->size);
/* Populate free list. */
step = (sset->size + SLAB_ALIGN - 1) & ~(SLAB_ALIGN - 1);
for (ii = 1, item = slab->free = slab->base;
ii < sset->items_per_slab;
++ii, item = (*item = (char*)item + step));
+ *item = NULL;
/* Link to parent slabset. */
slab->parent = sset;
- if ((slab->prev = sset->child)) {
+ slab->prev = sset->child;
+ if (slab->prev) {
slab->next = slab->prev->next;
slab->prev->next = slab;
if (slab->next)
slab->next->prev = slab;
- }
+ } else
+ slab->next = NULL;
+ assert(!slab->next || slab == slab->next->prev);
+ assert(!slab->prev || slab == slab->prev->next);
sset->child = slab;
+ sset->nslabs++;
}
slab = sset->child;
item = slab->free;
+ assert(((unsigned long)item & (slab_pagesize() - 1))
+ <= (slab_pagesize() - sizeof(*slab) - sset->size));
slab->free = *item;
- if (slab->used++ == sset->items_per_slab) {
+ if (++slab->used == sset->items_per_slab) {
if (sset->child != slab) {
/* Unlink slab and reinsert before sset->child. */
if (slab->prev)
slab->prev->next = slab;
if ((slab->next = sset->child))
slab->next->prev = slab;
+ assert(!slab->next || slab == slab->next->prev);
+ assert(!slab->prev || slab == slab->prev->next);
} else if (slab->next) {
/* Advance sset->child to next pointer. */
sset->child = slab->next;
}
}
+ sset->nallocs++;
memset(item, 0, sset->size);
return item;
}
+/* Release one small allocation back to its slab.  The block is pushed
+ * onto the slab's free list; a slab whose last item was just freed is
+ * unlinked from its slabset and appended to the global free-slab list,
+ * which is kept between SLAB_RESERVE and MAX_SLAB_FREE pages by
+ * mapping or munmap()ing whole pages as needed. */
static void
slab_unalloc(void *ptr, size_t size)
{
- void **item;
struct slab *slab, *new_next;
- item = ptr;
assert(size < SMALL_CUTOFF);
slab = (struct slab*)((((unsigned long)ptr | (slab_pagesize() - 1)) + 1) - sizeof(*slab));
- *item = slab->free;
- memset(item + 1, 0xde, size - sizeof(*item));
- slab->free = item;
+ *(void**)ptr = slab->free;
+ slab->free = ptr;
+ slab->parent->nallocs--;
if (slab->used-- == slab->parent->items_per_slab
&& slab->parent->child != slab) {
+ /* Unlink from current position, relink as parent's first child. */
new_next = slab->parent->child;
+ assert(new_next != NULL);
+ if (slab->prev)
+ slab->prev->next = slab->next;
+ if (slab->next)
+ slab->next->prev = slab->prev;
+ if ((slab->prev = new_next->prev))
+ slab->prev->next = slab;
+ slab->next = new_next;
+ new_next->prev = slab;
slab->parent->child = slab;
+ assert(!slab->next || slab == slab->next->prev);
+ assert(!slab->prev || slab == slab->prev->next);
} else if (!slab->used) {
- for (new_next = slab;
- new_next->next && new_next->next->used;
- new_next = new_next->next) ;
- new_next = new_next->next;
- } else
- new_next = NULL;
-
- if (new_next) {
+ slab_log_free(slab, size);
+
+ /* Unlink slab from its parent. */
+ slab->parent->nslabs--;
if (slab->prev)
slab->prev->next = slab->next;
if (slab->next)
slab->next->prev = slab->prev;
- if ((slab->prev = new_next->prev))
+ new_next = slab->next ? slab->next : slab->prev;
+ if (slab == slab->parent->child)
+ slab->parent->child = new_next;
+ if (new_next) {
+ assert(!new_next->next || new_next == new_next->next->prev);
+ assert(!new_next->prev || new_next == new_next->prev->next);
+ }
+
+#if SLAB_RESERVE
+ /* Make sure we have enough free slab pages. */
+ while (free_slab_count < SLAB_RESERVE) {
+ struct slab *tslab;
+ void *item;
+
+ item = slab_map(slab_pagesize());
+ tslab = (struct slab*)((char*)item + slab_pagesize() - sizeof(*slab));
+ tslab->base = item;
+ tslab->prev = free_slab_tail;
+ free_slab_tail = tslab;
+ if (!free_slab_head)
+ free_slab_head = tslab;
+ else {
+ slab_unprotect(tslab->prev);
+ tslab->prev->next = tslab;
+ slab_protect(tslab->prev);
+ }
+ free_slab_count++;
+ slab_count++;
+ }
+#endif
+
+ /* Link to list of free slabs. */
+ slab->parent = NULL;
+ slab->next = NULL;
+ slab->prev = free_slab_tail;
+ if (slab->prev) {
+ slab_unprotect(slab->prev);
slab->prev->next = slab;
- if ((slab->next = new_next->next))
- slab->next->prev = slab;
+ slab_protect(slab->prev);
+ } else
+ free_slab_head = slab;
+ slab_protect(slab);
+ free_slab_tail = slab;
+ free_slab_count++;
+
+#if MAX_SLAB_FREE >= 0
+ /* Unlink and unmap old slabs, so accesses to stale-enough
+ * pointers will fault. */
+ while (free_slab_count > MAX_SLAB_FREE) {
+ struct slab *tslab;
+
+ tslab = free_slab_tail;
+ slab_unprotect(tslab);
+ free_slab_tail = tslab->prev;
+ if (tslab->prev) {
+ slab_unprotect(tslab->prev);
+ tslab->prev->next = NULL;
+ slab_protect(tslab->prev);
+ } else
+ free_slab_head = NULL;
+ free_slab_count--;
+ slab_count--;
+ /* NOTE(review): the two lines below use `slab`, not `tslab`.
+ * They coincide only when this loop pops exactly the slab
+ * pushed just above (the usual single-iteration case); if
+ * free_slab_count could exceed MAX_SLAB_FREE by more than one
+ * (e.g. SLAB_RESERVE > MAX_SLAB_FREE) the wrong page would be
+ * unmapped -- confirm whether that configuration is possible. */
+ slab_log_unmap(slab);
+ munmap(slab->base, slab_pagesize());
+ }
+#endif
}
+ (void)size;
}
void *
assert(size < 1 << 24);
real = (size + sizeof(*res) + SLAB_GRAIN - 1) & ~(SLAB_GRAIN - 1);
- if (real < SMALL_CUTOFF)
+ if (real < SMALL_CUTOFF) {
res = slab_alloc(slabset_create(real));
- else
+ slab_alloc_count++;
+ slab_alloc_size += size;
+ } else {
res = slab_map(slab_round_up(real));
-#if SLAB_DEBUG
+ big_alloc_count++;
+ big_alloc_size += size;
+ slab_log_alloc(res, size);
+ }
+#if SLAB_DEBUG & SLAB_DEBUG_HEADER
res->file_id = get_file_id(file);
res->size = size;
res->line = line;
*res = size;
(void)file; (void)line;
#endif
- alloc_count++;
- alloc_size += size;
return res + 1;
}
verify(ptr);
orig = (alloc_header_t*)ptr - 1;
-#if SLAB_DEBUG
+#if SLAB_DEBUG & SLAB_DEBUG_HEADER
osize = orig->size;
#else
osize = *orig;
if (osize >= size)
return ptr;
newblock = slab_malloc(file, line, size);
- memcpy(newblock, ptr, size);
+ memcpy(newblock, ptr, osize);
+ slab_free(file, line, ptr);
return newblock;
}
}
+/* Public free(): releases a block obtained from slab_malloc().  Small
+ * blocks are poisoned with 0xde and returned to their slab via
+ * slab_unalloc(); large blocks are munmap()ed directly.  The matching
+ * slab_/big_ accounting counters from slab_malloc() are decremented,
+ * and with SLAB_DEBUG_HEADER the header records the freeing call site. */
void
-slab_free(UNUSED_ARG(const char *file), UNUSED_ARG(unsigned int line), void *ptr)
+slab_free(const char *file, unsigned int line, void *ptr)
{
alloc_header_t *hdr;
- size_t real;
+ size_t user, real;
if (!ptr)
return;
verify(ptr);
hdr = (alloc_header_t*)ptr - 1;
-#if SLAB_DEBUG
+#if SLAB_DEBUG & SLAB_DEBUG_HEADER
+ hdr->file_id = get_file_id(file);
+ hdr->line = line;
hdr->magic = FREE_MAGIC;
- real = hdr->size + sizeof(*hdr);
+ user = hdr->size;
#else
- real = *hdr + sizeof(*hdr);
+ user = *hdr;
+ (void)file; (void)line;
#endif
- real = (real + SLAB_GRAIN - 1) & ~(SLAB_GRAIN - 1);
- if (real < SMALL_CUTOFF)
+ real = (user + sizeof(*hdr) + SLAB_GRAIN - 1) & ~(SLAB_GRAIN - 1);
+ if (real < SMALL_CUTOFF) {
+ memset(hdr + 1, 0xde, real - sizeof(*hdr));
slab_unalloc(hdr, real);
- else
+ slab_alloc_count--;
+ slab_alloc_size -= user;
+ } else {
munmap(hdr, slab_round_up(real));
- alloc_count--;
- alloc_size -= real - sizeof(*hdr);
+ big_alloc_count--;
+ big_alloc_size -= user;
+ slab_log_unmap(hdr);
+ }
}
+/* Undefine the verify macro in case we're not debugging. */
+#undef verify
void
verify(const void *ptr)
{
return;
hdr = (alloc_header_t*)ptr - 1;
-#if SLAB_DEBUG
+#if SLAB_DEBUG & SLAB_DEBUG_HEADER
real = hdr->size + sizeof(*hdr);
assert(hdr->file_id < file_ids_used);
assert(hdr->magic == ALLOC_MAGIC);