# arch-tag: automatic-ChangeLog--srvx@srvx.net--2005-srvx/srvx--devo--1.3
#
+2005-02-05 03:52:51 GMT Michael Poole <mdpoole@troilus.org> patch-14
+
+ Summary:
+ Even more slab allocator updates.
+ Revision:
+ srvx--devo--1.3--patch-14
+
+ src/slab-alloc.c (SLAB_DEBUG): Default to on.
+ (SMALL_CUTOFF): Fix default value (must be a multiple of 4).
+ (slab_unalloc): Fix slab counting. When SLAB_RESERVE, allocate the
+ set of pages in a burst, rather than supplementing them as we unmap.
+ (slab_realloc): Fix a rather embarrassing (and LARGE) memory leak.
+
+ modified files:
+ ChangeLog src/alloc-slab.c
+
+
2005-02-04 16:36:40 GMT Michael Poole <mdpoole@troilus.org> patch-13
Summary:
# error The slab allocator requires that your system have the mmap() system call.
#endif
-#define SLAB_DEBUG 0
+#define SLAB_DEBUG 1
#define SLAB_RESERVE 1024
#if SLAB_DEBUG
#define SLAB_MIN (2 * sizeof(void*))
#define SLAB_GRAIN sizeof(void*)
#define SLAB_ALIGN SLAB_GRAIN
-#define SMALL_CUTOFF 582
+#define SMALL_CUTOFF 580
/* Element size < SMALL_CUTOFF -> use small slabs.
* Larger elements are allocated directly using mmap(). The largest
* regularly allocated struct in srvx 1.x is smaller than
/* log_module(MAIN_LOG, LOG_DEBUG, "%u-slab %p became full.", slab->parent->size, slab); */
/* Unlink slab from its parent. */
slab->parent->nslabs--;
- slab_count--;
if (slab->prev)
slab->prev->next = slab->next;
if (slab->next)
}
#if SLAB_RESERVE
- /* Make sure we have enough free slab pages. */
- while (free_slab_count < SLAB_RESERVE) {
- struct slab *tslab;
- item = slab_map(slab_pagesize());
- tslab = (struct slab*)((char*)item + slab_pagesize() - sizeof(*slab));
- tslab->base = item;
- tslab->prev = free_slab_tail;
- free_slab_tail = tslab;
- if (!free_slab_head)
- free_slab_head = tslab;
- free_slab_count++;
- slab_count++;
+ if (!free_slab_count) {
+ /* Make sure we have enough free slab pages. */
+ while (free_slab_count < SLAB_RESERVE) {
+ struct slab *tslab;
+ item = slab_map(slab_pagesize());
+ tslab = (struct slab*)((char*)item + slab_pagesize() - sizeof(*slab));
+ tslab->base = item;
+ tslab->prev = free_slab_tail;
+ free_slab_tail = tslab;
+ if (!free_slab_head)
+ free_slab_head = tslab;
+ free_slab_count++;
+ slab_count++;
+ }
}
/* Unmap old slab, so accesses to stale pointers will fault. */
return ptr;
newblock = slab_malloc(file, line, size);
memcpy(newblock, ptr, osize);
+ slab_free(file, line, ptr);
return newblock;
}
}
void
-slab_free(UNUSED_ARG(const char *file), UNUSED_ARG(unsigned int line), void *ptr)
+slab_free(const char *file, unsigned int line, void *ptr)
{
alloc_header_t *hdr;
size_t real;
verify(ptr);
hdr = (alloc_header_t*)ptr - 1;
#if SLAB_DEBUG
+ hdr->file_id = get_file_id(file);
+ hdr->line = line;
hdr->magic = FREE_MAGIC;
real = hdr->size + sizeof(*hdr);
#else
real = *hdr + sizeof(*hdr);
+ (void)file; (void)line;
#endif
real = (real + SLAB_GRAIN - 1) & ~(SLAB_GRAIN - 1);
if (real < SMALL_CUTOFF) {