# arch-tag: automatic-ChangeLog--srvx@srvx.net--2005-srvx/srvx--devo--1.3
#
+2005-02-05 13:03:21 GMT Michael Poole <mdpoole@troilus.org> patch-15
+
+ Summary:
+ Rearrange slab allocation header; minor slab fixes.
+ Revision:
+ srvx--devo--1.3--patch-15
+
+ src/alloc-slab.c (ALLOC_MAGIC, FREE_MAGIC): Replace with single-byte values.
+ (struct alloc_header): Move file_id and line into second 32-bit word
+ of header.
+ (slab_alloc): Remove commented-out debug statements.
+ (slab_unalloc): Remove memset() of freed block and commented-out debug
+ statements.
+	(slab_free): memset() freed blocks here instead.  Try to fix
+	*_alloc_size counting errors (use the originally requested length
+	rather than rounded-up length).
+
+ modified files:
+ ChangeLog src/alloc-slab.c
+
+
2005-02-05 03:52:51 GMT Michael Poole <mdpoole@troilus.org> patch-14
Summary:
#if SLAB_DEBUG
-#define ALLOC_MAGIC 0x1acf
-#define FREE_MAGIC 0xfc1d
+#define ALLOC_MAGIC 0x1a
+#define FREE_MAGIC 0xcf
struct alloc_header {
- unsigned int file_id : 8;
unsigned int size : 24;
+ unsigned int magic : 8;
+ unsigned int file_id : 8;
unsigned int line : 16;
- unsigned int magic : 16;
};
static const char *file_ids[256];
assert(!slab->prev || slab == slab->prev->next);
sset->child = slab;
sset->nslabs++;
- /* log_module(MAIN_LOG, LOG_DEBUG, "Allocated new %u-slab %p.", sset->size, slab); */
}
slab = sset->child;
<= (slab_pagesize() - sizeof(*slab) - sset->size));
slab->free = *item;
if (++slab->used == sset->items_per_slab) {
- /* log_module(MAIN_LOG, LOG_DEBUG, "%u-slab %p is empty.", sset->size, item); */
if (sset->child != slab) {
/* Unlink slab and reinsert before sset->child. */
if (slab->prev)
static void
slab_unalloc(void *ptr, size_t size)
{
- void **item;
struct slab *slab, *new_next;
- item = ptr;
assert(size < SMALL_CUTOFF);
slab = (struct slab*)((((unsigned long)ptr | (slab_pagesize() - 1)) + 1) - sizeof(*slab));
- *item = slab->free;
- memset(item + 1, 0xde, size - sizeof(*item));
- slab->free = item;
+ *(void**)ptr = slab->free;
+ slab->free = ptr;
slab->parent->nallocs--;
if (slab->used-- == slab->parent->items_per_slab
slab->parent->child = slab;
assert(!slab->next || slab == slab->next->prev);
assert(!slab->prev || slab == slab->prev->next);
- /* log_module(MAIN_LOG, LOG_DEBUG, "%u-slab %p became partial.", slab->parent->size, slab); */
} else if (!slab->used) {
- /* log_module(MAIN_LOG, LOG_DEBUG, "%u-slab %p became full.", slab->parent->size, slab); */
/* Unlink slab from its parent. */
slab->parent->nslabs--;
if (slab->prev)
/* Make sure we have enough free slab pages. */
while (free_slab_count < SLAB_RESERVE) {
struct slab *tslab;
+ void *item;
+
item = slab_map(slab_pagesize());
tslab = (struct slab*)((char*)item + slab_pagesize() - sizeof(*slab));
tslab->base = item;
slab_free(const char *file, unsigned int line, void *ptr)
{
alloc_header_t *hdr;
- size_t real;
+ size_t user, real;
if (!ptr)
return;
hdr->file_id = get_file_id(file);
hdr->line = line;
hdr->magic = FREE_MAGIC;
- real = hdr->size + sizeof(*hdr);
+ user = hdr->size;
#else
- real = *hdr + sizeof(*hdr);
+ user = *hdr;
(void)file; (void)line;
#endif
- real = (real + SLAB_GRAIN - 1) & ~(SLAB_GRAIN - 1);
+ real = (user + sizeof(*hdr) + SLAB_GRAIN - 1) & ~(SLAB_GRAIN - 1);
if (real < SMALL_CUTOFF) {
+ memset(hdr + 1, 0xde, real - sizeof(*hdr));
slab_unalloc(hdr, real);
slab_alloc_count--;
- slab_alloc_size -= real - sizeof(*hdr);
+ slab_alloc_size -= user;
} else {
munmap(hdr, slab_round_up(real));
big_alloc_count--;
- big_alloc_size -= real - sizeof(*hdr);
+ big_alloc_size -= user;
}
}