Fix some memory leaks in the slab allocator (funny, isn't it?).
author Michael Poole <mdpoole@troilus.org>
Fri, 14 Dec 2007 03:56:03 +0000 (22:56 -0500)
committer Michael Poole <mdpoole@troilus.org>
Fri, 14 Dec 2007 03:56:03 +0000 (22:56 -0500)
alloc-slab.c (SLAB_DEBUG): Allow environment to override definition.
  (SLAB_RESERVE): Likewise.
  (MAX_SLAB_FREE): New preprocessor macro, used to cap free_slab_count.
  (slab_unalloc): Reindent SLAB_RESERVE logic.  Unconditionally link the
    free slab to the list of free slabs.  Add code to check MAX_SLAB_FREE.

src/alloc-slab.c

index 919ffe1e2ab2b999534ac28a2bcdff7d10ec8983..43c857549c30715732e5ab0b843f3e523c701f7c 100644 (file)
 # error The slab allocator requires that your system have the mmap() system call.
 #endif
 
-#define SLAB_DEBUG 1
-#define SLAB_RESERVE 1024
+#if !defined(SLAB_DEBUG)
+# define SLAB_DEBUG 1
+#endif
+
+#if !defined(SLAB_RESERVE)
+# define SLAB_RESERVE 0
+#endif
+
+#if !defined(MAX_SLAB_FREE)
+# define MAX_SLAB_FREE 1024
+#endif
 
 #if SLAB_DEBUG
 
@@ -299,36 +308,52 @@ slab_unalloc(void *ptr, size_t size)
         }
 
 #if SLAB_RESERVE
-        if (!free_slab_count) {
-            /* Make sure we have enough free slab pages. */
-            while (free_slab_count < SLAB_RESERVE) {
-                struct slab *tslab;
-                void *item;
-
-                item = slab_map(slab_pagesize());
-                tslab = (struct slab*)((char*)item + slab_pagesize() - sizeof(*slab));
-                tslab->base = item;
-                tslab->prev = free_slab_tail;
-                free_slab_tail = tslab;
-                if (!free_slab_head)
-                    free_slab_head = tslab;
-                free_slab_count++;
-                slab_count++;
-            }
+        /* Make sure we have enough free slab pages. */
+        while (free_slab_count < SLAB_RESERVE) {
+            struct slab *tslab;
+            void *item;
+
+            item = slab_map(slab_pagesize());
+            tslab = (struct slab*)((char*)item + slab_pagesize() - sizeof(*slab));
+            tslab->base = item;
+            tslab->prev = free_slab_tail;
+            free_slab_tail = tslab;
+            if (!free_slab_head)
+                free_slab_head = tslab;
+            else
+                tslab->prev->next = tslab;
+            free_slab_count++;
+            slab_count++;
         }
+#endif
 
-        /* Unmap old slab, so accesses to stale pointers will fault. */
-        munmap(slab->base, slab_pagesize());
-        slab_count--;
-#else
         /* Link to list of free slabs. */
         slab->parent = NULL;
-        slab->prev = free_slab_tail;
         slab->next = NULL;
+        slab->prev = free_slab_tail;
         free_slab_tail = slab;
-        if (!free_slab_head)
+        if (free_slab_head)
+            slab->prev->next = slab;
+        else
             free_slab_head = slab;
         free_slab_count++;
+
+#if MAX_SLAB_FREE >= 0
+        /* Unlink and unmap old slabs, so accesses to stale-enough
+         * pointers will fault. */
+        while (free_slab_count > MAX_SLAB_FREE) {
+            struct slab *tslab;
+
+            tslab = free_slab_tail;
+            free_slab_tail = tslab->prev;
+            if (tslab->prev)
+                tslab->prev->next = NULL;
+            else
+                free_slab_head = NULL;
+            free_slab_count--;
+            slab_count--;
+            munmap(tslab->base, slab_pagesize());
+        }
 #endif
     }
     (void)size;