
From: Manfred Spraul <manfred@colorfullife.com>

Below is the promised patch for better slab debugging, against 2.5.68-mm4:

Changes:

- enable redzoning and last user accounting even for large objects, as
  long as the extra debug words don't waste too much memory (see the
  sketch of the size check right after this list)

- document why FORCED_DEBUG doesn't enable redzoning & last user
  accounting for some caches.

- check the validity of the bufctl chains in a slab in free_block().
  This detects double-free errors for caches without redzoning (a
  sketch of the idea follows the patch).
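
Here is a minimal userspace sketch of that size check, purely for
illustration: PAGE_SIZE is assumed to be 4096, BYTES_PER_WORD is
sizeof(void *) as in mm/slab.c, and fls() is open-coded since libc only
guarantees ffs().  Redzoning adds two guard words and last user
accounting one more, so the condition enables them only when those
3*BYTES_PER_WORD extra bytes don't push the object past the next power
of two:

#include <stdio.h>

#define PAGE_SIZE       4096                            /* assumption */
#define BYTES_PER_WORD  ((unsigned int)sizeof(void *))

/* position of the most significant set bit, 1-based; fls(0) == 0 */
static int fls(unsigned int x)
{
        int r = 0;

        while (x) {
                r++;
                x >>= 1;
        }
        return r;
}

int main(void)
{
        unsigned int sizes[] = { 100, 1000, 1024, 1040, 2048 };
        unsigned int i;

        for (i = 0; i < sizeof(sizes)/sizeof(sizes[0]); i++) {
                unsigned int size = sizes[i];
                /* the condition from the patch below */
                int debug = size < (PAGE_SIZE>>3) ||
                        fls(size-1) == fls(size-1+3*BYTES_PER_WORD);

                printf("size %4u: %s\n", size,
                        debug ? "redzone + store_user" : "poison only");
        }
        return 0;
}

Note how a 1000-byte object gets full debugging (24 extra bytes still
fit below 1024), while a 1024-byte object gets poisoning only, since
redzoning would push it just above a power of two.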



 25-akpm/mm/slab.c |   19 ++++++++++++++-----
 1 files changed, 14 insertions(+), 5 deletions(-)

diff -puN mm/slab.c~slab-debugging-improvement mm/slab.c
--- 25/mm/slab.c~slab-debugging-improvement	Fri May  2 14:40:21 2003
+++ 25-akpm/mm/slab.c	Fri May  2 14:40:21 2003
@@ -920,12 +920,19 @@ kmem_cache_create (const char *name, siz
 	}
 
 #if FORCED_DEBUG
-	if ((size < (PAGE_SIZE>>3)) && !(flags & SLAB_MUST_HWCACHE_ALIGN))
-		/*
-		 * do not red zone large object, causes severe
-		 * fragmentation.
-		 */
+	/*
+	 * Enable redzoning and last user accounting, except
+	 * - for caches with forced alignment: redzoning would violate the
+	 *   alignment
+	 * - for caches with large objects, if the increased size would
+	 *   increase the object size above the next power of two: caches
+	 *   with object sizes just above a power of two have a significant
+	 *   amount of internal fragmentation
+	 */
+	if ((size < (PAGE_SIZE>>3) || fls(size-1) == fls(size-1+3*BYTES_PER_WORD))
+			&& !(flags & SLAB_MUST_HWCACHE_ALIGN)) {
 		flags |= SLAB_RED_ZONE|SLAB_STORE_USER;
+	}
 	flags |= SLAB_POISON;
 #endif
 #endif
@@ -1784,10 +1791,12 @@ static void free_block(kmem_cache_t *cac
 		slabp = GET_PAGE_SLAB(virt_to_page(objp));
 		list_del(&slabp->list);
 		objnr = (objp - slabp->s_mem) / cachep->objsize;
+		check_slabp(cachep, slabp);
 		slab_bufctl(slabp)[objnr] = slabp->free;
 		slabp->free = objnr;
 		STATS_DEC_ACTIVE(cachep);
 		slabp->inuse--;
+		check_slabp(cachep, slabp);
 
 		/* fixup slab chains */
 		if (slabp->inuse == 0) {
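
For illustration, here is a standalone model of the consistency check
those check_slabp() calls perform (a sketch only: kmem_bufctl_t,
BUFCTL_END and the slab layout are simplified stand-ins for the real
mm/slab.c definitions).  A double free links an object into the free
chain twice, which either creates a cycle, caught by bounding the walk
at the object count, or makes the chain length disagree with
num - inuse:

#include <stdio.h>

typedef unsigned int kmem_bufctl_t;
#define BUFCTL_END      ((kmem_bufctl_t)~0U)
#define NUM_OBJS        4

struct slab {
        kmem_bufctl_t   free;                   /* head of the free chain */
        unsigned int    inuse;                  /* allocated objects */
        kmem_bufctl_t   bufctl[NUM_OBJS];       /* per-object free links */
};

/* Returns 0 if the free chain is consistent, -1 if it is corrupt. */
static int check_slab(struct slab *slabp)
{
        unsigned int entries = 0;
        kmem_bufctl_t i;

        for (i = slabp->free; i != BUFCTL_END; i = slabp->bufctl[i]) {
                if (i >= NUM_OBJS || ++entries > NUM_OBJS)
                        return -1;      /* bad index, or a cycle */
        }
        if (entries != NUM_OBJS - slabp->inuse)
                return -1;              /* chain length vs. inuse mismatch */
        return 0;
}

int main(void)
{
        /* four objects, two allocated; free chain is 1 -> 3 -> END */
        struct slab s = { .free = 1, .inuse = 2,
                          .bufctl = { 0, 3, 0, BUFCTL_END } };

        printf("consistent slab:   %s\n", check_slab(&s) ? "BAD" : "ok");

        /* double free of object 1, linked as free_block does it:
         * bufctl[objnr] = free; free = objnr;  -> 1 now points to itself
         */
        s.bufctl[1] = s.free;
        s.free = 1;
        s.inuse--;
        printf("after double free: %s\n", check_slab(&s) ? "BAD" : "ok");
        return 0;
}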

