index e5ec26e0c4603c9ee19a7ea5878426ea2420cddd..71370256a7eb11e8b6eeb0c88cbd0e23955ab163 100644
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -55,7 +55,7 @@
  *
  * SMP synchronization:
  *  constructors and destructors are called without any locking.
- *  Several members in kmem_cache_t and struct slab never change, they
+ *  Several members in struct kmem_cache and struct slab never change, they
  *     are accessed without any locking.
  *  The per-cpu arrays are never accessed from the wrong cpu, no locking,
  *     and local interrupts are disabled so slab code is preempt-safe.
@@ -68,7 +68,7 @@
  * Further notes from the original documentation:
  *
  * 11 April '97.  Started multi-threading - markhe
- *     The global cache-chain is protected by the semaphore 'cache_chain_sem'.
+ *     The global cache-chain is protected by the mutex 'cache_chain_mutex'.
  *     The sem is only needed when accessing/extending the cache-chain, which
  *     can never happen inside an interrupt (kmem_cache_create(),
  *     kmem_cache_shrink() and kmem_cache_reap()).
 #include       <linux/rcupdate.h>
 #include       <linux/string.h>
 #include       <linux/nodemask.h>
+#include       <linux/mempolicy.h>
+#include       <linux/mutex.h>
 
 #include       <asm/uaccess.h>
 #include       <asm/cacheflush.h>
 #define        FORCED_DEBUG    0
 #endif
 
-
 /* Shouldn't this be in a header file somewhere? */
 #define        BYTES_PER_WORD          sizeof(void *)
 
@@ -217,12 +218,12 @@ static unsigned long offslab_limit;
  * Slabs are chained into three list: fully used, partial, fully free slabs.
  */
 struct slab {
-       struct list_head        list;
-       unsigned long           colouroff;
-       void                    *s_mem;         /* including colour offset */
-       unsigned int            inuse;          /* num of objs active in slab */
-       kmem_bufctl_t           free;
-       unsigned short          nodeid;
+       struct list_head list;
+       unsigned long colouroff;
+       void *s_mem;            /* including colour offset */
+       unsigned int inuse;     /* num of objs active in slab */
+       kmem_bufctl_t free;
+       unsigned short nodeid;
 };
 
 /*
@@ -242,9 +243,9 @@ struct slab {
  * We assume struct slab_rcu can overlay struct slab when destroying.
  */
 struct slab_rcu {
-       struct rcu_head         head;
-       kmem_cache_t            *cachep;
-       void                    *addr;
+       struct rcu_head head;
+       struct kmem_cache *cachep;
+       void *addr;
 };
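
That overlay assumption (struct slab_rcu is built in place over the struct slab being freed, so it must never be larger) can be checked at compile time. A minimal user-space sketch of such a check, with stand-in field types rather than the real kernel definitions:

#include <stddef.h>
#include <stdio.h>

struct demo_slab {			/* stand-in for struct slab above */
	void *list_prev, *list_next;	/* struct list_head */
	unsigned long colouroff;
	void *s_mem;
	unsigned int inuse;
	unsigned int free;		/* kmem_bufctl_t assumed to be an int */
	unsigned short nodeid;
};

struct demo_slab_rcu {			/* stand-in for struct slab_rcu above */
	void *rcu_next;			/* struct rcu_head */
	void (*rcu_func)(void *);
	void *cachep;
	void *addr;
};

/* refuses to compile if the RCU variant ever outgrows the slab descriptor */
typedef char overlay_fits[sizeof(struct demo_slab_rcu) <= sizeof(struct demo_slab) ? 1 : -1];

int main(void)
{
	printf("slab %zu bytes, slab_rcu %zu bytes\n",
	       sizeof(struct demo_slab), sizeof(struct demo_slab_rcu));
	return 0;
}
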
 
 /*
@@ -279,23 +280,23 @@ struct array_cache {
 #define BOOT_CPUCACHE_ENTRIES  1
 struct arraycache_init {
        struct array_cache cache;
-       void * entries[BOOT_CPUCACHE_ENTRIES];
+       void *entries[BOOT_CPUCACHE_ENTRIES];
 };
 
 /*
  * The slab lists for all objects.
  */
 struct kmem_list3 {
-       struct list_head        slabs_partial;  /* partial list first, better asm code */
-       struct list_head        slabs_full;
-       struct list_head        slabs_free;
-       unsigned long   free_objects;
-       unsigned long   next_reap;
-       int             free_touched;
-       unsigned int    free_limit;
-       spinlock_t      list_lock;
-       struct array_cache      *shared;        /* shared per node */
-       struct array_cache      **alien;        /* on other nodes */
+       struct list_head slabs_partial; /* partial list first, better asm code */
+       struct list_head slabs_full;
+       struct list_head slabs_free;
+       unsigned long free_objects;
+       unsigned long next_reap;
+       int free_touched;
+       unsigned int free_limit;
+       spinlock_t list_lock;
+       struct array_cache *shared;     /* shared per node */
+       struct array_cache **alien;     /* on other nodes */
 };
 
 /*
@@ -315,6 +316,8 @@ struct kmem_list3 __initdata initkmem_list3[NUM_INIT_LISTS];
  */
 static __always_inline int index_of(const size_t size)
 {
+       extern void __bad_size(void);
+
        if (__builtin_constant_p(size)) {
                int i = 0;
 
@@ -325,19 +328,16 @@ static __always_inline int index_of(const size_t size)
                i++;
 #include "linux/kmalloc_sizes.h"
 #undef CACHE
-               {
-                       extern void __bad_size(void);
-                       __bad_size();
-               }
+               __bad_size();
        } else
-               BUG();
+               __bad_size();
        return 0;
 }
 
 #define INDEX_AC index_of(sizeof(struct arraycache_init))
 #define INDEX_L3 index_of(sizeof(struct kmem_list3))
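
index_of() above resolves a compile-time-constant size to a position in the kmalloc size table; any size with no matching entry leaves a call to the never-defined __bad_size() in the object file, so the mistake surfaces at link time. A stand-alone sketch of the same trick, using a hypothetical three-entry table instead of kmalloc_sizes.h (build with optimization, e.g. gcc -O2, so the dead branches are eliminated):

#include <stdio.h>

extern void bad_size(void);	/* never defined: a surviving call breaks the link */

static inline __attribute__((always_inline)) int size_to_index(unsigned long size)
{
	if (__builtin_constant_p(size)) {
		if (size <= 32)
			return 0;
		if (size <= 64)
			return 1;
		if (size <= 128)
			return 2;
		bad_size();	/* constant size too large for the table */
	} else
		bad_size();	/* non-constant sizes are rejected as well */
	return 0;
}

int main(void)
{
	/* both calls use compile-time constants, so no bad_size() call survives */
	printf("24 bytes  -> index %d\n", size_to_index(24));
	printf("100 bytes -> index %d\n", size_to_index(100));
	return 0;
}
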
 
-static inline void kmem_list3_init(struct kmem_list3 *parent)
+static void kmem_list3_init(struct kmem_list3 *parent)
 {
        INIT_LIST_HEAD(&parent->slabs_full);
        INIT_LIST_HEAD(&parent->slabs_partial);
@@ -363,67 +363,73 @@ static inline void kmem_list3_init(struct kmem_list3 *parent)
        } while (0)
 
 /*
- * kmem_cache_t
+ * struct kmem_cache
  *
  * manages a cache.
  */
-       
+
 struct kmem_cache {
 /* 1) per-cpu data, touched during every alloc/free */
-       struct array_cache      *array[NR_CPUS];
-       unsigned int            batchcount;
-       unsigned int            limit;
-       unsigned int            shared;
-       unsigned int            objsize;
+       struct array_cache *array[NR_CPUS];
+       unsigned int batchcount;
+       unsigned int limit;
+       unsigned int shared;
+       unsigned int buffer_size;
 /* 2) touched by every alloc & free from the backend */
-       struct kmem_list3       *nodelists[MAX_NUMNODES];
-       unsigned int            flags;  /* constant flags */
-       unsigned int            num;    /* # of objs per slab */
-       spinlock_t              spinlock;
+       struct kmem_list3 *nodelists[MAX_NUMNODES];
+       unsigned int flags;     /* constant flags */
+       unsigned int num;       /* # of objs per slab */
+       spinlock_t spinlock;
 
 /* 3) cache_grow/shrink */
        /* order of pgs per slab (2^n) */
-       unsigned int            gfporder;
+       unsigned int gfporder;
 
        /* force GFP flags, e.g. GFP_DMA */
-       gfp_t                   gfpflags;
+       gfp_t gfpflags;
 
-       size_t                  colour;         /* cache colouring range */
-       unsigned int            colour_off;     /* colour offset */
-       unsigned int            colour_next;    /* cache colouring */
-       kmem_cache_t            *slabp_cache;
-       unsigned int            slab_size;
-       unsigned int            dflags;         /* dynamic flags */
+       size_t colour;          /* cache colouring range */
+       unsigned int colour_off;        /* colour offset */
+       unsigned int colour_next;       /* cache colouring */
+       struct kmem_cache *slabp_cache;
+       unsigned int slab_size;
+       unsigned int dflags;    /* dynamic flags */
 
        /* constructor func */
-       void (*ctor)(void *, kmem_cache_t *, unsigned long);
+       void (*ctor) (void *, struct kmem_cache *, unsigned long);
 
        /* de-constructor func */
-       void (*dtor)(void *, kmem_cache_t *, unsigned long);
+       void (*dtor) (void *, struct kmem_cache *, unsigned long);
 
 /* 4) cache creation/removal */
-       const char              *name;
-       struct list_head        next;
+       const char *name;
+       struct list_head next;
 
 /* 5) statistics */
 #if STATS
-       unsigned long           num_active;
-       unsigned long           num_allocations;
-       unsigned long           high_mark;
-       unsigned long           grown;
-       unsigned long           reaped;
-       unsigned long           errors;
-       unsigned long           max_freeable;
-       unsigned long           node_allocs;
-       unsigned long           node_frees;
-       atomic_t                allochit;
-       atomic_t                allocmiss;
-       atomic_t                freehit;
-       atomic_t                freemiss;
+       unsigned long num_active;
+       unsigned long num_allocations;
+       unsigned long high_mark;
+       unsigned long grown;
+       unsigned long reaped;
+       unsigned long errors;
+       unsigned long max_freeable;
+       unsigned long node_allocs;
+       unsigned long node_frees;
+       atomic_t allochit;
+       atomic_t allocmiss;
+       atomic_t freehit;
+       atomic_t freemiss;
 #endif
 #if DEBUG
-       int                     dbghead;
-       int                     reallen;
+       /*
+        * If debugging is enabled, then the allocator can add additional
+        * fields and/or padding to every object. buffer_size contains the total
+        * object size including these internal fields, the following two
+        * variables contain the offset to the user object and its size.
+        */
+       int obj_offset;
+       int obj_size;
 #endif
 };
 
@@ -494,49 +500,50 @@ struct kmem_cache {
 
 /* memory layout of objects:
  * 0           : objp
- * 0 .. cachep->dbghead - BYTES_PER_WORD - 1: padding. This ensures that
+ * 0 .. cachep->obj_offset - BYTES_PER_WORD - 1: padding. This ensures that
  *             the end of an object is aligned with the end of the real
  *             allocation. Catches writes behind the end of the allocation.
- * cachep->dbghead - BYTES_PER_WORD .. cachep->dbghead - 1:
+ * cachep->obj_offset - BYTES_PER_WORD .. cachep->obj_offset - 1:
  *             redzone word.
- * cachep->dbghead: The real object.
- * cachep->objsize - 2* BYTES_PER_WORD: redzone word [BYTES_PER_WORD long]
- * cachep->objsize - 1* BYTES_PER_WORD: last caller address [BYTES_PER_WORD long]
+ * cachep->obj_offset: The real object.
+ * cachep->buffer_size - 2* BYTES_PER_WORD: redzone word [BYTES_PER_WORD long]
+ * cachep->buffer_size - 1* BYTES_PER_WORD: last caller address [BYTES_PER_WORD long]
  */
-static int obj_dbghead(kmem_cache_t *cachep)
+static int obj_offset(struct kmem_cache *cachep)
 {
-       return cachep->dbghead;
+       return cachep->obj_offset;
 }
 
-static int obj_reallen(kmem_cache_t *cachep)
+static int obj_size(struct kmem_cache *cachep)
 {
-       return cachep->reallen;
+       return cachep->obj_size;
 }
 
-static unsigned long *dbg_redzone1(kmem_cache_t *cachep, void *objp)
+static unsigned long *dbg_redzone1(struct kmem_cache *cachep, void *objp)
 {
        BUG_ON(!(cachep->flags & SLAB_RED_ZONE));
-       return (unsigned long*) (objp+obj_dbghead(cachep)-BYTES_PER_WORD);
+       return (unsigned long*) (objp+obj_offset(cachep)-BYTES_PER_WORD);
 }
 
-static unsigned long *dbg_redzone2(kmem_cache_t *cachep, void *objp)
+static unsigned long *dbg_redzone2(struct kmem_cache *cachep, void *objp)
 {
        BUG_ON(!(cachep->flags & SLAB_RED_ZONE));
        if (cachep->flags & SLAB_STORE_USER)
-               return (unsigned long*) (objp+cachep->objsize-2*BYTES_PER_WORD);
-       return (unsigned long*) (objp+cachep->objsize-BYTES_PER_WORD);
+               return (unsigned long *)(objp + cachep->buffer_size -
+                                        2 * BYTES_PER_WORD);
+       return (unsigned long *)(objp + cachep->buffer_size - BYTES_PER_WORD);
 }
 
-static void **dbg_userword(kmem_cache_t *cachep, void *objp)
+static void **dbg_userword(struct kmem_cache *cachep, void *objp)
 {
        BUG_ON(!(cachep->flags & SLAB_STORE_USER));
-       return (void**)(objp+cachep->objsize-BYTES_PER_WORD);
+       return (void **)(objp + cachep->buffer_size - BYTES_PER_WORD);
 }
 
 #else
 
-#define obj_dbghead(x)                 0
-#define obj_reallen(cachep)            (cachep->objsize)
+#define obj_offset(x)                  0
+#define obj_size(cachep)               (cachep->buffer_size)
 #define dbg_redzone1(cachep, objp)     ({BUG(); (unsigned long *)NULL;})
 #define dbg_redzone2(cachep, objp)     ({BUG(); (unsigned long *)NULL;})
 #define dbg_userword(cachep, objp)     ({BUG(); (void **)NULL;})
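
As a worked example of the layout above, assume BYTES_PER_WORD is 8 and a hypothetical cache whose objects are 24 bytes, created with both SLAB_RED_ZONE and SLAB_STORE_USER. The offsets printed below follow the same arithmetic as dbg_redzone1(), dbg_redzone2() and dbg_userword():

#include <stdio.h>

#define BYTES_PER_WORD	8UL		/* sizeof(void *) on a 64-bit machine */

int main(void)
{
	unsigned long obj_size = 24;	/* size the caller asked for (word aligned) */
	unsigned long obj_offset = 0;
	unsigned long buffer_size = obj_size;

	/* SLAB_RED_ZONE: one guard word in front of the object, one behind it */
	obj_offset += BYTES_PER_WORD;
	buffer_size += 2 * BYTES_PER_WORD;

	/* SLAB_STORE_USER: one extra word for the last caller's address */
	buffer_size += BYTES_PER_WORD;

	/* here obj_offset == BYTES_PER_WORD, so the leading padding region is empty */
	printf("redzone 1 : bytes %2lu..%2lu\n",
	       obj_offset - BYTES_PER_WORD, obj_offset - 1);
	printf("object    : bytes %2lu..%2lu\n",
	       obj_offset, obj_offset + obj_size - 1);
	printf("redzone 2 : bytes %2lu..%2lu\n",
	       buffer_size - 2 * BYTES_PER_WORD, buffer_size - BYTES_PER_WORD - 1);
	printf("last user : bytes %2lu..%2lu\n",
	       buffer_size - BYTES_PER_WORD, buffer_size - 1);
	printf("buffer_size = %lu bytes\n", buffer_size);
	return 0;
}
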
@@ -589,6 +596,18 @@ static inline struct slab *page_get_slab(struct page *page)
        return (struct slab *)page->lru.prev;
 }
 
+static inline struct kmem_cache *virt_to_cache(const void *obj)
+{
+       struct page *page = virt_to_page(obj);
+       return page_get_cache(page);
+}
+
+static inline struct slab *virt_to_slab(const void *obj)
+{
+       struct page *page = virt_to_page(obj);
+       return page_get_slab(page);
+}
+
 /* These are the default caches for kmalloc. Custom caches can have other sizes. */
 struct cache_sizes malloc_sizes[] = {
 #define CACHE(x) { .cs_size = (x) },
@@ -607,31 +626,31 @@ struct cache_names {
 static struct cache_names __initdata cache_names[] = {
 #define CACHE(x) { .name = "size-" #x, .name_dma = "size-" #x "(DMA)" },
 #include <linux/kmalloc_sizes.h>
-       { NULL, }
+       {NULL,}
 #undef CACHE
 };
 
 static struct arraycache_init initarray_cache __initdata =
-       { { 0, BOOT_CPUCACHE_ENTRIES, 1, 0} };
+    { {0, BOOT_CPUCACHE_ENTRIES, 1, 0} };
 static struct arraycache_init initarray_generic =
-       { { 0, BOOT_CPUCACHE_ENTRIES, 1, 0} };
+    { {0, BOOT_CPUCACHE_ENTRIES, 1, 0} };
 
 /* internal cache of cache description objs */
-static kmem_cache_t cache_cache = {
-       .batchcount     = 1,
-       .limit          = BOOT_CPUCACHE_ENTRIES,
-       .shared         = 1,
-       .objsize        = sizeof(kmem_cache_t),
-       .flags          = SLAB_NO_REAP,
-       .spinlock       = SPIN_LOCK_UNLOCKED,
-       .name           = "kmem_cache",
+static struct kmem_cache cache_cache = {
+       .batchcount = 1,
+       .limit = BOOT_CPUCACHE_ENTRIES,
+       .shared = 1,
+       .buffer_size = sizeof(struct kmem_cache),
+       .flags = SLAB_NO_REAP,
+       .spinlock = SPIN_LOCK_UNLOCKED,
+       .name = "kmem_cache",
 #if DEBUG
-       .reallen        = sizeof(kmem_cache_t),
+       .obj_size = sizeof(struct kmem_cache),
 #endif
 };
 
 /* Guard access to the cache-chain. */
-static struct semaphore        cache_chain_sem;
+static DEFINE_MUTEX(cache_chain_mutex);
 static struct list_head cache_chain;
 
 /*
@@ -655,25 +674,25 @@ static enum {
 
 static DEFINE_PER_CPU(struct work_struct, reap_work);
 
-static void free_block(kmem_cache_t* cachep, void** objpp, int len, int node);
-static void enable_cpucache (kmem_cache_t *cachep);
-static void cache_reap (void *unused);
-static int __node_shrink(kmem_cache_t *cachep, int node);
+static void free_block(struct kmem_cache *cachep, void **objpp, int len, int node);
+static void enable_cpucache(struct kmem_cache *cachep);
+static void cache_reap(void *unused);
+static int __node_shrink(struct kmem_cache *cachep, int node);
 
-static inline struct array_cache *ac_data(kmem_cache_t *cachep)
+static inline struct array_cache *cpu_cache_get(struct kmem_cache *cachep)
 {
        return cachep->array[smp_processor_id()];
 }
 
-static inline kmem_cache_t *__find_general_cachep(size_t size, gfp_t gfpflags)
+static inline struct kmem_cache *__find_general_cachep(size_t size, gfp_t gfpflags)
 {
        struct cache_sizes *csizep = malloc_sizes;
 
 #if DEBUG
        /* This happens if someone tries to call
-       * kmem_cache_create(), or __kmalloc(), before
-       * the generic caches are initialized.
-       */
+        * kmem_cache_create(), or __kmalloc(), before
+        * the generic caches are initialized.
+        */
        BUG_ON(malloc_sizes[INDEX_AC].cs_cachep == NULL);
 #endif
        while (size > csizep->cs_size)
@@ -689,46 +708,83 @@ static inline kmem_cache_t *__find_general_cachep(size_t size, gfp_t gfpflags)
        return csizep->cs_cachep;
 }
 
-kmem_cache_t *kmem_find_general_cachep(size_t size, gfp_t gfpflags)
+struct kmem_cache *kmem_find_general_cachep(size_t size, gfp_t gfpflags)
 {
        return __find_general_cachep(size, gfpflags);
 }
 EXPORT_SYMBOL(kmem_find_general_cachep);
 
-/* Cal the num objs, wastage, and bytes left over for a given slab size. */
-static void cache_estimate(unsigned long gfporder, size_t size, size_t align,
-                int flags, size_t *left_over, unsigned int *num)
+static size_t slab_mgmt_size(size_t nr_objs, size_t align)
 {
-       int i;
-       size_t wastage = PAGE_SIZE<<gfporder;
-       size_t extra = 0;
-       size_t base = 0;
+       return ALIGN(sizeof(struct slab)+nr_objs*sizeof(kmem_bufctl_t), align);
+}
 
-       if (!(flags & CFLGS_OFF_SLAB)) {
-               base = sizeof(struct slab);
-               extra = sizeof(kmem_bufctl_t);
-       }
-       i = 0;
-       while (i*size + ALIGN(base+i*extra, align) <= wastage)
-               i++;
-       if (i > 0)
-               i--;
+/* Calculate the number of objects and left-over bytes for a given
+   buffer size. */
+static void cache_estimate(unsigned long gfporder, size_t buffer_size,
+                          size_t align, int flags, size_t *left_over,
+                          unsigned int *num)
+{
+       int nr_objs;
+       size_t mgmt_size;
+       size_t slab_size = PAGE_SIZE << gfporder;
+
+       /*
+        * The slab management structure can be either off the slab or
+        * on it. For the latter case, the memory allocated for a
+        * slab is used for:
+        *
+        * - The struct slab
+        * - One kmem_bufctl_t for each object
+        * - Padding to respect alignment of @align
+        * - @buffer_size bytes for each object
+        *
+        * If the slab management structure is off the slab, then the
+        * alignment will already be calculated into the size. Because
+        * the slabs are all pages aligned, the objects will be at the
+        * correct alignment when allocated.
+        */
+       if (flags & CFLGS_OFF_SLAB) {
+               mgmt_size = 0;
+               nr_objs = slab_size / buffer_size;
+
+               if (nr_objs > SLAB_LIMIT)
+                       nr_objs = SLAB_LIMIT;
+       } else {
+               /*
+                * Ignore padding for the initial guess. The padding
+                * is at most @align-1 bytes, and @buffer_size is at
+                * least @align. In the worst case, this result will
+                * be one greater than the number of objects that fit
+                * into the memory allocation when taking the padding
+                * into account.
+                */
+               nr_objs = (slab_size - sizeof(struct slab)) /
+                         (buffer_size + sizeof(kmem_bufctl_t));
 
-       if (i > SLAB_LIMIT)
-               i = SLAB_LIMIT;
+               /*
+                * This calculated number will be either the right
+                * amount, or one greater than what we want.
+                */
+               if (slab_mgmt_size(nr_objs, align) + nr_objs*buffer_size
+                      > slab_size)
+                       nr_objs--;
+
+               if (nr_objs > SLAB_LIMIT)
+                       nr_objs = SLAB_LIMIT;
 
-       *num = i;
-       wastage -= i*size;
-       wastage -= ALIGN(base+i*extra, align);
-       *left_over = wastage;
+               mgmt_size = slab_mgmt_size(nr_objs, align);
+       }
+       *num = nr_objs;
+       *left_over = slab_size - nr_objs*buffer_size - mgmt_size;
 }
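
To make the on-slab arithmetic above concrete, here is a stand-alone sketch with assumed sizes (a 40-byte struct slab and a 4-byte kmem_bufctl_t) for one 4096-byte slab of 256-byte objects aligned to 64 bytes:

#include <stdio.h>

#define ALIGN(x, a)		(((x) + (a) - 1) & ~((a) - 1))
#define PAGE_SIZE		4096UL
#define SLAB_STRUCT_SIZE	40UL	/* assumed sizeof(struct slab) */
#define BUFCTL_SIZE		4UL	/* assumed sizeof(kmem_bufctl_t) */

static unsigned long mgmt_size(unsigned long nr_objs, unsigned long align)
{
	return ALIGN(SLAB_STRUCT_SIZE + nr_objs * BUFCTL_SIZE, align);
}

int main(void)
{
	unsigned long gfporder = 0, buffer_size = 256, align = 64;
	unsigned long slab_size = PAGE_SIZE << gfporder;
	unsigned long nr_objs, left_over;

	/* first guess ignores the alignment padding of the management area */
	nr_objs = (slab_size - SLAB_STRUCT_SIZE) / (buffer_size + BUFCTL_SIZE);

	/* the guess can be one too large once the padding is accounted for */
	if (mgmt_size(nr_objs, align) + nr_objs * buffer_size > slab_size)
		nr_objs--;

	left_over = slab_size - nr_objs * buffer_size - mgmt_size(nr_objs, align);

	printf("%lu objects per slab, %lu bytes left over\n", nr_objs, left_over);
	return 0;
}
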
 
 #define slab_error(cachep, msg) __slab_error(__FUNCTION__, cachep, msg)
 
-static void __slab_error(const char *function, kmem_cache_t *cachep, char *msg)
+static void __slab_error(const char *function, struct kmem_cache *cachep, char *msg)
 {
        printk(KERN_ERR "slab error in %s(): cache `%s': %s\n",
-               function, cachep->name, msg);
+              function, cachep->name, msg);
        dump_stack();
 }
 
@@ -755,9 +811,9 @@ static void __devinit start_cpu_timer(int cpu)
 }
 
 static struct array_cache *alloc_arraycache(int node, int entries,
-                                               int batchcount)
+                                           int batchcount)
 {
-       int memsize = sizeof(void*)*entries+sizeof(struct array_cache);
+       int memsize = sizeof(void *) * entries + sizeof(struct array_cache);
        struct array_cache *nc = NULL;
 
        nc = kmalloc_node(memsize, GFP_KERNEL, node);
@@ -772,10 +828,12 @@ static struct array_cache *alloc_arraycache(int node, int entries,
 }
 
 #ifdef CONFIG_NUMA
-static inline struct array_cache **alloc_alien_cache(int node, int limit)
+static void *__cache_alloc_node(struct kmem_cache *, gfp_t, int);
+
+static struct array_cache **alloc_alien_cache(int node, int limit)
 {
        struct array_cache **ac_ptr;
-       int memsize = sizeof(void*)*MAX_NUMNODES;
+       int memsize = sizeof(void *) * MAX_NUMNODES;
        int i;
 
        if (limit > 1)
@@ -789,7 +847,7 @@ static inline struct array_cache **alloc_alien_cache(int node, int limit)
                        }
                        ac_ptr[i] = alloc_arraycache(node, limit, 0xbaadf00d);
                        if (!ac_ptr[i]) {
-                               for (i--; i <=0; i--)
+                               for (i--; i <= 0; i--)
                                        kfree(ac_ptr[i]);
                                kfree(ac_ptr);
                                return NULL;
@@ -799,7 +857,7 @@ static inline struct array_cache **alloc_alien_cache(int node, int limit)
        return ac_ptr;
 }
 
-static inline void free_alien_cache(struct array_cache **ac_ptr)
+static void free_alien_cache(struct array_cache **ac_ptr)
 {
        int i;
 
@@ -807,12 +865,13 @@ static inline void free_alien_cache(struct array_cache **ac_ptr)
                return;
 
        for_each_node(i)
-               kfree(ac_ptr[i]);
+           kfree(ac_ptr[i]);
 
        kfree(ac_ptr);
 }
 
-static inline void __drain_alien_cache(kmem_cache_t *cachep, struct array_cache *ac, int node)
+static void __drain_alien_cache(struct kmem_cache *cachep,
+                               struct array_cache *ac, int node)
 {
        struct kmem_list3 *rl3 = cachep->nodelists[node];
 
@@ -824,9 +883,9 @@ static inline void __drain_alien_cache(kmem_cache_t *cachep, struct array_cache
        }
 }
 
-static void drain_alien_cache(kmem_cache_t *cachep, struct kmem_list3 *l3)
+static void drain_alien_cache(struct kmem_cache *cachep, struct kmem_list3 *l3)
 {
-       int i=0;
+       int i = 0;
        struct array_cache *ac;
        unsigned long flags;
 
@@ -846,18 +905,17 @@ static void drain_alien_cache(kmem_cache_t *cachep, struct kmem_list3 *l3)
 #endif
 
 static int __devinit cpuup_callback(struct notifier_block *nfb,
-                                 unsigned long action, void *hcpu)
+                                   unsigned long action, void *hcpu)
 {
        long cpu = (long)hcpu;
-       kmem_cache_t* cachep;
+       struct kmem_cache *cachep;
        struct kmem_list3 *l3 = NULL;
        int node = cpu_to_node(cpu);
        int memsize = sizeof(struct kmem_list3);
-       struct array_cache *nc = NULL;
 
        switch (action) {
        case CPU_UP_PREPARE:
-               down(&cache_chain_sem);
+               mutex_lock(&cache_chain_mutex);
                /* we need to do this right in the beginning since
                 * alloc_arraycache's are going to use this list.
                 * kmalloc_node allows us to add the slab to the right
@@ -871,27 +929,29 @@ static int __devinit cpuup_callback(struct notifier_block *nfb,
                         */
                        if (!cachep->nodelists[node]) {
                                if (!(l3 = kmalloc_node(memsize,
-                                               GFP_KERNEL, node)))
+                                                       GFP_KERNEL, node)))
                                        goto bad;
                                kmem_list3_init(l3);
                                l3->next_reap = jiffies + REAPTIMEOUT_LIST3 +
-                                 ((unsigned long)cachep)%REAPTIMEOUT_LIST3;
+                                   ((unsigned long)cachep) % REAPTIMEOUT_LIST3;
 
                                cachep->nodelists[node] = l3;
                        }
 
                        spin_lock_irq(&cachep->nodelists[node]->list_lock);
                        cachep->nodelists[node]->free_limit =
-                               (1 + nr_cpus_node(node)) *
-                               cachep->batchcount + cachep->num;
+                           (1 + nr_cpus_node(node)) *
+                           cachep->batchcount + cachep->num;
                        spin_unlock_irq(&cachep->nodelists[node]->list_lock);
                }
 
                /* Now we can go ahead with allocating the shared array's
-                 & array cache's */
+                  & array cache's */
                list_for_each_entry(cachep, &cache_chain, next) {
+                       struct array_cache *nc;
+
                        nc = alloc_arraycache(node, cachep->limit,
-                                       cachep->batchcount);
+                                             cachep->batchcount);
                        if (!nc)
                                goto bad;
                        cachep->array[cpu] = nc;
@@ -900,16 +960,17 @@ static int __devinit cpuup_callback(struct notifier_block *nfb,
                        BUG_ON(!l3);
                        if (!l3->shared) {
                                if (!(nc = alloc_arraycache(node,
-                                       cachep->shared*cachep->batchcount,
-                                       0xbaadf00d)))
-                                       goto  bad;
+                                                           cachep->shared *
+                                                           cachep->batchcount,
+                                                           0xbaadf00d)))
+                                       goto bad;
 
                                /* we are serialised from CPU_DEAD or
-                                 CPU_UP_CANCELLED by the cpucontrol lock */
+                                  CPU_UP_CANCELLED by the cpucontrol lock */
                                l3->shared = nc;
                        }
                }
-               up(&cache_chain_sem);
+               mutex_unlock(&cache_chain_mutex);
                break;
        case CPU_ONLINE:
                start_cpu_timer(cpu);
@@ -918,7 +979,7 @@ static int __devinit cpuup_callback(struct notifier_block *nfb,
        case CPU_DEAD:
                /* fall thru */
        case CPU_UP_CANCELED:
-               down(&cache_chain_sem);
+               mutex_lock(&cache_chain_mutex);
 
                list_for_each_entry(cachep, &cache_chain, next) {
                        struct array_cache *nc;
@@ -942,13 +1003,13 @@ static int __devinit cpuup_callback(struct notifier_block *nfb,
                                free_block(cachep, nc->entry, nc->avail, node);
 
                        if (!cpus_empty(mask)) {
-                                spin_unlock(&l3->list_lock);
-                                goto unlock_cache;
-                        }
+                               spin_unlock(&l3->list_lock);
+                               goto unlock_cache;
+                       }
 
                        if (l3->shared) {
                                free_block(cachep, l3->shared->entry,
-                                               l3->shared->avail, node);
+                                          l3->shared->avail, node);
                                kfree(l3->shared);
                                l3->shared = NULL;
                        }
@@ -966,17 +1027,17 @@ static int __devinit cpuup_callback(struct notifier_block *nfb,
                        } else {
                                spin_unlock(&l3->list_lock);
                        }
-unlock_cache:
+                     unlock_cache:
                        spin_unlock_irq(&cachep->spinlock);
                        kfree(nc);
                }
-               up(&cache_chain_sem);
+               mutex_unlock(&cache_chain_mutex);
                break;
 #endif
        }
        return NOTIFY_OK;
-bad:
-       up(&cache_chain_sem);
+      bad:
+       mutex_unlock(&cache_chain_mutex);
        return NOTIFY_BAD;
 }
 
@@ -985,8 +1046,7 @@ static struct notifier_block cpucache_notifier = { &cpuup_callback, NULL, 0 };
 /*
  * swap the static kmem_list3 with kmalloced memory
  */
-static void init_list(kmem_cache_t *cachep, struct kmem_list3 *list,
-               int nodeid)
+static void init_list(struct kmem_cache *cachep, struct kmem_list3 *list, int nodeid)
 {
        struct kmem_list3 *ptr;
 
@@ -1026,14 +1086,14 @@ void __init kmem_cache_init(void)
 
        /* Bootstrap is tricky, because several objects are allocated
         * from caches that do not exist yet:
-        * 1) initialize the cache_cache cache: it contains the kmem_cache_t
+        * 1) initialize the cache_cache cache: it contains the struct kmem_cache
         *    structures of all caches, except cache_cache itself: cache_cache
         *    is statically allocated.
         *    Initially an __init data area is used for the head array and the
         *    kmem_list3 structures, it's replaced with a kmalloc allocated
         *    array at the end of the bootstrap.
         * 2) Create the first kmalloc cache.
-        *    The kmem_cache_t for the new cache is allocated normally.
+        *    The struct kmem_cache for the new cache is allocated normally.
         *    An __init data area is used for the head array.
         * 3) Create the remaining kmalloc caches, with minimally sized
         *    head arrays.
@@ -1045,24 +1105,23 @@ void __init kmem_cache_init(void)
         */
 
        /* 1) create the cache_cache */
-       init_MUTEX(&cache_chain_sem);
        INIT_LIST_HEAD(&cache_chain);
        list_add(&cache_cache.next, &cache_chain);
        cache_cache.colour_off = cache_line_size();
        cache_cache.array[smp_processor_id()] = &initarray_cache.cache;
        cache_cache.nodelists[numa_node_id()] = &initkmem_list3[CACHE_CACHE];
 
-       cache_cache.objsize = ALIGN(cache_cache.objsize, cache_line_size());
+       cache_cache.buffer_size = ALIGN(cache_cache.buffer_size, cache_line_size());
 
-       cache_estimate(0, cache_cache.objsize, cache_line_size(), 0,
-                               &left_over, &cache_cache.num);
+       cache_estimate(0, cache_cache.buffer_size, cache_line_size(), 0,
+                      &left_over, &cache_cache.num);
        if (!cache_cache.num)
                BUG();
 
-       cache_cache.colour = left_over/cache_cache.colour_off;
+       cache_cache.colour = left_over / cache_cache.colour_off;
        cache_cache.colour_next = 0;
-       cache_cache.slab_size = ALIGN(cache_cache.num*sizeof(kmem_bufctl_t) +
-                               sizeof(struct slab), cache_line_size());
+       cache_cache.slab_size = ALIGN(cache_cache.num * sizeof(kmem_bufctl_t) +
+                                     sizeof(struct slab), cache_line_size());
 
        /* 2+3) create the kmalloc caches */
        sizes = malloc_sizes;
@@ -1074,14 +1133,18 @@ void __init kmem_cache_init(void)
         */
 
        sizes[INDEX_AC].cs_cachep = kmem_cache_create(names[INDEX_AC].name,
-                               sizes[INDEX_AC].cs_size, ARCH_KMALLOC_MINALIGN,
-                               (ARCH_KMALLOC_FLAGS | SLAB_PANIC), NULL, NULL);
+                                                     sizes[INDEX_AC].cs_size,
+                                                     ARCH_KMALLOC_MINALIGN,
+                                                     (ARCH_KMALLOC_FLAGS |
+                                                      SLAB_PANIC), NULL, NULL);
 
        if (INDEX_AC != INDEX_L3)
                sizes[INDEX_L3].cs_cachep =
-                       kmem_cache_create(names[INDEX_L3].name,
-                               sizes[INDEX_L3].cs_size, ARCH_KMALLOC_MINALIGN,
-                               (ARCH_KMALLOC_FLAGS | SLAB_PANIC), NULL, NULL);
+                   kmem_cache_create(names[INDEX_L3].name,
+                                     sizes[INDEX_L3].cs_size,
+                                     ARCH_KMALLOC_MINALIGN,
+                                     (ARCH_KMALLOC_FLAGS | SLAB_PANIC), NULL,
+                                     NULL);
 
        while (sizes->cs_size != ULONG_MAX) {
                /*
@@ -1091,47 +1154,53 @@ void __init kmem_cache_init(void)
                 * Note for systems short on memory removing the alignment will
                 * allow tighter packing of the smaller caches.
                 */
-               if(!sizes->cs_cachep)
+               if (!sizes->cs_cachep)
                        sizes->cs_cachep = kmem_cache_create(names->name,
-                               sizes->cs_size, ARCH_KMALLOC_MINALIGN,
-                               (ARCH_KMALLOC_FLAGS | SLAB_PANIC), NULL, NULL);
+                                                            sizes->cs_size,
+                                                            ARCH_KMALLOC_MINALIGN,
+                                                            (ARCH_KMALLOC_FLAGS
+                                                             | SLAB_PANIC),
+                                                            NULL, NULL);
 
                /* Inc off-slab bufctl limit until the ceiling is hit. */
                if (!(OFF_SLAB(sizes->cs_cachep))) {
-                       offslab_limit = sizes->cs_size-sizeof(struct slab);
+                       offslab_limit = sizes->cs_size - sizeof(struct slab);
                        offslab_limit /= sizeof(kmem_bufctl_t);
                }
 
                sizes->cs_dmacachep = kmem_cache_create(names->name_dma,
-                       sizes->cs_size, ARCH_KMALLOC_MINALIGN,
-                       (ARCH_KMALLOC_FLAGS | SLAB_CACHE_DMA | SLAB_PANIC),
-                       NULL, NULL);
+                                                       sizes->cs_size,
+                                                       ARCH_KMALLOC_MINALIGN,
+                                                       (ARCH_KMALLOC_FLAGS |
+                                                        SLAB_CACHE_DMA |
+                                                        SLAB_PANIC), NULL,
+                                                       NULL);
 
                sizes++;
                names++;
        }
        /* 4) Replace the bootstrap head arrays */
        {
-               void * ptr;
+               void *ptr;
 
                ptr = kmalloc(sizeof(struct arraycache_init), GFP_KERNEL);
 
                local_irq_disable();
-               BUG_ON(ac_data(&cache_cache) != &initarray_cache.cache);
-               memcpy(ptr, ac_data(&cache_cache),
-                               sizeof(struct arraycache_init));
+               BUG_ON(cpu_cache_get(&cache_cache) != &initarray_cache.cache);
+               memcpy(ptr, cpu_cache_get(&cache_cache),
+                      sizeof(struct arraycache_init));
                cache_cache.array[smp_processor_id()] = ptr;
                local_irq_enable();
 
                ptr = kmalloc(sizeof(struct arraycache_init), GFP_KERNEL);
 
                local_irq_disable();
-               BUG_ON(ac_data(malloc_sizes[INDEX_AC].cs_cachep)
-                               != &initarray_generic.cache);
-               memcpy(ptr, ac_data(malloc_sizes[INDEX_AC].cs_cachep),
-                               sizeof(struct arraycache_init));
+               BUG_ON(cpu_cache_get(malloc_sizes[INDEX_AC].cs_cachep)
+                      != &initarray_generic.cache);
+               memcpy(ptr, cpu_cache_get(malloc_sizes[INDEX_AC].cs_cachep),
+                      sizeof(struct arraycache_init));
                malloc_sizes[INDEX_AC].cs_cachep->array[smp_processor_id()] =
-                                               ptr;
+                   ptr;
                local_irq_enable();
        }
        /* 5) Replace the bootstrap kmem_list3's */
@@ -1139,34 +1208,34 @@ void __init kmem_cache_init(void)
                int node;
                /* Replace the static kmem_list3 structures for the boot cpu */
                init_list(&cache_cache, &initkmem_list3[CACHE_CACHE],
-                               numa_node_id());
+                         numa_node_id());
 
                for_each_online_node(node) {
                        init_list(malloc_sizes[INDEX_AC].cs_cachep,
-                                       &initkmem_list3[SIZE_AC+node], node);
+                                 &initkmem_list3[SIZE_AC + node], node);
 
                        if (INDEX_AC != INDEX_L3) {
                                init_list(malloc_sizes[INDEX_L3].cs_cachep,
-                                               &initkmem_list3[SIZE_L3+node],
-                                               node);
+                                         &initkmem_list3[SIZE_L3 + node],
+                                         node);
                        }
                }
        }
 
        /* 6) resize the head arrays to their final sizes */
        {
-               kmem_cache_t *cachep;
-               down(&cache_chain_sem);
+               struct kmem_cache *cachep;
+               mutex_lock(&cache_chain_mutex);
                list_for_each_entry(cachep, &cache_chain, next)
-                       enable_cpucache(cachep);
-               up(&cache_chain_sem);
+                   enable_cpucache(cachep);
+               mutex_unlock(&cache_chain_mutex);
        }
 
        /* Done! */
        g_cpucache_up = FULL;
 
        /* Register a cpu startup notifier callback
-        * that initializes ac_data for all new cpus
+        * that initializes cpu_cache_get for all new cpus
         */
        register_cpu_notifier(&cpucache_notifier);
 
@@ -1184,7 +1253,7 @@ static int __init cpucache_init(void)
         * pages to gfp.
         */
        for_each_online_cpu(cpu)
-               start_cpu_timer(cpu);
+           start_cpu_timer(cpu);
 
        return 0;
 }
@@ -1198,7 +1267,7 @@ __initcall(cpucache_init);
  * did not request dmaable memory, we might get it, but that
  * would be relatively rare and ignorable.
  */
-static void *kmem_getpages(kmem_cache_t *cachep, gfp_t flags, int nodeid)
+static void *kmem_getpages(struct kmem_cache *cachep, gfp_t flags, int nodeid)
 {
        struct page *page;
        void *addr;
@@ -1224,9 +1293,9 @@ static void *kmem_getpages(kmem_cache_t *cachep, gfp_t flags, int nodeid)
 /*
  * Interface to system's page release.
  */
-static void kmem_freepages(kmem_cache_t *cachep, void *addr)
+static void kmem_freepages(struct kmem_cache *cachep, void *addr)
 {
-       unsigned long i = (1<<cachep->gfporder);
+       unsigned long i = (1 << cachep->gfporder);
        struct page *page = virt_to_page(addr);
        const unsigned long nr_freed = i;
 
@@ -1239,14 +1308,14 @@ static void kmem_freepages(kmem_cache_t *cachep, void *addr)
        if (current->reclaim_state)
                current->reclaim_state->reclaimed_slab += nr_freed;
        free_pages((unsigned long)addr, cachep->gfporder);
-       if (cachep->flags & SLAB_RECLAIM_ACCOUNT) 
-               atomic_sub(1<<cachep->gfporder, &slab_reclaim_pages);
+       if (cachep->flags & SLAB_RECLAIM_ACCOUNT)
+               atomic_sub(1 << cachep->gfporder, &slab_reclaim_pages);
 }
 
 static void kmem_rcu_free(struct rcu_head *head)
 {
-       struct slab_rcu *slab_rcu = (struct slab_rcu *) head;
-       kmem_cache_t *cachep = slab_rcu->cachep;
+       struct slab_rcu *slab_rcu = (struct slab_rcu *)head;
+       struct kmem_cache *cachep = slab_rcu->cachep;
 
        kmem_freepages(cachep, slab_rcu->addr);
        if (OFF_SLAB(cachep))
@@ -1256,20 +1325,20 @@ static void kmem_rcu_free(struct rcu_head *head)
 #if DEBUG
 
 #ifdef CONFIG_DEBUG_PAGEALLOC
-static void store_stackinfo(kmem_cache_t *cachep, unsigned long *addr,
-                               unsigned long caller)
+static void store_stackinfo(struct kmem_cache *cachep, unsigned long *addr,
+                           unsigned long caller)
 {
-       int size = obj_reallen(cachep);
+       int size = obj_size(cachep);
 
-       addr = (unsigned long *)&((char*)addr)[obj_dbghead(cachep)];
+       addr = (unsigned long *)&((char *)addr)[obj_offset(cachep)];
 
-       if (size < 5*sizeof(unsigned long))
+       if (size < 5 * sizeof(unsigned long))
                return;
 
-       *addr++=0x12345678;
-       *addr++=caller;
-       *addr++=smp_processor_id();
-       size -= 3*sizeof(unsigned long);
+       *addr++ = 0x12345678;
+       *addr++ = caller;
+       *addr++ = smp_processor_id();
+       size -= 3 * sizeof(unsigned long);
        {
                unsigned long *sptr = &caller;
                unsigned long svalue;
@@ -1277,7 +1346,7 @@ static void store_stackinfo(kmem_cache_t *cachep, unsigned long *addr,
                while (!kstack_end(sptr)) {
                        svalue = *sptr++;
                        if (kernel_text_address(svalue)) {
-                               *addr++=svalue;
+                               *addr++ = svalue;
                                size -= sizeof(unsigned long);
                                if (size <= sizeof(unsigned long))
                                        break;
@@ -1285,25 +1354,25 @@ static void store_stackinfo(kmem_cache_t *cachep, unsigned long *addr,
                }
 
        }
-       *addr++=0x87654321;
+       *addr++ = 0x87654321;
 }
 #endif
 
-static void poison_obj(kmem_cache_t *cachep, void *addr, unsigned char val)
+static void poison_obj(struct kmem_cache *cachep, void *addr, unsigned char val)
 {
-       int size = obj_reallen(cachep);
-       addr = &((char*)addr)[obj_dbghead(cachep)];
+       int size = obj_size(cachep);
+       addr = &((char *)addr)[obj_offset(cachep)];
 
        memset(addr, val, size);
-       *(unsigned char *)(addr+size-1) = POISON_END;
+       *(unsigned char *)(addr + size - 1) = POISON_END;
 }
 
 static void dump_line(char *data, int offset, int limit)
 {
        int i;
        printk(KERN_ERR "%03x:", offset);
-       for (i=0;i<limit;i++) {
-               printk(" %02x", (unsigned char)data[offset+i]);
+       for (i = 0; i < limit; i++) {
+               printk(" %02x", (unsigned char)data[offset + i]);
        }
        printk("\n");
 }
@@ -1311,62 +1380,63 @@ static void dump_line(char *data, int offset, int limit)
 
 #if DEBUG
 
-static void print_objinfo(kmem_cache_t *cachep, void *objp, int lines)
+static void print_objinfo(struct kmem_cache *cachep, void *objp, int lines)
 {
        int i, size;
        char *realobj;
 
        if (cachep->flags & SLAB_RED_ZONE) {
                printk(KERN_ERR "Redzone: 0x%lx/0x%lx.\n",
-                       *dbg_redzone1(cachep, objp),
-                       *dbg_redzone2(cachep, objp));
+                      *dbg_redzone1(cachep, objp),
+                      *dbg_redzone2(cachep, objp));
        }
 
        if (cachep->flags & SLAB_STORE_USER) {
                printk(KERN_ERR "Last user: [<%p>]",
-                               *dbg_userword(cachep, objp));
+                      *dbg_userword(cachep, objp));
                print_symbol("(%s)",
-                               (unsigned long)*dbg_userword(cachep, objp));
+                            (unsigned long)*dbg_userword(cachep, objp));
                printk("\n");
        }
-       realobj = (char*)objp+obj_dbghead(cachep);
-       size = obj_reallen(cachep);
-       for (i=0; i<size && lines;i+=16, lines--) {
+       realobj = (char *)objp + obj_offset(cachep);
+       size = obj_size(cachep);
+       for (i = 0; i < size && lines; i += 16, lines--) {
                int limit;
                limit = 16;
-               if (i+limit > size)
-                       limit = size-i;
+               if (i + limit > size)
+                       limit = size - i;
                dump_line(realobj, i, limit);
        }
 }
 
-static void check_poison_obj(kmem_cache_t *cachep, void *objp)
+static void check_poison_obj(struct kmem_cache *cachep, void *objp)
 {
        char *realobj;
        int size, i;
        int lines = 0;
 
-       realobj = (char*)objp+obj_dbghead(cachep);
-       size = obj_reallen(cachep);
+       realobj = (char *)objp + obj_offset(cachep);
+       size = obj_size(cachep);
 
-       for (i=0;i<size;i++) {
+       for (i = 0; i < size; i++) {
                char exp = POISON_FREE;
-               if (i == size-1)
+               if (i == size - 1)
                        exp = POISON_END;
                if (realobj[i] != exp) {
                        int limit;
                        /* Mismatch ! */
                        /* Print header */
                        if (lines == 0) {
-                               printk(KERN_ERR "Slab corruption: start=%p, len=%d\n",
-                                               realobj, size);
+                               printk(KERN_ERR
+                                      "Slab corruption: start=%p, len=%d\n",
+                                      realobj, size);
                                print_objinfo(cachep, objp, 0);
                        }
                        /* Hexdump the affected line */
-                       i = (i/16)*16;
+                       i = (i / 16) * 16;
                        limit = 16;
-                       if (i+limit > size)
-                               limit = size-i;
+                       if (i + limit > size)
+                               limit = size - i;
                        dump_line(realobj, i, limit);
                        i += 16;
                        lines++;
@@ -1379,45 +1449,46 @@ static void check_poison_obj(kmem_cache_t *cachep, void *objp)
                /* Print some data about the neighboring objects, if they
                 * exist:
                 */
-               struct slab *slabp = page_get_slab(virt_to_page(objp));
+               struct slab *slabp = virt_to_slab(objp);
                int objnr;
 
-               objnr = (objp-slabp->s_mem)/cachep->objsize;
+               objnr = (unsigned)(objp - slabp->s_mem) / cachep->buffer_size;
                if (objnr) {
-                       objp = slabp->s_mem+(objnr-1)*cachep->objsize;
-                       realobj = (char*)objp+obj_dbghead(cachep);
+                       objp = slabp->s_mem + (objnr - 1) * cachep->buffer_size;
+                       realobj = (char *)objp + obj_offset(cachep);
                        printk(KERN_ERR "Prev obj: start=%p, len=%d\n",
-                                               realobj, size);
+                              realobj, size);
                        print_objinfo(cachep, objp, 2);
                }
-               if (objnr+1 < cachep->num) {
-                       objp = slabp->s_mem+(objnr+1)*cachep->objsize;
-                       realobj = (char*)objp+obj_dbghead(cachep);
+               if (objnr + 1 < cachep->num) {
+                       objp = slabp->s_mem + (objnr + 1) * cachep->buffer_size;
+                       realobj = (char *)objp + obj_offset(cachep);
                        printk(KERN_ERR "Next obj: start=%p, len=%d\n",
-                                               realobj, size);
+                              realobj, size);
                        print_objinfo(cachep, objp, 2);
                }
        }
 }
 #endif
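
The poisoning scheme used by poison_obj() and check_poison_obj() above is easy to reproduce outside the kernel: a freed object is filled with POISON_FREE and its last byte set to POISON_END, and any later write into the dead object shows up as a mismatch. A small sketch using the familiar 0x6b/0xa5 poison values:

#include <stdio.h>
#include <string.h>

#define POISON_FREE	0x6b	/* filler for freed objects */
#define POISON_END	0xa5	/* marks the last byte of the poisoned area */

static void poison(unsigned char *obj, size_t size)
{
	memset(obj, POISON_FREE, size);
	obj[size - 1] = POISON_END;
}

static int check_poison(const unsigned char *obj, size_t size)
{
	size_t i;

	for (i = 0; i < size; i++) {
		unsigned char expect = (i == size - 1) ? POISON_END : POISON_FREE;

		if (obj[i] != expect) {
			printf("corruption at byte %zu: 0x%02x\n", i, obj[i]);
			return -1;
		}
	}
	return 0;
}

int main(void)
{
	unsigned char obj[32];

	poison(obj, sizeof(obj));
	obj[10] = 0;			/* simulate a use-after-free write */
	return check_poison(obj, sizeof(obj)) ? 1 : 0;
}
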
 
-/* Destroy all the objs in a slab, and release the mem back to the system.
- * Before calling the slab must have been unlinked from the cache.
- * The cache-lock is not held/needed.
+#if DEBUG
+/**
+ * slab_destroy_objs - call the registered destructor for each object in
+ *      a slab that is to be destroyed.
  */
-static void slab_destroy (kmem_cache_t *cachep, struct slab *slabp)
+static void slab_destroy_objs(struct kmem_cache *cachep, struct slab *slabp)
 {
-       void *addr = slabp->s_mem - slabp->colouroff;
-
-#if DEBUG
        int i;
        for (i = 0; i < cachep->num; i++) {
-               void *objp = slabp->s_mem + cachep->objsize * i;
+               void *objp = slabp->s_mem + cachep->buffer_size * i;
 
                if (cachep->flags & SLAB_POISON) {
 #ifdef CONFIG_DEBUG_PAGEALLOC
-                       if ((cachep->objsize%PAGE_SIZE)==0 && OFF_SLAB(cachep))
-                               kernel_map_pages(virt_to_page(objp), cachep->objsize/PAGE_SIZE,1);
+                       if ((cachep->buffer_size % PAGE_SIZE) == 0
+                           && OFF_SLAB(cachep))
+                               kernel_map_pages(virt_to_page(objp),
+                                                cachep->buffer_size / PAGE_SIZE,
+                                                1);
                        else
                                check_poison_obj(cachep, objp);
 #else
@@ -1427,28 +1498,42 @@ static void slab_destroy (kmem_cache_t *cachep, struct slab *slabp)
                if (cachep->flags & SLAB_RED_ZONE) {
                        if (*dbg_redzone1(cachep, objp) != RED_INACTIVE)
                                slab_error(cachep, "start of a freed object "
-                                                       "was overwritten");
+                                          "was overwritten");
                        if (*dbg_redzone2(cachep, objp) != RED_INACTIVE)
                                slab_error(cachep, "end of a freed object "
-                                                       "was overwritten");
+                                          "was overwritten");
                }
                if (cachep->dtor && !(cachep->flags & SLAB_POISON))
-                       (cachep->dtor)(objp+obj_dbghead(cachep), cachep, 0);
+                       (cachep->dtor) (objp + obj_offset(cachep), cachep, 0);
        }
+}
 #else
+static void slab_destroy_objs(struct kmem_cache *cachep, struct slab *slabp)
+{
        if (cachep->dtor) {
                int i;
                for (i = 0; i < cachep->num; i++) {
-                       void* objp = slabp->s_mem+cachep->objsize*i;
-                       (cachep->dtor)(objp, cachep, 0);
+                       void *objp = slabp->s_mem + cachep->buffer_size * i;
+                       (cachep->dtor) (objp, cachep, 0);
                }
        }
+}
 #endif
 
+/**
+ * Destroy all the objs in a slab, and release the mem back to the system.
+ * Before calling the slab must have been unlinked from the cache.
+ * The cache-lock is not held/needed.
+ */
+static void slab_destroy(struct kmem_cache *cachep, struct slab *slabp)
+{
+       void *addr = slabp->s_mem - slabp->colouroff;
+
+       slab_destroy_objs(cachep, slabp);
        if (unlikely(cachep->flags & SLAB_DESTROY_BY_RCU)) {
                struct slab_rcu *slab_rcu;
 
-               slab_rcu = (struct slab_rcu *) slabp;
+               slab_rcu = (struct slab_rcu *)slabp;
                slab_rcu->cachep = cachep;
                slab_rcu->addr = addr;
                call_rcu(&slab_rcu->head, kmem_rcu_free);
@@ -1459,20 +1544,72 @@ static void slab_destroy (kmem_cache_t *cachep, struct slab *slabp)
        }
 }
 
-/* For setting up all the kmem_list3s for cache whose objsize is same
+/* For setting up all the kmem_list3s for cache whose buffer_size is same
    as size of kmem_list3. */
-static inline void set_up_list3s(kmem_cache_t *cachep, int index)
+static void set_up_list3s(struct kmem_cache *cachep, int index)
 {
        int node;
 
        for_each_online_node(node) {
-               cachep->nodelists[node] = &initkmem_list3[index+node];
+               cachep->nodelists[node] = &initkmem_list3[index + node];
                cachep->nodelists[node]->next_reap = jiffies +
-                       REAPTIMEOUT_LIST3 +
-                       ((unsigned long)cachep)%REAPTIMEOUT_LIST3;
+                   REAPTIMEOUT_LIST3 +
+                   ((unsigned long)cachep) % REAPTIMEOUT_LIST3;
        }
 }
 
+/**
+ * calculate_slab_order - calculate size (page order) of slabs
+ * @cachep: pointer to the cache that is being created
+ * @size: size of objects to be created in this cache.
+ * @align: required alignment for the objects.
+ * @flags: slab allocation flags
+ *
+ * Also calculates the number of objects per slab.
+ *
+ * This could be made much more intelligent.  For now, try to avoid using
+ * high order pages for slabs.  When the gfp() functions are more friendly
+ * towards high-order requests, this should be changed.
+ */
+static inline size_t calculate_slab_order(struct kmem_cache *cachep,
+                       size_t size, size_t align, unsigned long flags)
+{
+       size_t left_over = 0;
+
+       for (;; cachep->gfporder++) {
+               unsigned int num;
+               size_t remainder;
+
+               if (cachep->gfporder > MAX_GFP_ORDER) {
+                       cachep->num = 0;
+                       break;
+               }
+
+               cache_estimate(cachep->gfporder, size, align, flags,
+                              &remainder, &num);
+               if (!num)
+                       continue;
+               /* More than offslab_limit objects will cause problems */
+               if (flags & CFLGS_OFF_SLAB && cachep->num > offslab_limit)
+                       break;
+
+               cachep->num = num;
+               left_over = remainder;
+
+               /*
+                * Large number of objects is good, but very large slabs are
+                * currently bad for the gfp()s.
+                */
+               if (cachep->gfporder >= slab_break_gfp_order)
+                       break;
+
+               if ((left_over * 8) <= (PAGE_SIZE << cachep->gfporder))
+                       /* Acceptable internal fragmentation */
+                       break;
+       }
+       return left_over;
+}
+
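
The loop in calculate_slab_order() above keeps raising the page order until the object fits, then stops either at the break-even order (very large slabs strain the page allocator) or as soon as at most one eighth of the slab is wasted. A simplified stand-alone sketch of those termination rules, ignoring management overhead and using assumed constants:

#include <stdio.h>

#define PAGE_SIZE		4096UL
#define MAX_GFP_ORDER		5	/* assumed ceiling on slab page order */
#define BREAK_GFP_ORDER		1	/* assumed slab_break_gfp_order */

int main(void)
{
	unsigned long buffer_size = 1400;	/* hypothetical object size */
	unsigned long gfporder, num = 0, left_over = 0;

	for (gfporder = 0; gfporder <= MAX_GFP_ORDER; gfporder++) {
		unsigned long slab_size = PAGE_SIZE << gfporder;
		unsigned long n = slab_size / buffer_size;	/* simplified estimate */

		if (!n)
			continue;	/* object does not fit yet, try a bigger slab */

		num = n;
		left_over = slab_size - n * buffer_size;

		/* very large slabs are bad for the page allocator */
		if (gfporder >= BREAK_GFP_ORDER)
			break;

		/* accept once at most 1/8 of the slab is wasted */
		if (left_over * 8 <= slab_size)
			break;
	}
	printf("order %lu: %lu objects, %lu bytes wasted\n", gfporder, num, left_over);
	return 0;
}
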
 /**
  * kmem_cache_create - Create a cache.
  * @name: A string which is used in /proc/slabinfo to identify this cache.
@@ -1506,32 +1643,31 @@ static inline void set_up_list3s(kmem_cache_t *cachep, int index)
  * cacheline.  This can be beneficial if you're counting cycles as closely
  * as davem.
  */
-kmem_cache_t *
+struct kmem_cache *
 kmem_cache_create (const char *name, size_t size, size_t align,
-       unsigned long flags, void (*ctor)(void*, kmem_cache_t *, unsigned long),
-       void (*dtor)(void*, kmem_cache_t *, unsigned long))
+       unsigned long flags, void (*ctor)(void*, struct kmem_cache *, unsigned long),
+       void (*dtor)(void*, struct kmem_cache *, unsigned long))
 {
        size_t left_over, slab_size, ralign;
-       kmem_cache_t *cachep = NULL;
+       struct kmem_cache *cachep = NULL;
        struct list_head *p;
 
        /*
         * Sanity checks... these are all serious usage bugs.
         */
        if ((!name) ||
-               in_interrupt() ||
-               (size < BYTES_PER_WORD) ||
-               (size > (1<<MAX_OBJ_ORDER)*PAGE_SIZE) ||
-               (dtor && !ctor)) {
-                       printk(KERN_ERR "%s: Early error in slab %s\n",
-                                       __FUNCTION__, name);
-                       BUG();
-               }
+           in_interrupt() ||
+           (size < BYTES_PER_WORD) ||
+           (size > (1 << MAX_OBJ_ORDER) * PAGE_SIZE) || (dtor && !ctor)) {
+               printk(KERN_ERR "%s: Early error in slab %s\n",
+                      __FUNCTION__, name);
+               BUG();
+       }
 
-       down(&cache_chain_sem);
+       mutex_lock(&cache_chain_mutex);
 
        list_for_each(p, &cache_chain) {
-               kmem_cache_t *pc = list_entry(p, kmem_cache_t, next);
+               struct kmem_cache *pc = list_entry(p, struct kmem_cache, next);
                mm_segment_t old_fs = get_fs();
                char tmp;
                int res;
@@ -1546,11 +1682,11 @@ kmem_cache_create (const char *name, size_t size, size_t align,
                set_fs(old_fs);
                if (res) {
                        printk("SLAB: cache with size %d has lost its name\n",
-                                       pc->objsize);
+                              pc->buffer_size);
                        continue;
                }
 
-               if (!strcmp(pc->name,name)) {
+               if (!strcmp(pc->name, name)) {
                        printk("kmem_cache_create: duplicate cache %s\n", name);
                        dump_stack();
                        goto oops;
@@ -1562,10 +1698,9 @@ kmem_cache_create (const char *name, size_t size, size_t align,
        if ((flags & SLAB_DEBUG_INITIAL) && !ctor) {
                /* No constructor, but inital state check requested */
                printk(KERN_ERR "%s: No con, but init state check "
-                               "requested - %s\n", __FUNCTION__, name);
+                      "requested - %s\n", __FUNCTION__, name);
                flags &= ~SLAB_DEBUG_INITIAL;
        }
-
 #if FORCED_DEBUG
        /*
         * Enable redzoning and last user accounting, except for caches with
@@ -1573,8 +1708,9 @@ kmem_cache_create (const char *name, size_t size, size_t align,
         * above the next power of two: caches with object sizes just above a
         * power of two have a significant amount of internal fragmentation.
         */
-       if ((size < 4096 || fls(size-1) == fls(size-1+3*BYTES_PER_WORD)))
-               flags |= SLAB_RED_ZONE|SLAB_STORE_USER;
+       if ((size < 4096
+            || fls(size - 1) == fls(size - 1 + 3 * BYTES_PER_WORD)))
+               flags |= SLAB_RED_ZONE | SLAB_STORE_USER;
        if (!(flags & SLAB_DESTROY_BY_RCU))
                flags |= SLAB_POISON;
 #endif
@@ -1595,9 +1731,9 @@ kmem_cache_create (const char *name, size_t size, size_t align,
         * unaligned accesses for some archs when redzoning is used, and makes
         * sure any on-slab bufctl's are also correctly aligned.
         */
-       if (size & (BYTES_PER_WORD-1)) {
-               size += (BYTES_PER_WORD-1);
-               size &= ~(BYTES_PER_WORD-1);
+       if (size & (BYTES_PER_WORD - 1)) {
+               size += (BYTES_PER_WORD - 1);
+               size &= ~(BYTES_PER_WORD - 1);
        }
 
        /* calculate out the final buffer alignment: */
@@ -1608,7 +1744,7 @@ kmem_cache_create (const char *name, size_t size, size_t align,
                 * objects into one cacheline.
                 */
                ralign = cache_line_size();
-               while (size <= ralign/2)
+               while (size <= ralign / 2)
                        ralign /= 2;
        } else {
                ralign = BYTES_PER_WORD;
@@ -1617,13 +1753,13 @@ kmem_cache_create (const char *name, size_t size, size_t align,
        if (ralign < ARCH_SLAB_MINALIGN) {
                ralign = ARCH_SLAB_MINALIGN;
                if (ralign > BYTES_PER_WORD)
-                       flags &= ~(SLAB_RED_ZONE|SLAB_STORE_USER);
+                       flags &= ~(SLAB_RED_ZONE | SLAB_STORE_USER);
        }
        /* 3) caller mandated alignment: disables debug if necessary */
        if (ralign < align) {
                ralign = align;
                if (ralign > BYTES_PER_WORD)
-                       flags &= ~(SLAB_RED_ZONE|SLAB_STORE_USER);
+                       flags &= ~(SLAB_RED_ZONE | SLAB_STORE_USER);
        }
        /* 4) Store it. Note that the debug code below can reduce
         *    the alignment to BYTES_PER_WORD.
@@ -1631,21 +1767,21 @@ kmem_cache_create (const char *name, size_t size, size_t align,
        align = ralign;
 
        /* Get cache's description obj. */
-       cachep = (kmem_cache_t *) kmem_cache_alloc(&cache_cache, SLAB_KERNEL);
+       cachep = kmem_cache_alloc(&cache_cache, SLAB_KERNEL);
        if (!cachep)
                goto oops;
-       memset(cachep, 0, sizeof(kmem_cache_t));
+       memset(cachep, 0, sizeof(struct kmem_cache));
 
 #if DEBUG
-       cachep->reallen = size;
+       cachep->obj_size = size;
 
        if (flags & SLAB_RED_ZONE) {
                /* redzoning only works with word aligned caches */
                align = BYTES_PER_WORD;
 
                /* add space for red zone words */
-               cachep->dbghead += BYTES_PER_WORD;
-               size += 2*BYTES_PER_WORD;
+               cachep->obj_offset += BYTES_PER_WORD;
+               size += 2 * BYTES_PER_WORD;
        }
        if (flags & SLAB_STORE_USER) {
                /* user store requires word alignment and
@@ -1656,15 +1792,16 @@ kmem_cache_create (const char *name, size_t size, size_t align,
                size += BYTES_PER_WORD;
        }
 #if FORCED_DEBUG && defined(CONFIG_DEBUG_PAGEALLOC)
-       if (size >= malloc_sizes[INDEX_L3+1].cs_size && cachep->reallen > cache_line_size() && size < PAGE_SIZE) {
-               cachep->dbghead += PAGE_SIZE - size;
+       if (size >= malloc_sizes[INDEX_L3 + 1].cs_size
+           && cachep->obj_size > cache_line_size() && size < PAGE_SIZE) {
+               cachep->obj_offset += PAGE_SIZE - size;
                size = PAGE_SIZE;
        }
 #endif
 #endif
 
        /* Determine if the slab management is 'on' or 'off' slab. */
-       if (size >= (PAGE_SIZE>>3))
+       if (size >= (PAGE_SIZE >> 3))
                /*
                 * Size is large, assume best to place the slab management obj
                 * off-slab (should allow better packing of objs).
@@ -1681,47 +1818,9 @@ kmem_cache_create (const char *name, size_t size, size_t align,
                 */
                cachep->gfporder = 0;
                cache_estimate(cachep->gfporder, size, align, flags,
-                                       &left_over, &cachep->num);
-       } else {
-               /*
-                * Calculate size (in pages) of slabs, and the num of objs per
-                * slab.  This could be made much more intelligent.  For now,
-                * try to avoid using high page-orders for slabs.  When the
-                * gfp() funcs are more friendly towards high-order requests,
-                * this should be changed.
-                */
-               do {
-                       unsigned int break_flag = 0;
-cal_wastage:
-                       cache_estimate(cachep->gfporder, size, align, flags,
-                                               &left_over, &cachep->num);
-                       if (break_flag)
-                               break;
-                       if (cachep->gfporder >= MAX_GFP_ORDER)
-                               break;
-                       if (!cachep->num)
-                               goto next;
-                       if (flags & CFLGS_OFF_SLAB &&
-                                       cachep->num > offslab_limit) {
-                               /* This num of objs will cause problems. */
-                               cachep->gfporder--;
-                               break_flag++;
-                               goto cal_wastage;
-                       }
-
-                       /*
-                        * Large num of objs is good, but v. large slabs are
-                        * currently bad for the gfp()s.
-                        */
-                       if (cachep->gfporder >= slab_break_gfp_order)
-                               break;
-
-                       if ((left_over*8) <= (PAGE_SIZE<<cachep->gfporder))
-                               break;  /* Acceptable internal fragmentation. */
-next:
-                       cachep->gfporder++;
-               } while (1);
-       }
+                              &left_over, &cachep->num);
+       } else
+               left_over = calculate_slab_order(cachep, size, align, flags);
 
        if (!cachep->num) {
                printk("kmem_cache_create: couldn't create cache %s.\n", name);
@@ -1729,8 +1828,8 @@ next:
                cachep = NULL;
                goto oops;
        }
-       slab_size = ALIGN(cachep->num*sizeof(kmem_bufctl_t)
-                               + sizeof(struct slab), align);
+       slab_size = ALIGN(cachep->num * sizeof(kmem_bufctl_t)
+                         + sizeof(struct slab), align);
 
        /*
         * If the slab has been placed off-slab, and we have enough space then
@@ -1743,21 +1842,22 @@ next:
 
        if (flags & CFLGS_OFF_SLAB) {
                /* really off slab. No need for manual alignment */
-               slab_size = cachep->num*sizeof(kmem_bufctl_t)+sizeof(struct slab);
+               slab_size =
+                   cachep->num * sizeof(kmem_bufctl_t) + sizeof(struct slab);
        }
 
        cachep->colour_off = cache_line_size();
        /* Offset must be a multiple of the alignment. */
        if (cachep->colour_off < align)
                cachep->colour_off = align;
-       cachep->colour = left_over/cachep->colour_off;
+       cachep->colour = left_over / cachep->colour_off;
        cachep->slab_size = slab_size;
        cachep->flags = flags;
        cachep->gfpflags = 0;
        if (flags & SLAB_CACHE_DMA)
                cachep->gfpflags |= GFP_DMA;
        spin_lock_init(&cachep->spinlock);
-       cachep->objsize = size;
+       cachep->buffer_size = size;
 
        if (flags & CFLGS_OFF_SLAB)
                cachep->slabp_cache = kmem_find_general_cachep(slab_size, 0u);
@@ -1777,7 +1877,7 @@ next:
                         * the creation of further caches will BUG().
                         */
                        cachep->array[smp_processor_id()] =
-                               &initarray_generic.cache;
+                           &initarray_generic.cache;
 
                        /* If the cache that's used by
                         * kmalloc(sizeof(kmem_list3)) is the first cache,
@@ -1791,8 +1891,7 @@ next:
                                g_cpucache_up = PARTIAL_AC;
                } else {
                        cachep->array[smp_processor_id()] =
-                               kmalloc(sizeof(struct arraycache_init),
-                                               GFP_KERNEL);
+                           kmalloc(sizeof(struct arraycache_init), GFP_KERNEL);
 
                        if (g_cpucache_up == PARTIAL_AC) {
                                set_up_list3s(cachep, SIZE_L3);
@@ -1802,34 +1901,36 @@ next:
                                for_each_online_node(node) {
 
                                        cachep->nodelists[node] =
-                                               kmalloc_node(sizeof(struct kmem_list3),
-                                                               GFP_KERNEL, node);
+                                           kmalloc_node(sizeof
+                                                        (struct kmem_list3),
+                                                        GFP_KERNEL, node);
                                        BUG_ON(!cachep->nodelists[node]);
-                                       kmem_list3_init(cachep->nodelists[node]);
+                                       kmem_list3_init(cachep->
+                                                       nodelists[node]);
                                }
                        }
                }
                cachep->nodelists[numa_node_id()]->next_reap =
-                       jiffies + REAPTIMEOUT_LIST3 +
-                       ((unsigned long)cachep)%REAPTIMEOUT_LIST3;
-
-               BUG_ON(!ac_data(cachep));
-               ac_data(cachep)->avail = 0;
-               ac_data(cachep)->limit = BOOT_CPUCACHE_ENTRIES;
-               ac_data(cachep)->batchcount = 1;
-               ac_data(cachep)->touched = 0;
+                   jiffies + REAPTIMEOUT_LIST3 +
+                   ((unsigned long)cachep) % REAPTIMEOUT_LIST3;
+
+               BUG_ON(!cpu_cache_get(cachep));
+               cpu_cache_get(cachep)->avail = 0;
+               cpu_cache_get(cachep)->limit = BOOT_CPUCACHE_ENTRIES;
+               cpu_cache_get(cachep)->batchcount = 1;
+               cpu_cache_get(cachep)->touched = 0;
                cachep->batchcount = 1;
                cachep->limit = BOOT_CPUCACHE_ENTRIES;
-       } 
+       }
 
        /* cache setup completed, link it into the list */
        list_add(&cachep->next, &cache_chain);
        unlock_cpu_hotplug();
-oops:
+      oops:
        if (!cachep && (flags & SLAB_PANIC))
                panic("kmem_cache_create(): failed to create slab `%s'\n",
-                       name);
-       up(&cache_chain_sem);
+                     name);
+       mutex_unlock(&cache_chain_mutex);
        return cachep;
 }
 EXPORT_SYMBOL(kmem_cache_create);
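For readers following the interface change above, a hedged sketch of how a caller would use kmem_cache_create() against this tree; struct foo, foo_ctor and the cache name are hypothetical, not taken from the kernel:

#include <linux/init.h>
#include <linux/module.h>
#include <linux/slab.h>

struct foo {                            /* hypothetical object type */
        int state;
};

static struct kmem_cache *foo_cache;

/* The constructor runs once per object when its slab page is populated. */
static void foo_ctor(void *obj, struct kmem_cache *cachep, unsigned long flags)
{
        struct foo *f = obj;

        f->state = 0;
}

static int __init foo_init(void)
{
        foo_cache = kmem_cache_create("foo_cache", sizeof(struct foo), 0,
                                      SLAB_HWCACHE_ALIGN, foo_ctor, NULL);
        if (!foo_cache)
                return -ENOMEM;
        return 0;
}

Objects would then come from kmem_cache_alloc(foo_cache, GFP_KERNEL) and go back via kmem_cache_free(foo_cache, obj).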
@@ -1845,7 +1946,7 @@ static void check_irq_on(void)
        BUG_ON(irqs_disabled());
 }
 
-static void check_spinlock_acquired(kmem_cache_t *cachep)
+static void check_spinlock_acquired(struct kmem_cache *cachep)
 {
 #ifdef CONFIG_SMP
        check_irq_off();
@@ -1853,7 +1954,7 @@ static void check_spinlock_acquired(kmem_cache_t *cachep)
 #endif
 }
 
-static inline void check_spinlock_acquired_node(kmem_cache_t *cachep, int node)
+static void check_spinlock_acquired_node(struct kmem_cache *cachep, int node)
 {
 #ifdef CONFIG_SMP
        check_irq_off();
@@ -1871,7 +1972,7 @@ static inline void check_spinlock_acquired_node(kmem_cache_t *cachep, int node)
 /*
  * Waits for all CPUs to execute func().
  */
-static void smp_call_function_all_cpus(void (*func) (void *arg), void *arg)
+static void smp_call_function_all_cpus(void (*func)(void *arg), void *arg)
 {
        check_irq_on();
        preempt_disable();
@@ -1886,24 +1987,24 @@ static void smp_call_function_all_cpus(void (*func) (void *arg), void *arg)
        preempt_enable();
 }
 
-static void drain_array_locked(kmem_cache_t* cachep,
-                               struct array_cache *ac, int force, int node);
+static void drain_array_locked(struct kmem_cache *cachep, struct array_cache *ac,
+                               int force, int node);
 
 static void do_drain(void *arg)
 {
-       kmem_cache_t *cachep = (kmem_cache_t*)arg;
+       struct kmem_cache *cachep = (struct kmem_cache *) arg;
        struct array_cache *ac;
        int node = numa_node_id();
 
        check_irq_off();
-       ac = ac_data(cachep);
+       ac = cpu_cache_get(cachep);
        spin_lock(&cachep->nodelists[node]->list_lock);
        free_block(cachep, ac->entry, ac->avail, node);
        spin_unlock(&cachep->nodelists[node]->list_lock);
        ac->avail = 0;
 }
 
-static void drain_cpu_caches(kmem_cache_t *cachep)
+static void drain_cpu_caches(struct kmem_cache *cachep)
 {
        struct kmem_list3 *l3;
        int node;
@@ -1911,7 +2012,7 @@ static void drain_cpu_caches(kmem_cache_t *cachep)
        smp_call_function_all_cpus(do_drain, cachep);
        check_irq_on();
        spin_lock_irq(&cachep->spinlock);
-       for_each_online_node(node)  {
+       for_each_online_node(node) {
                l3 = cachep->nodelists[node];
                if (l3) {
                        spin_lock(&l3->list_lock);
@@ -1924,7 +2025,7 @@ static void drain_cpu_caches(kmem_cache_t *cachep)
        spin_unlock_irq(&cachep->spinlock);
 }
 
-static int __node_shrink(kmem_cache_t *cachep, int node)
+static int __node_shrink(struct kmem_cache *cachep, int node)
 {
        struct slab *slabp;
        struct kmem_list3 *l3 = cachep->nodelists[node];
@@ -1949,12 +2050,11 @@ static int __node_shrink(kmem_cache_t *cachep, int node)
                slab_destroy(cachep, slabp);
                spin_lock_irq(&l3->list_lock);
        }
-       ret = !list_empty(&l3->slabs_full) ||
-               !list_empty(&l3->slabs_partial);
+       ret = !list_empty(&l3->slabs_full) || !list_empty(&l3->slabs_partial);
        return ret;
 }
 
-static int __cache_shrink(kmem_cache_t *cachep)
+static int __cache_shrink(struct kmem_cache *cachep)
 {
        int ret = 0, i = 0;
        struct kmem_list3 *l3;
@@ -1980,7 +2080,7 @@ static int __cache_shrink(kmem_cache_t *cachep)
  * Releases as many slabs as possible for a cache.
  * To help debugging, a zero exit status indicates all slabs were released.
  */
-int kmem_cache_shrink(kmem_cache_t *cachep)
+int kmem_cache_shrink(struct kmem_cache *cachep)
 {
        if (!cachep || in_interrupt())
                BUG();
@@ -1993,7 +2093,7 @@ EXPORT_SYMBOL(kmem_cache_shrink);
  * kmem_cache_destroy - delete a cache
  * @cachep: the cache to destroy
  *
- * Remove a kmem_cache_t object from the slab cache.
+ * Remove a struct kmem_cache object from the slab cache.
  * Returns 0 on success.
  *
  * It is expected this function will be called by a module when it is
@@ -2006,7 +2106,7 @@ EXPORT_SYMBOL(kmem_cache_shrink);
  * The caller must guarantee that no one will allocate memory from the cache
  * during the kmem_cache_destroy().
  */
-int kmem_cache_destroy(kmem_cache_t * cachep)
+int kmem_cache_destroy(struct kmem_cache *cachep)
 {
        int i;
        struct kmem_list3 *l3;
@@ -2018,18 +2118,18 @@ int kmem_cache_destroy(kmem_cache_t * cachep)
        lock_cpu_hotplug();
 
        /* Find the cache in the chain of caches. */
-       down(&cache_chain_sem);
+       mutex_lock(&cache_chain_mutex);
        /*
         * the chain is never empty, cache_cache is never destroyed
         */
        list_del(&cachep->next);
-       up(&cache_chain_sem);
+       mutex_unlock(&cache_chain_mutex);
 
        if (__cache_shrink(cachep)) {
                slab_error(cachep, "Can't free all objects");
-               down(&cache_chain_sem);
-               list_add(&cachep->next,&cache_chain);
-               up(&cache_chain_sem);
+               mutex_lock(&cache_chain_mutex);
+               list_add(&cachep->next, &cache_chain);
+               mutex_unlock(&cache_chain_mutex);
                unlock_cpu_hotplug();
                return 1;
        }
@@ -2038,7 +2138,7 @@ int kmem_cache_destroy(kmem_cache_t * cachep)
                synchronize_rcu();
 
        for_each_online_cpu(i)
-               kfree(cachep->array[i]);
+           kfree(cachep->array[i]);
 
        /* NUMA: free the list3 structures */
        for_each_online_node(i) {
@@ -2057,39 +2157,39 @@ int kmem_cache_destroy(kmem_cache_t * cachep)
 EXPORT_SYMBOL(kmem_cache_destroy);
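A corresponding hedged teardown sketch, continuing the hypothetical foo_cache from the earlier example; the return value matters because, as the code above shows, a non-zero result means live objects remain and the cache was re-linked onto the chain:

static void __exit foo_exit(void)
{
        /* Every object must already have been kmem_cache_free()d. */
        if (kmem_cache_destroy(foo_cache))
                printk(KERN_ERR "foo: objects still in use, cache not destroyed\n");
}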
 
 /* Get the memory for a slab management obj. */
-static struct slab* alloc_slabmgmt(kmem_cache_t *cachep, void *objp,
-                       int colour_off, gfp_t local_flags)
+static struct slab *alloc_slabmgmt(struct kmem_cache *cachep, void *objp,
+                                  int colour_off, gfp_t local_flags)
 {
        struct slab *slabp;
-       
+
        if (OFF_SLAB(cachep)) {
                /* Slab management obj is off-slab. */
                slabp = kmem_cache_alloc(cachep->slabp_cache, local_flags);
                if (!slabp)
                        return NULL;
        } else {
-               slabp = objp+colour_off;
+               slabp = objp + colour_off;
                colour_off += cachep->slab_size;
        }
        slabp->inuse = 0;
        slabp->colouroff = colour_off;
-       slabp->s_mem = objp+colour_off;
+       slabp->s_mem = objp + colour_off;
 
        return slabp;
 }
 
 static inline kmem_bufctl_t *slab_bufctl(struct slab *slabp)
 {
-       return (kmem_bufctl_t *)(slabp+1);
+       return (kmem_bufctl_t *) (slabp + 1);
 }
 
-static void cache_init_objs(kmem_cache_t *cachep,
-                       struct slab *slabp, unsigned long ctor_flags)
+static void cache_init_objs(struct kmem_cache *cachep,
+                           struct slab *slabp, unsigned long ctor_flags)
 {
        int i;
 
        for (i = 0; i < cachep->num; i++) {
-               void *objp = slabp->s_mem+cachep->objsize*i;
+               void *objp = slabp->s_mem + cachep->buffer_size * i;
 #if DEBUG
                /* need to poison the objs? */
                if (cachep->flags & SLAB_POISON)
@@ -2107,29 +2207,32 @@ static void cache_init_objs(kmem_cache_t *cachep,
                 * Otherwise, deadlock. They must also be threaded.
                 */
                if (cachep->ctor && !(cachep->flags & SLAB_POISON))
-                       cachep->ctor(objp+obj_dbghead(cachep), cachep, ctor_flags);
+                       cachep->ctor(objp + obj_offset(cachep), cachep,
+                                    ctor_flags);
 
                if (cachep->flags & SLAB_RED_ZONE) {
                        if (*dbg_redzone2(cachep, objp) != RED_INACTIVE)
                                slab_error(cachep, "constructor overwrote the"
-                                                       " end of an object");
+                                          " end of an object");
                        if (*dbg_redzone1(cachep, objp) != RED_INACTIVE)
                                slab_error(cachep, "constructor overwrote the"
-                                                       " start of an object");
+                                          " start of an object");
                }
-               if ((cachep->objsize % PAGE_SIZE) == 0 && OFF_SLAB(cachep) && cachep->flags & SLAB_POISON)
-                       kernel_map_pages(virt_to_page(objp), cachep->objsize/PAGE_SIZE, 0);
+               if ((cachep->buffer_size % PAGE_SIZE) == 0 && OFF_SLAB(cachep)
+                   && cachep->flags & SLAB_POISON)
+                       kernel_map_pages(virt_to_page(objp),
+                                        cachep->buffer_size / PAGE_SIZE, 0);
 #else
                if (cachep->ctor)
                        cachep->ctor(objp, cachep, ctor_flags);
 #endif
-               slab_bufctl(slabp)[i] = i+1;
+               slab_bufctl(slabp)[i] = i + 1;
        }
-       slab_bufctl(slabp)[i-1] = BUFCTL_END;
+       slab_bufctl(slabp)[i - 1] = BUFCTL_END;
        slabp->free = 0;
 }
 
-static void kmem_flagcheck(kmem_cache_t *cachep, gfp_t flags)
+static void kmem_flagcheck(struct kmem_cache *cachep, gfp_t flags)
 {
        if (flags & SLAB_DMA) {
                if (!(cachep->gfpflags & GFP_DMA))
@@ -2140,7 +2243,43 @@ static void kmem_flagcheck(kmem_cache_t *cachep, gfp_t flags)
        }
 }
 
-static void set_slab_attr(kmem_cache_t *cachep, struct slab *slabp, void *objp)
+static void *slab_get_obj(struct kmem_cache *cachep, struct slab *slabp, int nodeid)
+{
+       void *objp = slabp->s_mem + (slabp->free * cachep->buffer_size);
+       kmem_bufctl_t next;
+
+       slabp->inuse++;
+       next = slab_bufctl(slabp)[slabp->free];
+#if DEBUG
+       slab_bufctl(slabp)[slabp->free] = BUFCTL_FREE;
+       WARN_ON(slabp->nodeid != nodeid);
+#endif
+       slabp->free = next;
+
+       return objp;
+}
+
+static void slab_put_obj(struct kmem_cache *cachep, struct slab *slabp, void *objp,
+                         int nodeid)
+{
+       unsigned int objnr = (unsigned)(objp-slabp->s_mem) / cachep->buffer_size;
+
+#if DEBUG
+       /* Verify that the slab belongs to the intended node */
+       WARN_ON(slabp->nodeid != nodeid);
+
+       if (slab_bufctl(slabp)[objnr] != BUFCTL_FREE) {
+               printk(KERN_ERR "slab: double free detected in cache "
+                      "'%s', objp %p\n", cachep->name, objp);
+               BUG();
+       }
+#endif
+       slab_bufctl(slabp)[objnr] = slabp->free;
+       slabp->free = objnr;
+       slabp->inuse--;
+}
+
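The new slab_get_obj()/slab_put_obj() helpers above factor out the slab's index-linked free list: the kmem_bufctl_t array that follows struct slab holds, for each object, the index of the next free object, with slabp->free pointing at the head (cache_init_objs() seeds it as 0, 1, ..., BUFCTL_END). A standalone userspace sketch of the same mechanism, using a hypothetical four-object slab:

#include <assert.h>
#include <stdio.h>

#define NUM_OBJS    4
#define BUFCTL_END  ((unsigned short)~0u)

static unsigned short bufctl[NUM_OBJS];  /* "next free" index per object */
static unsigned short free_head;         /* analogue of slabp->free */

static void init_free_list(void)
{
        int i;

        for (i = 0; i < NUM_OBJS - 1; i++)
                bufctl[i] = i + 1;               /* object i links to i + 1 */
        bufctl[NUM_OBJS - 1] = BUFCTL_END;
        free_head = 0;
}

static int get_obj(void)                         /* cf. slab_get_obj() */
{
        int objnr = free_head;

        assert(objnr != BUFCTL_END);
        free_head = bufctl[objnr];               /* pop the head of the list */
        return objnr;
}

static void put_obj(int objnr)                   /* cf. slab_put_obj() */
{
        bufctl[objnr] = free_head;               /* push back onto the list */
        free_head = objnr;
}

int main(void)
{
        init_free_list();
        int a = get_obj(), b = get_obj();
        printf("allocated objects %d and %d\n", a, b);
        put_obj(a);
        printf("next allocation reuses %d\n", get_obj());
        return 0;
}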
+static void set_slab_attr(struct kmem_cache *cachep, struct slab *slabp, void *objp)
 {
        int i;
        struct page *page;
@@ -2159,19 +2298,19 @@ static void set_slab_attr(kmem_cache_t *cachep, struct slab *slabp, void *objp)
  * Grow (by 1) the number of slabs within a cache.  This is called by
  * kmem_cache_alloc() when there are no active objs left in a cache.
  */
-static int cache_grow(kmem_cache_t *cachep, gfp_t flags, int nodeid)
+static int cache_grow(struct kmem_cache *cachep, gfp_t flags, int nodeid)
 {
-       struct slab     *slabp;
-       void            *objp;
-       size_t           offset;
-       gfp_t            local_flags;
-       unsigned long    ctor_flags;
+       struct slab *slabp;
+       void *objp;
+       size_t offset;
+       gfp_t local_flags;
+       unsigned long ctor_flags;
        struct kmem_list3 *l3;
 
        /* Be lazy and only check for valid flags here,
-        * keeping it out of the critical path in kmem_cache_alloc().
+        * keeping it out of the critical path in kmem_cache_alloc().
         */
-       if (flags & ~(SLAB_DMA|SLAB_LEVEL_MASK|SLAB_NO_GROW))
+       if (flags & ~(SLAB_DMA | SLAB_LEVEL_MASK | SLAB_NO_GROW))
                BUG();
        if (flags & SLAB_NO_GROW)
                return 0;
@@ -2237,9 +2376,9 @@ static int cache_grow(kmem_cache_t *cachep, gfp_t flags, int nodeid)
        l3->free_objects += cachep->num;
        spin_unlock(&l3->list_lock);
        return 1;
-opps1:
+      opps1:
        kmem_freepages(cachep, objp);
-failed:
+      failed:
        if (local_flags & __GFP_WAIT)
                local_irq_disable();
        return 0;
@@ -2259,42 +2398,49 @@ static void kfree_debugcheck(const void *objp)
 
        if (!virt_addr_valid(objp)) {
                printk(KERN_ERR "kfree_debugcheck: out of range ptr %lxh.\n",
-                       (unsigned long)objp);   
-               BUG();  
+                      (unsigned long)objp);
+               BUG();
        }
        page = virt_to_page(objp);
        if (!PageSlab(page)) {
-               printk(KERN_ERR "kfree_debugcheck: bad ptr %lxh.\n", (unsigned long)objp);
+               printk(KERN_ERR "kfree_debugcheck: bad ptr %lxh.\n",
+                      (unsigned long)objp);
                BUG();
        }
 }
 
-static void *cache_free_debugcheck(kmem_cache_t *cachep, void *objp,
-                                       void *caller)
+static void *cache_free_debugcheck(struct kmem_cache *cachep, void *objp,
+                                  void *caller)
 {
        struct page *page;
        unsigned int objnr;
        struct slab *slabp;
 
-       objp -= obj_dbghead(cachep);
+       objp -= obj_offset(cachep);
        kfree_debugcheck(objp);
        page = virt_to_page(objp);
 
        if (page_get_cache(page) != cachep) {
-               printk(KERN_ERR "mismatch in kmem_cache_free: expected cache %p, got %p\n",
-                               page_get_cache(page),cachep);
+               printk(KERN_ERR
+                      "mismatch in kmem_cache_free: expected cache %p, got %p\n",
+                      page_get_cache(page), cachep);
                printk(KERN_ERR "%p is %s.\n", cachep, cachep->name);
-               printk(KERN_ERR "%p is %s.\n", page_get_cache(page), page_get_cache(page)->name);
+               printk(KERN_ERR "%p is %s.\n", page_get_cache(page),
+                      page_get_cache(page)->name);
                WARN_ON(1);
        }
        slabp = page_get_slab(page);
 
        if (cachep->flags & SLAB_RED_ZONE) {
-               if (*dbg_redzone1(cachep, objp) != RED_ACTIVE || *dbg_redzone2(cachep, objp) != RED_ACTIVE) {
-                       slab_error(cachep, "double free, or memory outside"
-                                               " object was overwritten");
-                       printk(KERN_ERR "%p: redzone 1: 0x%lx, redzone 2: 0x%lx.\n",
-                                       objp, *dbg_redzone1(cachep, objp), *dbg_redzone2(cachep, objp));
+               if (*dbg_redzone1(cachep, objp) != RED_ACTIVE
+                   || *dbg_redzone2(cachep, objp) != RED_ACTIVE) {
+                       slab_error(cachep,
+                                  "double free, or memory outside"
+                                  " object was overwritten");
+                       printk(KERN_ERR
+                              "%p: redzone 1: 0x%lx, redzone 2: 0x%lx.\n",
+                              objp, *dbg_redzone1(cachep, objp),
+                              *dbg_redzone2(cachep, objp));
                }
                *dbg_redzone1(cachep, objp) = RED_INACTIVE;
                *dbg_redzone2(cachep, objp) = RED_INACTIVE;
@@ -2302,30 +2448,31 @@ static void *cache_free_debugcheck(kmem_cache_t *cachep, void *objp,
        if (cachep->flags & SLAB_STORE_USER)
                *dbg_userword(cachep, objp) = caller;
 
-       objnr = (objp-slabp->s_mem)/cachep->objsize;
+       objnr = (unsigned)(objp - slabp->s_mem) / cachep->buffer_size;
 
        BUG_ON(objnr >= cachep->num);
-       BUG_ON(objp != slabp->s_mem + objnr*cachep->objsize);
+       BUG_ON(objp != slabp->s_mem + objnr * cachep->buffer_size);
 
        if (cachep->flags & SLAB_DEBUG_INITIAL) {
                /* Need to call the slab's constructor so the
                 * caller can perform a verify of its state (debugging).
                 * Called without the cache-lock held.
                 */
-               cachep->ctor(objp+obj_dbghead(cachep),
-                                       cachep, SLAB_CTOR_CONSTRUCTOR|SLAB_CTOR_VERIFY);
+               cachep->ctor(objp + obj_offset(cachep),
+                            cachep, SLAB_CTOR_CONSTRUCTOR | SLAB_CTOR_VERIFY);
        }
        if (cachep->flags & SLAB_POISON && cachep->dtor) {
                /* we want to cache poison the object,
                 * call the destruction callback
                 */
-               cachep->dtor(objp+obj_dbghead(cachep), cachep, 0);
+               cachep->dtor(objp + obj_offset(cachep), cachep, 0);
        }
        if (cachep->flags & SLAB_POISON) {
 #ifdef CONFIG_DEBUG_PAGEALLOC
-               if ((cachep->objsize % PAGE_SIZE) == 0 && OFF_SLAB(cachep)) {
+               if ((cachep->buffer_size % PAGE_SIZE) == 0 && OFF_SLAB(cachep)) {
                        store_stackinfo(cachep, objp, (unsigned long)caller);
-                       kernel_map_pages(virt_to_page(objp), cachep->objsize/PAGE_SIZE, 0);
+                       kernel_map_pages(virt_to_page(objp),
+                                        cachep->buffer_size / PAGE_SIZE, 0);
                } else {
                        poison_obj(cachep, objp, POISON_FREE);
                }
@@ -2336,11 +2483,11 @@ static void *cache_free_debugcheck(kmem_cache_t *cachep, void *objp,
        return objp;
 }
 
-static void check_slabp(kmem_cache_t *cachep, struct slab *slabp)
+static void check_slabp(struct kmem_cache *cachep, struct slab *slabp)
 {
        kmem_bufctl_t i;
        int entries = 0;
-       
+
        /* Check slab's freelist to see if this obj is there. */
        for (i = slabp->free; i != BUFCTL_END; i = slab_bufctl(slabp)[i]) {
                entries++;
@@ -2348,13 +2495,16 @@ static void check_slabp(kmem_cache_t *cachep, struct slab *slabp)
                        goto bad;
        }
        if (entries != cachep->num - slabp->inuse) {
-bad:
-               printk(KERN_ERR "slab: Internal list corruption detected in cache '%s'(%d), slabp %p(%d). Hexdump:\n",
-                               cachep->name, cachep->num, slabp, slabp->inuse);
-               for (i=0;i<sizeof(slabp)+cachep->num*sizeof(kmem_bufctl_t);i++) {
-                       if ((i%16)==0)
+             bad:
+               printk(KERN_ERR
+                      "slab: Internal list corruption detected in cache '%s'(%d), slabp %p(%d). Hexdump:\n",
+                      cachep->name, cachep->num, slabp, slabp->inuse);
+               for (i = 0;
+                    i < sizeof(slabp) + cachep->num * sizeof(kmem_bufctl_t);
+                    i++) {
+                       if ((i % 16) == 0)
                                printk("\n%03x:", i);
-                       printk(" %02x", ((unsigned char*)slabp)[i]);
+                       printk(" %02x", ((unsigned char *)slabp)[i]);
                }
                printk("\n");
                BUG();
@@ -2366,15 +2516,15 @@ bad:
 #define check_slabp(x,y) do { } while(0)
 #endif
 
-static void *cache_alloc_refill(kmem_cache_t *cachep, gfp_t flags)
+static void *cache_alloc_refill(struct kmem_cache *cachep, gfp_t flags)
 {
        int batchcount;
        struct kmem_list3 *l3;
        struct array_cache *ac;
 
        check_irq_off();
-       ac = ac_data(cachep);
-retry:
+       ac = cpu_cache_get(cachep);
+      retry:
        batchcount = ac->batchcount;
        if (!ac->touched && batchcount > BATCHREFILL_LIMIT) {
                /* if there was little recent activity on this
@@ -2396,8 +2546,8 @@ retry:
                        shared_array->avail -= batchcount;
                        ac->avail = batchcount;
                        memcpy(ac->entry,
-                               &(shared_array->entry[shared_array->avail]),
-                               sizeof(void*)*batchcount);
+                              &(shared_array->entry[shared_array->avail]),
+                              sizeof(void *) * batchcount);
                        shared_array->touched = 1;
                        goto alloc_done;
                }
@@ -2418,22 +2568,12 @@ retry:
                check_slabp(cachep, slabp);
                check_spinlock_acquired(cachep);
                while (slabp->inuse < cachep->num && batchcount--) {
-                       kmem_bufctl_t next;
                        STATS_INC_ALLOCED(cachep);
                        STATS_INC_ACTIVE(cachep);
                        STATS_SET_HIGH(cachep);
 
-                       /* get obj pointer */
-                       ac->entry[ac->avail++] = slabp->s_mem +
-                               slabp->free*cachep->objsize;
-
-                       slabp->inuse++;
-                       next = slab_bufctl(slabp)[slabp->free];
-#if DEBUG
-                       slab_bufctl(slabp)[slabp->free] = BUFCTL_FREE;
-                       WARN_ON(numa_node_id() != slabp->nodeid);
-#endif
-                       slabp->free = next;
+                       ac->entry[ac->avail++] = slab_get_obj(cachep, slabp,
+                                                           numa_node_id());
                }
                check_slabp(cachep, slabp);
 
@@ -2445,9 +2585,9 @@ retry:
                        list_add(&slabp->list, &l3->slabs_partial);
        }
 
-must_grow:
+      must_grow:
        l3->free_objects -= ac->avail;
-alloc_done:
+      alloc_done:
        spin_unlock(&l3->list_lock);
 
        if (unlikely(!ac->avail)) {
@@ -2455,11 +2595,11 @@ alloc_done:
                x = cache_grow(cachep, flags, numa_node_id());
 
                // cache_grow can reenable interrupts, then ac could change.
-               ac = ac_data(cachep);
+               ac = cpu_cache_get(cachep);
                if (!x && ac->avail == 0)       // no objects in sight? abort
                        return NULL;
 
-               if (!ac->avail)         // objects refilled by interrupt?
+               if (!ac->avail) // objects refilled by interrupt?
                        goto retry;
        }
        ac->touched = 1;
@@ -2467,7 +2607,7 @@ alloc_done:
 }
 
 static inline void
-cache_alloc_debugcheck_before(kmem_cache_t *cachep, gfp_t flags)
+cache_alloc_debugcheck_before(struct kmem_cache *cachep, gfp_t flags)
 {
        might_sleep_if(flags & __GFP_WAIT);
 #if DEBUG
@@ -2476,16 +2616,16 @@ cache_alloc_debugcheck_before(kmem_cache_t *cachep, gfp_t flags)
 }
 
 #if DEBUG
-static void *
-cache_alloc_debugcheck_after(kmem_cache_t *cachep,
-                       gfp_t flags, void *objp, void *caller)
+static void *cache_alloc_debugcheck_after(struct kmem_cache *cachep, gfp_t flags,
+                                       void *objp, void *caller)
 {
-       if (!objp)      
+       if (!objp)
                return objp;
-       if (cachep->flags & SLAB_POISON) {
+       if (cachep->flags & SLAB_POISON) {
 #ifdef CONFIG_DEBUG_PAGEALLOC
-               if ((cachep->objsize % PAGE_SIZE) == 0 && OFF_SLAB(cachep))
-                       kernel_map_pages(virt_to_page(objp), cachep->objsize/PAGE_SIZE, 1);
+               if ((cachep->buffer_size % PAGE_SIZE) == 0 && OFF_SLAB(cachep))
+                       kernel_map_pages(virt_to_page(objp),
+                                        cachep->buffer_size / PAGE_SIZE, 1);
                else
                        check_poison_obj(cachep, objp);
 #else
@@ -2497,37 +2637,50 @@ cache_alloc_debugcheck_after(kmem_cache_t *cachep,
                *dbg_userword(cachep, objp) = caller;
 
        if (cachep->flags & SLAB_RED_ZONE) {
-               if (*dbg_redzone1(cachep, objp) != RED_INACTIVE || *dbg_redzone2(cachep, objp) != RED_INACTIVE) {
-                       slab_error(cachep, "double free, or memory outside"
-                                               " object was overwritten");
-                       printk(KERN_ERR "%p: redzone 1: 0x%lx, redzone 2: 0x%lx.\n",
-                                       objp, *dbg_redzone1(cachep, objp), *dbg_redzone2(cachep, objp));
+               if (*dbg_redzone1(cachep, objp) != RED_INACTIVE
+                   || *dbg_redzone2(cachep, objp) != RED_INACTIVE) {
+                       slab_error(cachep,
+                                  "double free, or memory outside"
+                                  " object was overwritten");
+                       printk(KERN_ERR
+                              "%p: redzone 1: 0x%lx, redzone 2: 0x%lx.\n",
+                              objp, *dbg_redzone1(cachep, objp),
+                              *dbg_redzone2(cachep, objp));
                }
                *dbg_redzone1(cachep, objp) = RED_ACTIVE;
                *dbg_redzone2(cachep, objp) = RED_ACTIVE;
        }
-       objp += obj_dbghead(cachep);
+       objp += obj_offset(cachep);
        if (cachep->ctor && cachep->flags & SLAB_POISON) {
-               unsigned long   ctor_flags = SLAB_CTOR_CONSTRUCTOR;
+               unsigned long ctor_flags = SLAB_CTOR_CONSTRUCTOR;
 
                if (!(flags & __GFP_WAIT))
                        ctor_flags |= SLAB_CTOR_ATOMIC;
 
                cachep->ctor(objp, cachep, ctor_flags);
-       }       
+       }
        return objp;
 }
 #else
 #define cache_alloc_debugcheck_after(a,b,objp,d) (objp)
 #endif
 
-static inline void *____cache_alloc(kmem_cache_t *cachep, gfp_t flags)
+static inline void *____cache_alloc(struct kmem_cache *cachep, gfp_t flags)
 {
-       void* objp;
+       void *objp;
        struct array_cache *ac;
 
+#ifdef CONFIG_NUMA
+       if (unlikely(current->mempolicy && !in_interrupt())) {
+               int nid = slab_node(current->mempolicy);
+
+               if (nid != numa_node_id())
+                       return __cache_alloc_node(cachep, flags, nid);
+       }
+#endif
+
        check_irq_off();
-       ac = ac_data(cachep);
+       ac = cpu_cache_get(cachep);
        if (likely(ac->avail)) {
                STATS_INC_ALLOCHIT(cachep);
                ac->touched = 1;
@@ -2539,10 +2692,11 @@ static inline void *____cache_alloc(kmem_cache_t *cachep, gfp_t flags)
        return objp;
 }
 
-static inline void *__cache_alloc(kmem_cache_t *cachep, gfp_t flags)
+static __always_inline void *
+__cache_alloc(struct kmem_cache *cachep, gfp_t flags, void *caller)
 {
        unsigned long save_flags;
-       void* objp;
+       void *objp;
 
        cache_alloc_debugcheck_before(cachep, flags);
 
@@ -2550,7 +2704,7 @@ static inline void *__cache_alloc(kmem_cache_t *cachep, gfp_t flags)
        objp = ____cache_alloc(cachep, flags);
        local_irq_restore(save_flags);
        objp = cache_alloc_debugcheck_after(cachep, flags, objp,
-                                       __builtin_return_address(0));
+                                           caller);
        prefetchw(objp);
        return objp;
 }
@@ -2559,77 +2713,70 @@ static inline void *__cache_alloc(kmem_cache_t *cachep, gfp_t flags)
 /*
  * An interface to enable slab creation on nodeid
  */
-static void *__cache_alloc_node(kmem_cache_t *cachep, gfp_t flags, int nodeid)
+static void *__cache_alloc_node(struct kmem_cache *cachep, gfp_t flags, int nodeid)
 {
        struct list_head *entry;
-       struct slab *slabp;
-       struct kmem_list3 *l3;
-       void *obj;
-       kmem_bufctl_t next;
-       int x;
-
-       l3 = cachep->nodelists[nodeid];
-       BUG_ON(!l3);
-
-retry:
-       spin_lock(&l3->list_lock);
-       entry = l3->slabs_partial.next;
-       if (entry == &l3->slabs_partial) {
-               l3->free_touched = 1;
-               entry = l3->slabs_free.next;
-               if (entry == &l3->slabs_free)
-                       goto must_grow;
-       }
-
-       slabp = list_entry(entry, struct slab, list);
-       check_spinlock_acquired_node(cachep, nodeid);
-       check_slabp(cachep, slabp);
-
-       STATS_INC_NODEALLOCS(cachep);
-       STATS_INC_ACTIVE(cachep);
-       STATS_SET_HIGH(cachep);
-
-       BUG_ON(slabp->inuse == cachep->num);
-
-       /* get obj pointer */
-       obj =  slabp->s_mem + slabp->free*cachep->objsize;
-       slabp->inuse++;
-       next = slab_bufctl(slabp)[slabp->free];
-#if DEBUG
-       slab_bufctl(slabp)[slabp->free] = BUFCTL_FREE;
-#endif
-       slabp->free = next;
-       check_slabp(cachep, slabp);
-       l3->free_objects--;
-       /* move slabp to correct slabp list: */
-       list_del(&slabp->list);
+       struct slab *slabp;
+       struct kmem_list3 *l3;
+       void *obj;
+       int x;
+
+       l3 = cachep->nodelists[nodeid];
+       BUG_ON(!l3);
+
+      retry:
+       spin_lock(&l3->list_lock);
+       entry = l3->slabs_partial.next;
+       if (entry == &l3->slabs_partial) {
+               l3->free_touched = 1;
+               entry = l3->slabs_free.next;
+               if (entry == &l3->slabs_free)
+                       goto must_grow;
+       }
+
+       slabp = list_entry(entry, struct slab, list);
+       check_spinlock_acquired_node(cachep, nodeid);
+       check_slabp(cachep, slabp);
 
-       if (slabp->free == BUFCTL_END) {
-               list_add(&slabp->list, &l3->slabs_full);
-       } else {
-               list_add(&slabp->list, &l3->slabs_partial);
-       }
+       STATS_INC_NODEALLOCS(cachep);
+       STATS_INC_ACTIVE(cachep);
+       STATS_SET_HIGH(cachep);
 
-       spin_unlock(&l3->list_lock);
-       goto done;
+       BUG_ON(slabp->inuse == cachep->num);
 
-must_grow:
-       spin_unlock(&l3->list_lock);
-       x = cache_grow(cachep, flags, nodeid);
+       obj = slab_get_obj(cachep, slabp, nodeid);
+       check_slabp(cachep, slabp);
+       l3->free_objects--;
+       /* move slabp to correct slabp list: */
+       list_del(&slabp->list);
 
-       if (!x)
-               return NULL;
+       if (slabp->free == BUFCTL_END) {
+               list_add(&slabp->list, &l3->slabs_full);
+       } else {
+               list_add(&slabp->list, &l3->slabs_partial);
+       }
+
+       spin_unlock(&l3->list_lock);
+       goto done;
 
-       goto retry;
-done:
-       return obj;
+      must_grow:
+       spin_unlock(&l3->list_lock);
+       x = cache_grow(cachep, flags, nodeid);
+
+       if (!x)
+               return NULL;
+
+       goto retry;
+      done:
+       return obj;
 }
 #endif
 
 /*
  * Caller needs to acquire correct kmem_list's list_lock
  */
-static void free_block(kmem_cache_t *cachep, void **objpp, int nr_objects, int node)
+static void free_block(struct kmem_cache *cachep, void **objpp, int nr_objects,
+                      int node)
 {
        int i;
        struct kmem_list3 *l3;
@@ -2637,29 +2784,14 @@ static void free_block(kmem_cache_t *cachep, void **objpp, int nr_objects, int n
        for (i = 0; i < nr_objects; i++) {
                void *objp = objpp[i];
                struct slab *slabp;
-               unsigned int objnr;
 
-               slabp = page_get_slab(virt_to_page(objp));
+               slabp = virt_to_slab(objp);
                l3 = cachep->nodelists[node];
                list_del(&slabp->list);
-               objnr = (objp - slabp->s_mem) / cachep->objsize;
                check_spinlock_acquired_node(cachep, node);
                check_slabp(cachep, slabp);
-
-#if DEBUG
-               /* Verify that the slab belongs to the intended node */
-               WARN_ON(slabp->nodeid != node);
-
-               if (slab_bufctl(slabp)[objnr] != BUFCTL_FREE) {
-                       printk(KERN_ERR "slab: double free detected in cache "
-                                       "'%s', objp %p\n", cachep->name, objp);
-                       BUG();
-               }
-#endif
-               slab_bufctl(slabp)[objnr] = slabp->free;
-               slabp->free = objnr;
+               slab_put_obj(cachep, slabp, objp, node);
                STATS_DEC_ACTIVE(cachep);
-               slabp->inuse--;
                l3->free_objects++;
                check_slabp(cachep, slabp);
 
@@ -2681,7 +2813,7 @@ static void free_block(kmem_cache_t *cachep, void **objpp, int nr_objects, int n
        }
 }
 
-static void cache_flusharray(kmem_cache_t *cachep, struct array_cache *ac)
+static void cache_flusharray(struct kmem_cache *cachep, struct array_cache *ac)
 {
        int batchcount;
        struct kmem_list3 *l3;
@@ -2696,20 +2828,19 @@ static void cache_flusharray(kmem_cache_t *cachep, struct array_cache *ac)
        spin_lock(&l3->list_lock);
        if (l3->shared) {
                struct array_cache *shared_array = l3->shared;
-               int max = shared_array->limit-shared_array->avail;
+               int max = shared_array->limit - shared_array->avail;
                if (max) {
                        if (batchcount > max)
                                batchcount = max;
                        memcpy(&(shared_array->entry[shared_array->avail]),
-                                       ac->entry,
-                                       sizeof(void*)*batchcount);
+                              ac->entry, sizeof(void *) * batchcount);
                        shared_array->avail += batchcount;
                        goto free_done;
                }
        }
 
        free_block(cachep, ac->entry, batchcount, node);
-free_done:
+      free_done:
 #if STATS
        {
                int i = 0;
@@ -2731,10 +2862,9 @@ free_done:
        spin_unlock(&l3->list_lock);
        ac->avail -= batchcount;
        memmove(ac->entry, &(ac->entry[batchcount]),
-                       sizeof(void*)*ac->avail);
+               sizeof(void *) * ac->avail);
 }
 
-
 /*
  * __cache_free
  * Release an obj back to its cache. If the obj has a constructed
@@ -2742,9 +2872,9 @@ free_done:
  *
  * Called with disabled ints.
  */
-static inline void __cache_free(kmem_cache_t *cachep, void *objp)
+static inline void __cache_free(struct kmem_cache *cachep, void *objp)
 {
-       struct array_cache *ac = ac_data(cachep);
+       struct array_cache *ac = cpu_cache_get(cachep);
 
        check_irq_off();
        objp = cache_free_debugcheck(cachep, objp, __builtin_return_address(0));
@@ -2755,11 +2885,12 @@ static inline void __cache_free(kmem_cache_t *cachep, void *objp)
 #ifdef CONFIG_NUMA
        {
                struct slab *slabp;
-               slabp = page_get_slab(virt_to_page(objp));
+               slabp = virt_to_slab(objp);
                if (unlikely(slabp->nodeid != numa_node_id())) {
                        struct array_cache *alien = NULL;
                        int nodeid = slabp->nodeid;
-                       struct kmem_list3 *l3 = cachep->nodelists[numa_node_id()];
+                       struct kmem_list3 *l3 =
+                           cachep->nodelists[numa_node_id()];
 
                        STATS_INC_NODEFREES(cachep);
                        if (l3->alien && l3->alien[nodeid]) {
@@ -2767,15 +2898,15 @@ static inline void __cache_free(kmem_cache_t *cachep, void *objp)
                                spin_lock(&alien->lock);
                                if (unlikely(alien->avail == alien->limit))
                                        __drain_alien_cache(cachep,
-                                                       alien, nodeid);
+                                                           alien, nodeid);
                                alien->entry[alien->avail++] = objp;
                                spin_unlock(&alien->lock);
                        } else {
                                spin_lock(&(cachep->nodelists[nodeid])->
-                                               list_lock);
+                                         list_lock);
                                free_block(cachep, &objp, 1, nodeid);
                                spin_unlock(&(cachep->nodelists[nodeid])->
-                                               list_lock);
+                                           list_lock);
                        }
                        return;
                }
@@ -2800,9 +2931,9 @@ static inline void __cache_free(kmem_cache_t *cachep, void *objp)
  * Allocate an object from this cache.  The flags are only relevant
  * if the cache has no available objects.
  */
-void *kmem_cache_alloc(kmem_cache_t *cachep, gfp_t flags)
+void *kmem_cache_alloc(struct kmem_cache *cachep, gfp_t flags)
 {
-       return __cache_alloc(cachep, flags);
+       return __cache_alloc(cachep, flags, __builtin_return_address(0));
 }
 EXPORT_SYMBOL(kmem_cache_alloc);
 
@@ -2820,12 +2951,12 @@ EXPORT_SYMBOL(kmem_cache_alloc);
  *
  * Currently only used for dentry validation.
  */
-int fastcall kmem_ptr_validate(kmem_cache_t *cachep, void *ptr)
+int fastcall kmem_ptr_validate(struct kmem_cache *cachep, void *ptr)
 {
-       unsigned long addr = (unsigned long) ptr;
+       unsigned long addr = (unsigned long)ptr;
        unsigned long min_addr = PAGE_OFFSET;
-       unsigned long align_mask = BYTES_PER_WORD-1;
-       unsigned long size = cachep->objsize;
+       unsigned long align_mask = BYTES_PER_WORD - 1;
+       unsigned long size = cachep->buffer_size;
        struct page *page;
 
        if (unlikely(addr < min_addr))
@@ -2844,7 +2975,7 @@ int fastcall kmem_ptr_validate(kmem_cache_t *cachep, void *ptr)
        if (unlikely(page_get_cache(page) != cachep))
                goto out;
        return 1;
-out:
+      out:
        return 0;
 }
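A hedged illustration of the kmem_ptr_validate() interface shown above (hypothetical helper, reusing the earlier foo_cache sketch): it lets a lockless lookup, such as the dcache's, reject a pointer whose object or page may already have been recycled before dereferencing it.

static struct foo *foo_lookup_check(void *ptr)
{
        /* Reject anything that is not currently a plausible foo_cache object. */
        if (!kmem_ptr_validate(foo_cache, ptr))
                return NULL;
        return ptr;
}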
 
@@ -2861,28 +2992,23 @@ out:
  * New and improved: it will now make sure that the object gets
  * put on the correct node list so that there is no false sharing.
  */
-void *kmem_cache_alloc_node(kmem_cache_t *cachep, gfp_t flags, int nodeid)
+void *kmem_cache_alloc_node(struct kmem_cache *cachep, gfp_t flags, int nodeid)
 {
        unsigned long save_flags;
        void *ptr;
 
-       if (nodeid == -1)
-               return __cache_alloc(cachep, flags);
-
-       if (unlikely(!cachep->nodelists[nodeid])) {
-               /* Fall back to __cache_alloc if we run into trouble */
-               printk(KERN_WARNING "slab: not allocating in inactive node %d for cache %s\n", nodeid, cachep->name);
-               return __cache_alloc(cachep,flags);
-       }
-
        cache_alloc_debugcheck_before(cachep, flags);
        local_irq_save(save_flags);
-       if (nodeid == numa_node_id())
+
+       if (nodeid == -1 || nodeid == numa_node_id() ||
+           !cachep->nodelists[nodeid])
                ptr = ____cache_alloc(cachep, flags);
        else
                ptr = __cache_alloc_node(cachep, flags, nodeid);
        local_irq_restore(save_flags);
-       ptr = cache_alloc_debugcheck_after(cachep, flags, ptr, __builtin_return_address(0));
+
+       ptr = cache_alloc_debugcheck_after(cachep, flags, ptr,
+                                          __builtin_return_address(0));
 
        return ptr;
 }
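A hedged example of the node-aware allocators at work; the per-node structure and names are hypothetical, and only kmalloc_node() and for_each_online_node(), both already used elsewhere in this file, are assumed:

#include <linux/init.h>
#include <linux/list.h>
#include <linux/nodemask.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

struct foo_node {                       /* hypothetical per-node state */
        spinlock_t lock;
        struct list_head queue;
};

static struct foo_node *foo_nodes[MAX_NUMNODES];

static int __init foo_setup_nodes(void)
{
        int node;

        for_each_online_node(node) {
                /* Keep each node's bookkeeping in memory local to that node. */
                foo_nodes[node] = kmalloc_node(sizeof(struct foo_node),
                                               GFP_KERNEL, node);
                if (!foo_nodes[node])
                        return -ENOMEM;
                spin_lock_init(&foo_nodes[node]->lock);
                INIT_LIST_HEAD(&foo_nodes[node]->queue);
        }
        return 0;
}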
@@ -2890,7 +3016,7 @@ EXPORT_SYMBOL(kmem_cache_alloc_node);
 
 void *kmalloc_node(size_t size, gfp_t flags, int node)
 {
-       kmem_cache_t *cachep;
+       struct kmem_cache *cachep;
 
        cachep = kmem_find_general_cachep(size, flags);
        if (unlikely(cachep == NULL))
@@ -2921,9 +3047,10 @@ EXPORT_SYMBOL(kmalloc_node);
  * platforms.  For example, on i386, it means that the memory must come
  * from the first 16MB.
  */
-void *__kmalloc(size_t size, gfp_t flags)
+static __always_inline void *__do_kmalloc(size_t size, gfp_t flags,
+                                         void *caller)
 {
-       kmem_cache_t *cachep;
+       struct kmem_cache *cachep;
 
        /* If you want to save a few bytes .text space: replace
         * __ with kmem_.
@@ -2933,10 +3060,27 @@ void *__kmalloc(size_t size, gfp_t flags)
        cachep = __find_general_cachep(size, flags);
        if (unlikely(cachep == NULL))
                return NULL;
-       return __cache_alloc(cachep, flags);
+       return __cache_alloc(cachep, flags, caller);
+}
+
+#ifndef CONFIG_DEBUG_SLAB
+
+void *__kmalloc(size_t size, gfp_t flags)
+{
+       return __do_kmalloc(size, flags, NULL);
 }
 EXPORT_SYMBOL(__kmalloc);
 
+#else
+
+void *__kmalloc_track_caller(size_t size, gfp_t flags, void *caller)
+{
+       return __do_kmalloc(size, flags, caller);
+}
+EXPORT_SYMBOL(__kmalloc_track_caller);
+
+#endif
+
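For completeness, a hedged caller-side sketch of the GFP_DMA case described in the comment above; the bounce-buffer use is hypothetical:

#include <linux/slab.h>

/* Buffer that must sit in DMA-capable memory (first 16MB on i386). */
static void *alloc_dma_bounce(size_t len)
{
        return kmalloc(len, GFP_KERNEL | GFP_DMA);
}

static void free_dma_bounce(void *buf)
{
        kfree(buf);             /* kfree() of NULL is a no-op */
}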
 #ifdef CONFIG_SMP
 /**
  * __alloc_percpu - allocate one copy of the object for every present
@@ -2944,12 +3088,11 @@ EXPORT_SYMBOL(__kmalloc);
  * Objects should be dereferenced using the per_cpu_ptr macro only.
  *
  * @size: how many bytes of memory are required.
- * @align: the alignment, which can't be greater than SMP_CACHE_BYTES.
  */
-void *__alloc_percpu(size_t size, size_t align)
+void *__alloc_percpu(size_t size)
 {
        int i;
-       struct percpu_data *pdata = kmalloc(sizeof (*pdata), GFP_KERNEL);
+       struct percpu_data *pdata = kmalloc(sizeof(*pdata), GFP_KERNEL);
 
        if (!pdata)
                return NULL;
@@ -2973,9 +3116,9 @@ void *__alloc_percpu(size_t size, size_t align)
        }
 
        /* Catch derefs w/o wrappers */
-       return (void *) (~(unsigned long) pdata);
+       return (void *)(~(unsigned long)pdata);
 
-unwind_oom:
+      unwind_oom:
        while (--i >= 0) {
                if (!cpu_possible(i))
                        continue;
@@ -2995,7 +3138,7 @@ EXPORT_SYMBOL(__alloc_percpu);
  * Free an object which was previously allocated from this
  * cache.
  */
-void kmem_cache_free(kmem_cache_t *cachep, void *objp)
+void kmem_cache_free(struct kmem_cache *cachep, void *objp)
 {
        unsigned long flags;
 
@@ -3005,20 +3148,6 @@ void kmem_cache_free(kmem_cache_t *cachep, void *objp)
 }
 EXPORT_SYMBOL(kmem_cache_free);
 
-/**
- * kzalloc - allocate memory. The memory is set to zero.
- * @size: how many bytes of memory are required.
- * @flags: the type of memory to allocate.
- */
-void *kzalloc(size_t size, gfp_t flags)
-{
-       void *ret = kmalloc(size, flags);
-       if (ret)
-               memset(ret, 0, size);
-       return ret;
-}
-EXPORT_SYMBOL(kzalloc);
-
 /**
  * kfree - free previously allocated memory
  * @objp: pointer returned by kmalloc.
@@ -3030,15 +3159,16 @@ EXPORT_SYMBOL(kzalloc);
  */
 void kfree(const void *objp)
 {
-       kmem_cache_t *c;
+       struct kmem_cache *c;
        unsigned long flags;
 
        if (unlikely(!objp))
                return;
        local_irq_save(flags);
        kfree_debugcheck(objp);
-       c = page_get_cache(virt_to_page(objp));
-       __cache_free(c, (void*)objp);
+       c = virt_to_cache(objp);
+       mutex_debug_check_no_locks_freed(objp, obj_size(c));
+       __cache_free(c, (void *)objp);
        local_irq_restore(flags);
 }
 EXPORT_SYMBOL(kfree);
@@ -3051,29 +3181,28 @@ EXPORT_SYMBOL(kfree);
  * Don't free memory not originally allocated by alloc_percpu()
  * The complemented objp is to check for that.
  */
-void
-free_percpu(const void *objp)
+void free_percpu(const void *objp)
 {
        int i;
-       struct percpu_data *p = (struct percpu_data *) (~(unsigned long) objp);
+       struct percpu_data *p = (struct percpu_data *)(~(unsigned long)objp);
 
        /*
         * We allocate for all cpus so we cannot use for online cpu here.
         */
        for_each_cpu(i)
-               kfree(p->ptrs[i]);
+           kfree(p->ptrs[i]);
        kfree(p);
 }
 EXPORT_SYMBOL(free_percpu);
 #endif
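A hedged sketch of the per-CPU interface above; the counter is hypothetical, and alloc_percpu()/per_cpu_ptr() are assumed to be the wrappers from <linux/percpu.h> that callers of this era are expected to use (a direct dereference faults because the stored pointer is complemented):

#include <linux/percpu.h>
#include <linux/smp.h>

static long *foo_hits;                  /* handle from alloc_percpu(long) */

static int foo_counters_init(void)
{
        foo_hits = alloc_percpu(long);  /* wraps __alloc_percpu(sizeof(long)) */
        return foo_hits ? 0 : -ENOMEM;
}

static void foo_count_hit(void)
{
        int cpu = get_cpu();            /* pin to a CPU while touching its copy */

        (*per_cpu_ptr(foo_hits, cpu))++;
        put_cpu();
}

static void foo_counters_exit(void)
{
        free_percpu(foo_hits);
}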
 
-unsigned int kmem_cache_size(kmem_cache_t *cachep)
+unsigned int kmem_cache_size(struct kmem_cache *cachep)
 {
-       return obj_reallen(cachep);
+       return obj_size(cachep);
 }
 EXPORT_SYMBOL(kmem_cache_size);
 
-const char *kmem_cache_name(kmem_cache_t *cachep)
+const char *kmem_cache_name(struct kmem_cache *cachep)
 {
        return cachep->name;
 }
@@ -3082,7 +3211,7 @@ EXPORT_SYMBOL_GPL(kmem_cache_name);
 /*
  * This initializes kmem_list3 for all nodes.
  */
-static int alloc_kmemlist(kmem_cache_t *cachep)
+static int alloc_kmemlist(struct kmem_cache *cachep)
 {
        int node;
        struct kmem_list3 *l3;
@@ -3095,50 +3224,50 @@ static int alloc_kmemlist(kmem_cache_t *cachep)
                if (!(new_alien = alloc_alien_cache(node, cachep->limit)))
                        goto fail;
 #endif
-               if (!(new = alloc_arraycache(node, (cachep->shared*
-                               cachep->batchcount), 0xbaadf00d)))
+               if (!(new = alloc_arraycache(node, (cachep->shared *
+                                                   cachep->batchcount),
+                                            0xbaadf00d)))
                        goto fail;
                if ((l3 = cachep->nodelists[node])) {
 
                        spin_lock_irq(&l3->list_lock);
 
                        if ((nc = cachep->nodelists[node]->shared))
-                               free_block(cachep, nc->entry,
-                                                       nc->avail, node);
+                               free_block(cachep, nc->entry, nc->avail, node);
 
                        l3->shared = new;
                        if (!cachep->nodelists[node]->alien) {
                                l3->alien = new_alien;
                                new_alien = NULL;
                        }
-                       l3->free_limit = (1 + nr_cpus_node(node))*
-                               cachep->batchcount + cachep->num;
+                       l3->free_limit = (1 + nr_cpus_node(node)) *
+                           cachep->batchcount + cachep->num;
                        spin_unlock_irq(&l3->list_lock);
                        kfree(nc);
                        free_alien_cache(new_alien);
                        continue;
                }
                if (!(l3 = kmalloc_node(sizeof(struct kmem_list3),
-                                               GFP_KERNEL, node)))
+                                       GFP_KERNEL, node)))
                        goto fail;
 
                kmem_list3_init(l3);
                l3->next_reap = jiffies + REAPTIMEOUT_LIST3 +
-                       ((unsigned long)cachep)%REAPTIMEOUT_LIST3;
+                   ((unsigned long)cachep) % REAPTIMEOUT_LIST3;
                l3->shared = new;
                l3->alien = new_alien;
-               l3->free_limit = (1 + nr_cpus_node(node))*
-                       cachep->batchcount + cachep->num;
+               l3->free_limit = (1 + nr_cpus_node(node)) *
+                   cachep->batchcount + cachep->num;
                cachep->nodelists[node] = l3;
        }
        return err;
-fail:
+      fail:
        err = -ENOMEM;
        return err;
 }
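
The free_limit assignment that appears in both branches bounds how many free objects a node may hold before the reaper starts returning whole slabs to the page allocator. A worked example with illustrative numbers:

	/* free_limit = (1 + nr_cpus_node(node)) * batchcount + num
	 *
	 * node with 4 cpus, batchcount = 16, num = 30 objects per slab:
	 *	free_limit = (1 + 4) * 16 + 30 = 110
	 *
	 * roughly one batch per cpu plus one spare, plus a full slab of
	 * slack (reading the "+1" as the spare batch is our gloss).
	 */
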
 
 struct ccupdate_struct {
-       kmem_cache_t *cachep;
+       struct kmem_cache *cachep;
        struct array_cache *new[NR_CPUS];
 };
 
@@ -3148,24 +3277,25 @@ static void do_ccupdate_local(void *info)
        struct array_cache *old;
 
        check_irq_off();
-       old = ac_data(new->cachep);
+       old = cpu_cache_get(new->cachep);
 
        new->cachep->array[smp_processor_id()] = new->new[smp_processor_id()];
        new->new[smp_processor_id()] = old;
 }
 
-
-static int do_tune_cpucache(kmem_cache_t *cachep, int limit, int batchcount,
-                               int shared)
+static int do_tune_cpucache(struct kmem_cache *cachep, int limit, int batchcount,
+                           int shared)
 {
        struct ccupdate_struct new;
        int i, err;
 
-       memset(&new.new,0,sizeof(new.new));
+       memset(&new.new, 0, sizeof(new.new));
        for_each_online_cpu(i) {
-               new.new[i] = alloc_arraycache(cpu_to_node(i), limit, batchcount);
+               new.new[i] =
+                   alloc_arraycache(cpu_to_node(i), limit, batchcount);
                if (!new.new[i]) {
-                       for (i--; i >= 0; i--) kfree(new.new[i]);
+                       for (i--; i >= 0; i--)
+                               kfree(new.new[i]);
                        return -ENOMEM;
                }
        }
@@ -3193,14 +3323,13 @@ static int do_tune_cpucache(kmem_cache_t *cachep, int limit, int batchcount,
        err = alloc_kmemlist(cachep);
        if (err) {
                printk(KERN_ERR "alloc_kmemlist failed for %s, error %d.\n",
-                               cachep->name, -err);
+                      cachep->name, -err);
                BUG();
        }
        return 0;
 }
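
The two hunks above bracket the middle of do_tune_cpucache(); the overall retune sequence, sketched from the surrounding code:

	/* 1. allocate a new array_cache per online cpu, unwinding with the
	 *    kfree() loop above if any allocation fails;
	 * 2. dispatch do_ccupdate_local() to every cpu, which swaps in the
	 *    new array and hands the old pointer back through the same slot;
	 * 3. drain each old array into its node lists under list_lock and
	 *    kfree() it;
	 * 4. alloc_kmemlist() (called just above the error print) resizes
	 *    the per-node shared caches.
	 * Steps 2 and 3 happen in the elided part of the function.
	 */
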
 
-
-static void enable_cpucache(kmem_cache_t *cachep)
+static void enable_cpucache(struct kmem_cache *cachep)
 {
        int err;
        int limit, shared;
@@ -3213,13 +3342,13 @@ static void enable_cpucache(kmem_cache_t *cachep)
         * The numbers are guessed, we should auto-tune as described by
         * Bonwick.
         */
-       if (cachep->objsize > 131072)
+       if (cachep->buffer_size > 131072)
                limit = 1;
-       else if (cachep->objsize > PAGE_SIZE)
+       else if (cachep->buffer_size > PAGE_SIZE)
                limit = 8;
-       else if (cachep->objsize > 1024)
+       else if (cachep->buffer_size > 1024)
                limit = 24;
-       else if (cachep->objsize > 256)
+       else if (cachep->buffer_size > 256)
                limit = 54;
        else
                limit = 120;
@@ -3234,7 +3363,7 @@ static void enable_cpucache(kmem_cache_t *cachep)
         */
        shared = 0;
 #ifdef CONFIG_SMP
-       if (cachep->objsize <= PAGE_SIZE)
+       if (cachep->buffer_size <= PAGE_SIZE)
                shared = 8;
 #endif
 
@@ -3246,14 +3375,14 @@ static void enable_cpucache(kmem_cache_t *cachep)
        if (limit > 32)
                limit = 32;
 #endif
-       err = do_tune_cpucache(cachep, limit, (limit+1)/2, shared);
+       err = do_tune_cpucache(cachep, limit, (limit + 1) / 2, shared);
        if (err)
                printk(KERN_ERR "enable_cpucache failed for %s, error %d.\n",
-                                       cachep->name, -err);
+                      cachep->name, -err);
 }
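
The thresholds form a small step function over the object size; restated for reference (the comment above the hunk already concedes these are guessed values rather than Bonwick-style auto-tuning):

	/* buffer_size		limit	batchcount = (limit + 1) / 2
	 * > 128 KiB		    1	 1
	 * > PAGE_SIZE		    8	 4
	 * > 1 KiB		   24	12
	 * > 256 B		   54	27
	 * otherwise		  120	60
	 *
	 * with CONFIG_SMP and buffer_size <= PAGE_SIZE, shared = 8; the
	 * debug-only guard whose #endif is visible above additionally caps
	 * limit at 32.
	 */
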
 
-static void drain_array_locked(kmem_cache_t *cachep,
-                               struct array_cache *ac, int force, int node)
+static void drain_array_locked(struct kmem_cache *cachep, struct array_cache *ac,
+                               int force, int node)
 {
        int tofree;
 
@@ -3261,14 +3390,14 @@ static void drain_array_locked(kmem_cache_t *cachep,
        if (ac->touched && !force) {
                ac->touched = 0;
        } else if (ac->avail) {
-               tofree = force ? ac->avail : (ac->limit+4)/5;
+               tofree = force ? ac->avail : (ac->limit + 4) / 5;
                if (tofree > ac->avail) {
-                       tofree = (ac->avail+1)/2;
+                       tofree = (ac->avail + 1) / 2;
                }
                free_block(cachep, ac->entry, tofree, node);
                ac->avail -= tofree;
                memmove(ac->entry, &(ac->entry[tofree]),
-                                       sizeof(void*)*ac->avail);
+                       sizeof(void *) * ac->avail);
        }
 }
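
With force == 0 this trims about a fifth of the array per call, and the fallback keeps the trim at no more than roughly half of what is actually present. Worked numbers:

	/* ac->limit = 120, ac->avail = 100:
	 *	tofree = (120 + 4) / 5 = 24	(about 20% of the limit)
	 * if ac->avail were only 10:
	 *	24 > 10, so tofree = (10 + 1) / 2 = 5
	 * the survivors are then slid to the front with memmove() so that
	 * ac->entry[0 .. avail) stays contiguous.
	 */
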
 
@@ -3281,7 +3410,7 @@ static void drain_array_locked(kmem_cache_t *cachep,
  * - clear the per-cpu caches for this CPU.
  * - return freeable pages to the main free memory pool.
  *
- * If we cannot acquire the cache chain semaphore then just give up - we'll
+ * If we cannot acquire the cache chain mutex then just give up - we'll
  * try again on the next iteration.
  */
 static void cache_reap(void *unused)
@@ -3289,19 +3418,20 @@ static void cache_reap(void *unused)
        struct list_head *walk;
        struct kmem_list3 *l3;
 
-       if (down_trylock(&cache_chain_sem)) {
+       if (!mutex_trylock(&cache_chain_mutex)) {
                /* Give up. Setup the next iteration. */
-               schedule_delayed_work(&__get_cpu_var(reap_work), REAPTIMEOUT_CPUC);
+               schedule_delayed_work(&__get_cpu_var(reap_work),
+                                     REAPTIMEOUT_CPUC);
                return;
        }
 
        list_for_each(walk, &cache_chain) {
-               kmem_cache_t *searchp;
-               struct list_head* p;
+               struct kmem_cache *searchp;
+               struct list_head *p;
                int tofree;
                struct slab *slabp;
 
-               searchp = list_entry(walk, kmem_cache_t, next);
+               searchp = list_entry(walk, struct kmem_cache, next);
 
                if (searchp->flags & SLAB_NO_REAP)
                        goto next;
@@ -3313,8 +3443,8 @@ static void cache_reap(void *unused)
                        drain_alien_cache(searchp, l3);
                spin_lock_irq(&l3->list_lock);
 
-               drain_array_locked(searchp, ac_data(searchp), 0,
-                               numa_node_id());
+               drain_array_locked(searchp, cpu_cache_get(searchp), 0,
+                                  numa_node_id());
 
                if (time_after(l3->next_reap, jiffies))
                        goto next_unlock;
@@ -3323,14 +3453,16 @@ static void cache_reap(void *unused)
 
                if (l3->shared)
                        drain_array_locked(searchp, l3->shared, 0,
-                               numa_node_id());
+                                          numa_node_id());
 
                if (l3->free_touched) {
                        l3->free_touched = 0;
                        goto next_unlock;
                }
 
-               tofree = (l3->free_limit+5*searchp->num-1)/(5*searchp->num);
+               tofree =
+                   (l3->free_limit + 5 * searchp->num -
+                    1) / (5 * searchp->num);
                do {
                        p = l3->slabs_free.next;
                        if (p == &(l3->slabs_free))
@@ -3350,14 +3482,14 @@ static void cache_reap(void *unused)
                        spin_unlock_irq(&l3->list_lock);
                        slab_destroy(searchp, slabp);
                        spin_lock_irq(&l3->list_lock);
-               } while(--tofree > 0);
-next_unlock:
+               } while (--tofree > 0);
+             next_unlock:
                spin_unlock_irq(&l3->list_lock);
-next:
+             next:
                cond_resched();
        }
        check_irq_on();
-       up(&cache_chain_sem);
+       mutex_unlock(&cache_chain_mutex);
        drain_remote_pages();
        /* Setup the next iteration */
        schedule_delayed_work(&__get_cpu_var(reap_work), REAPTIMEOUT_CPUC);
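
The slab-freeing loop near the end of cache_reap() is throttled the same way: at most a fifth of free_limit's worth of completely free slabs goes back per pass, rounded up, and the loop breaks as soon as slabs_free is empty. Continuing the example numbers from alloc_kmemlist():

	/* free_limit = 110, num = 30 objects per slab:
	 *	tofree = (110 + 5 * 30 - 1) / (5 * 30) = 259 / 150 = 1 slab
	 * one slab per pass, with schedule_delayed_work() above spacing the
	 * passes REAPTIMEOUT_CPUC apart, so memory is released gradually.
	 */
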
@@ -3365,63 +3497,68 @@ next:
 
 #ifdef CONFIG_PROC_FS
 
-static void *s_start(struct seq_file *m, loff_t *pos)
+static void print_slabinfo_header(struct seq_file *m)
 {
-       loff_t n = *pos;
-       struct list_head *p;
-
-       down(&cache_chain_sem);
-       if (!n) {
-               /*
-                * Output format version, so at least we can change it
-                * without _too_ many complaints.
-                */
+       /*
+        * Output format version, so at least we can change it
+        * without _too_ many complaints.
+        */
 #if STATS
-               seq_puts(m, "slabinfo - version: 2.1 (statistics)\n");
+       seq_puts(m, "slabinfo - version: 2.1 (statistics)\n");
 #else
-               seq_puts(m, "slabinfo - version: 2.1\n");
+       seq_puts(m, "slabinfo - version: 2.1\n");
 #endif
-               seq_puts(m, "# name            <active_objs> <num_objs> <objsize> <objperslab> <pagesperslab>");
-               seq_puts(m, " : tunables <limit> <batchcount> <sharedfactor>");
-               seq_puts(m, " : slabdata <active_slabs> <num_slabs> <sharedavail>");
+       seq_puts(m, "# name            <active_objs> <num_objs> <objsize> "
+                "<objperslab> <pagesperslab>");
+       seq_puts(m, " : tunables <limit> <batchcount> <sharedfactor>");
+       seq_puts(m, " : slabdata <active_slabs> <num_slabs> <sharedavail>");
 #if STATS
-               seq_puts(m, " : globalstat <listallocs> <maxobjs> <grown> <reaped>"
-                               " <error> <maxfreeable> <nodeallocs> <remotefrees>");
-               seq_puts(m, " : cpustat <allochit> <allocmiss> <freehit> <freemiss>");
+       seq_puts(m, " : globalstat <listallocs> <maxobjs> <grown> <reaped> "
+                "<error> <maxfreeable> <nodeallocs> <remotefrees>");
+       seq_puts(m, " : cpustat <allochit> <allocmiss> <freehit> <freemiss>");
 #endif
-               seq_putc(m, '\n');
-       }
+       seq_putc(m, '\n');
+}
+
+static void *s_start(struct seq_file *m, loff_t *pos)
+{
+       loff_t n = *pos;
+       struct list_head *p;
+
+       mutex_lock(&cache_chain_mutex);
+       if (!n)
+               print_slabinfo_header(m);
        p = cache_chain.next;
        while (n--) {
                p = p->next;
                if (p == &cache_chain)
                        return NULL;
        }
-       return list_entry(p, kmem_cache_t, next);
+       return list_entry(p, struct kmem_cache, next);
 }
 
 static void *s_next(struct seq_file *m, void *p, loff_t *pos)
 {
-       kmem_cache_t *cachep = p;
+       struct kmem_cache *cachep = p;
        ++*pos;
        return cachep->next.next == &cache_chain ? NULL
-               : list_entry(cachep->next.next, kmem_cache_t, next);
+           : list_entry(cachep->next.next, struct kmem_cache, next);
 }
 
 static void s_stop(struct seq_file *m, void *p)
 {
-       up(&cache_chain_sem);
+       mutex_unlock(&cache_chain_mutex);
 }
 
 static int s_show(struct seq_file *m, void *p)
 {
-       kmem_cache_t *cachep = p;
+       struct kmem_cache *cachep = p;
        struct list_head *q;
-       struct slab     *slabp;
-       unsigned long   active_objs;
-       unsigned long   num_objs;
-       unsigned long   active_slabs = 0;
-       unsigned long   num_slabs, free_objects = 0, shared_avail = 0;
+       struct slab *slabp;
+       unsigned long active_objs;
+       unsigned long num_objs;
+       unsigned long active_slabs = 0;
+       unsigned long num_slabs, free_objects = 0, shared_avail = 0;
        const char *name;
        char *error = NULL;
        int node;
@@ -3438,14 +3575,14 @@ static int s_show(struct seq_file *m, void *p)
 
                spin_lock(&l3->list_lock);
 
-               list_for_each(q,&l3->slabs_full) {
+               list_for_each(q, &l3->slabs_full) {
                        slabp = list_entry(q, struct slab, list);
                        if (slabp->inuse != cachep->num && !error)
                                error = "slabs_full accounting error";
                        active_objs += cachep->num;
                        active_slabs++;
                }
-               list_for_each(q,&l3->slabs_partial) {
+               list_for_each(q, &l3->slabs_partial) {
                        slabp = list_entry(q, struct slab, list);
                        if (slabp->inuse == cachep->num && !error)
                                error = "slabs_partial inuse accounting error";
@@ -3454,7 +3591,7 @@ static int s_show(struct seq_file *m, void *p)
                        active_objs += slabp->inuse;
                        active_slabs++;
                }
-               list_for_each(q,&l3->slabs_free) {
+               list_for_each(q, &l3->slabs_free) {
                        slabp = list_entry(q, struct slab, list);
                        if (slabp->inuse && !error)
                                error = "slabs_free/inuse accounting error";
@@ -3465,25 +3602,24 @@ static int s_show(struct seq_file *m, void *p)
 
                spin_unlock(&l3->list_lock);
        }
-       num_slabs+=active_slabs;
-       num_objs = num_slabs*cachep->num;
+       num_slabs += active_slabs;
+       num_objs = num_slabs * cachep->num;
        if (num_objs - active_objs != free_objects && !error)
                error = "free_objects accounting error";
 
-       name = cachep->name; 
+       name = cachep->name;
        if (error)
                printk(KERN_ERR "slab: cache %s error: %s\n", name, error);
 
        seq_printf(m, "%-17s %6lu %6lu %6u %4u %4d",
-               name, active_objs, num_objs, cachep->objsize,
-               cachep->num, (1<<cachep->gfporder));
+                  name, active_objs, num_objs, cachep->buffer_size,
+                  cachep->num, (1 << cachep->gfporder));
        seq_printf(m, " : tunables %4u %4u %4u",
-                       cachep->limit, cachep->batchcount,
-                       cachep->shared);
+                  cachep->limit, cachep->batchcount, cachep->shared);
        seq_printf(m, " : slabdata %6lu %6lu %6lu",
-                       active_slabs, num_slabs, shared_avail);
+                  active_slabs, num_slabs, shared_avail);
 #if STATS
-       {       /* list3 stats */
+       {                       /* list3 stats */
                unsigned long high = cachep->high_mark;
                unsigned long allocs = cachep->num_allocations;
                unsigned long grown = cachep->grown;
@@ -3494,9 +3630,7 @@ static int s_show(struct seq_file *m, void *p)
                unsigned long node_frees = cachep->node_frees;
 
                seq_printf(m, " : globalstat %7lu %6lu %5lu %4lu \
-                               %4lu %4lu %4lu %4lu",
-                               allocs, high, grown, reaped, errors,
-                               max_freeable, node_allocs, node_frees);
+                               %4lu %4lu %4lu %4lu", allocs, high, grown, reaped, errors, max_freeable, node_allocs, node_frees);
        }
        /* cpu stats */
        {
@@ -3506,7 +3640,7 @@ static int s_show(struct seq_file *m, void *p)
                unsigned long freemiss = atomic_read(&cachep->freemiss);
 
                seq_printf(m, " : cpustat %6lu %6lu %6lu %6lu",
-                       allochit, allocmiss, freehit, freemiss);
+                          allochit, allocmiss, freehit, freemiss);
        }
 #endif
        seq_putc(m, '\n');
@@ -3529,10 +3663,10 @@ static int s_show(struct seq_file *m, void *p)
  */
 
 struct seq_operations slabinfo_op = {
-       .start  = s_start,
-       .next   = s_next,
-       .stop   = s_stop,
-       .show   = s_show,
+       .start = s_start,
+       .next = s_next,
+       .stop = s_stop,
+       .show = s_show,
 };
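
For readers who have not met the seq_file iterator protocol these four hooks implement, the core drives them in a fixed pattern; a simplified sketch of what fs/seq_file.c does with them:

	/* p = s_start(m, &pos);	take cache_chain_mutex, seek to *pos,
	 *				emitting the header when pos == 0
	 * while (p) {
	 *	s_show(m, p);		format one cache's statistics line
	 *	p = s_next(m, p, &pos);	step along cache_chain, NULL at end
	 * }
	 * s_stop(m, p);		drop cache_chain_mutex
	 *
	 * s_stop() is guaranteed to run whenever s_start() did, which is
	 * what makes taking the mutex in one callback and releasing it in
	 * another safe.
	 */
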
 
 #define MAX_SLABINFO_WRITE 128
@@ -3543,18 +3677,18 @@ struct seq_operations slabinfo_op = {
  * @count: data length
  * @ppos: unused
  */
-ssize_t slabinfo_write(struct file *file, const char __user *buffer,
-                               size_t count, loff_t *ppos)
+ssize_t slabinfo_write(struct file *file, const char __user * buffer,
+                      size_t count, loff_t *ppos)
 {
-       char kbuf[MAX_SLABINFO_WRITE+1], *tmp;
+       char kbuf[MAX_SLABINFO_WRITE + 1], *tmp;
        int limit, batchcount, shared, res;
        struct list_head *p;
-       
+
        if (count > MAX_SLABINFO_WRITE)
                return -EINVAL;
        if (copy_from_user(&kbuf, buffer, count))
                return -EFAULT;
-       kbuf[MAX_SLABINFO_WRITE] = '\0'; 
+       kbuf[MAX_SLABINFO_WRITE] = '\0';
 
        tmp = strchr(kbuf, ' ');
        if (!tmp)
@@ -3565,25 +3699,25 @@ ssize_t slabinfo_write(struct file *file, const char __user *buffer,
                return -EINVAL;
 
        /* Find the cache in the chain of caches. */
-       down(&cache_chain_sem);
+       mutex_lock(&cache_chain_mutex);
        res = -EINVAL;
-       list_for_each(p,&cache_chain) {
-               kmem_cache_t *cachep = list_entry(p, kmem_cache_t, next);
+       list_for_each(p, &cache_chain) {
+               struct kmem_cache *cachep = list_entry(p, struct kmem_cache,
+                                                      next);
 
                if (!strcmp(cachep->name, kbuf)) {
                        if (limit < 1 ||
                            batchcount < 1 ||
-                           batchcount > limit ||
-                           shared < 0) {
+                           batchcount > limit || shared < 0) {
                                res = 0;
                        } else {
                                res = do_tune_cpucache(cachep, limit,
-                                                       batchcount, shared);
+                                                      batchcount, shared);
                        }
                        break;
                }
        }
-       up(&cache_chain_sem);
+       mutex_unlock(&cache_chain_mutex);
        if (res >= 0)
                res = count;
        return res;
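
The accepted input is one line of the form "<cache-name> <limit> <batchcount> <shared>"; a usage sketch (the cache name is illustrative):

	/*	echo "dentry_cache 120 60 8" > /proc/slabinfo
	 *
	 * an unknown cache name yields -EINVAL, while out-of-range values
	 * (limit < 1, batchcount < 1 or > limit, shared < 0) set res = 0,
	 * so the write reports success without retuning anything.
	 */
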
@@ -3607,28 +3741,5 @@ unsigned int ksize(const void *objp)
        if (unlikely(objp == NULL))
                return 0;
 
-       return obj_reallen(page_get_cache(virt_to_page(objp)));
-}
-
-
-/*
- * kstrdup - allocate space for and copy an existing string
- *
- * @s: the string to duplicate
- * @gfp: the GFP mask used in the kmalloc() call when allocating memory
- */
-char *kstrdup(const char *s, gfp_t gfp)
-{
-       size_t len;
-       char *buf;
-
-       if (!s)
-               return NULL;
-
-       len = strlen(s) + 1;
-       buf = kmalloc(len, gfp);
-       if (buf)
-               memcpy(buf, s, len);
-       return buf;
+       return obj_size(virt_to_cache(objp));
 }
-EXPORT_SYMBOL(kstrdup);
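
ksize() reports the size the allocator actually reserved for an object, which can exceed what was requested once kmalloc() rounds up to its cache sizes. A usage sketch (the rounded figure is an assumption; the real value depends on the configured kmalloc caches):

	char *buf = kmalloc(100, GFP_KERNEL);	/* asks for 100 bytes */
	size_t usable = ksize(buf);	/* e.g. 128: the next kmalloc size */

The kstrdup() removal above matches its relocation out of the allocator proper (in mainline it moved to mm/util.c), leaving ksize() as the only size-introspection helper slab.c exports here.
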