index 24235506b2a0b59ad54b592d71de189ec852e2d7..681837499d7d59fa967cd2297a250ceff6e278af 100644 (file)
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -94,6 +94,7 @@
 #include       <linux/interrupt.h>
 #include       <linux/init.h>
 #include       <linux/compiler.h>
+#include       <linux/cpuset.h>
 #include       <linux/seq_file.h>
 #include       <linux/notifier.h>
 #include       <linux/kallsyms.h>
                         SLAB_CACHE_DMA | \
                         SLAB_MUST_HWCACHE_ALIGN | SLAB_STORE_USER | \
                         SLAB_RECLAIM_ACCOUNT | SLAB_PANIC | \
-                        SLAB_DESTROY_BY_RCU)
+                        SLAB_DESTROY_BY_RCU | SLAB_MEM_SPREAD)
 #else
 # define CREATE_MASK   (SLAB_HWCACHE_ALIGN | \
                         SLAB_CACHE_DMA | SLAB_MUST_HWCACHE_ALIGN | \
                         SLAB_RECLAIM_ACCOUNT | SLAB_PANIC | \
-                        SLAB_DESTROY_BY_RCU)
+                        SLAB_DESTROY_BY_RCU | SLAB_MEM_SPREAD)
 #endif
 
 /*
 typedef unsigned int kmem_bufctl_t;
 #define BUFCTL_END     (((kmem_bufctl_t)(~0U))-0)
 #define BUFCTL_FREE    (((kmem_bufctl_t)(~0U))-1)
-#define        SLAB_LIMIT      (((kmem_bufctl_t)(~0U))-2)
+#define        BUFCTL_ACTIVE   (((kmem_bufctl_t)(~0U))-2)
+#define        SLAB_LIMIT      (((kmem_bufctl_t)(~0U))-3)
 
 /* Max number of objs-per-slab for caches which use off-slab slabs.
  * Needed to avoid a possible looping condition in cache_grow().
@@ -292,13 +294,13 @@ struct kmem_list3 {
        struct list_head slabs_full;
        struct list_head slabs_free;
        unsigned long free_objects;
-       unsigned long next_reap;
-       int free_touched;
        unsigned int free_limit;
        unsigned int colour_next;       /* Per-node cache coloring */
        spinlock_t list_lock;
        struct array_cache *shared;     /* shared per node */
        struct array_cache **alien;     /* on other nodes */
+       unsigned long next_reap;        /* updated without locking */
+       int free_touched;               /* updated without locking */
 };
 
 /*
@@ -590,6 +592,8 @@ static inline void page_set_cache(struct page *page, struct kmem_cache *cache)
 
 static inline struct kmem_cache *page_get_cache(struct page *page)
 {
+       if (unlikely(PageCompound(page)))
+               page = (struct page *)page_private(page);
        return (struct kmem_cache *)page->lru.next;
 }
 
@@ -600,6 +604,8 @@ static inline void page_set_slab(struct page *page, struct slab *slab)
 
 static inline struct slab *page_get_slab(struct page *page)
 {
+       if (unlikely(PageCompound(page)))
+               page = (struct page *)page_private(page);
        return (struct slab *)page->lru.prev;
 }
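
A minimal sketch of the compound-page redirection used by both lookups
above, assuming (per this kernel's prep_compound_page()) that every
sub-page of an order > 0 allocation, head included, stores the head page
in page_private; compound_head_page() is a hypothetical name, not part
of the patch:

	static inline struct page *compound_head_page(struct page *page)
	{
		if (unlikely(PageCompound(page)))
			/* page_private points at the head page
			 * (a no-op redirect for the head itself) */
			page = (struct page *)page_private(page);
		return page;
	}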
 
@@ -826,7 +832,7 @@ static void init_reap_node(int cpu)
 
        node = next_node(cpu_to_node(cpu), node_online_map);
        if (node == MAX_NUMNODES)
-               node = 0;
+               node = first_node(node_online_map);
 
        __get_cpu_var(reap_node) = node;
 }
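
The wrap-around fix above matters when node 0 is not in node_online_map;
a sketch of the corrected round-robin step (next_online_node_wrap() is a
hypothetical helper name):

	static int next_online_node_wrap(int node)
	{
		node = next_node(node, node_online_map); /* next online node */
		if (node == MAX_NUMNODES)                /* ran off the end */
			node = first_node(node_online_map);
		return node;
	}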
@@ -892,8 +898,33 @@ static struct array_cache *alloc_arraycache(int node, int entries,
        return nc;
 }
 
+/*
+ * Transfer objects from one arraycache to another.
+ * Locking must be handled by the caller.
+ *
+ * Return the number of entries transferred.
+ */
+static int transfer_objects(struct array_cache *to,
+               struct array_cache *from, unsigned int max)
+{
+       /* Figure out how many entries to transfer */
+       int nr = min(min(from->avail, max), to->limit - to->avail);
+
+       if (!nr)
+               return 0;
+
+       memcpy(to->entry + to->avail, from->entry + from->avail - nr,
+                       sizeof(void *) * nr);
+
+       from->avail -= nr;
+       to->avail += nr;
+       to->touched = 1;
+       return nr;
+}
+
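
A worked example of the transfer size computed above, with assumed
numbers: if 'from' holds 30 objects, the caller allows up to 60, and
'to' has limit 48 with 40 entries already cached, then

	nr = min(min(30, 60), 48 - 40);		/* = 8 */

and the memcpy moves the top 8 pointers of 'from' (the entry array is
used as a LIFO stack) onto the top of 'to'.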
 #ifdef CONFIG_NUMA
 static void *__cache_alloc_node(struct kmem_cache *, gfp_t, int);
+static void *alternate_node_alloc(struct kmem_cache *, gfp_t);
 
 static struct array_cache **alloc_alien_cache(int node, int limit)
 {
@@ -940,6 +971,13 @@ static void __drain_alien_cache(struct kmem_cache *cachep,
 
        if (ac->avail) {
                spin_lock(&rl3->list_lock);
+               /*
+                * Stuff objects into the remote node's shared array first.
+                * That way we could avoid the overhead of putting the objects
+                * into the free lists and getting them back later.
+                */
+               transfer_objects(rl3->shared, ac, ac->limit);
+
                free_block(cachep, ac->entry, ac->avail, node);
                ac->avail = 0;
                spin_unlock(&rl3->list_lock);
@@ -955,8 +993,8 @@ static void reap_alien(struct kmem_cache *cachep, struct kmem_list3 *l3)
 
        if (l3->alien) {
                struct array_cache *ac = l3->alien[node];
-               if (ac && ac->avail) {
-                       spin_lock_irq(&ac->lock);
+
+               if (ac && ac->avail && spin_trylock_irq(&ac->lock)) {
                        __drain_alien_cache(cachep, ac, node);
                        spin_unlock_irq(&ac->lock);
                }
@@ -1983,10 +2021,9 @@ kmem_cache_create (const char *name, size_t size, size_t align,
        align = ralign;
 
        /* Get cache's description obj. */
-       cachep = kmem_cache_alloc(&cache_cache, SLAB_KERNEL);
+       cachep = kmem_cache_zalloc(&cache_cache, SLAB_KERNEL);
        if (!cachep)
                goto oops;
-       memset(cachep, 0, sizeof(struct kmem_cache));
 
 #if DEBUG
        cachep->obj_size = size;
@@ -2119,25 +2156,9 @@ static void check_spinlock_acquired_node(struct kmem_cache *cachep, int node)
 #define check_spinlock_acquired_node(x, y) do { } while(0)
 #endif
 
-/*
- * Waits for all CPUs to execute func().
- */
-static void smp_call_function_all_cpus(void (*func)(void *arg), void *arg)
-{
-       check_irq_on();
-       preempt_disable();
-       local_irq_disable();
-       func(arg);
-       local_irq_enable();
-
-       if (smp_call_function(func, arg, 1, 1))
-               BUG();
-
-       preempt_enable();
-}
-
-static void drain_array_locked(struct kmem_cache *cachep,
-                       struct array_cache *ac, int force, int node);
+static void drain_array(struct kmem_cache *cachep, struct kmem_list3 *l3,
+                       struct array_cache *ac,
+                       int force, int node);
 
 static void do_drain(void *arg)
 {
@@ -2158,14 +2179,12 @@ static void drain_cpu_caches(struct kmem_cache *cachep)
        struct kmem_list3 *l3;
        int node;
 
-       smp_call_function_all_cpus(do_drain, cachep);
+       on_each_cpu(do_drain, cachep, 1, 1);
        check_irq_on();
        for_each_online_node(node) {
                l3 = cachep->nodelists[node];
                if (l3) {
-                       spin_lock_irq(&l3->list_lock);
-                       drain_array_locked(cachep, l3->shared, 1, node);
-                       spin_unlock_irq(&l3->list_lock);
+                       drain_array(cachep, l3, l3->shared, 1, node);
                        if (l3->alien)
                                drain_alien_cache(cachep, l3->alien);
                }
@@ -2411,7 +2430,7 @@ static void slab_put_obj(struct kmem_cache *cachep, struct slab *slabp,
        /* Verify that the slab belongs to the intended node */
        WARN_ON(slabp->nodeid != nodeid);
 
-       if (slab_bufctl(slabp)[objnr] != BUFCTL_FREE) {
+       if (slab_bufctl(slabp)[objnr] + 1 <= SLAB_LIMIT + 1) {
                printk(KERN_ERR "slab: double free detected in cache "
                                "'%s', objp %p\n", cachep->name, objp);
                BUG();
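
The comparison above replaces the old '!= BUFCTL_FREE' test so that
BUFCTL_ACTIVE (used by the leak tracking added below) also counts as
allocated. A sketch of the unsigned trick, using the #defines from the
top of the file (bufctl_marks_free() is a hypothetical name):

	/*
	 * True when the slot holds a free-chain index (<= SLAB_LIMIT)
	 * or BUFCTL_END (~0U, whose +1 wraps to 0): the object is
	 * already free, so freeing it again is a double free.
	 * BUFCTL_FREE (~0U-1) and BUFCTL_ACTIVE (~0U-2) fail the test
	 * and pass as allocated.
	 */
	static inline int bufctl_marks_free(kmem_bufctl_t x)
	{
		return x + 1 <= SLAB_LIMIT + 1;
	}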
@@ -2429,8 +2448,11 @@ static void set_slab_attr(struct kmem_cache *cachep, struct slab *slabp,
        struct page *page;
 
        /* Nasty!!!!!! I hope this is OK. */
-       i = 1 << cachep->gfporder;
        page = virt_to_page(objp);
+
+       i = 1;
+       if (likely(!PageCompound(page)))
+               i <<= cachep->gfporder;
        do {
                page_set_cache(page, cachep);
                page_set_slab(page, slabp);
@@ -2614,6 +2636,9 @@ static void *cache_free_debugcheck(struct kmem_cache *cachep, void *objp,
                 */
                cachep->dtor(objp + obj_offset(cachep), cachep, 0);
        }
+#ifdef CONFIG_DEBUG_SLAB_LEAK
+       slab_bufctl(slabp)[objnr] = BUFCTL_FREE;
+#endif
        if (cachep->flags & SLAB_POISON) {
 #ifdef CONFIG_DEBUG_PAGEALLOC
                if ((cachep->buffer_size % PAGE_SIZE)==0 && OFF_SLAB(cachep)) {
@@ -2686,20 +2711,10 @@ retry:
        BUG_ON(ac->avail > 0 || !l3);
        spin_lock(&l3->list_lock);
 
-       if (l3->shared) {
-               struct array_cache *shared_array = l3->shared;
-               if (shared_array->avail) {
-                       if (batchcount > shared_array->avail)
-                               batchcount = shared_array->avail;
-                       shared_array->avail -= batchcount;
-                       ac->avail = batchcount;
-                       memcpy(ac->entry,
-                              &(shared_array->entry[shared_array->avail]),
-                              sizeof(void *) * batchcount);
-                       shared_array->touched = 1;
-                       goto alloc_done;
-               }
-       }
+       /* See if we can refill from the shared array */
+       if (l3->shared && transfer_objects(ac, l3->shared, batchcount))
+               goto alloc_done;
+
        while (batchcount > 0) {
                struct list_head *entry;
                struct slab *slabp;
@@ -2797,6 +2812,16 @@ static void *cache_alloc_debugcheck_after(struct kmem_cache *cachep,
                *dbg_redzone1(cachep, objp) = RED_ACTIVE;
                *dbg_redzone2(cachep, objp) = RED_ACTIVE;
        }
+#ifdef CONFIG_DEBUG_SLAB_LEAK
+       {
+               struct slab *slabp;
+               unsigned objnr;
+
+               slabp = page_get_slab(virt_to_page(objp));
+               objnr = (unsigned)(objp - slabp->s_mem) / cachep->buffer_size;
+               slab_bufctl(slabp)[objnr] = BUFCTL_ACTIVE;
+       }
+#endif
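
The index recovery above, with assumed numbers: if buffer_size is 256
and objp sits 768 bytes past s_mem (the slab's first object), then

	objnr = 768 / 256;	/* = 3, the fourth object in the slab */

and that object's bufctl slot is stamped BUFCTL_ACTIVE so the leak
scanner can tell live objects from free ones.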
        objp += obj_offset(cachep);
        if (cachep->ctor && cachep->flags & SLAB_POISON) {
                unsigned long ctor_flags = SLAB_CTOR_CONSTRUCTOR;
@@ -2818,11 +2843,10 @@ static inline void *____cache_alloc(struct kmem_cache *cachep, gfp_t flags)
        struct array_cache *ac;
 
 #ifdef CONFIG_NUMA
-       if (unlikely(current->mempolicy && !in_interrupt())) {
-               int nid = slab_node(current->mempolicy);
-
-               if (nid != numa_node_id())
-                       return __cache_alloc_node(cachep, flags, nid);
+       if (unlikely(current->flags & (PF_SPREAD_SLAB | PF_MEMPOLICY))) {
+               objp = alternate_node_alloc(cachep, flags);
+               if (objp != NULL)
+                       return objp;
        }
 #endif
 
@@ -2857,6 +2881,28 @@ static __always_inline void *__cache_alloc(struct kmem_cache *cachep,
 }
 
 #ifdef CONFIG_NUMA
+/*
+ * Try allocating on another node if PF_SPREAD_SLAB or PF_MEMPOLICY is set.
+ *
+ * If we are in_interrupt, then process context, including cpusets and
+ * mempolicy, may not apply and should not be used for allocation policy.
+ */
+static void *alternate_node_alloc(struct kmem_cache *cachep, gfp_t flags)
+{
+       int nid_alloc, nid_here;
+
+       if (in_interrupt())
+               return NULL;
+       nid_alloc = nid_here = numa_node_id();
+       if (cpuset_do_slab_mem_spread() && (cachep->flags & SLAB_MEM_SPREAD))
+               nid_alloc = cpuset_mem_spread_node();
+       else if (current->mempolicy)
+               nid_alloc = slab_node(current->mempolicy);
+       if (nid_alloc != nid_here)
+               return __cache_alloc_node(cachep, flags, nid_alloc);
+       return NULL;
+}
+
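
The policy precedence implemented by alternate_node_alloc(), sketched
as comments (behavior as per this patch):

	/* in_interrupt()                        -> NULL, no task policy
	 * cpuset spread + SLAB_MEM_SPREAD       -> cpuset_mem_spread_node()
	 * else task has a mempolicy             -> slab_node(mempolicy)
	 * chosen node == local node             -> NULL, keep the fast path
	 */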
 /*
 * An interface to enable slab creation on nodeid
  */
@@ -3081,6 +3127,23 @@ void *kmem_cache_alloc(struct kmem_cache *cachep, gfp_t flags)
 }
 EXPORT_SYMBOL(kmem_cache_alloc);
 
+/**
+ * kmem_cache_zalloc - Allocate an object. The memory is set to zero.
+ * @cache: The cache to allocate from.
+ * @flags: See kmalloc().
+ *
+ * Allocate an object from this cache and set the allocated memory to zero.
+ * The flags are only relevant if the cache has no available objects.
+ */
+void *kmem_cache_zalloc(struct kmem_cache *cache, gfp_t flags)
+{
+       void *ret = __cache_alloc(cache, flags, __builtin_return_address(0));
+       if (ret)
+               memset(ret, 0, obj_size(cache));
+       return ret;
+}
+EXPORT_SYMBOL(kmem_cache_zalloc);
+
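
Typical use of the new helper; my_cache and struct my_obj are
illustrative names only:

	struct my_obj *obj = kmem_cache_zalloc(my_cache, GFP_KERNEL);
	if (!obj)
		return -ENOMEM;
	/* *obj is fully zeroed here; no separate memset() needed */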
 /**
  * kmem_ptr_validate - check if an untrusted pointer might
  *     be a slab entry.
@@ -3208,22 +3271,23 @@ static __always_inline void *__do_kmalloc(size_t size, gfp_t flags,
        return __cache_alloc(cachep, flags, caller);
 }
 
-#ifndef CONFIG_DEBUG_SLAB
 
 void *__kmalloc(size_t size, gfp_t flags)
 {
+#ifndef CONFIG_DEBUG_SLAB
        return __do_kmalloc(size, flags, NULL);
+#else
+       return __do_kmalloc(size, flags, __builtin_return_address(0));
+#endif
 }
 EXPORT_SYMBOL(__kmalloc);
 
-#else
-
+#ifdef CONFIG_DEBUG_SLAB
 void *__kmalloc_track_caller(size_t size, gfp_t flags, void *caller)
 {
        return __do_kmalloc(size, flags, caller);
 }
 EXPORT_SYMBOL(__kmalloc_track_caller);
-
 #endif
 
 #ifdef CONFIG_SMP
@@ -3354,63 +3418,86 @@ const char *kmem_cache_name(struct kmem_cache *cachep)
 EXPORT_SYMBOL_GPL(kmem_cache_name);
 
 /*
- * This initializes kmem_list3 for all nodes.
+ * This initializes kmem_list3 or resizes various caches for all nodes.
  */
 static int alloc_kmemlist(struct kmem_cache *cachep)
 {
        int node;
        struct kmem_list3 *l3;
-       int err = 0;
+       struct array_cache *new_shared;
+       struct array_cache **new_alien;
 
        for_each_online_node(node) {
-               struct array_cache *nc = NULL, *new;
-               struct array_cache **new_alien = NULL;
-#ifdef CONFIG_NUMA
+
                new_alien = alloc_alien_cache(node, cachep->limit);
                if (!new_alien)
                        goto fail;
-#endif
-               new = alloc_arraycache(node, cachep->shared*cachep->batchcount,
+
+               new_shared = alloc_arraycache(node,
+                               cachep->shared*cachep->batchcount,
                                        0xbaadf00d);
-               if (!new)
+               if (!new_shared) {
+                       free_alien_cache(new_alien);
                        goto fail;
+               }
+
                l3 = cachep->nodelists[node];
                if (l3) {
+                       struct array_cache *shared = l3->shared;
+
                        spin_lock_irq(&l3->list_lock);
 
-                       nc = cachep->nodelists[node]->shared;
-                       if (nc)
-                               free_block(cachep, nc->entry, nc->avail, node);
+                       if (shared)
+                               free_block(cachep, shared->entry,
+                                               shared->avail, node);
 
-                       l3->shared = new;
-                       if (!cachep->nodelists[node]->alien) {
+                       l3->shared = new_shared;
+                       if (!l3->alien) {
                                l3->alien = new_alien;
                                new_alien = NULL;
                        }
                        l3->free_limit = (1 + nr_cpus_node(node)) *
                                        cachep->batchcount + cachep->num;
                        spin_unlock_irq(&l3->list_lock);
-                       kfree(nc);
+                       kfree(shared);
                        free_alien_cache(new_alien);
                        continue;
                }
                l3 = kmalloc_node(sizeof(struct kmem_list3), GFP_KERNEL, node);
-               if (!l3)
+               if (!l3) {
+                       free_alien_cache(new_alien);
+                       kfree(new_shared);
                        goto fail;
+               }
 
                kmem_list3_init(l3);
                l3->next_reap = jiffies + REAPTIMEOUT_LIST3 +
                                ((unsigned long)cachep) % REAPTIMEOUT_LIST3;
-               l3->shared = new;
+               l3->shared = new_shared;
                l3->alien = new_alien;
                l3->free_limit = (1 + nr_cpus_node(node)) *
                                        cachep->batchcount + cachep->num;
                cachep->nodelists[node] = l3;
        }
-       return err;
+       return 0;
+
 fail:
-       err = -ENOMEM;
-       return err;
+       if (!cachep->next.next) {
+               /* Cache is not active yet. Roll back what we did */
+               node--;
+               while (node >= 0) {
+                       if (cachep->nodelists[node]) {
+                               l3 = cachep->nodelists[node];
+
+                               kfree(l3->shared);
+                               free_alien_cache(l3->alien);
+                               kfree(l3);
+                               cachep->nodelists[node] = NULL;
+                       }
+                       node--;
+               }
+       }
+       return -ENOMEM;
 }
 
 struct ccupdate_struct {
@@ -3449,7 +3536,7 @@ static int do_tune_cpucache(struct kmem_cache *cachep, int limit,
        }
        new.cachep = cachep;
 
-       smp_call_function_all_cpus(do_ccupdate_local, (void *)&new);
+       on_each_cpu(do_ccupdate_local, (void *)&new, 1, 1);
 
        check_irq_on();
        cachep->batchcount = batchcount;
@@ -3530,22 +3617,32 @@ static void enable_cpucache(struct kmem_cache *cachep)
                       cachep->name, -err);
 }
 
-static void drain_array_locked(struct kmem_cache *cachep,
-                               struct array_cache *ac, int force, int node)
+/*
+ * Drain an array if it contains any elements, taking the l3 lock only if
+ * necessary. Note that the l3 listlock also protects the array_cache
+ * if drain_array() is used on the shared array.
+ */
+static void drain_array(struct kmem_cache *cachep, struct kmem_list3 *l3,
+                        struct array_cache *ac, int force, int node)
 {
        int tofree;
 
-       check_spinlock_acquired_node(cachep, node);
+       if (!ac || !ac->avail)
+               return;
        if (ac->touched && !force) {
                ac->touched = 0;
-       } else if (ac->avail) {
-               tofree = force ? ac->avail : (ac->limit + 4) / 5;
-               if (tofree > ac->avail)
-                       tofree = (ac->avail + 1) / 2;
-               free_block(cachep, ac->entry, tofree, node);
-               ac->avail -= tofree;
-               memmove(ac->entry, &(ac->entry[tofree]),
-                       sizeof(void *) * ac->avail);
+       } else {
+               spin_lock_irq(&l3->list_lock);
+               if (ac->avail) {
+                       tofree = force ? ac->avail : (ac->limit + 4) / 5;
+                       if (tofree > ac->avail)
+                               tofree = (ac->avail + 1) / 2;
+                       free_block(cachep, ac->entry, tofree, node);
+                       ac->avail -= tofree;
+                       memmove(ac->entry, &(ac->entry[tofree]),
+                               sizeof(void *) * ac->avail);
+               }
+               spin_unlock_irq(&l3->list_lock);
        }
 }
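
How much one unforced pass drains, with assumed numbers: for an array
with limit 120 currently holding 10 objects,

	tofree = (120 + 4) / 5;		/* 24: a fifth of capacity */
	if (tofree > 10)
		tofree = (10 + 1) / 2;	/* clamped to 5: half of avail */

so repeated reap passes shrink an idle array gradually instead of
emptying it in one go.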
 
@@ -3565,6 +3662,7 @@ static void cache_reap(void *unused)
 {
        struct list_head *walk;
        struct kmem_list3 *l3;
+       int node = numa_node_id();
 
        if (!mutex_trylock(&cache_chain_mutex)) {
                /* Give up. Setup the next iteration. */
@@ -3582,33 +3680,48 @@ static void cache_reap(void *unused)
                searchp = list_entry(walk, struct kmem_cache, next);
                check_irq_on();
 
-               l3 = searchp->nodelists[numa_node_id()];
+               /*
+                * We only take the l3 lock if absolutely necessary and we
+                * have established with reasonable certainty that
+                * we can do some work if the lock was obtained.
+                */
+               l3 = searchp->nodelists[node];
+
                reap_alien(searchp, l3);
-               spin_lock_irq(&l3->list_lock);
 
-               drain_array_locked(searchp, cpu_cache_get(searchp), 0,
-                                  numa_node_id());
+               drain_array(searchp, l3, cpu_cache_get(searchp), 0, node);
 
+               /*
+                * These are racy checks but it does not matter
+                * if we skip one check or scan twice.
+                */
                if (time_after(l3->next_reap, jiffies))
-                       goto next_unlock;
+                       goto next;
 
                l3->next_reap = jiffies + REAPTIMEOUT_LIST3;
 
-               if (l3->shared)
-                       drain_array_locked(searchp, l3->shared, 0,
-                                          numa_node_id());
+               drain_array(searchp, l3, l3->shared, 0, node);
 
                if (l3->free_touched) {
                        l3->free_touched = 0;
-                       goto next_unlock;
+                       goto next;
                }
 
                tofree = (l3->free_limit + 5 * searchp->num - 1) /
                                (5 * searchp->num);
                do {
+                       /*
+                        * Do not lock if there are no free blocks.
+                        */
+                       if (list_empty(&l3->slabs_free))
+                               break;
+
+                       spin_lock_irq(&l3->list_lock);
                        p = l3->slabs_free.next;
-                       if (p == &(l3->slabs_free))
+                       if (p == &(l3->slabs_free)) {
+                               spin_unlock_irq(&l3->list_lock);
                                break;
+                       }
 
                        slabp = list_entry(p, struct slab, list);
                        BUG_ON(slabp->inuse);
@@ -3623,10 +3736,8 @@ static void cache_reap(void *unused)
                        l3->free_objects -= searchp->num;
                        spin_unlock_irq(&l3->list_lock);
                        slab_destroy(searchp, slabp);
-                       spin_lock_irq(&l3->list_lock);
                } while (--tofree > 0);
-next_unlock:
-               spin_unlock_irq(&l3->list_lock);
+next:
                cond_resched();
        }
        check_irq_on();
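
The slab-freeing budget in cache_reap() is a hand-written ceiling
division, tofree = ceil(free_limit / (5 * num)) slabs per pass. With
assumed numbers free_limit = 244 and num = 16 objects per slab:

	tofree = (244 + 5*16 - 1) / (5*16);	/* = 323/80 = 4 slabs */

i.e. about a fifth of the allowed free objects, rounded up to whole
slabs, is torn down every REAPTIMEOUT_LIST3 interval.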
@@ -3863,6 +3974,159 @@ ssize_t slabinfo_write(struct file *file, const char __user * buffer,
                res = count;
        return res;
 }
+
+#ifdef CONFIG_DEBUG_SLAB_LEAK
+
+static void *leaks_start(struct seq_file *m, loff_t *pos)
+{
+       loff_t n = *pos;
+       struct list_head *p;
+
+       mutex_lock(&cache_chain_mutex);
+       p = cache_chain.next;
+       while (n--) {
+               p = p->next;
+               if (p == &cache_chain)
+                       return NULL;
+       }
+       return list_entry(p, struct kmem_cache, next);
+}
+
+static inline int add_caller(unsigned long *n, unsigned long v)
+{
+       unsigned long *p;
+       int l;
+       if (!v)
+               return 1;
+       l = n[1];
+       p = n + 2;
+       while (l) {
+               int i = l/2;
+               unsigned long *q = p + 2 * i;
+               if (*q == v) {
+                       q[1]++;
+                       return 1;
+               }
+               if (*q > v) {
+                       l = i;
+               } else {
+                       p = q + 2;
+                       l -= i + 1;
+               }
+       }
+       if (++n[1] == n[0])
+               return 0;
+       memmove(p + 2, p, n[1] * 2 * sizeof(unsigned long) - ((void *)p - (void *)n));
+       p[0] = v;
+       p[1] = 1;
+       return 1;
+}
+
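
Layout of the scratch table that add_caller() maintains, inferred from
the code above (one unsigned long per cell):

	n[0]		capacity, counted in (address, count) pairs
	n[1]		pairs currently stored
	n[2], n[3]	first pair: caller address, hit count
	n[4], n[5]	second pair, ... kept sorted by address

The while loop is a binary search over the stored pairs; on a miss the
memmove() shifts the tail up by one pair to keep the array sorted, and
0 is returned only when the table is full.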
+static void handle_slab(unsigned long *n, struct kmem_cache *c, struct slab *s)
+{
+       void *p;
+       int i;
+       if (n[0] == n[1])
+               return;
+       for (i = 0, p = s->s_mem; i < c->num; i++, p += c->buffer_size) {
+               if (slab_bufctl(s)[i] != BUFCTL_ACTIVE)
+                       continue;
+               if (!add_caller(n, (unsigned long)*dbg_userword(c, p)))
+                       return;
+       }
+}
+
+static void show_symbol(struct seq_file *m, unsigned long address)
+{
+#ifdef CONFIG_KALLSYMS
+       char *modname;
+       const char *name;
+       unsigned long offset, size;
+       char namebuf[KSYM_NAME_LEN+1];
+
+       name = kallsyms_lookup(address, &size, &offset, &modname, namebuf);
+
+       if (name) {
+               seq_printf(m, "%s+%#lx/%#lx", name, offset, size);
+               if (modname)
+                       seq_printf(m, " [%s]", modname);
+               return;
+       }
+#endif
+       seq_printf(m, "%p", (void *)address);
+}
+
+static int leaks_show(struct seq_file *m, void *p)
+{
+       struct kmem_cache *cachep = p;
+       struct list_head *q;
+       struct slab *slabp;
+       struct kmem_list3 *l3;
+       const char *name;
+       unsigned long *n = m->private;
+       int node;
+       int i;
+
+       if (!(cachep->flags & SLAB_STORE_USER))
+               return 0;
+       if (!(cachep->flags & SLAB_RED_ZONE))
+               return 0;
+
+       /* OK, we can do it */
+
+       n[1] = 0;
+
+       for_each_online_node(node) {
+               l3 = cachep->nodelists[node];
+               if (!l3)
+                       continue;
+
+               check_irq_on();
+               spin_lock_irq(&l3->list_lock);
+
+               list_for_each(q, &l3->slabs_full) {
+                       slabp = list_entry(q, struct slab, list);
+                       handle_slab(n, cachep, slabp);
+               }
+               list_for_each(q, &l3->slabs_partial) {
+                       slabp = list_entry(q, struct slab, list);
+                       handle_slab(n, cachep, slabp);
+               }
+               spin_unlock_irq(&l3->list_lock);
+       }
+       name = cachep->name;
+       if (n[0] == n[1]) {
+               /* Increase the buffer size */
+               mutex_unlock(&cache_chain_mutex);
+               m->private = kzalloc(n[0] * 4 * sizeof(unsigned long), GFP_KERNEL);
+               if (!m->private) {
+                       /* Too bad, we are really out */
+                       m->private = n;
+                       mutex_lock(&cache_chain_mutex);
+                       return -ENOMEM;
+               }
+               *(unsigned long *)m->private = n[0] * 2;
+               kfree(n);
+               mutex_lock(&cache_chain_mutex);
+               /* Now make sure this entry will be retried */
+               m->count = m->size;
+               return 0;
+       }
+       for (i = 0; i < n[1]; i++) {
+               seq_printf(m, "%s: %lu ", name, n[2*i+3]);
+               show_symbol(m, n[2*i+2]);
+               seq_putc(m, '\n');
+       }
+       return 0;
+}
+
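
When the table fills (n[0] == n[1]), leaks_show() above swaps in a
buffer of twice the capacity and then forces a retry. A sketch of the
seq_file idiom it relies on (inferred behavior, not a documented API):

	m->count = m->size;	/* pretend the output buffer overflowed */
	return 0;		/* seq_file discards this record and calls
				   ->show() again for the same entry */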
+struct seq_operations slabstats_op = {
+       .start = leaks_start,
+       .next = s_next,
+       .stop = s_stop,
+       .show = leaks_show,
+};
+#endif
 #endif
 
 /**