[PATCH] bootmem: limit to 80 columns width
index fd1e4c4c13976e4552ace8316f82eafd01f577a3..5870bcbd33cfe82cbcae84a89f7c443e1329789a 100644
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -674,6 +674,37 @@ static struct kmem_cache cache_cache = {
 #endif
 };
 
+#ifdef CONFIG_LOCKDEP
+
+/*
+ * Slab sometimes uses the kmalloc slabs to store the slab headers
+ * for other slabs "off slab".
+ * The locking for this is tricky in that it nests within the locks
+ * of all other slabs in a few places; to deal with this special
+ * locking we put on-slab caches into a separate lock-class.
+ */
+static struct lock_class_key on_slab_key;
+
+static inline void init_lock_keys(struct cache_sizes *s)
+{
+       int q;
+
+       for (q = 0; q < MAX_NUMNODES; q++) {
+               if (!s->cs_cachep->nodelists[q] || OFF_SLAB(s->cs_cachep))
+                       continue;
+               lockdep_set_class(&s->cs_cachep->nodelists[q]->list_lock,
+                                 &on_slab_key);
+       }
+}
+
+#else
+static inline void init_lock_keys(struct cache_sizes *s)
+{
+}
+#endif
+
+
+
 /* Guard access to the cache-chain. */
 static DEFINE_MUTEX(cache_chain_mutex);
 static struct list_head cache_chain;
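
The comment in the hunk above gives the rationale: a kmalloc cache can hold the off-slab headers of other caches, so its per-node list_lock may be taken while another cache's list_lock is already held, and the on-slab kmalloc caches therefore get their own lockdep class. A minimal sketch of the same technique, with hypothetical names not taken from mm/slab.c:

#include <linux/types.h>
#include <linux/spinlock.h>
#include <linux/lockdep.h>

/* Hypothetical example: locks of the same type that legitimately nest. */
static struct lock_class_key inner_class;	/* assumed name */

struct bucket {
	spinlock_t lock;
};

static void bucket_init(struct bucket *b, bool inner)
{
	spin_lock_init(&b->lock);
	if (inner)
		/*
		 * Same trick as init_lock_keys(): move the locks that may
		 * nest inside others into their own class, so lockdep does
		 * not report same-class recursion.
		 */
		lockdep_set_class(&b->lock, &inner_class);
}
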
@@ -737,11 +768,10 @@ static inline struct kmem_cache *__find_general_cachep(size_t size,
        return csizep->cs_cachep;
 }
 
-struct kmem_cache *kmem_find_general_cachep(size_t size, gfp_t gfpflags)
+static struct kmem_cache *kmem_find_general_cachep(size_t size, gfp_t gfpflags)
 {
        return __find_general_cachep(size, gfpflags);
 }
-EXPORT_SYMBOL(kmem_find_general_cachep);
 
 static size_t slab_mgmt_size(size_t nr_objs, size_t align)
 {
@@ -1075,7 +1105,7 @@ static inline int cache_free_alien(struct kmem_cache *cachep, void *objp)
 
 #endif
 
-static int __devinit cpuup_callback(struct notifier_block *nfb,
+static int __cpuinit cpuup_callback(struct notifier_block *nfb,
                                    unsigned long action, void *hcpu)
 {
        long cpu = (long)hcpu;
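
The annotation change above is about which init section the callback may live in: cpuup_callback() is invoked for CPU hotplug events, so it has to stay around as long as CPU hotplug is possible, which is what __cpuinit (rather than the device-oriented __devinit) expresses. A hedged sketch of a hotplug notifier of that era, with made-up names, mirroring the shape of cpuup_callback():

#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/cpu.h>
#include <linux/notifier.h>

static int __cpuinit example_cpu_callback(struct notifier_block *nfb,
					  unsigned long action, void *hcpu)
{
	long cpu = (long)hcpu;

	switch (action) {
	case CPU_UP_PREPARE:
		pr_debug("preparing per-CPU state for cpu %ld\n", cpu);
		break;
	case CPU_UP_CANCELED:
	case CPU_DEAD:
		pr_debug("tearing down per-CPU state for cpu %ld\n", cpu);
		break;
	}
	return NOTIFY_OK;
}

static struct notifier_block example_cpu_notifier __cpuinitdata = {
	.notifier_call = example_cpu_callback,
};
/* registered once at boot with register_cpu_notifier(&example_cpu_notifier) */
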
@@ -1391,6 +1421,7 @@ void __init kmem_cache_init(void)
                                        ARCH_KMALLOC_FLAGS|SLAB_PANIC,
                                        NULL, NULL);
                }
+               init_lock_keys(sizes);
 
                sizes->cs_dmacachep = kmem_cache_create(names->name_dma,
                                        sizes->cs_size,
@@ -3087,16 +3118,7 @@ static void free_block(struct kmem_cache *cachep, void **objpp, int nr_objects,
                if (slabp->inuse == 0) {
                        if (l3->free_objects > l3->free_limit) {
                                l3->free_objects -= cachep->num;
-                               /*
-                                * It is safe to drop the lock. The slab is
-                                * no longer linked to the cache. cachep
-                                * cannot disappear - we are using it and
-                                * all destruction of caches must be
-                                * serialized properly by the user.
-                                */
-                               spin_unlock(&l3->list_lock);
                                slab_destroy(cachep, slabp);
-                               spin_lock(&l3->list_lock);
                        } else {
                                list_add(&slabp->list, &l3->slabs_free);
                        }
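
With the unlock/lock pair removed, slab_destroy() now runs with the per-node list_lock held. For a cache whose slab management structure lives off-slab, destroying a slab frees that header back into a kmalloc cache and can end up taking that cache's own list_lock, which is exactly the nesting the separate lock class above keeps lockdep from flagging as recursion. A simplified, hedged sketch of the call chain (not verbatim from the file):

/*
 * free_block(cachep, ...)			caller holds cachep's l3->list_lock
 *   slab_destroy(cachep, slabp)
 *     kmem_cache_free(cachep->slabp_cache, slabp)	off-slab header
 *       ... may take the kmalloc cache's own l3->list_lock
 *
 * Two list_locks of the same lockdep class nesting like this would be
 * reported as a possible recursive deadlock; init_lock_keys() puts the
 * on-slab kmalloc caches in a different class, so the nesting is accepted.
 */
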
@@ -3201,7 +3223,7 @@ void *kmem_cache_alloc(struct kmem_cache *cachep, gfp_t flags)
 EXPORT_SYMBOL(kmem_cache_alloc);
 
 /**
- * kmem_cache_alloc - Allocate an object. The memory is set to zero.
+ * kmem_cache_zalloc - Allocate an object. The memory is set to zero.
  * @cache: The cache to allocate from.
  * @flags: See kmalloc().
  *