pilppa.org Git - linux-2.6-omap-h63xx.git/blobdiff - mm/slub.c
hugetlb: Add hugetlb_dynamic_pool sysctl
[linux-2.6-omap-h63xx.git] / mm / slub.c
index ea9fd72093d8d91abfaef03ccedf2c83dd99824a..f426f9bc644b992fd12ec535ac8510b93682fe0c 100644 (file)
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -269,7 +269,11 @@ static inline struct kmem_cache_node *get_node(struct kmem_cache *s, int node)
 
 static inline struct kmem_cache_cpu *get_cpu_slab(struct kmem_cache *s, int cpu)
 {
-       return &s->cpu_slab[cpu];
+#ifdef CONFIG_SMP
+       return s->cpu_slab[cpu];
+#else
+       return &s->cpu_slab;
+#endif
 }
 
 static inline int check_valid_pointer(struct kmem_cache *s,
@@ -1572,7 +1576,7 @@ static void __always_inline *slab_alloc(struct kmem_cache *s,
        local_irq_restore(flags);
 
        if (unlikely((gfpflags & __GFP_ZERO) && object))
-               memset(object, 0, s->objsize);
+               memset(object, 0, c->objsize);
 
        return object;
 }
@@ -1854,18 +1858,9 @@ static void init_kmem_cache_cpu(struct kmem_cache *s,
 {
        c->page = NULL;
        c->freelist = NULL;
-       c->offset = s->offset / sizeof(void *);
        c->node = 0;
-}
-
-static inline int alloc_kmem_cache_cpus(struct kmem_cache *s, gfp_t flags)
-{
-       int cpu;
-
-       for_each_possible_cpu(cpu)
-               init_kmem_cache_cpu(s, get_cpu_slab(s, cpu));
-
-       return 1;
+       c->offset = s->offset / sizeof(void *);
+       c->objsize = s->objsize;
 }
 
 static void init_kmem_cache_node(struct kmem_cache_node *n)
@@ -1879,6 +1874,131 @@ static void init_kmem_cache_node(struct kmem_cache_node *n)
 #endif
 }
 
+#ifdef CONFIG_SMP
+/*
+ * Per cpu array for per cpu structures.
+ *
+ * The per cpu array places all kmem_cache_cpu structures from one processor
+ * close together meaning that it becomes possible that multiple per cpu
+ * structures are contained in one cacheline. This may be particularly
+ * beneficial for the kmalloc caches.
+ *
+ * A desktop system typically has around 60-80 slabs. With 100 here we are
+ * likely able to get per cpu structures for all caches from the array defined
+ * here. We must be able to cover all kmalloc caches during bootstrap.
+ *
+ * If the per cpu array is exhausted then fall back to kmalloc
+ * of individual cachelines. No sharing is possible then.
+ */
+#define NR_KMEM_CACHE_CPU 100
+
+static DEFINE_PER_CPU(struct kmem_cache_cpu,
+                               kmem_cache_cpu)[NR_KMEM_CACHE_CPU];
+
+static DEFINE_PER_CPU(struct kmem_cache_cpu *, kmem_cache_cpu_free);
+static cpumask_t kmem_cach_cpu_free_init_once = CPU_MASK_NONE;
+
+static struct kmem_cache_cpu *alloc_kmem_cache_cpu(struct kmem_cache *s,
+                                                       int cpu, gfp_t flags)
+{
+       struct kmem_cache_cpu *c = per_cpu(kmem_cache_cpu_free, cpu);
+
+       if (c)
+               per_cpu(kmem_cache_cpu_free, cpu) =
+                               (void *)c->freelist;
+       else {
+               /* Table overflow: So allocate ourselves */
+               c = kmalloc_node(
+                       ALIGN(sizeof(struct kmem_cache_cpu), cache_line_size()),
+                       flags, cpu_to_node(cpu));
+               if (!c)
+                       return NULL;
+       }
+
+       init_kmem_cache_cpu(s, c);
+       return c;
+}
+
+static void free_kmem_cache_cpu(struct kmem_cache_cpu *c, int cpu)
+{
+       if (c < per_cpu(kmem_cache_cpu, cpu) ||
+                       c > per_cpu(kmem_cache_cpu, cpu) + NR_KMEM_CACHE_CPU) {
+               kfree(c);
+               return;
+       }
+       c->freelist = (void *)per_cpu(kmem_cache_cpu_free, cpu);
+       per_cpu(kmem_cache_cpu_free, cpu) = c;
+}
+
+static void free_kmem_cache_cpus(struct kmem_cache *s)
+{
+       int cpu;
+
+       for_each_online_cpu(cpu) {
+               struct kmem_cache_cpu *c = get_cpu_slab(s, cpu);
+
+               if (c) {
+                       s->cpu_slab[cpu] = NULL;
+                       free_kmem_cache_cpu(c, cpu);
+               }
+       }
+}
+
+static int alloc_kmem_cache_cpus(struct kmem_cache *s, gfp_t flags)
+{
+       int cpu;
+
+       for_each_online_cpu(cpu) {
+               struct kmem_cache_cpu *c = get_cpu_slab(s, cpu);
+
+               if (c)
+                       continue;
+
+               c = alloc_kmem_cache_cpu(s, cpu, flags);
+               if (!c) {
+                       free_kmem_cache_cpus(s);
+                       return 0;
+               }
+               s->cpu_slab[cpu] = c;
+       }
+       return 1;
+}
+
+/*
+ * Initialize the per cpu array.
+ */
+static void init_alloc_cpu_cpu(int cpu)
+{
+       int i;
+
+       if (cpu_isset(cpu, kmem_cach_cpu_free_init_once))
+               return;
+
+       for (i = NR_KMEM_CACHE_CPU - 1; i >= 0; i--)
+               free_kmem_cache_cpu(&per_cpu(kmem_cache_cpu, cpu)[i], cpu);
+
+       cpu_set(cpu, kmem_cach_cpu_free_init_once);
+}
+
+static void __init init_alloc_cpu(void)
+{
+       int cpu;
+
+       for_each_online_cpu(cpu)
+               init_alloc_cpu_cpu(cpu);
+}
+
+#else
+static inline void free_kmem_cache_cpus(struct kmem_cache *s) {}
+static inline void init_alloc_cpu(void) {}
+
+static inline int alloc_kmem_cache_cpus(struct kmem_cache *s, gfp_t flags)
+{
+       init_kmem_cache_cpu(s, &s->cpu_slab);
+       return 1;
+}
+#endif
+
 #ifdef CONFIG_NUMA
 /*
  * No kmalloc_node yet so do it by hand. We know that this is the first
@@ -1886,7 +2006,8 @@ static void init_kmem_cache_node(struct kmem_cache_node *n)
  * possible.
  *
  * Note that this function only works on the kmalloc_node_cache
- * when allocating for the kmalloc_node_cache.
+ * when allocating for the kmalloc_node_cache. This is used for bootstrapping
+ * memory on a fresh node that has no slab structures yet.
  */
 static struct kmem_cache_node *early_kmem_cache_node_alloc(gfp_t gfpflags,
                                                           int node)
@@ -2115,6 +2236,7 @@ static int kmem_cache_open(struct kmem_cache *s, gfp_t gfpflags,
 
        if (alloc_kmem_cache_cpus(s, gfpflags & ~SLUB_DMA))
                return 1;
+       free_kmem_cache_nodes(s);
 error:
        if (flags & SLAB_PANIC)
                panic("Cannot create slab %s size=%lu realsize=%u "
@@ -2197,6 +2319,7 @@ static inline int kmem_cache_close(struct kmem_cache *s)
        flush_all(s);
 
        /* Attempt to free all objects */
+       free_kmem_cache_cpus(s);
        for_each_node_state(node, N_NORMAL_MEMORY) {
                struct kmem_cache_node *n = get_node(s, node);
 
@@ -2584,6 +2707,8 @@ void __init kmem_cache_init(void)
        int i;
        int caches = 0;
 
+       init_alloc_cpu();
+
 #ifdef CONFIG_NUMA
        /*
         * Must first have the slab cache available for the allocations of the
@@ -2644,10 +2769,12 @@ void __init kmem_cache_init(void)
 
 #ifdef CONFIG_SMP
        register_cpu_notifier(&slab_notifier);
+       kmem_size = offsetof(struct kmem_cache, cpu_slab) +
+                               nr_cpu_ids * sizeof(struct kmem_cache_cpu *);
+#else
+       kmem_size = sizeof(struct kmem_cache);
 #endif
 
-       kmem_size = offsetof(struct kmem_cache, cpu_slab) +
-                               nr_cpu_ids * sizeof(struct kmem_cache_cpu);
 
        printk(KERN_INFO "SLUB: Genslabs=%d, HWalign=%d, Order=%d-%d, MinObjects=%d,"
                " CPUs=%d, Nodes=%d\n",
@@ -2726,12 +2853,21 @@ struct kmem_cache *kmem_cache_create(const char *name, size_t size,
        down_write(&slub_lock);
        s = find_mergeable(size, align, flags, name, ctor);
        if (s) {
+               int cpu;
+
                s->refcount++;
                /*
                 * Adjust the object sizes so that we clear
                 * the complete object on kzalloc.
                 */
                s->objsize = max(s->objsize, (int)size);
+
+               /*
+                * And then we need to update the object size in the
+                * per cpu structures
+                */
+               for_each_online_cpu(cpu)
+                       get_cpu_slab(s, cpu)->objsize = s->objsize;
                s->inuse = max_t(int, s->inuse, ALIGN(size, sizeof(void *)));
                up_write(&slub_lock);
                if (sysfs_slab_alias(s, name))
@@ -2774,15 +2910,29 @@ static int __cpuinit slab_cpuup_callback(struct notifier_block *nfb,
        unsigned long flags;
 
        switch (action) {
+       case CPU_UP_PREPARE:
+       case CPU_UP_PREPARE_FROZEN:
+               init_alloc_cpu_cpu(cpu);
+               down_read(&slub_lock);
+               list_for_each_entry(s, &slab_caches, list)
+                       s->cpu_slab[cpu] = alloc_kmem_cache_cpu(s, cpu,
+                                                       GFP_KERNEL);
+               up_read(&slub_lock);
+               break;
+
        case CPU_UP_CANCELED:
        case CPU_UP_CANCELED_FROZEN:
        case CPU_DEAD:
        case CPU_DEAD_FROZEN:
                down_read(&slub_lock);
                list_for_each_entry(s, &slab_caches, list) {
+                       struct kmem_cache_cpu *c = get_cpu_slab(s, cpu);
+
                        local_irq_save(flags);
                        __flush_cpu_slab(s, cpu);
                        local_irq_restore(flags);
+                       free_kmem_cache_cpu(c, cpu);
+                       s->cpu_slab[cpu] = NULL;
                }
                up_read(&slub_lock);
                break;
@@ -3135,7 +3285,7 @@ static int list_locations(struct kmem_cache *s, char *buf,
        int node;
 
        if (!alloc_loc_track(&t, PAGE_SIZE / sizeof(struct location),
-                       GFP_KERNEL))
+                       GFP_TEMPORARY))
                return sprintf(buf, "Out of memory\n");
 
        /* Push back cpu slabs */