}
}
#else
-#define alloc_alien_cache(node, limit) do { } while (0)
+
#define drain_alien_cache(cachep, alien) do { } while (0)
+/*
+ * !CONFIG_NUMA stub. Returns a non-NULL poison pointer (not NULL) so
+ * that callers can keep the uniform "if (!alien) goto bad;" error
+ * check, while any accidental dereference of the alien cache on a
+ * non-NUMA build faults immediately and visibly.
+ * NOTE(review): assumes no !NUMA code path ever dereferences the
+ * returned pointer — confirm against drain_alien_cache/free_alien_cache
+ * callers.
+ */
+static inline struct array_cache **alloc_alien_cache(int node, int limit)
+{
+ return (struct array_cache **) 0x01020304ul;
+}
+
static inline void free_alien_cache(struct array_cache **ac_ptr)
{
}
+
#endif
static int __devinit cpuup_callback(struct notifier_block *nfb,
0xbaadf00d);
if (!shared)
goto bad;
-#ifdef CONFIG_NUMA
+
alien = alloc_alien_cache(node, cachep->limit);
if (!alien)
goto bad;
-#endif
cachep->array[cpu] = nc;
l3 = cachep->nodelists[node];
BUG();
}
+ /*
+ * Prevent CPUs from coming and going.
+ * lock_cpu_hotplug() nests outside cache_chain_mutex
+ */
+ lock_cpu_hotplug();
+
mutex_lock(&cache_chain_mutex);
list_for_each(p, &cache_chain) {
cachep->dtor = dtor;
cachep->name = name;
- /* Don't let CPUs to come and go */
- lock_cpu_hotplug();
if (g_cpucache_up == FULL) {
enable_cpucache(cachep);
/* cache setup completed, link it into the list */
list_add(&cachep->next, &cache_chain);
- unlock_cpu_hotplug();
oops:
if (!cachep && (flags & SLAB_PANIC))
panic("kmem_cache_create(): failed to create slab `%s'\n",
name);
mutex_unlock(&cache_chain_mutex);
+ unlock_cpu_hotplug();
return cachep;
}
EXPORT_SYMBOL(kmem_cache_create);