 #define CPU_DOWN_PREPARE       0x0005 /* CPU (unsigned)v going down */
 #define CPU_DOWN_FAILED                0x0006 /* CPU (unsigned)v NOT going down */
 #define CPU_DEAD               0x0007 /* CPU (unsigned)v dead */
-#define CPU_LOCK_ACQUIRE       0x0008 /* Acquire all hotcpu locks */
-#define CPU_LOCK_RELEASE       0x0009 /* Release all hotcpu locks */
-#define CPU_DYING              0x000A /* CPU (unsigned)v not running any task,
+#define CPU_DYING              0x0008 /* CPU (unsigned)v not running any task,
                                        * not handling interrupts, soon dead */
 
 /* Used for CPU hotplug events occurring while tasks are frozen due to a suspend
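
With CPU_LOCK_ACQUIRE and CPU_LOCK_RELEASE gone, notifier callbacks only ever see lifecycle events. A minimal sketch of a consumer of the renumbered constants (the callback name and the empty case bodies are illustrative, not part of this patch):

static int my_cpu_callback(struct notifier_block *nb,
			   unsigned long action, void *hcpu)
{
	unsigned int cpu = (unsigned long)hcpu;

	switch (action) {
	case CPU_DOWN_PREPARE:	/* cpu may still be running tasks */
		break;
	case CPU_DYING:		/* now 0x0008: no tasks, no interrupts */
		break;
	case CPU_DEAD:		/* cpu is gone; free its per-cpu state */
		break;
	}
	return NOTIFY_OK;
}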
 
                return -EINVAL;
 
        cpu_hotplug_begin();
-       raw_notifier_call_chain(&cpu_chain, CPU_LOCK_ACQUIRE, hcpu);
        err = __raw_notifier_call_chain(&cpu_chain, CPU_DOWN_PREPARE | mod,
                                        hcpu, -1, &nr_calls);
        if (err == NOTIFY_BAD) {
 out_allowed:
        set_cpus_allowed(current, old_allowed);
 out_release:
-       raw_notifier_call_chain(&cpu_chain, CPU_LOCK_RELEASE, hcpu);
        cpu_hotplug_done();
        return err;
 }
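
The LOCK_ACQUIRE/LOCK_RELEASE broadcast is no longer needed here because _cpu_down() already runs between cpu_hotplug_begin() and cpu_hotplug_done(), the writer side of a refcount scheme whose reader side is get_online_cpus()/put_online_cpus(). A sketch of the reader pair, assuming the cpu_hotplug bookkeeping introduced elsewhere in this series (exact field names may differ):

void get_online_cpus(void)
{
	might_sleep();
	if (cpu_hotplug.active_writer == current)
		return;	/* the writer may nest reader sections */
	mutex_lock(&cpu_hotplug.lock);
	cpu_hotplug.refcount++;
	mutex_unlock(&cpu_hotplug.lock);
}

void put_online_cpus(void)
{
	if (cpu_hotplug.active_writer == current)
		return;
	mutex_lock(&cpu_hotplug.lock);
	if (!--cpu_hotplug.refcount && cpu_hotplug.active_writer)
		wake_up_process(cpu_hotplug.active_writer);
	mutex_unlock(&cpu_hotplug.lock);
}

cpu_hotplug_begin() then sleeps until the refcount drains to zero, so any code bracketed by the reader calls sees a stable cpu_online_map.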
                return -EINVAL;
 
        cpu_hotplug_begin();
-       raw_notifier_call_chain(&cpu_chain, CPU_LOCK_ACQUIRE, hcpu);
        ret = __raw_notifier_call_chain(&cpu_chain, CPU_UP_PREPARE | mod, hcpu,
                                                        -1, &nr_calls);
        if (ret == NOTIFY_BAD) {
        if (ret != 0)
                __raw_notifier_call_chain(&cpu_chain,
                                CPU_UP_CANCELED | mod, hcpu, nr_calls, NULL);
-       raw_notifier_call_chain(&cpu_chain, CPU_LOCK_RELEASE, hcpu);
        cpu_hotplug_done();
 
        return ret;
 
 };
 
 static DEFINE_PER_CPU_SHARED_ALIGNED(struct rq, runqueues);
-static DEFINE_MUTEX(sched_hotcpu_mutex);
 
 static inline void check_preempt_curr(struct rq *rq, struct task_struct *p)
 {
        struct task_struct *p;
        int retval;
 
-       mutex_lock(&sched_hotcpu_mutex);
+       get_online_cpus();
        read_lock(&tasklist_lock);
 
        p = find_process_by_pid(pid);
        if (!p) {
                read_unlock(&tasklist_lock);
-               mutex_unlock(&sched_hotcpu_mutex);
+               put_online_cpus();
                return -ESRCH;
        }
 
        }
 out_unlock:
        put_task_struct(p);
-       mutex_unlock(&sched_hotcpu_mutex);
+       put_online_cpus();
        return retval;
 }
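
The same substitution repeats in the second affinity path below; the only subtlety is the error paths, where every early return must drop the reference it took, as the -ESRCH path above does. Schematically (my_affinity_op is a made-up stand-in for the two affinity syscalls):

static long my_affinity_op(pid_t pid)
{
	long retval = -ESRCH;

	get_online_cpus();	/* was mutex_lock(&sched_hotcpu_mutex) */
	read_lock(&tasklist_lock);
	/* ... look up the task, read or update its cpus_allowed ... */
	read_unlock(&tasklist_lock);
	put_online_cpus();	/* every exit path must pair up */
	return retval;
}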
 
        struct task_struct *p;
        int retval;
 
-       mutex_lock(&sched_hotcpu_mutex);
+       get_online_cpus();
        read_lock(&tasklist_lock);
 
        retval = -ESRCH;
 
 out_unlock:
        read_unlock(&tasklist_lock);
-       mutex_unlock(&sched_hotcpu_mutex);
+       put_online_cpus();
 
        return retval;
 }
        struct rq *rq;
 
        switch (action) {
-       case CPU_LOCK_ACQUIRE:
-               mutex_lock(&sched_hotcpu_mutex);
-               break;
 
        case CPU_UP_PREPARE:
        case CPU_UP_PREPARE_FROZEN:
                spin_unlock_irq(&rq->lock);
                break;
 #endif
-       case CPU_LOCK_RELEASE:
-               mutex_unlock(&sched_hotcpu_mutex);
-               break;
        }
        return NOTIFY_OK;
 }
 {
        int err;
 
-       mutex_lock(&sched_hotcpu_mutex);
+       get_online_cpus();
        detach_destroy_domains(&cpu_online_map);
        err = arch_init_sched_domains(&cpu_online_map);
-       mutex_unlock(&sched_hotcpu_mutex);
+       put_online_cpus();
 
        return err;
 }
 {
        cpumask_t non_isolated_cpus;
 
-       mutex_lock(&sched_hotcpu_mutex);
+       get_online_cpus();
        arch_init_sched_domains(&cpu_online_map);
        cpus_andnot(non_isolated_cpus, cpu_possible_map, cpu_isolated_map);
        if (cpus_empty(non_isolated_cpus))
                cpu_set(smp_processor_id(), non_isolated_cpus);
-       mutex_unlock(&sched_hotcpu_mutex);
+       put_online_cpus();
        /* XXX: Theoretical race here - CPU may be hotplugged now */
        hotcpu_notifier(update_sched_domains, 0);
 
 
 #endif
 };
 
-/* All the per-cpu workqueues on the system, for hotplug cpu to add/remove
-   threads to each one as cpus come/go. */
-static DEFINE_MUTEX(workqueue_mutex);
+/* Serializes the accesses to the list of workqueues. */
+static DEFINE_SPINLOCK(workqueue_lock);
 static LIST_HEAD(workqueues);
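
The mutex can be demoted to a spinlock because all it still guards is the list linkage; the per-cpu worker threads that used to hide behind workqueue_mutex are now covered by get_online_cpus() in the callers. An illustrative traversal showing the intended split of duties (not from the patch):

struct workqueue_struct *wq;

spin_lock(&workqueue_lock);	/* guards only the list links */
list_for_each_entry(wq, &workqueues, list)
	printk(KERN_DEBUG "workqueue %s\n", wq->name);
spin_unlock(&workqueue_lock);	/* cwq->thread needs get_online_cpus(), not this lock */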
 
 static int singlethread_cpu __read_mostly;
  * Returns zero on success.
  * Returns -ve errno on failure.
  *
- * Appears to be racy against CPU hotplug.
- *
  * schedule_on_each_cpu() is very slow.
  */
 int schedule_on_each_cpu(work_func_t func)
        if (!works)
                return -ENOMEM;
 
-       preempt_disable();              /* CPU hotplug */
+       get_online_cpus();
        for_each_online_cpu(cpu) {
                struct work_struct *work = per_cpu_ptr(works, cpu);
 
                set_bit(WORK_STRUCT_PENDING, work_data_bits(work));
                __queue_work(per_cpu_ptr(keventd_wq->cpu_wq, cpu), work);
        }
-       preempt_enable();
        flush_workqueue(keventd_wq);
+       put_online_cpus();
        free_percpu(works);
        return 0;
 }
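
Disabling preemption kept CPUs from going away but forbade sleeping and did nothing against a CPU coming online mid-loop; with the map pinned, the function is also free to sleep in flush_workqueue(). A hypothetical caller (count_me is made up):

static void count_me(struct work_struct *unused)
{
	printk(KERN_INFO "ran on cpu %d\n", smp_processor_id());
}

	/* from process context: */
	schedule_on_each_cpu(count_me);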
                err = create_workqueue_thread(cwq, singlethread_cpu);
                start_workqueue_thread(cwq, -1);
        } else {
-               mutex_lock(&workqueue_mutex);
+               get_online_cpus();
+               spin_lock(&workqueue_lock);
                list_add(&wq->list, &workqueues);
+               spin_unlock(&workqueue_lock);
 
                for_each_possible_cpu(cpu) {
                        cwq = init_cpu_workqueue(wq, cpu);
                        err = create_workqueue_thread(cwq, cpu);
                        start_workqueue_thread(cwq, cpu);
                }
-               mutex_unlock(&workqueue_mutex);
+               put_online_cpus();
        }
 
        if (err) {
 {
        /*
         * Our caller is either destroy_workqueue() or CPU_DEAD,
-        * workqueue_mutex protects cwq->thread
+        * get_online_cpus() protects cwq->thread.
         */
        if (cwq->thread == NULL)
                return;
        struct cpu_workqueue_struct *cwq;
        int cpu;
 
-       mutex_lock(&workqueue_mutex);
+       get_online_cpus();
+       spin_lock(&workqueue_lock);
        list_del(&wq->list);
-       mutex_unlock(&workqueue_mutex);
+       spin_unlock(&workqueue_lock);
+       put_online_cpus();
 
        for_each_cpu_mask(cpu, *cpu_map) {
                cwq = per_cpu_ptr(wq->cpu_wq, cpu);
        action &= ~CPU_TASKS_FROZEN;
 
        switch (action) {
-       case CPU_LOCK_ACQUIRE:
-               mutex_lock(&workqueue_mutex);
-               return NOTIFY_OK;
-
-       case CPU_LOCK_RELEASE:
-               mutex_unlock(&workqueue_mutex);
-               return NOTIFY_OK;
 
        case CPU_UP_PREPARE:
                cpu_set(cpu, cpu_populated_map);
                case CPU_UP_PREPARE:
                        if (!create_workqueue_thread(cwq, cpu))
                                break;
-                       printk(KERN_ERR "workqueue for %i failed\n", cpu);
+                       printk(KERN_ERR "workqueue [%s] for %i failed\n",
+                               wq->name, cpu);
                        return NOTIFY_BAD;
 
                case CPU_ONLINE:
 
 #endif
 
 /*
- * 1. Guard access to the cache-chain.
- * 2. Protect sanity of cpu_online_map against cpu hotplug events
+ * Guard access to the cache-chain.
  */
 static DEFINE_MUTEX(cache_chain_mutex);
 static struct list_head cache_chain;
        int err = 0;
 
        switch (action) {
-       case CPU_LOCK_ACQUIRE:
-               mutex_lock(&cache_chain_mutex);
-               break;
        case CPU_UP_PREPARE:
        case CPU_UP_PREPARE_FROZEN:
+               mutex_lock(&cache_chain_mutex);
                err = cpuup_prepare(cpu);
+               mutex_unlock(&cache_chain_mutex);
                break;
        case CPU_ONLINE:
        case CPU_ONLINE_FROZEN:
 #endif
        case CPU_UP_CANCELED:
        case CPU_UP_CANCELED_FROZEN:
+               mutex_lock(&cache_chain_mutex);
                cpuup_canceled(cpu);
-               break;
-       case CPU_LOCK_RELEASE:
                mutex_unlock(&cache_chain_mutex);
                break;
        }
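
In slab the chain mutex moves from the LOCK_ACQUIRE/LOCK_RELEASE brackets into the individual events, so the callback holds it only while it actually edits the per-cpu caches; keeping cpu_online_map stable is now the job of get_online_cpus() in the entry points below. The resulting lock order, sketched as a hypothetical reader (walk_caches is made up):

static int walk_caches(void)
{
	struct kmem_cache *pc;

	get_online_cpus();		/* first pin the online map */
	mutex_lock(&cache_chain_mutex);	/* then take the chain mutex */
	list_for_each_entry(pc, &cache_chain, next)
		;	/* inspect pc */
	mutex_unlock(&cache_chain_mutex);
	put_online_cpus();
	return 0;
}

Taking the two in the opposite order can deadlock once a hotplug operation is waiting for readers to drain while its callbacks need cache_chain_mutex.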
         * We use cache_chain_mutex to ensure a consistent view of
         * cpu_online_map as well.  Please see cpuup_callback
         */
+       get_online_cpus();
        mutex_lock(&cache_chain_mutex);
 
        list_for_each_entry(pc, &cache_chain, next) {
                panic("kmem_cache_create(): failed to create slab `%s'\n",
                      name);
        mutex_unlock(&cache_chain_mutex);
+       put_online_cpus();
        return cachep;
 }
 EXPORT_SYMBOL(kmem_cache_create);
        int ret;
        BUG_ON(!cachep || in_interrupt());
 
+       get_online_cpus();
        mutex_lock(&cache_chain_mutex);
        ret = __cache_shrink(cachep);
        mutex_unlock(&cache_chain_mutex);
+       put_online_cpus();
        return ret;
 }
 EXPORT_SYMBOL(kmem_cache_shrink);
        BUG_ON(!cachep || in_interrupt());
 
        /* Find the cache in the chain of caches. */
+       get_online_cpus();
        mutex_lock(&cache_chain_mutex);
        /*
         * the chain is never empty, cache_cache is never destroyed
                slab_error(cachep, "Can't free all objects");
                list_add(&cachep->next, &cache_chain);
                mutex_unlock(&cache_chain_mutex);
+               put_online_cpus();
                return;
        }
 
 
        __kmem_cache_destroy(cachep);
        mutex_unlock(&cache_chain_mutex);
+       put_online_cpus();
 }
 EXPORT_SYMBOL(kmem_cache_destroy);