#endif
 
 int cpu_up(unsigned int cpu);
-
 extern void cpu_hotplug_init(void);
+extern void cpu_maps_update_begin(void);
+extern void cpu_maps_update_done(void);
 
-#else
+#else  /* CONFIG_SMP */
 
 static inline int register_cpu_notifier(struct notifier_block *nb)
 {
 	return 0;
 }
 
+static inline void cpu_maps_update_begin(void)
+{
+}
+
+static inline void cpu_maps_update_done(void)
+{
+}
+
 #endif /* CONFIG_SMP */
 extern struct sysdev_class cpu_sysdev_class;
-extern void cpu_maps_update_begin(void);
-extern void cpu_maps_update_done(void);
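
With cpu_maps_update_begin()/cpu_maps_update_done() now declared for the
CONFIG_SMP case (and stubbed out otherwise), code that needs the set of
CPUs to stay stable can take cpu_add_remove_lock directly instead of
cpu_hotplug.lock. A minimal sketch of such a caller; walk_online_cpus()
and do_something() are hypothetical names, not part of this patch:

	static void walk_online_cpus(void)
	{
		int cpu;

		cpu_maps_update_begin();	/* blocks cpu_up()/cpu_down() */
		for_each_online_cpu(cpu)
			do_something(cpu);	/* hypothetical per-cpu work */
		cpu_maps_update_done();
	}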
 
 #ifdef CONFIG_HOTPLUG_CPU
 /* Stop CPUs going up and down. */
 
 #define CPU_DEAD               0x0007 /* CPU (unsigned)v dead */
 #define CPU_DYING              0x0008 /* CPU (unsigned)v not running any task,
                                        * not handling interrupts, soon dead */
+#define CPU_POST_DEAD          0x0009 /* CPU (unsigned)v dead, cpu_hotplug
+                                       * lock is dropped */
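
A hotplug notifier that must flush or destroy workqueues in its teardown
path can defer that work to CPU_POST_DEAD, which is delivered after
cpu_hotplug.lock has been dropped. A sketch of such a callback;
foo_cpu_callback() and foo_cleanup() are hypothetical:

	static int __cpuinit foo_cpu_callback(struct notifier_block *nfb,
					      unsigned long action, void *hcpu)
	{
		unsigned int cpu = (unsigned long)hcpu;

		switch (action) {
		case CPU_POST_DEAD:
			/* cpu_hotplug.lock is not held here, so it is
			 * safe to flush/destroy workqueues for this cpu */
			foo_cleanup(cpu);	/* hypothetical teardown */
			break;
		}
		return NOTIFY_OK;
	}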
 
 /* Used for CPU hotplug events occurring while tasks are frozen due to a suspend
  * operation in progress
 
                err = create_workqueue_thread(cwq, singlethread_cpu);
                start_workqueue_thread(cwq, -1);
        } else {
-               get_online_cpus();
+               cpu_maps_update_begin();
                spin_lock(&workqueue_lock);
                list_add(&wq->list, &workqueues);
                spin_unlock(&workqueue_lock);
                        err = create_workqueue_thread(cwq, cpu);
                        start_workqueue_thread(cwq, cpu);
                }
-               put_online_cpus();
+               cpu_maps_update_done();
        }
 
        if (err) {
 static void cleanup_workqueue_thread(struct cpu_workqueue_struct *cwq)
 {
        /*
-        * Our caller is either destroy_workqueue() or CPU_DEAD,
-        * get_online_cpus() protects cwq->thread.
+        * Our caller is either destroy_workqueue() or CPU_POST_DEAD,
+        * cpu_add_remove_lock protects cwq->thread.
         */
        if (cwq->thread == NULL)
                return;
 
        flush_cpu_workqueue(cwq);
        /*
-        * If the caller is CPU_DEAD and cwq->worklist was not empty,
+        * If the caller is CPU_POST_DEAD and cwq->worklist was not empty,
         * a concurrent flush_workqueue() can insert a barrier after us.
         * However, in that case run_workqueue() won't return and check
         * kthread_should_stop() until it flushes all work_struct's.
        const cpumask_t *cpu_map = wq_cpu_map(wq);
        int cpu;
 
-       get_online_cpus();
+       cpu_maps_update_begin();
        spin_lock(&workqueue_lock);
        list_del(&wq->list);
        spin_unlock(&workqueue_lock);
 
        for_each_cpu_mask_nr(cpu, *cpu_map)
                cleanup_workqueue_thread(per_cpu_ptr(wq->cpu_wq, cpu));
-       put_online_cpus();
+       cpu_maps_update_done();
 
        free_percpu(wq->cpu_wq);
        kfree(wq);
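
Switching __create_workqueue_key() and destroy_workqueue() from
get_online_cpus() to cpu_maps_update_begin() is what makes
get_online_cpus() usable from work->func(): the workqueue paths no longer
take cpu_hotplug.lock while flushing, so a work item like the sketch
below can no longer deadlock against a concurrent destroy_workqueue().
my_work_fn() is a hypothetical illustration, not code from this patch:

	static void my_work_fn(struct work_struct *work)
	{
		/* Under the old locking, destroy_workqueue() held
		 * cpu_hotplug.lock while flushing this work item, so
		 * this get_online_cpus() could deadlock. */
		get_online_cpus();
		/* ... use per-cpu data ... */
		put_online_cpus();
	}

	static DECLARE_WORK(my_work, my_work_fn);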
 
                case CPU_UP_CANCELED:
                        start_workqueue_thread(cwq, -1);
-               case CPU_DEAD:
+               case CPU_POST_DEAD:
                        cleanup_workqueue_thread(cwq);
                        break;
                }
 
        switch (action) {
        case CPU_UP_CANCELED:
-       case CPU_DEAD:
+       case CPU_POST_DEAD:
                cpu_clear(cpu, cpu_populated_map);
        }
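
For reference, CPU_POST_DEAD is raised by the hotplug core only after
cpu_hotplug_done() has released cpu_hotplug.lock, while the caller still
holds cpu_add_remove_lock. A sketch of the tail of _cpu_down(), assuming
the structure of the hotplug code this patch targets (mod is the
tasks-frozen modifier applied during suspend):

	out_release:
		cpu_hotplug_done();		/* drops cpu_hotplug.lock */
		if (!err) {
			/* cpu_add_remove_lock is still held, so
			 * CPU_POST_DEAD handlers may safely flush or
			 * destroy workqueues */
			if (raw_notifier_call_chain(&cpu_chain,
						    CPU_POST_DEAD | mod,
						    hcpu) == NOTIFY_BAD)
				BUG();
		}
		return err;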