lguest: turn Waker into a thread, not a process
[linux-2.6-omap-h63xx.git] / kernel / workqueue.c
index 828e58230cbcd2e591d9f32e61d00a8881fc9216..ec7e4f62aaff4e71051eefb7d684e8ff4868a5ad 100644
@@ -159,14 +159,11 @@ static void __queue_work(struct cpu_workqueue_struct *cwq,
  */
 int queue_work(struct workqueue_struct *wq, struct work_struct *work)
 {
-       int ret = 0;
+       int ret;
+
+       ret = queue_work_on(get_cpu(), wq, work);
+       put_cpu();
 
-       if (!test_and_set_bit(WORK_STRUCT_PENDING, work_data_bits(work))) {
-               BUG_ON(!list_empty(&work->entry));
-               __queue_work(wq_per_cpu(wq, get_cpu()), work);
-               put_cpu();
-               ret = 1;
-       }
        return ret;
 }
 EXPORT_SYMBOL_GPL(queue_work);
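
The hunk above folds queue_work()'s open-coded pending-bit handling into the new queue_work_on() helper; get_cpu()/put_cpu() disable preemption so the chosen CPU stays stable while the work is queued. queue_work_on() itself is not part of this diff, but judging from the removed body it presumably looks roughly like this:

int queue_work_on(int cpu, struct workqueue_struct *wq,
                  struct work_struct *work)
{
        int ret = 0;

        /* Claim the pending bit; if it was already set, the work
         * is already queued and we report 0. */
        if (!test_and_set_bit(WORK_STRUCT_PENDING, work_data_bits(work))) {
                BUG_ON(!list_empty(&work->entry));
                __queue_work(wq_per_cpu(wq, cpu), work);
                ret = 1;
        }
        return ret;
}

Centralizing the test_and_set_bit() race in one helper spares queue_work() and (below) schedule_work_on() from each re-implementing it.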
@@ -427,6 +424,8 @@ EXPORT_SYMBOL_GPL(flush_workqueue);
  * flush_work - block until a work_struct's callback has terminated
  * @work: the work which is to be flushed
  *
+ * Returns false if @work has already terminated.
+ *
  * It is expected that, prior to calling flush_work(), the caller has
  * arranged for the work to not be requeued, otherwise it doesn't make
  * sense to use this function.
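
Given the added return-value note, a caller can tell "I waited for the callback" apart from "nothing was pending". A hypothetical usage sketch (my_work and my_teardown() are illustrative names, not part of this diff):

static struct work_struct my_work;

/* The caller must already have ensured my_work cannot be
 * requeued, as the kernel-doc above requires. */
static void my_teardown(void)
{
        if (!flush_work(&my_work))
                pr_debug("my_work had already terminated\n");
        /* either way, the callback is no longer running here */
}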
@@ -442,6 +441,9 @@ int flush_work(struct work_struct *work)
        if (!cwq)
                return 0;
 
+       lock_acquire(&cwq->wq->lockdep_map, 0, 0, 0, 2, _THIS_IP_);
+       lock_release(&cwq->wq->lockdep_map, 1, _THIS_IP_);
+
        prev = NULL;
        spin_lock_irq(&cwq->lock);
        if (!list_empty(&work->entry)) {
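
The lock_acquire()/lock_release() pair added here takes no real lock: it only tells lockdep that flush_work() nests inside the workqueue's lockdep_map, the same map the worker thread holds while running queued items. Lockdep can then warn about self-deadlocks such as the following hypothetical pattern:

static struct work_struct other_work;

/* Hypothetical example: other_work is queued on the same
 * single-threaded workqueue this callback runs on, so waiting
 * for it can deadlock; with the annotation, lockdep reports it. */
static void my_work_fn(struct work_struct *unused)
{
        flush_work(&other_work);
}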
@@ -687,8 +689,7 @@ int schedule_on_each_cpu(work_func_t func)
                struct work_struct *work = per_cpu_ptr(works, cpu);
 
                INIT_WORK(work, func);
-               set_bit(WORK_STRUCT_PENDING, work_data_bits(work));
-               __queue_work(per_cpu_ptr(keventd_wq->cpu_wq, cpu), work);
+               schedule_work_on(cpu, work);
        }
        for_each_online_cpu(cpu)
                flush_work(per_cpu_ptr(works, cpu));
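
schedule_on_each_cpu() gets the same treatment as queue_work(): rather than setting WORK_STRUCT_PENDING by hand and calling __queue_work() on keventd_wq directly, it goes through schedule_work_on(). That helper is not shown in this diff, but it is presumably a thin wrapper along these lines:

/* Sketch of schedule_work_on(); keventd_wq is the default
 * system-wide workqueue behind schedule_work(). */
int schedule_work_on(int cpu, struct work_struct *work)
{
        return queue_work_on(cpu, keventd_wq, work);
}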
@@ -910,6 +911,7 @@ static int __devinit workqueue_cpu_callback(struct notifier_block *nfb,
        unsigned int cpu = (unsigned long)hcpu;
        struct cpu_workqueue_struct *cwq;
        struct workqueue_struct *wq;
+       int ret = NOTIFY_OK;
 
        action &= ~CPU_TASKS_FROZEN;
 
@@ -917,7 +919,7 @@ static int __devinit workqueue_cpu_callback(struct notifier_block *nfb,
        case CPU_UP_PREPARE:
                cpu_set(cpu, cpu_populated_map);
        }
-
+undo:
        list_for_each_entry(wq, &workqueues, list) {
                cwq = per_cpu_ptr(wq->cpu_wq, cpu);
 
@@ -927,7 +929,9 @@ static int __devinit workqueue_cpu_callback(struct notifier_block *nfb,
                                break;
                        printk(KERN_ERR "workqueue [%s] for %i failed\n",
                                wq->name, cpu);
-                       return NOTIFY_BAD;
+                       action = CPU_UP_CANCELED;
+                       ret = NOTIFY_BAD;
+                       goto undo;
 
                case CPU_ONLINE:
                        start_workqueue_thread(cwq, cpu);
@@ -947,7 +951,7 @@ static int __devinit workqueue_cpu_callback(struct notifier_block *nfb,
                cpu_clear(cpu, cpu_populated_map);
        }
 
-       return NOTIFY_OK;
+       return ret;
 }
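
Taken together, the last four hunks convert workqueue_cpu_callback()'s failure path from a bare "return NOTIFY_BAD;", which leaked the threads already created for earlier workqueues on this CPU, into a rollback: action is flipped to CPU_UP_CANCELED and the list walk restarts from the undo: label, tearing down whatever was set up before the failure. A condensed, hypothetical sketch of the resulting flow (cleanup details of the real function omitted):

        int ret = NOTIFY_OK;
undo:
        list_for_each_entry(wq, &workqueues, list) {
                cwq = per_cpu_ptr(wq->cpu_wq, cpu);

                switch (action) {
                case CPU_UP_PREPARE:
                        if (!create_workqueue_thread(cwq, cpu))
                                break;            /* this wq is fine */
                        action = CPU_UP_CANCELED; /* re-walk the list, */
                        ret = NOTIFY_BAD;         /* cleaning up as we go */
                        goto undo;

                case CPU_UP_CANCELED:
                        cleanup_workqueue_thread(cwq);
                        break;
                }
        }
        return ret;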
 
 void __init init_workqueues(void)