[PATCH] N32 sigset and __COMPAT_ENDIAN_SWAP__
diff --git a/kernel/workqueue.c b/kernel/workqueue.c
index b052e2c4c71053720e87606e20924cfec5f3f27c..f869aff6bc0c6aafaa2b3c5ef350851797276adb 100644
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -27,6 +27,7 @@
 #include <linux/cpu.h>
 #include <linux/notifier.h>
 #include <linux/kthread.h>
+#include <linux/hardirq.h>
 
 /*
  * The per-CPU workqueue (if single thread, we always use the first
@@ -427,22 +428,34 @@ int schedule_delayed_work_on(int cpu,
        return ret;
 }
 
-int schedule_on_each_cpu(void (*func) (void *info), void *info)
+/**
+ * schedule_on_each_cpu - call a function on each online CPU from keventd
+ * @func: the function to call
+ * @info: a pointer to pass to func()
+ *
+ * Returns zero on success.
+ * Returns a negative errno on failure.
+ *
+ * Appears to be racy against CPU hotplug.
+ *
+ * schedule_on_each_cpu() is very slow.
+ */
+int schedule_on_each_cpu(void (*func)(void *info), void *info)
 {
        int cpu;
-       struct work_struct *work;
-
-       work = kmalloc(NR_CPUS * sizeof(struct work_struct), GFP_KERNEL);
+       struct work_struct *works;
 
-       if (!work)
+       works = alloc_percpu(struct work_struct);
+       if (!works)
                return -ENOMEM;
+
        for_each_online_cpu(cpu) {
-               INIT_WORK(work + cpu, func, info);
+               INIT_WORK(per_cpu_ptr(works, cpu), func, info);
                __queue_work(per_cpu_ptr(keventd_wq->cpu_wq, cpu),
-                               work + cpu);
+                               per_cpu_ptr(works, cpu));
        }
        flush_workqueue(keventd_wq);
-       kfree(work);
+       free_percpu(works);
        return 0;
 }
 
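For illustration, a caller of the percpu-backed schedule_on_each_cpu() might look like the sketch below. The counter and function names are hypothetical, and it assumes this tree's two-argument work callback API:

#include <linux/percpu.h>
#include <linux/workqueue.h>

/* Hypothetical per-CPU counter, for illustration only. */
static DEFINE_PER_CPU(unsigned long, wq_demo_hits);

static void wq_demo_count(void *info)
{
	/* Runs in keventd context on each online CPU in turn. */
	__get_cpu_var(wq_demo_hits)++;
}

static int wq_demo_touch_all(void)
{
	/* Blocks in flush_workqueue() until every CPU has run the callback. */
	return schedule_on_each_cpu(wq_demo_count, NULL);
}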
@@ -476,6 +489,34 @@ void cancel_rearming_delayed_work(struct work_struct *work)
 }
 EXPORT_SYMBOL(cancel_rearming_delayed_work);
 
+/**
+ * execute_in_process_context - reliably execute the routine with user context
+ * @fn:                the function to execute
+ * @data:      data to pass to the function
+ * @ew:                guaranteed storage for the execute work structure (must
+ *             be available when the work executes)
+ *
+ * Executes the function immediately if process context is available,
+ * otherwise schedules the function for delayed execution.
+ *
+ * Returns:    0 - function was executed
+ *             1 - function was scheduled for execution
+ */
+int execute_in_process_context(void (*fn)(void *data), void *data,
+                              struct execute_work *ew)
+{
+       if (!in_interrupt()) {
+               fn(data);
+               return 0;
+       }
+
+       INIT_WORK(&ew->work, fn, data);
+       schedule_work(&ew->work);
+
+       return 1;
+}
+EXPORT_SYMBOL_GPL(execute_in_process_context);
+
 int keventd_up(void)
 {
        return keventd_wq != NULL;
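As a sketch of how a caller might use execute_in_process_context() (the object and function names here are made up for illustration):

#include <linux/slab.h>
#include <linux/workqueue.h>

/* Hypothetical object embedding the caller-supplied storage. */
struct demo_obj {
	struct execute_work release_ew;
	/* ... payload ... */
};

static void demo_obj_release(void *data)
{
	kfree(data);
}

/*
 * Callable from both process and interrupt context: the release runs
 * synchronously when process context is available, and is deferred to
 * keventd otherwise. Embedding the execute_work in the object keeps
 * the storage alive until the deferred work has executed.
 */
static void demo_obj_destroy(struct demo_obj *obj)
{
	execute_in_process_context(demo_obj_release, obj, &obj->release_ew);
}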
@@ -502,11 +543,11 @@ int current_is_keventd(void)
 static void take_over_work(struct workqueue_struct *wq, unsigned int cpu)
 {
        struct cpu_workqueue_struct *cwq = per_cpu_ptr(wq->cpu_wq, cpu);
-       LIST_HEAD(list);
+       struct list_head list;
        struct work_struct *work;
 
        spin_lock_irq(&cwq->lock);
-       list_splice_init(&cwq->worklist, &list);
+       list_replace_init(&cwq->worklist, &list);
 
        while (!list_empty(&list)) {
                printk("Taking work for %s\n", wq->name);
@@ -518,7 +559,7 @@ static void take_over_work(struct workqueue_struct *wq, unsigned int cpu)
 }
 
 /* We're holding the cpucontrol mutex here */
-static int __devinit workqueue_cpu_callback(struct notifier_block *nfb,
+static int workqueue_cpu_callback(struct notifier_block *nfb,
                                  unsigned long action,
                                  void *hcpu)
 {