diff --git a/kernel/workqueue.c b/kernel/workqueue.c
index 1ad0ee489cd14f7239ce74d07fb3f15fdbd2765f..29fc39f1029cec92528b0cb66380028670b9eee1 100644
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -195,7 +195,6 @@ static void delayed_work_timer_fn(unsigned long __data)
 int queue_delayed_work(struct workqueue_struct *wq,
                        struct delayed_work *dwork, unsigned long delay)
 {
-       timer_stats_timer_set_start_info(&dwork->timer);
        if (delay == 0)
                return queue_work(wq, &dwork->work);
 
@@ -219,11 +218,12 @@ int queue_delayed_work_on(int cpu, struct workqueue_struct *wq,
        struct timer_list *timer = &dwork->timer;
        struct work_struct *work = &dwork->work;
 
-       timer_stats_timer_set_start_info(&dwork->timer);
        if (!test_and_set_bit(WORK_STRUCT_PENDING, work_data_bits(work))) {
                BUG_ON(timer_pending(timer));
                BUG_ON(!list_empty(&work->entry));
 
+               timer_stats_timer_set_start_info(&dwork->timer);
+
                /* This stores cwq for the moment, for the timer_fn */
                set_wq_data(work, wq_per_cpu(wq, raw_smp_processor_id()));
                timer->expires = jiffies + delay;
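The two hunks above move the timer-stats bookkeeping out of the wrappers and into the one place the work is actually armed: after test_and_set_bit() wins the WORK_STRUCT_PENDING bit. A concurrent caller that loses the bit never queues anything, so recording the start site before the test (as the removed lines did) could attribute the timer to a submission that was a no-op. Below is a minimal userspace sketch of this claim-then-record pattern; pending, record_start_site() and queue_once() are illustrative names, not kernel APIs.

#include <stdatomic.h>
#include <stdio.h>

static atomic_flag pending = ATOMIC_FLAG_INIT;

/* Stand-in for the timer-stats start-site bookkeeping. */
static void record_start_site(const char *site)
{
	printf("timer armed from %s\n", site);
}

static int queue_once(const char *site)
{
	/* Only the caller that wins the flag does the setup; a
	 * concurrent caller sees "already pending" and backs off. */
	if (atomic_flag_test_and_set(&pending))
		return 0;
	record_start_site(site);	/* bookkeeping inside the win */
	return 1;
}

int main(void)
{
	printf("%d\n", queue_once("first"));	/* 1: wins, records site */
	printf("%d\n", queue_once("second"));	/* 0: already pending */
	return 0;
}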
@@ -247,7 +247,7 @@ static void run_workqueue(struct cpu_workqueue_struct *cwq)
        if (cwq->run_depth > 3) {
                /* morton gets to eat his hat */
                printk("%s: recursion depth exceeded: %d\n",
-                       __FUNCTION__, cwq->run_depth);
+                       __func__, cwq->run_depth);
                dump_stack();
        }
        while (!list_empty(&cwq->worklist)) {
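The hunk above swaps the GCC-specific __FUNCTION__ spelling for __func__, the predefined identifier standardized in C99; both expand to the name of the enclosing function. A small standalone example, assuming nothing beyond a C99 compiler:

#include <stdio.h>

static void run_demo(void)
{
	/* __func__ evaluates to "run_demo" here. */
	printf("%s: recursion depth exceeded: %d\n", __func__, 4);
}

int main(void)
{
	run_demo();
	return 0;
}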
@@ -564,7 +564,6 @@ EXPORT_SYMBOL(schedule_work);
 int schedule_delayed_work(struct delayed_work *dwork,
                                        unsigned long delay)
 {
-       timer_stats_timer_set_start_info(&dwork->timer);
        return queue_delayed_work(keventd_wq, dwork, delay);
 }
 EXPORT_SYMBOL(schedule_delayed_work);
@@ -581,7 +580,6 @@ EXPORT_SYMBOL(schedule_delayed_work);
 int schedule_delayed_work_on(int cpu,
                        struct delayed_work *dwork, unsigned long delay)
 {
-       timer_stats_timer_set_start_info(&dwork->timer);
        return queue_delayed_work_on(cpu, keventd_wq, dwork, delay);
 }
 EXPORT_SYMBOL(schedule_delayed_work_on);
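schedule_delayed_work() and schedule_delayed_work_on() are thin wrappers that delegate to the queue_delayed_work*() functions on the shared keventd_wq, so with the hunks above applied the start site is recorded exactly once per submission, where the timer is armed, rather than twice. A sketch of that delegation shape; struct queue, default_queue and the function names here are illustrative, not the kernel's:

#include <stdio.h>

struct queue { const char *name; };
static struct queue default_queue = { "events" };

/* The one place bookkeeping happens, mirroring queue_delayed_work_on(). */
static int queue_delayed(struct queue *q, int delay)
{
	printf("armed on %s, delay %d\n", q->name, delay);
	return 1;
}

/* Pure delegation, mirroring schedule_delayed_work(): no bookkeeping
 * of its own, it only supplies the default queue. */
static int schedule_delayed(int delay)
{
	return queue_delayed(&default_queue, delay);
}

int main(void)
{
	return !schedule_delayed(10);
}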
@@ -772,7 +770,7 @@ struct workqueue_struct *__create_workqueue_key(const char *name,
 }
 EXPORT_SYMBOL_GPL(__create_workqueue_key);
 
-static void cleanup_workqueue_thread(struct cpu_workqueue_struct *cwq, int cpu)
+static void cleanup_workqueue_thread(struct cpu_workqueue_struct *cwq)
 {
        /*
         * Our caller is either destroy_workqueue() or CPU_DEAD,
@@ -808,7 +806,6 @@ static void cleanup_workqueue_thread(struct cpu_workqueue_struct *cwq, int cpu)
 void destroy_workqueue(struct workqueue_struct *wq)
 {
        const cpumask_t *cpu_map = wq_cpu_map(wq);
-       struct cpu_workqueue_struct *cwq;
        int cpu;
 
        get_online_cpus();
@@ -816,10 +813,8 @@ void destroy_workqueue(struct workqueue_struct *wq)
        list_del(&wq->list);
        spin_unlock(&workqueue_lock);
 
-       for_each_cpu_mask(cpu, *cpu_map) {
-               cwq = per_cpu_ptr(wq->cpu_wq, cpu);
-               cleanup_workqueue_thread(cwq, cpu);
-       }
+       for_each_cpu_mask(cpu, *cpu_map)
+               cleanup_workqueue_thread(per_cpu_ptr(wq->cpu_wq, cpu));
        put_online_cpus();
 
        free_percpu(wq->cpu_wq);
@@ -860,7 +855,7 @@ static int __devinit workqueue_cpu_callback(struct notifier_block *nfb,
                case CPU_UP_CANCELED:
                        start_workqueue_thread(cwq, -1);
                case CPU_DEAD:
-                       cleanup_workqueue_thread(cwq, cpu);
+                       cleanup_workqueue_thread(cwq);
                        break;
                }
        }
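In the hot-plug callback above, CPU_UP_CANCELED intentionally falls through to CPU_DEAD: a CPU whose bring-up was cancelled first has its half-started thread parked via start_workqueue_thread(cwq, -1), then takes the same cleanup path as a dead CPU. Since cleanup_workqueue_thread() no longer uses its cpu argument, both call sites shrink to the one-parameter form. A compilable sketch of the fall-through shape, with demo enum values and handlers standing in for the kernel's:

#include <stdio.h>

enum cpu_event { CPU_UP_CANCELED_DEMO, CPU_DEAD_DEMO };

static void park_thread(void)    { puts("park half-started thread"); }
static void cleanup_thread(void) { puts("flush queue, stop thread"); }

static void on_cpu_event(enum cpu_event ev)
{
	switch (ev) {
	case CPU_UP_CANCELED_DEMO:
		park_thread();
		/* fall through: a cancelled bring-up still needs cleanup */
	case CPU_DEAD_DEMO:
		cleanup_thread();
		break;
	}
}

int main(void)
{
	on_cpu_event(CPU_UP_CANCELED_DEMO);	/* park + cleanup */
	on_cpu_event(CPU_DEAD_DEMO);		/* cleanup only */
	return 0;
}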