diff --git a/kernel/workqueue.c b/kernel/workqueue.c
index 52db48e7f6e..721093a2256 100644
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -158,10 +158,10 @@ static void __queue_work(struct cpu_workqueue_struct *cwq,
  *
  * Returns 0 if @work was already on a queue, non-zero otherwise.
  *
- * We queue the work to the CPU it was submitted, but there is no
- * guarantee that it will be processed by that CPU.
+ * We queue the work to the CPU on which it was submitted, but if the CPU dies
+ * it can be processed by another CPU.
  */
-int fastcall queue_work(struct workqueue_struct *wq, struct work_struct *work)
+int queue_work(struct workqueue_struct *wq, struct work_struct *work)
 {
 	int ret = 0;
 
@@ -175,7 +175,7 @@ int fastcall queue_work(struct workqueue_struct *wq, struct work_struct *work)
 }
 EXPORT_SYMBOL_GPL(queue_work);
 
-void delayed_work_timer_fn(unsigned long __data)
+static void delayed_work_timer_fn(unsigned long __data)
 {
 	struct delayed_work *dwork = (struct delayed_work *)__data;
 	struct cpu_workqueue_struct *cwq = get_wq_data(&dwork->work);
@@ -192,7 +192,7 @@ void delayed_work_timer_fn(unsigned long __data)
  *
  * Returns 0 if @work was already on a queue, non-zero otherwise.
  */
-int fastcall queue_delayed_work(struct workqueue_struct *wq,
+int queue_delayed_work(struct workqueue_struct *wq,
 			struct delayed_work *dwork, unsigned long delay)
 {
 	timer_stats_timer_set_start_info(&dwork->timer);
@@ -219,6 +219,7 @@ int queue_delayed_work_on(int cpu, struct workqueue_struct *wq,
 	struct timer_list *timer = &dwork->timer;
 	struct work_struct *work = &dwork->work;
 
+	timer_stats_timer_set_start_info(&dwork->timer);
 	if (!test_and_set_bit(WORK_STRUCT_PENDING, work_data_bits(work))) {
 		BUG_ON(timer_pending(timer));
 		BUG_ON(!list_empty(&work->entry));
@@ -246,7 +247,7 @@ static void run_workqueue(struct cpu_workqueue_struct *cwq)
 	if (cwq->run_depth > 3) {
 		/* morton gets to eat his hat */
 		printk("%s: recursion depth exceeded: %d\n",
-			__FUNCTION__, cwq->run_depth);
+			__func__, cwq->run_depth);
 		dump_stack();
 	}
 	while (!list_empty(&cwq->worklist)) {
@@ -388,7 +389,7 @@ static int flush_cpu_workqueue(struct cpu_workqueue_struct *cwq)
  * This function used to run the workqueues itself. Now we just wait for the
  * helper threads to do it.
  */
-void fastcall flush_workqueue(struct workqueue_struct *wq)
+void flush_workqueue(struct workqueue_struct *wq)
 {
 	const cpumask_t *cpu_map = wq_cpu_map(wq);
 	int cpu;
@@ -546,7 +547,7 @@ static struct workqueue_struct *keventd_wq __read_mostly;
  *
  * This puts a job in the kernel-global workqueue.
  */
-int fastcall schedule_work(struct work_struct *work)
+int schedule_work(struct work_struct *work)
 {
 	return queue_work(keventd_wq, work);
 }
@@ -560,7 +561,7 @@ EXPORT_SYMBOL(schedule_work);
  * After waiting for a given time this puts a job in the kernel-global
  * workqueue.
  */
-int fastcall schedule_delayed_work(struct delayed_work *dwork,
+int schedule_delayed_work(struct delayed_work *dwork,
 					unsigned long delay)
 {
 	timer_stats_timer_set_start_info(&dwork->timer);
@@ -580,6 +581,7 @@ EXPORT_SYMBOL(schedule_delayed_work);
 int schedule_delayed_work_on(int cpu,
 			struct delayed_work *dwork, unsigned long delay)
 {
+	timer_stats_timer_set_start_info(&dwork->timer);
 	return queue_delayed_work_on(cpu, keventd_wq, dwork, delay);
 }
 EXPORT_SYMBOL(schedule_delayed_work_on);
@@ -770,7 +772,7 @@ struct workqueue_struct *__create_workqueue_key(const char *name,
 }
 EXPORT_SYMBOL_GPL(__create_workqueue_key);
 
-static void cleanup_workqueue_thread(struct cpu_workqueue_struct *cwq, int cpu)
+static void cleanup_workqueue_thread(struct cpu_workqueue_struct *cwq)
 {
 	/*
 	 * Our caller is either destroy_workqueue() or CPU_DEAD,
@@ -806,19 +808,16 @@ static void cleanup_workqueue_thread(struct cpu_workqueue_struct *cwq, int cpu)
 void destroy_workqueue(struct workqueue_struct *wq)
 {
 	const cpumask_t *cpu_map = wq_cpu_map(wq);
-	struct cpu_workqueue_struct *cwq;
 	int cpu;
 
 	get_online_cpus();
 	spin_lock(&workqueue_lock);
 	list_del(&wq->list);
 	spin_unlock(&workqueue_lock);
-	put_online_cpus();
 
-	for_each_cpu_mask(cpu, *cpu_map) {
-		cwq = per_cpu_ptr(wq->cpu_wq, cpu);
-		cleanup_workqueue_thread(cwq, cpu);
-	}
+	for_each_cpu_mask(cpu, *cpu_map)
+		cleanup_workqueue_thread(per_cpu_ptr(wq->cpu_wq, cpu));
+	put_online_cpus();
 
 	free_percpu(wq->cpu_wq);
 	kfree(wq);
@@ -836,7 +835,6 @@ static int __devinit workqueue_cpu_callback(struct notifier_block *nfb,
 	action &= ~CPU_TASKS_FROZEN;
 
 	switch (action) {
-
 	case CPU_UP_PREPARE:
 		cpu_set(cpu, cpu_populated_map);
 	}
@@ -859,11 +857,17 @@ static int __devinit workqueue_cpu_callback(struct notifier_block *nfb,
 		case CPU_UP_CANCELED:
 			start_workqueue_thread(cwq, -1);
 		case CPU_DEAD:
-			cleanup_workqueue_thread(cwq, cpu);
+			cleanup_workqueue_thread(cwq);
 			break;
 		}
 	}
 
+	switch (action) {
+	case CPU_UP_CANCELED:
+	case CPU_DEAD:
+		cpu_clear(cpu, cpu_populated_map);
+	}
+
 	return NOTIFY_OK;
 }
 
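
Usage sketch (not part of the patch): the schedule_work() and
schedule_delayed_work() entry points touched above queue work on the
kernel-global keventd workqueue. A minimal module along these lines would
exercise them; the example_* names are hypothetical and only illustrate
the API of this kernel series (circa 2.6.25).

#include <linux/module.h>
#include <linux/workqueue.h>
#include <linux/jiffies.h>

/* Runs in keventd context; per the comment fixed above, it normally runs
 * on the CPU that queued it, unless that CPU goes down. */
static void example_handler(struct work_struct *work)
{
	printk(KERN_INFO "example: work item ran\n");
}

static DECLARE_WORK(example_work, example_handler);
static DECLARE_DELAYED_WORK(example_dwork, example_handler);

static int __init example_init(void)
{
	schedule_work(&example_work);              /* run as soon as possible */
	schedule_delayed_work(&example_dwork, HZ); /* run ~1 second later */
	return 0;
}

static void __exit example_exit(void)
{
	/* Make sure nothing is pending or running before unload. */
	cancel_delayed_work_sync(&example_dwork);
	flush_scheduled_work();
}

module_init(example_init);
module_exit(example_exit);
MODULE_LICENSE("GPL");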