EXPORT_SYMBOL(_local_bh_enable);
-void local_bh_enable(void)
+static inline void _local_bh_enable_ip(unsigned long ip)
{
+ WARN_ON_ONCE(in_irq() || irqs_disabled());
#ifdef CONFIG_TRACE_IRQFLAGS
- unsigned long flags;
-
- WARN_ON_ONCE(in_irq());
-#endif
- WARN_ON_ONCE(irqs_disabled());
-
-#ifdef CONFIG_TRACE_IRQFLAGS
- local_irq_save(flags);
+ local_irq_disable();
#endif
/*
* Are softirqs going to be turned on now:
*/
if (softirq_count() == SOFTIRQ_OFFSET)
- trace_softirqs_on((unsigned long)__builtin_return_address(0));
+ trace_softirqs_on(ip);
/*
* Keep preemption disabled until we are done with
* softirq processing:
*/
sub_preempt_count(SOFTIRQ_OFFSET - 1);

if (unlikely(!in_interrupt() && local_softirq_pending()))
	do_softirq();

dec_preempt_count();
#ifdef CONFIG_TRACE_IRQFLAGS
- local_irq_restore(flags);
+ local_irq_enable();
#endif
preempt_check_resched();
}
+
+void local_bh_enable(void)
+{
+ _local_bh_enable_ip((unsigned long)__builtin_return_address(0));
+}
EXPORT_SYMBOL(local_bh_enable);
void local_bh_enable_ip(unsigned long ip)
{
-#ifdef CONFIG_TRACE_IRQFLAGS
- unsigned long flags;
-
- WARN_ON_ONCE(in_irq());
-
- local_irq_save(flags);
-#endif
- /*
- * Are softirqs going to be turned on now:
- */
- if (softirq_count() == SOFTIRQ_OFFSET)
- trace_softirqs_on(ip);
- /*
- * Keep preemption disabled until we are done with
- * softirq processing:
- */
- sub_preempt_count(SOFTIRQ_OFFSET - 1);
-
- if (unlikely(!in_interrupt() && local_softirq_pending()))
- do_softirq();
-
- dec_preempt_count();
-#ifdef CONFIG_TRACE_IRQFLAGS
- local_irq_restore(flags);
-#endif
- preempt_check_resched();
+ _local_bh_enable_ip(ip);
}
EXPORT_SYMBOL(local_bh_enable_ip);
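Both exported entry points now funnel into the shared helper, so behavior is identical for existing callers. For context, a minimal sketch of the caller-facing API this serves (note_hit and hits are illustrative names, not part of the patch):

    #include <linux/interrupt.h>

    static unsigned long hits;              /* illustrative shared state */

    static void note_hit(void)
    {
            local_bh_disable();     /* softirqs off on this CPU */
            hits++;                 /* safe against our own softirq handler */
            local_bh_enable();      /* may run pending softirqs on return */
    }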
local_irq_restore(flags);
}
-void open_softirq(int nr, void (*action)(struct softirq_action*), void *data)
+void open_softirq(int nr, void (*action)(struct softirq_action *))
{
- softirq_vec[nr].data = data;
softirq_vec[nr].action = action;
}
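With the unused data cookie gone, handlers take only the softirq_action pointer and registration drops its third argument. A hedged sketch of an updated caller (MY_SOFTIRQ and my_action are hypothetical names):

    #include <linux/interrupt.h>

    static void my_action(struct softirq_action *a)
    {
            /* state formerly passed via 'data' now lives in
             * statics or per-CPU variables */
    }

    static void __init my_softirq_setup(void)
    {
            /* old: open_softirq(MY_SOFTIRQ, my_action, NULL); */
            open_softirq(MY_SOFTIRQ, my_action);
    }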
struct tasklet_struct **tail;
};
-/* Some compilers disobey section attribute on statics when not
- initialized -- RR */
-static DEFINE_PER_CPU(struct tasklet_head, tasklet_vec) = { NULL };
-static DEFINE_PER_CPU(struct tasklet_head, tasklet_hi_vec) = { NULL };
+static DEFINE_PER_CPU(struct tasklet_head, tasklet_vec);
+static DEFINE_PER_CPU(struct tasklet_head, tasklet_hi_vec);
void __tasklet_schedule(struct tasklet_struct *t)
{
&per_cpu(tasklet_hi_vec, cpu).head;
}
- open_softirq(TASKLET_SOFTIRQ, tasklet_action, NULL);
- open_softirq(HI_SOFTIRQ, tasklet_hi_action, NULL);
+ open_softirq(TASKLET_SOFTIRQ, tasklet_action);
+ open_softirq(HI_SOFTIRQ, tasklet_hi_action);
}
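Dropping the explicit { NULL } initializer is safe because static per-CPU data is zeroed, so the tasklet queues still start empty. Tasklet users are unaffected; a minimal usage sketch (my_tasklet_fn and my_irq are illustrative):

    #include <linux/interrupt.h>

    static void my_tasklet_fn(unsigned long data)
    {
            /* deferred work, runs in softirq context (TASKLET_SOFTIRQ) */
    }

    static DECLARE_TASKLET(my_tasklet, my_tasklet_fn, 0);

    static irqreturn_t my_irq(int irq, void *dev_id)
    {
            tasklet_schedule(&my_tasklet);  /* queue on this CPU's tasklet_vec */
            return IRQ_HANDLED;
    }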
static int ksoftirqd(void * __bind_cpu)
p = per_cpu(ksoftirqd, hotcpu);
per_cpu(ksoftirqd, hotcpu) = NULL;
- sched_setscheduler(p, SCHED_FIFO, &param);
+ sched_setscheduler_nocheck(p, SCHED_FIFO, &param);
kthread_stop(p);
takeover_tasklets(hotcpu);
break;
.notifier_call = cpu_callback
};
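sched_setscheduler_nocheck() skips the capable(CAP_SYS_NICE) permission test, which only makes sense for user-originated requests; the hotplug path above is purely in-kernel. A sketch of the pattern (make_fifo is an illustrative name):

    #include <linux/sched.h>

    static void make_fifo(struct task_struct *p)
    {
            struct sched_param param = { .sched_priority = MAX_RT_PRIO - 1 };

            /* in-kernel request: no permission check needed or wanted */
            sched_setscheduler_nocheck(p, SCHED_FIFO, &param);
    }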
-__init int spawn_ksoftirqd(void)
+static __init int spawn_ksoftirqd(void)
{
void *cpu = (void *)(long)smp_processor_id();
int err = cpu_callback(&cpu_nfb, CPU_UP_PREPARE, cpu);

BUG_ON(err == NOTIFY_BAD);
cpu_callback(&cpu_nfb, CPU_ONLINE, cpu);
register_cpu_notifier(&cpu_nfb);
return 0;
}
+early_initcall(spawn_ksoftirqd);
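Making spawn_ksoftirqd() static works because early_initcall() replaces the extern declaration and the explicit call from init code. The general pattern (my_setup is an illustrative name):

    #include <linux/init.h>

    static __init int my_setup(void)
    {
            return 0;       /* nonzero is reported as an initcall failure */
    }
    early_initcall(my_setup);       /* runs early in boot, no header needed */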
#ifdef CONFIG_SMP
/*
* Call a function on all processors
*/
-int on_each_cpu(void (*func) (void *info), void *info, int retry, int wait)
+int on_each_cpu(void (*func) (void *info), void *info, int wait)
{
int ret = 0;
preempt_disable();
- ret = smp_call_function(func, info, retry, wait);
+ ret = smp_call_function(func, info, wait);
local_irq_disable();
func(info);
local_irq_enable();
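The retry argument was unused, so every on_each_cpu() call site simply loses one parameter. A sketch of an updated caller (drain_local and drain_all are illustrative names):

    #include <linux/smp.h>

    static void drain_local(void *info)
    {
            /* runs on every CPU: via IPI remotely, directly (IRQs off) locally */
    }

    static void drain_all(void)
    {
            /* old: on_each_cpu(drain_local, NULL, 0, 1); */
            on_each_cpu(drain_local, NULL, 1);      /* wait == 1: block until done */
    }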