static int wakeup_cpu;
static unsigned wakeup_prio = -1;

-static DEFINE_SPINLOCK(wakeup_lock);
+static raw_spinlock_t wakeup_lock =
+        (raw_spinlock_t)__RAW_SPIN_LOCK_UNLOCKED;

static void __wakeup_reset(struct trace_array *tr);
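Why the conversion matters: DEFINE_SPINLOCK() produces a lock whose lock/unlock paths go through lockdep and the preemption/irq-off tracing hooks, so taking it from inside the tracer's own callbacks can recurse into the tracer. The raw variant bypasses all of that debug machinery. A minimal side-by-side sketch, assuming the pre-2.6.33 naming used by this patch (raw_spinlock_t and __raw_spin_lock were later renamed arch_spinlock_t and arch_spin_lock); the lock names here are made up for illustration:

#include <linux/spinlock.h>

/* Lockdep-tracked lock: its own lock/unlock paths can be traced. */
static DEFINE_SPINLOCK(traced_lock);

/* Raw lock: no lockdep, no tracing hooks, safe inside tracer callbacks. */
static raw_spinlock_t untraced_lock =
        (raw_spinlock_t)__RAW_SPIN_LOCK_UNLOCKED;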
        if (unlikely(disabled != 1))
                goto out;

-       spin_lock_irqsave(&wakeup_lock, flags);
+       local_irq_save(flags);
+       __raw_spin_lock(&wakeup_lock);

        if (unlikely(!wakeup_task))
                goto unlock;

        trace_function(tr, data, ip, parent_ip, flags);

unlock:
-       spin_unlock_irqrestore(&wakeup_lock, flags);
+       __raw_spin_unlock(&wakeup_lock);
+       local_irq_restore(flags);

out:
        atomic_dec(&data->disabled);
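The trade-off is that spin_lock_irqsave() used to disable interrupts and take the lock in one call; with the raw lock the two steps are open-coded. The ordering is load-bearing: interrupts must go off before the lock is taken (otherwise an interrupt arriving while this CPU holds the lock could try to take it again and deadlock), and the lock must be dropped before the flags are restored. A hedged sketch of the pattern, with hypothetical names:

#include <linux/spinlock.h>

static raw_spinlock_t demo_lock =
        (raw_spinlock_t)__RAW_SPIN_LOCK_UNLOCKED;

static void demo_critical_section(void)
{
        unsigned long flags;

        local_irq_save(flags);          /* 1. interrupts off first */
        __raw_spin_lock(&demo_lock);    /* 2. then take the raw lock */

        /* ... critical section ... */

        __raw_spin_unlock(&demo_lock);  /* 3. drop the lock */
        local_irq_restore(flags);       /* 4. only then restore flags */
}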
        if (unlikely(disabled != 1))
                goto out;

-       spin_lock_irqsave(&wakeup_lock, flags);
+       local_irq_save(flags);
+       __raw_spin_lock(&wakeup_lock);

        /* We could race with grabbing wakeup_lock */
        if (unlikely(!tracer_enabled || next != wakeup_task))
                goto out_unlock;

out_unlock:
        __wakeup_reset(tr);
-       spin_unlock_irqrestore(&wakeup_lock, flags);
+       __raw_spin_unlock(&wakeup_lock);
+       local_irq_restore(flags);

out:
        atomic_dec(&tr->data[cpu]->disabled);
}
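Both probes also lean on the per-cpu disabled counter as a recursion guard: atomic_inc_return() yields 1 only for the outermost entry on this CPU, so a nested invocation (the tracer tracing itself) skips the work but still rebalances the counter on the way out. A sketch of the idiom, assuming struct trace_array_cpu from the tracer's private trace.h; probe_body() is a hypothetical stand-in for the real work:

#include <linux/kernel.h>
#include <asm/atomic.h>         /* linux/atomic.h on modern trees */

#include "trace.h"              /* for struct trace_array_cpu */

static void probe_body(struct trace_array_cpu *data);  /* hypothetical */

static void demo_probe(struct trace_array_cpu *data)
{
        long disabled;

        /* Returns 1 only for the outermost entry on this CPU. */
        disabled = atomic_inc_return(&data->disabled);
        if (likely(disabled == 1))
                probe_body(data);

        /* Nested entry skips the body but must still rebalance. */
        atomic_dec(&data->disabled);
}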
        struct trace_array_cpu *data;
        int cpu;

-       assert_spin_locked(&wakeup_lock);
-
        for_each_possible_cpu(cpu) {
                data = tr->data[cpu];
                tracing_reset(data);
        }
static void wakeup_reset(struct trace_array *tr)
{
        unsigned long flags;

-       spin_lock_irqsave(&wakeup_lock, flags);
+       local_irq_save(flags);
+       __raw_spin_lock(&wakeup_lock);
        __wakeup_reset(tr);
-       spin_unlock_irqrestore(&wakeup_lock, flags);
+       __raw_spin_unlock(&wakeup_lock);
+       local_irq_restore(flags);
}
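Dropping assert_spin_locked() is a real cost of the conversion: raw spinlocks carry no lockdep state, so the "caller must hold wakeup_lock" contract on __wakeup_reset() can no longer be checked at runtime and survives only as the kernel's double-underscore naming convention, with wakeup_reset() as the self-locking wrapper. A sketch of that convention, with hypothetical names:

#include <linux/spinlock.h>

static raw_spinlock_t demo_lock =
        (raw_spinlock_t)__RAW_SPIN_LOCK_UNLOCKED;

/* __foo(): caller must already hold demo_lock; nothing enforces it. */
static void __foo(void)
{
        /* ... touch state guarded by demo_lock ... */
}

/* foo(): public entry point that takes the lock itself. */
static void foo(void)
{
        unsigned long flags;

        local_irq_save(flags);
        __raw_spin_lock(&demo_lock);
        __foo();
        __raw_spin_unlock(&demo_lock);
        local_irq_restore(flags);
}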
static void

        if (unlikely(disabled != 1))
                goto out;

        /* interrupts should be off from try_to_wake_up */
-       spin_lock(&wakeup_lock);
+       __raw_spin_lock(&wakeup_lock);

        /* check for races. */
        if (!tracer_enabled || p->prio >= wakeup_prio)
                goto out_locked;

        trace_function(tr, tr->data[wakeup_cpu],
                       CALLER_ADDR1, CALLER_ADDR2, flags);

out_locked:
-       spin_unlock(&wakeup_lock);
+       __raw_spin_unlock(&wakeup_lock);

out:
        atomic_dec(&tr->data[cpu]->disabled);
}
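Note the asymmetry in this last hunk: because this probe runs off try_to_wake_up(), which is entered with interrupts already disabled, there is no local_irq_save()/local_irq_restore() pair, and the in-code comment documents that assumption. Only the lock itself is open-coded. A sketch of that reduced form, again with a hypothetical lock name:

#include <linux/spinlock.h>

static raw_spinlock_t demo_lock =
        (raw_spinlock_t)__RAW_SPIN_LOCK_UNLOCKED;

/* Caller context guarantees irqs are off, so flags juggling would be
 * redundant; the raw lock alone suffices. */
static void demo_probe_irqs_off(void)
{
        __raw_spin_lock(&demo_lock);
        /* ... update wakeup bookkeeping ... */
        __raw_spin_unlock(&demo_lock);
}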