/*
 * Split spinlock implementation out into its own file, so it can be
 * compiled in a FTRACE-compatible way.
 */
#include <linux/kernel_stat.h>
#include <linux/spinlock.h>

#include <asm/paravirt.h>

#include <xen/interface/xen.h>
#include <xen/events.h>

#include "xen-ops.h"
struct xen_spinlock {
	unsigned char lock;		/* 0 -> free; 1 -> locked */
	unsigned short spinners;	/* count of waiting cpus */
};
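/*
 * The casts below rely on this struct overlaying the generic
 * raw_spinlock's lock word: a byte-sized "lock" field so a plain
 * xchgb can take the lock, plus a "spinners" count the unlocker
 * checks to see whether anyone is blocked in the slow path and
 * needs kicking.
 */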
static int xen_spin_is_locked(struct raw_spinlock *lock)
{
	struct xen_spinlock *xl = (struct xen_spinlock *)lock;

	return xl->lock != 0;
}
static int xen_spin_is_contended(struct raw_spinlock *lock)
{
	struct xen_spinlock *xl = (struct xen_spinlock *)lock;

	/* Not strictly true; this is only the count of contended
	   lock-takers entering the slow path. */
	return xl->spinners != 0;
}
static int xen_spin_trylock(struct raw_spinlock *lock)
{
	struct xen_spinlock *xl = (struct xen_spinlock *)lock;
	u8 old = 1;

	asm("xchgb %b0,%1"
	    : "+q" (old), "+m" (xl->lock) : : "memory");

	return old == 0;
}
static DEFINE_PER_CPU(int, lock_kicker_irq) = -1;
static DEFINE_PER_CPU(struct xen_spinlock *, lock_spinners);
static inline void spinning_lock(struct xen_spinlock *xl)
{
	__get_cpu_var(lock_spinners) = xl;
	wmb();			/* set lock of interest before count */
	asm(LOCK_PREFIX " incw %0"
	    : "+m" (xl->spinners) : : "memory");
}
static inline void unspinning_lock(struct xen_spinlock *xl)
{
	asm(LOCK_PREFIX " decw %0"
	    : "+m" (xl->spinners) : : "memory");
	wmb();			/* decrement count before clearing lock */
	__get_cpu_var(lock_spinners) = NULL;
}
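/*
 * These two helpers keep a pair of views consistent: xl->spinners,
 * which the unlocker reads to decide whether a kick is needed at all,
 * and the per-cpu lock_spinners pointer, which xen_spin_unlock_slow()
 * scans to find whom to kick.  The wmb()s order the updates so a cpu
 * is never counted in spinners while its lock_spinners entry is stale.
 */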
static noinline int xen_spin_lock_slow(struct raw_spinlock *lock)
{
	struct xen_spinlock *xl = (struct xen_spinlock *)lock;
	int irq = __get_cpu_var(lock_kicker_irq);
	int ret;

	/* If kicker interrupts not initialized yet, just spin */
	if (irq == -1)
		return 0;

	/* announce we're spinning */
	spinning_lock(xl);

	/* clear any stale pending kick */
	xen_clear_irq_pending(irq);

	/* check again make sure it didn't become free while
	   we weren't looking */
	ret = xen_spin_trylock(lock);
	if (ret)
		goto out;

	/* block until irq becomes pending */
	xen_poll_irq(irq);
	kstat_this_cpu.irqs[irq]++;

out:
	unspinning_lock(xl);
	return ret;
}
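/*
 * The ordering in the slow path is what avoids a lost wakeup: we
 * register as a spinner and clear any stale pending event *before*
 * the final trylock, so a kick sent after the lock was freed is
 * either made unnecessary by the trylock succeeding or left pending
 * for xen_poll_irq() to consume immediately.
 */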
static void xen_spin_lock(struct raw_spinlock *lock)
{
	struct xen_spinlock *xl = (struct xen_spinlock *)lock;
	int timeout;
	u8 oldval;

	do {
		timeout = 1 << 10;

		asm("1: xchgb %1,%0\n"
		    "   testb %1,%1\n"
		    "   jz 3f\n"
		    "2: rep;nop\n"
		    "   cmpb $0,%0\n"
		    "   je 1b\n"
		    "   dec %2\n"
		    "   jnz 2b\n"
		    "3:\n"
		    : "+m" (xl->lock), "=q" (oldval), "+r" (timeout)
		    : "1" (1)
		    : "memory");

	} while (unlikely(oldval != 0 && !xen_spin_lock_slow(lock)));
}
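/*
 * The asm above is approximately this C (a sketch, not a drop-in
 * replacement):
 *
 *	oldval = xchg(&xl->lock, 1);		// try to grab the lock
 *	while (oldval != 0 && --timeout) {
 *		cpu_relax();			// spin without the xchg
 *		if (xl->lock == 0)		// looks free: race for it
 *			oldval = xchg(&xl->lock, 1);
 *	}
 *
 * i.e. spin for up to 2^10 polite iterations, retrying the xchg only
 * when the lock looks free, before falling into the blocking slow path.
 */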
static noinline void xen_spin_unlock_slow(struct xen_spinlock *xl)
{
	int cpu;

	for_each_online_cpu(cpu) {
		/* XXX should mix up next cpu selection */
		if (per_cpu(lock_spinners, cpu) == xl) {
			xen_send_IPI_one(cpu, XEN_SPIN_UNLOCK_VECTOR);
			break;
		}
	}
}
static void xen_spin_unlock(struct raw_spinlock *lock)
{
	struct xen_spinlock *xl = (struct xen_spinlock *)lock;

	smp_wmb();		/* make sure no writes get moved after unlock */
	xl->lock = 0;		/* release lock */

	/* make sure unlock happens before kick */
	barrier();

	if (unlikely(xl->spinners))
		xen_spin_unlock_slow(xl);
}
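/*
 * Note that the unlock fast path is just a store plus a check: only
 * when spinners says someone is waiting in the slow path does the
 * event-channel kick in xen_spin_unlock_slow() happen, so an
 * uncontended unlock stays as cheap as a native one.
 */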
static irqreturn_t dummy_handler(int irq, void *dev_id)
{
	BUG();
	return IRQ_HANDLED;
}
void __cpuinit xen_init_lock_cpu(int cpu)
{
	int irq;
	const char *name;

	name = kasprintf(GFP_KERNEL, "spinlock%d", cpu);
	irq = bind_ipi_to_irqhandler(XEN_SPIN_UNLOCK_VECTOR,
				     cpu,
				     dummy_handler,
				     IRQF_DISABLED|IRQF_PERCPU|IRQF_NOBALANCING,
				     name,
				     NULL);

	if (irq >= 0) {
		disable_irq(irq); /* make sure it's never delivered */
		per_cpu(lock_kicker_irq, cpu) = irq;
	}

	printk("cpu %d spinlock event irq %d\n", cpu, irq);
}
void __init xen_init_spinlocks(void)
{
	pv_lock_ops.spin_is_locked = xen_spin_is_locked;
	pv_lock_ops.spin_is_contended = xen_spin_is_contended;
	pv_lock_ops.spin_lock = xen_spin_lock;
	pv_lock_ops.spin_trylock = xen_spin_trylock;
	pv_lock_ops.spin_unlock = xen_spin_unlock;
}
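/*
 * A sketch of the intended boot-time usage (the call sites are an
 * assumption, not part of this file): xen_init_spinlocks() runs once
 * while setting up pv_ops, and xen_init_lock_cpu(cpu) runs once per
 * cpu as it is brought up, so lock_kicker_irq is bound before that
 * cpu can ever enter the slow path:
 *
 *	xen_init_spinlocks();		// install pv_lock_ops hooks
 *	for_each_possible_cpu(cpu)	// e.g. from the smp bringup path
 *		xen_init_lock_cpu(cpu);
 */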