#ifndef _X86_SPINLOCK_H_
#define _X86_SPINLOCK_H_

#include <asm/atomic.h>
#include <asm/rwlock.h>
#include <asm/page.h>
#include <asm/processor.h>
#include <linux/compiler.h>

/*
 * Your basic SMP spinlocks, allowing only a single CPU anywhere
 *
 * Simple spin lock operations.  There are two variants: one clears IRQs
 * on the local processor, one does not.
 *
 * These are fair FIFO ticket locks, which are currently limited to 256
 * CPUs.
 *
 * (the type definitions are in asm/spinlock_types.h)
 */

#ifdef CONFIG_X86_32
typedef char _slock_t;
# define LOCK_INS_DEC "decb"
# define LOCK_INS_XCH "xchgb"
# define LOCK_INS_MOV "movb"
# define LOCK_INS_CMP "cmpb"
# define LOCK_PTR_REG "a"
#else
typedef int _slock_t;
# define LOCK_INS_DEC "decl"
# define LOCK_INS_XCH "xchgl"
# define LOCK_INS_MOV "movl"
# define LOCK_INS_CMP "cmpl"
# define LOCK_PTR_REG "D"
#endif

#if (NR_CPUS > 256)
#error spinlock supports a maximum of 256 CPUs
#endif

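/*
 * The lock word packs the ticket "head" (now-serving number) in its low
 * byte and the "tail" (next ticket to hand out) in its high byte; the
 * helpers below read the whole word and compare the two bytes.
 */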
static inline int __raw_spin_is_locked(raw_spinlock_t *lock)
{
	int tmp = *(volatile signed int *)(&(lock)->slock);

	return (((tmp >> 8) & 0xff) != (tmp & 0xff));
}

static inline int __raw_spin_is_contended(raw_spinlock_t *lock)
{
	int tmp = *(volatile signed int *)(&(lock)->slock);

	return (((tmp >> 8) & 0xff) - (tmp & 0xff)) > 1;
}

static inline void __raw_spin_lock(raw_spinlock_t *lock)
{
	short inc = 0x0100;

	/*
	 * Ticket locks are conceptually two bytes, one indicating the current
	 * head of the queue, and the other indicating the current tail. The
	 * lock is acquired by atomically noting the tail and incrementing it
	 * by one (thus adding ourselves to the queue and noting our position),
	 * then waiting until the head becomes equal to the initial value
	 * of the tail.
	 *
	 * This uses a 16-bit xadd to increment the tail and also load the
	 * position of the head, which takes care of memory ordering issues
	 * and should be optimal for the uncontended case. Note the tail must
	 * be in the high byte, otherwise the 16-bit wide increment of the low
	 * byte would carry up and contaminate the high byte.
	 */
	__asm__ __volatile__ (
		LOCK_PREFIX "xaddw %w0, %1\n"
		"1:\t"
		"cmpb %h0, %b0\n\t"
		"je 2f\n\t"
		"rep ; nop\n\t"
		"movb %1, %b0\n\t"
		/* don't need lfence here, because loads are in-order */
		"jmp 1b\n"
		"2:"
		:"+Q" (inc), "+m" (lock->slock)
		:
		:"memory", "cc");
}

#define __raw_spin_lock_flags(lock, flags) __raw_spin_lock(lock)

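/*
 * Trylock: read the whole 16-bit ticket word; if head != tail the lock
 * is held or queued for, so fail immediately.  Otherwise try to cmpxchg
 * in a copy with the tail bumped by one, and fold the result into the
 * 0/1 return value.
 */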
static inline int __raw_spin_trylock(raw_spinlock_t *lock)
{
	int tmp;
	short new;

	asm volatile(
		"movw %2,%w0\n\t"
		"cmpb %h0,%b0\n\t"
		"jne 1f\n\t"
		"movw %w0,%w1\n\t"
		"incb %h1\n\t"
		"lock ; cmpxchgw %w1,%2\n\t"
		"1:"
		"sete %b1\n\t"
		"movzbl %b1,%0\n\t"
		:"=&a" (tmp), "=Q" (new), "+m" (lock->slock)
		:
		: "memory", "cc");

	return tmp;
}

#if defined(CONFIG_X86_32) && \
	(defined(CONFIG_X86_OOSTORE) || defined(CONFIG_X86_PPRO_FENCE))
/*
 * On PPro SMP or if we are using OOSTORE, we use a locked operation to unlock
 * (PPro errata 66, 92)
 */
# define UNLOCK_LOCK_PREFIX LOCK_PREFIX
#else
# define UNLOCK_LOCK_PREFIX
#endif

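/*
 * Unlock only needs to advance the now-serving head byte by one; a plain
 * incb is sufficient on most CPUs, and UNLOCK_LOCK_PREFIX turns it into
 * a locked operation where the PPro/OOSTORE errata require one.
 */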
static inline void __raw_spin_unlock(raw_spinlock_t *lock)
{
	__asm__ __volatile__(
		UNLOCK_LOCK_PREFIX "incb %0"
		:"+m" (lock->slock)
		:
		:"memory", "cc");
}

static inline void __raw_spin_unlock_wait(raw_spinlock_t *lock)
{
	while (__raw_spin_is_locked(lock))
		cpu_relax();
}

/*
 * Read-write spinlocks, allowing multiple readers
 * but only one writer.
 *
 * NOTE! It is quite common to have readers in interrupts
 * but no interrupt writers. For those circumstances we
 * can "mix" irq-safe locks - any writer needs to get an
 * irq-safe write-lock, but readers can get non-irqsafe
 * read-locks.
 *
 * On x86, we implement read-write locks as a 32-bit counter
 * with the high bit (sign) being the "contended" bit.
 */

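/*
 * A worked example of the bias arithmetic (RW_LOCK_BIAS comes from
 * asm/rwlock.h; 0x01000000 here):
 *
 *   unlocked              lock == RW_LOCK_BIAS (0x01000000)
 *   one reader            lock == 0x00ffffff   (each reader subtracts 1)
 *   two readers           lock == 0x00fffffe
 *   writer holds it       lock == 0x00000000   (writer subtracts the bias)
 *   writer + waiters      lock <  0            (sign bit = "contended")
 */
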
/**
 * read_can_lock - would read_trylock() succeed?
 * @lock: the rwlock in question.
 */
static inline int __raw_read_can_lock(raw_rwlock_t *lock)
{
	return (int)(lock)->lock > 0;
}

/**
 * write_can_lock - would write_trylock() succeed?
 * @lock: the rwlock in question.
 */
static inline int __raw_write_can_lock(raw_rwlock_t *lock)
{
	return (lock)->lock == RW_LOCK_BIAS;
}

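/*
 * The locked subl in the two lock routines below is the fast path; when
 * it leaves the counter negative (read) or non-zero (write), the
 * out-of-line helpers __read_lock_failed/__write_lock_failed back out,
 * spin, and retry.  LOCK_PTR_REG pins the rwlock pointer in the register
 * those helpers expect.
 */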
static inline void __raw_read_lock(raw_rwlock_t *rw)
{
	asm volatile(LOCK_PREFIX " subl $1,(%0)\n\t"
		     "jns 1f\n"
		     "call __read_lock_failed\n\t"
		     "1:\n"
		     ::LOCK_PTR_REG (rw) : "memory");
}

static inline void __raw_write_lock(raw_rwlock_t *rw)
{
	asm volatile(LOCK_PREFIX " subl %1,(%0)\n\t"
		     "jz 1f\n"
		     "call __write_lock_failed\n\t"
		     "1:\n"
		     ::LOCK_PTR_REG (rw), "i" (RW_LOCK_BIAS) : "memory");
}

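/*
 * The trylock variants poke the same counter through atomic_t: subtract
 * speculatively, then back the subtraction out if the lock turned out to
 * be unavailable, so a failed attempt leaves the count as it found it.
 */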
static inline int __raw_read_trylock(raw_rwlock_t *lock)
{
	atomic_t *count = (atomic_t *)lock;

	atomic_dec(count);
	if (atomic_read(count) >= 0)
		return 1;
	atomic_inc(count);
	return 0;
}

static inline int __raw_write_trylock(raw_rwlock_t *lock)
{
	atomic_t *count = (atomic_t *)lock;

	if (atomic_sub_and_test(RW_LOCK_BIAS, count))
		return 1;
	atomic_add(RW_LOCK_BIAS, count);
	return 0;
}

static inline void __raw_read_unlock(raw_rwlock_t *rw)
{
	asm volatile(LOCK_PREFIX "incl %0" :"+m" (rw->lock) : : "memory");
}

static inline void __raw_write_unlock(raw_rwlock_t *rw)
{
	asm volatile(LOCK_PREFIX "addl %1, %0"
		     : "+m" (rw->lock) : "i" (RW_LOCK_BIAS) : "memory");
}

#define _raw_spin_relax(lock)	cpu_relax()
#define _raw_read_relax(lock)	cpu_relax()
#define _raw_write_relax(lock)	cpu_relax()

#endif /* _X86_SPINLOCK_H_ */