diff --git a/include/asm-x86/spinlock.h b/include/asm-x86/spinlock.h
index 23804c1890f..21e89bf92f1 100644
--- a/include/asm-x86/spinlock.h
+++ b/include/asm-x86/spinlock.h
@@ -20,18 +20,8 @@
  */
 
 #ifdef CONFIG_X86_32
-typedef char _slock_t;
-# define LOCK_INS_DEC "decb"
-# define LOCK_INS_XCH "xchgb"
-# define LOCK_INS_MOV "movb"
-# define LOCK_INS_CMP "cmpb"
 # define LOCK_PTR_REG "a"
 #else
-typedef int _slock_t;
-# define LOCK_INS_DEC "decl"
-# define LOCK_INS_XCH "xchgl"
-# define LOCK_INS_MOV "movl"
-# define LOCK_INS_CMP "cmpl"
 # define LOCK_PTR_REG "D"
 #endif
 
@@ -66,23 +56,23 @@ typedef int _slock_t;
 #if (NR_CPUS < 256)
 static inline int __raw_spin_is_locked(raw_spinlock_t *lock)
 {
-	int tmp = *(volatile signed int *)(&(lock)->slock);
+	int tmp = ACCESS_ONCE(lock->slock);
 
 	return (((tmp >> 8) & 0xff) != (tmp & 0xff));
 }
 
 static inline int __raw_spin_is_contended(raw_spinlock_t *lock)
 {
-	int tmp = *(volatile signed int *)(&(lock)->slock);
+	int tmp = ACCESS_ONCE(lock->slock);
 
 	return (((tmp >> 8) & 0xff) - (tmp & 0xff)) > 1;
 }
 
-static inline void __raw_spin_lock(raw_spinlock_t *lock)
+static __always_inline void __raw_spin_lock(raw_spinlock_t *lock)
 {
 	short inc = 0x0100;
 
-	__asm__ __volatile__ (
+	asm volatile (
 		LOCK_PREFIX "xaddw %w0, %1\n"
 		"1:\t"
 		"cmpb %h0, %b0\n\t"
@@ -92,112 +82,107 @@ static inline void __raw_spin_lock(raw_spinlock_t *lock)
 		/* don't need lfence here, because loads are in-order */
 		"jmp 1b\n"
 		"2:"
-		:"+Q" (inc), "+m" (lock->slock)
+		: "+Q" (inc), "+m" (lock->slock)
 		:
-		:"memory", "cc");
+		: "memory", "cc");
 }
 
 #define __raw_spin_lock_flags(lock, flags) __raw_spin_lock(lock)
 
-static inline int __raw_spin_trylock(raw_spinlock_t *lock)
+static __always_inline int __raw_spin_trylock(raw_spinlock_t *lock)
 {
 	int tmp;
 	short new;
 
-	asm volatile(
-		"movw %2,%w0\n\t"
-		"cmpb %h0,%b0\n\t"
-		"jne 1f\n\t"
-		"movw %w0,%w1\n\t"
-		"incb %h1\n\t"
-		"lock ; cmpxchgw %w1,%2\n\t"
-		"1:"
-		"sete %b1\n\t"
-		"movzbl %b1,%0\n\t"
-		:"=&a" (tmp), "=Q" (new), "+m" (lock->slock)
-		:
-		: "memory", "cc");
+	asm volatile("movw %2,%w0\n\t"
+		     "cmpb %h0,%b0\n\t"
+		     "jne 1f\n\t"
+		     "movw %w0,%w1\n\t"
+		     "incb %h1\n\t"
+		     "lock ; cmpxchgw %w1,%2\n\t"
+		     "1:"
+		     "sete %b1\n\t"
+		     "movzbl %b1,%0\n\t"
+		     : "=&a" (tmp), "=Q" (new), "+m" (lock->slock)
+		     :
+		     : "memory", "cc");
 
 	return tmp;
 }
 
-static inline void __raw_spin_unlock(raw_spinlock_t *lock)
+static __always_inline void __raw_spin_unlock(raw_spinlock_t *lock)
 {
-	__asm__ __volatile__(
-		UNLOCK_LOCK_PREFIX "incb %0"
-		:"+m" (lock->slock)
-		:
-		:"memory", "cc");
+	asm volatile(UNLOCK_LOCK_PREFIX "incb %0"
+		     : "+m" (lock->slock)
+		     :
+		     : "memory", "cc");
 }
 #else
 static inline int __raw_spin_is_locked(raw_spinlock_t *lock)
 {
-	int tmp = *(volatile signed int *)(&(lock)->slock);
+	int tmp = ACCESS_ONCE(lock->slock);
 
 	return (((tmp >> 16) & 0xffff) != (tmp & 0xffff));
 }
 
 static inline int __raw_spin_is_contended(raw_spinlock_t *lock)
 {
-	int tmp = *(volatile signed int *)(&(lock)->slock);
+	int tmp = ACCESS_ONCE(lock->slock);
 
 	return (((tmp >> 16) & 0xffff) - (tmp & 0xffff)) > 1;
 }
 
-static inline void __raw_spin_lock(raw_spinlock_t *lock)
+static __always_inline void __raw_spin_lock(raw_spinlock_t *lock)
 {
 	int inc = 0x00010000;
 	int tmp;
 
-	__asm__ __volatile__ (
-		"lock ; xaddl %0, %1\n"
-		"movzwl %w0, %2\n\t"
-		"shrl $16, %0\n\t"
-		"1:\t"
-		"cmpl %0, %2\n\t"
-		"je 2f\n\t"
-		"rep ; nop\n\t"
-		"movzwl %1, %2\n\t"
-		/* don't need lfence here, because loads are in-order */
-		"jmp 1b\n"
-		"2:"
-		:"+Q" (inc), "+m" (lock->slock), "=r" (tmp)
-		:
-		:"memory", "cc");
+	asm volatile("lock ; xaddl %0, %1\n"
+		     "movzwl %w0, %2\n\t"
+		     "shrl $16, %0\n\t"
+		     "1:\t"
+		     "cmpl %0, %2\n\t"
+		     "je 2f\n\t"
+		     "rep ; nop\n\t"
+		     "movzwl %1, %2\n\t"
+		     /* don't need lfence here, because loads are in-order */
+		     "jmp 1b\n"
+		     "2:"
+		     : "+Q" (inc), "+m" (lock->slock), "=r" (tmp)
+		     :
+		     : "memory", "cc");
 }
 
 #define __raw_spin_lock_flags(lock, flags) __raw_spin_lock(lock)
 
-static inline int __raw_spin_trylock(raw_spinlock_t *lock)
+static __always_inline int __raw_spin_trylock(raw_spinlock_t *lock)
 {
 	int tmp;
 	int new;
 
-	asm volatile(
-		"movl %2,%0\n\t"
-		"movl %0,%1\n\t"
-		"roll $16, %0\n\t"
-		"cmpl %0,%1\n\t"
-		"jne 1f\n\t"
-		"addl $0x00010000, %1\n\t"
-		"lock ; cmpxchgl %1,%2\n\t"
-		"1:"
-		"sete %b1\n\t"
-		"movzbl %b1,%0\n\t"
-		:"=&a" (tmp), "=r" (new), "+m" (lock->slock)
-		:
-		: "memory", "cc");
+	asm volatile("movl %2,%0\n\t"
+		     "movl %0,%1\n\t"
+		     "roll $16, %0\n\t"
+		     "cmpl %0,%1\n\t"
+		     "jne 1f\n\t"
+		     "addl $0x00010000, %1\n\t"
+		     "lock ; cmpxchgl %1,%2\n\t"
+		     "1:"
+		     "sete %b1\n\t"
+		     "movzbl %b1,%0\n\t"
+		     : "=&a" (tmp), "=r" (new), "+m" (lock->slock)
+		     :
+		     : "memory", "cc");
 
 	return tmp;
 }
 
-static inline void __raw_spin_unlock(raw_spinlock_t *lock)
+static __always_inline void __raw_spin_unlock(raw_spinlock_t *lock)
 {
-	__asm__ __volatile__(
-		UNLOCK_LOCK_PREFIX "incw %0"
-		:"+m" (lock->slock)
-		:
-		:"memory", "cc");
+	asm volatile(UNLOCK_LOCK_PREFIX "incw %0"
+		     : "+m" (lock->slock)
+		     :
+		     : "memory", "cc");
 }
 
 #endif
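
For readers following the patch, below is a minimal userspace sketch of the byte-granular ticket lock that the NR_CPUS < 256 half of this file implements with "lock ; xaddw" and "cmpb": the low byte of the lock word is the owner (currently served) ticket, the high byte is the next ticket to hand out. Everything in the sketch is illustrative, not kernel code: the ticket_lock_acquire/ticket_lock_release names and the mapping onto GCC __atomic builtins in place of the kernel's inline asm are assumptions made for this example.

/*
 * Illustrative sketch only -- not the kernel implementation.
 * Models the NR_CPUS < 256 ticket lock above: low byte = owner,
 * high byte = next ticket.  x86-specific (little-endian, pause).
 */
#include <stdint.h>

struct ticket_lock {
	uint16_t slock;
};

void ticket_lock_acquire(struct ticket_lock *lock)
{
	/* Take a ticket: add 0x0100 so only the "next" byte advances,
	 * which is what "lock ; xaddw" with inc = 0x0100 does above. */
	uint16_t old = __atomic_fetch_add(&lock->slock, 0x0100,
					  __ATOMIC_ACQUIRE);
	uint8_t my_ticket = old >> 8;

	/* Spin until the owner byte reaches our ticket ("cmpb %h0, %b0"). */
	while ((uint8_t)__atomic_load_n(&lock->slock, __ATOMIC_ACQUIRE)
	       != my_ticket)
		__builtin_ia32_pause();		/* "rep ; nop" */
}

void ticket_lock_release(struct ticket_lock *lock)
{
	/* Pass ownership to the next waiter by bumping only the owner
	 * byte, like "incb %0" in __raw_spin_unlock(); using a byte
	 * pointer avoids a carry into the "next" byte when the owner
	 * wraps 0xff -> 0x00 (little-endian: low byte comes first). */
	uint8_t *owner = (uint8_t *)&lock->slock;
	__atomic_fetch_add(owner, 1, __ATOMIC_RELEASE);
}

The same structure explains two details of the patch: __raw_spin_is_locked() and __raw_spin_is_contended() only need to compare the two halves of a single ACCESS_ONCE() snapshot of the lock word, and unlock can normally be a plain incb/incw (UNLOCK_LOCK_PREFIX is empty except on CPUs needing a workaround) because x86 stores already have release semantics and only the lock holder writes the owner byte.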