diff --git a/include/asm-arm/spinlock.h b/include/asm-arm/spinlock.h
index 1f906d09b68..406ca97a8ab 100644
--- a/include/asm-arm/spinlock.h
+++ b/include/asm-arm/spinlock.h
@@ -16,27 +16,23 @@
  * Unlocked value: 0
  * Locked value: 1
  */
-typedef struct {
-	volatile unsigned int lock;
-#ifdef CONFIG_PREEMPT
-	unsigned int break_lock;
-#endif
-} spinlock_t;
-#define SPIN_LOCK_UNLOCKED (spinlock_t) { 0 }
+#define __raw_spin_is_locked(x)	((x)->lock != 0)
+#define __raw_spin_unlock_wait(lock) \
+	do { while (__raw_spin_is_locked(lock)) cpu_relax(); } while (0)
 
-#define spin_lock_init(x)	do { *(x) = SPIN_LOCK_UNLOCKED; } while (0)
-#define spin_is_locked(x)	((x)->lock != 0)
-#define spin_unlock_wait(x)	do { barrier(); } while (spin_is_locked(x))
-#define _raw_spin_lock_flags(lock, flags) _raw_spin_lock(lock)
+#define __raw_spin_lock_flags(lock, flags) __raw_spin_lock(lock)
 
-static inline void _raw_spin_lock(spinlock_t *lock)
+static inline void __raw_spin_lock(raw_spinlock_t *lock)
 {
 	unsigned long tmp;
 
 	__asm__ __volatile__(
 "1:	ldrex	%0, [%1]\n"
 "	teq	%0, #0\n"
+#ifdef CONFIG_CPU_32v6K
+"	wfene\n"
+#endif
 "	strexeq	%0, %2, [%1]\n"
 "	teqeq	%0, #0\n"
 "	bne	1b"
@@ -47,7 +43,7 @@ static inline void _raw_spin_lock(spinlock_t *lock)
 	smp_mb();
 }
 
-static inline int _raw_spin_trylock(spinlock_t *lock)
+static inline int __raw_spin_trylock(raw_spinlock_t *lock)
 {
 	unsigned long tmp;
 
@@ -67,12 +63,16 @@ static inline int _raw_spin_trylock(spinlock_t *lock)
 	}
 }
 
-static inline void _raw_spin_unlock(spinlock_t *lock)
+static inline void __raw_spin_unlock(raw_spinlock_t *lock)
 {
 	smp_mb();
 
 	__asm__ __volatile__(
-"	str	%1, [%0]"
+"	str	%1, [%0]\n"
+#ifdef CONFIG_CPU_32v6K
+"	mcr	p15, 0, %1, c7, c10, 4\n" /* DSB */
+"	sev"
+#endif
 	:
 	: "r" (&lock->lock), "r" (0)
 	: "cc");
@@ -80,29 +80,23 @@ static inline void _raw_spin_unlock(spinlock_t *lock)
 
 /*
  * RWLOCKS
- */
-typedef struct {
-	volatile unsigned int lock;
-#ifdef CONFIG_PREEMPT
-	unsigned int break_lock;
-#endif
-} rwlock_t;
-
-#define RW_LOCK_UNLOCKED (rwlock_t) { 0 }
-#define rwlock_init(x)	do { *(x) = RW_LOCK_UNLOCKED; } while (0)
-#define rwlock_is_locked(x)	(*((volatile unsigned int *)(x)) != 0)
-
-/*
+ *
+ *
  * Write locks are easy - we just set bit 31.  When unlocking, we can
  * just write zero since the lock is exclusively held.
  */
-static inline void _raw_write_lock(rwlock_t *rw)
+#define rwlock_is_locked(x)	(*((volatile unsigned int *)(x)) != 0)
+
+static inline void __raw_write_lock(raw_rwlock_t *rw)
 {
 	unsigned long tmp;
 
 	__asm__ __volatile__(
 "1:	ldrex	%0, [%1]\n"
 "	teq	%0, #0\n"
+#ifdef CONFIG_CPU_32v6K
+"	wfene\n"
+#endif
 "	strexeq	%0, %2, [%1]\n"
 "	teq	%0, #0\n"
 "	bne	1b"
@@ -113,7 +107,7 @@ static inline void _raw_write_lock(rwlock_t *rw)
 	smp_mb();
 }
 
-static inline int _raw_write_trylock(rwlock_t *rw)
+static inline int __raw_write_trylock(raw_rwlock_t *rw)
 {
 	unsigned long tmp;
 
@@ -133,17 +127,24 @@ static inline int _raw_write_trylock(rwlock_t *rw)
 	}
 }
 
-static inline void _raw_write_unlock(rwlock_t *rw)
+static inline void __raw_write_unlock(raw_rwlock_t *rw)
 {
 	smp_mb();
 
 	__asm__ __volatile__(
-	"str	%1, [%0]"
+	"str	%1, [%0]\n"
+#ifdef CONFIG_CPU_32v6K
+"	mcr	p15, 0, %1, c7, c10, 4\n" /* DSB */
+"	sev\n"
+#endif
 	:
 	: "r" (&rw->lock), "r" (0)
 	: "cc");
 }
 
+/* write_can_lock - would write_trylock() succeed? */
+#define __raw_write_can_lock(x)	((x)->lock == 0x80000000)
+
 /*
  * Read locks are a bit more hairy:
  * - Exclusively load the lock value.
@@ -156,7 +157,7 @@ static inline void _raw_write_unlock(rwlock_t *rw)
  * currently active.  However, we know we won't have any write
  * locks.
  */
-static inline void _raw_read_lock(rwlock_t *rw)
+static inline void __raw_read_lock(raw_rwlock_t *rw)
 {
 	unsigned long tmp, tmp2;
 
@@ -164,6 +165,9 @@ static inline void _raw_read_lock(rwlock_t *rw)
 "1:	ldrex	%0, [%2]\n"
 "	adds	%0, %0, #1\n"
 "	strexpl	%1, %0, [%2]\n"
+#ifdef CONFIG_CPU_32v6K
+"	wfemi\n"
+#endif
 "	rsbpls	%0, %1, #0\n"
 "	bmi	1b"
 	: "=&r" (tmp), "=&r" (tmp2)
@@ -173,7 +177,7 @@ static inline void _raw_read_lock(rwlock_t *rw)
 	smp_mb();
 }
 
-static inline void _raw_read_unlock(rwlock_t *rw)
+static inline void __raw_read_unlock(raw_rwlock_t *rw)
 {
 	unsigned long tmp, tmp2;
 
@@ -185,11 +189,19 @@ static inline void _raw_read_unlock(rwlock_t *rw)
 "	strex	%1, %0, [%2]\n"
 "	teq	%1, #0\n"
 "	bne	1b"
+#ifdef CONFIG_CPU_32v6K
+"\n	cmp	%0, #0\n"
+"	mcreq	p15, 0, %0, c7, c10, 4\n"
+"	seveq"
+#endif
 	: "=&r" (tmp), "=&r" (tmp2)
 	: "r" (&rw->lock)
 	: "cc");
 }
 
-#define _raw_read_trylock(lock) generic_raw_read_trylock(lock)
+#define __raw_read_trylock(lock) generic__raw_read_trylock(lock)
+
+/* read_can_lock - would read_trylock() succeed? */
+#define __raw_read_can_lock(x)	((x)->lock < 0x80000000)
 
 #endif /* __ASM_SPINLOCK_H */
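
The core idea of this patch is to replace pure busy-waiting with the ARMv6K event mechanism: a contended locker executes WFE (wait for event) and sleeps until the unlocking CPU issues a DSB followed by SEV (send event). Below is a minimal standalone sketch of that idiom, distilled from the lock/unlock hunks above; it is not the kernel's code. It assumes an ARMv6K-or-later core and GCC-style inline assembly, and the demo_* names are invented for illustration.

/*
 * Minimal sketch of the WFE/SEV spin-lock idiom introduced by this
 * patch. Assumes ARMv6K+ and GCC inline asm; the demo_* names are
 * illustrative, not kernel API.
 */
typedef struct {
	volatile unsigned int lock;	/* 0 = unlocked, 1 = locked */
} demo_spinlock_t;

static inline void demo_spin_lock(demo_spinlock_t *l)
{
	unsigned long tmp;

	__asm__ __volatile__(
"1:	ldrex	%0, [%1]\n"		/* exclusively load lock value       */
"	teq	%0, #0\n"		/* non-zero: someone holds it        */
"	wfene\n"			/* held: sleep until an event wakes us */
"	strexeq	%0, %2, [%1]\n"		/* free: try to store 1              */
"	teqeq	%0, #0\n"		/* did the store-exclusive succeed?  */
"	bne	1b"			/* contended or raced: retry         */
	: "=&r" (tmp)
	: "r" (&l->lock), "r" (1)
	: "cc");
}

static inline void demo_spin_unlock(demo_spinlock_t *l)
{
	__asm__ __volatile__(
"	str	%1, [%0]\n"			/* release the lock         */
"	mcr	p15, 0, %1, c7, c10, 4\n"	/* DSB: order the store...  */
"	sev"					/* ...then wake WFE waiters */
	:
	: "r" (&l->lock), "r" (0)
	: "cc");
}

The kernel versions above additionally issue smp_mb() on the acquire and release paths and guard the WFE/SEV instructions with CONFIG_CPU_32v6K, so pre-v6K CPUs fall back to plain ldrex/strex spinning.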