diff --git a/include/asm-arm/spinlock.h b/include/asm-arm/spinlock.h
index cb4906b4555..406ca97a8ab 100644
--- a/include/asm-arm/spinlock.h
+++ b/include/asm-arm/spinlock.h
@@ -30,6 +30,9 @@ static inline void __raw_spin_lock(raw_spinlock_t *lock)
 	__asm__ __volatile__(
 "1:	ldrex	%0, [%1]\n"
 "	teq	%0, #0\n"
+#ifdef CONFIG_CPU_32v6K
+"	wfene\n"
+#endif
 "	strexeq	%0, %2, [%1]\n"
 "	teqeq	%0, #0\n"
 "	bne	1b"
@@ -65,7 +68,11 @@ static inline void __raw_spin_unlock(raw_spinlock_t *lock)
 	smp_mb();
 
 	__asm__ __volatile__(
-"	str	%1, [%0]"
+"	str	%1, [%0]\n"
+#ifdef CONFIG_CPU_32v6K
+"	mcr	p15, 0, %1, c7, c10, 4\n"	/* DSB */
+"	sev"
+#endif
 	:
 	: "r" (&lock->lock), "r" (0)
 	: "cc");
@@ -80,13 +87,16 @@ static inline void __raw_spin_unlock(raw_spinlock_t *lock)
  */
 #define rwlock_is_locked(x)	(*((volatile unsigned int *)(x)) != 0)
 
-static inline void __raw_write_lock(rwlock_t *rw)
+static inline void __raw_write_lock(raw_rwlock_t *rw)
 {
 	unsigned long tmp;
 
 	__asm__ __volatile__(
 "1:	ldrex	%0, [%1]\n"
 "	teq	%0, #0\n"
+#ifdef CONFIG_CPU_32v6K
+"	wfene\n"
+#endif
 "	strexeq	%0, %2, [%1]\n"
 "	teq	%0, #0\n"
 "	bne	1b"
@@ -97,7 +107,7 @@ static inline void __raw_write_lock(rwlock_t *rw)
 	smp_mb();
 }
 
-static inline int __raw_write_trylock(rwlock_t *rw)
+static inline int __raw_write_trylock(raw_rwlock_t *rw)
 {
 	unsigned long tmp;
 
@@ -122,12 +132,19 @@ static inline void __raw_write_unlock(raw_rwlock_t *rw)
 	smp_mb();
 
 	__asm__ __volatile__(
-	"str	%1, [%0]"
+	"str	%1, [%0]\n"
+#ifdef CONFIG_CPU_32v6K
+"	mcr	p15, 0, %1, c7, c10, 4\n"	/* DSB */
+"	sev\n"
+#endif
 	:
 	: "r" (&rw->lock), "r" (0)
 	: "cc");
 }
 
+/* write_can_lock - would write_trylock() succeed? */
+#define __raw_write_can_lock(x)	((x)->lock == 0x80000000)
+
 /*
  * Read locks are a bit more hairy:
  *  - Exclusively load the lock value.
@@ -148,6 +165,9 @@ static inline void __raw_read_lock(raw_rwlock_t *rw)
 "1:	ldrex	%0, [%2]\n"
 "	adds	%0, %0, #1\n"
 "	strexpl	%1, %0, [%2]\n"
+#ifdef CONFIG_CPU_32v6K
+"	wfemi\n"
+#endif
 "	rsbpls	%0, %1, #0\n"
 "	bmi	1b"
 	: "=&r" (tmp), "=&r" (tmp2)
@@ -157,7 +177,7 @@ static inline void __raw_read_lock(raw_rwlock_t *rw)
 	smp_mb();
 }
 
-static inline void __raw_read_unlock(rwlock_t *rw)
+static inline void __raw_read_unlock(raw_rwlock_t *rw)
 {
 	unsigned long tmp, tmp2;
 
@@ -169,6 +189,11 @@ static inline void __raw_read_unlock(rwlock_t *rw)
 "	strex	%1, %0, [%2]\n"
 "	teq	%1, #0\n"
 "	bne	1b"
+#ifdef CONFIG_CPU_32v6K
+"\n	cmp	%0, #0\n"
+"	mcreq	p15, 0, %0, c7, c10, 4\n"
+"	seveq"
+#endif
	: "=&r" (tmp), "=&r" (tmp2)
 	: "r" (&rw->lock)
 	: "cc");
@@ -176,4 +201,7 @@ static inline void __raw_read_unlock(rwlock_t *rw)
 
 #define __raw_read_trylock(lock) generic__raw_read_trylock(lock)
 
+/* read_can_lock - would read_trylock() succeed? */
+#define __raw_read_can_lock(x)	((x)->lock < 0x80000000)
+
 #endif /* __ASM_SPINLOCK_H */
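
The idiom this patch introduces: on ARMv6K CPUs (CONFIG_CPU_32v6K), a waiter that finds the lock held executes WFE ("wait for event") so the core sleeps instead of busy-spinning through the ldrex/strex loop, and the unlocking CPU drains its store with a DSB (encoded as mcr p15, 0, rN, c7, c10, 4 on ARMv6) before issuing SEV ("send event") to wake all waiters. The following standalone sketch restates the patched spin-lock pair outside the kernel; it is illustrative only -- the demo_* names are invented for this example, the kernel's smp_mb() barriers are omitted, and it assumes GCC inline asm targeting ARMv6K or later.

/*
 * Minimal sketch of the WFE/SEV spinlock idiom from the patch above.
 * Not kernel code: demo_spinlock_t, demo_spin_lock and demo_spin_unlock
 * are hypothetical names.  Build for ARMv6K+, e.g. -march=armv6k.
 */
typedef struct {
	volatile unsigned int lock;	/* 0 = free, 1 = held */
} demo_spinlock_t;

static inline void demo_spin_lock(demo_spinlock_t *l)
{
	unsigned long tmp;

	__asm__ __volatile__(
"1:	ldrex	%0, [%1]\n"		/* exclusively load lock value */
"	teq	%0, #0\n"		/* is it free? */
"	wfene\n"			/* held: sleep until an event */
"	strexeq	%0, %2, [%1]\n"		/* free: try to store 1 */
"	teqeq	%0, #0\n"		/* strex yields 0 on success */
"	bne	1b"			/* woken or lost race: retry */
	: "=&r" (tmp)
	: "r" (&l->lock), "r" (1)
	: "cc");
}

static inline void demo_spin_unlock(demo_spinlock_t *l)
{
	__asm__ __volatile__(
"	str	%1, [%0]\n"		/* release: lock = 0 */
"	mcr	p15, 0, %1, c7, c10, 4\n"	/* DSB: publish the store */
"	sev"				/* wake all CPUs parked in WFE */
	:
	: "r" (&l->lock), "r" (0)
	: "cc");
}

Note that SEV wakes every core waiting in WFE, after which all waiters re-contend through the exclusive monitor and exactly one wins; WFE/SEV changes only the cost of waiting, not the locking protocol. The read/write lock hunks apply the same pattern, with wfene/wfemi predicated on the condition codes the surrounding loop already computes.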