X-Git-Url: http://pilppa.org/gitweb/?a=blobdiff_plain;f=include%2Fasm-x86%2Fsync_bitops.h;h=b47a1d0b8a834719866a713dadf65a28fd566a4c;hb=4b7227ca321ccf447cdc04538687c895db8b77f5;hp=cbce08a2d135e22b721bbfe7a643a3f01e3f321f;hpb=e3d18658d4f28e4783e1bb1c41e9134c9e5db0a9;p=linux-2.6-omap-h63xx.git

diff --git a/include/asm-x86/sync_bitops.h b/include/asm-x86/sync_bitops.h
index cbce08a2d13..b47a1d0b8a8 100644
--- a/include/asm-x86/sync_bitops.h
+++ b/include/asm-x86/sync_bitops.h
@@ -13,7 +13,7 @@
  * bit 0 is the LSB of addr; bit 32 is the LSB of (addr+1).
  */
 
-#define ADDR (*(volatile long *) addr)
+#define ADDR (*(volatile long *)addr)
 
 /**
  * sync_set_bit - Atomically set a bit in memory
@@ -23,19 +23,15 @@
  * This function is atomic and may not be reordered. See __set_bit()
  * if you do not require the atomic guarantees.
  *
- * Note: there are no guarantees that this function will not be reordered
- * on non-x86 architectures, so if you are writing portable code,
- * make sure not to rely on its reordering guarantees.
- *
  * Note that @nr may be almost arbitrarily large; this function is not
  * restricted to acting on a single-word quantity.
  */
-static inline void sync_set_bit(int nr, volatile unsigned long * addr)
+static inline void sync_set_bit(int nr, volatile unsigned long *addr)
 {
-	__asm__ __volatile__("lock; btsl %1,%0"
-			     :"+m" (ADDR)
-			     :"Ir" (nr)
-			     : "memory");
+	asm volatile("lock; btsl %1,%0"
+		     : "+m" (ADDR)
+		     : "Ir" (nr)
+		     : "memory");
 }
 
 /**
@@ -48,12 +44,12 @@ static inline void sync_set_bit(int nr, volatile unsigned long * addr)
  * you should call smp_mb__before_clear_bit() and/or smp_mb__after_clear_bit()
  * in order to ensure changes are visible on other processors.
  */
-static inline void sync_clear_bit(int nr, volatile unsigned long * addr)
+static inline void sync_clear_bit(int nr, volatile unsigned long *addr)
 {
-	__asm__ __volatile__("lock; btrl %1,%0"
-			     :"+m" (ADDR)
-			     :"Ir" (nr)
-			     : "memory");
+	asm volatile("lock; btrl %1,%0"
+		     : "+m" (ADDR)
+		     : "Ir" (nr)
+		     : "memory");
 }
 
 /**
@@ -61,17 +57,16 @@ static inline void sync_clear_bit(int nr, volatile unsigned long * addr)
  * @nr: Bit to change
  * @addr: Address to start counting from
  *
- * change_bit() is atomic and may not be reordered. It may be
- * reordered on other architectures than x86.
+ * sync_change_bit() is atomic and may not be reordered.
  * Note that @nr may be almost arbitrarily large; this function is not
  * restricted to acting on a single-word quantity.
  */
-static inline void sync_change_bit(int nr, volatile unsigned long * addr)
+static inline void sync_change_bit(int nr, volatile unsigned long *addr)
 {
-	__asm__ __volatile__("lock; btcl %1,%0"
-			     :"+m" (ADDR)
-			     :"Ir" (nr)
-			     : "memory");
+	asm volatile("lock; btcl %1,%0"
+		     : "+m" (ADDR)
+		     : "Ir" (nr)
+		     : "memory");
 }
 
 /**
@@ -80,16 +75,15 @@ static inline void sync_change_bit(int nr, volatile unsigned long * addr)
  * @addr: Address to count from
  *
  * This operation is atomic and cannot be reordered.
- * It may be reordered on other architectures than x86.
  * It also implies a memory barrier.
  */
-static inline int sync_test_and_set_bit(int nr, volatile unsigned long * addr)
+static inline int sync_test_and_set_bit(int nr, volatile unsigned long *addr)
 {
 	int oldbit;
 
-	__asm__ __volatile__("lock; btsl %2,%1\n\tsbbl %0,%0"
-			     :"=r" (oldbit),"+m" (ADDR)
-			     :"Ir" (nr) : "memory");
+	asm volatile("lock; btsl %2,%1\n\tsbbl %0,%0"
+		     : "=r" (oldbit), "+m" (ADDR)
+		     : "Ir" (nr) : "memory");
 	return oldbit;
 }
 
@@ -99,16 +93,15 @@ static inline int sync_test_and_set_bit(int nr, volatile unsigned long * addr)
  * @addr: Address to count from
 *
  * This operation is atomic and cannot be reordered.
- * It can be reorderdered on other architectures other than x86.
  * It also implies a memory barrier.
  */
-static inline int sync_test_and_clear_bit(int nr, volatile unsigned long * addr)
+static inline int sync_test_and_clear_bit(int nr, volatile unsigned long *addr)
 {
 	int oldbit;
 
-	__asm__ __volatile__("lock; btrl %2,%1\n\tsbbl %0,%0"
-			     :"=r" (oldbit),"+m" (ADDR)
-			     :"Ir" (nr) : "memory");
+	asm volatile("lock; btrl %2,%1\n\tsbbl %0,%0"
+		     : "=r" (oldbit), "+m" (ADDR)
+		     : "Ir" (nr) : "memory");
 	return oldbit;
 }
 
@@ -120,36 +113,17 @@ static inline int sync_test_and_clear_bit(int nr, volatile unsigned long * addr)
  * This operation is atomic and cannot be reordered.
  * It also implies a memory barrier.
  */
-static inline int sync_test_and_change_bit(int nr, volatile unsigned long* addr)
-{
-	int oldbit;
-
-	__asm__ __volatile__("lock; btcl %2,%1\n\tsbbl %0,%0"
-			     :"=r" (oldbit),"+m" (ADDR)
-			     :"Ir" (nr) : "memory");
-	return oldbit;
-}
-
-static __always_inline int sync_constant_test_bit(int nr, const volatile unsigned long *addr)
-{
-	return ((1UL << (nr & 31)) &
-		(((const volatile unsigned int *)addr)[nr >> 5])) != 0;
-}
-
-static inline int sync_var_test_bit(int nr, const volatile unsigned long * addr)
+static inline int sync_test_and_change_bit(int nr, volatile unsigned long *addr)
 {
 	int oldbit;
 
-	__asm__ __volatile__("btl %2,%1\n\tsbbl %0,%0"
-			     :"=r" (oldbit)
-			     :"m" (ADDR),"Ir" (nr));
+	asm volatile("lock; btcl %2,%1\n\tsbbl %0,%0"
+		     : "=r" (oldbit), "+m" (ADDR)
+		     : "Ir" (nr) : "memory");
 	return oldbit;
 }
 
-#define sync_test_bit(nr,addr) \
-	(__builtin_constant_p(nr) ? \
-	 sync_constant_test_bit((nr),(addr)) : \
-	 sync_var_test_bit((nr),(addr)))
+#define sync_test_bit(nr, addr) test_bit(nr, addr)
 
 #undef ADDR
 
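
Usage note (not part of the patch): a minimal sketch of how the sync_* bitops above are typically called from x86 kernel code. The bitmap name pending_events and the bit index EVT_TIMER are hypothetical; only sync_set_bit() and sync_test_and_clear_bit() come from this header.

	#include <asm/sync_bitops.h>

	/* hypothetical bitmap shared with another CPU or agent */
	static unsigned long pending_events;
	#define EVT_TIMER	0	/* hypothetical bit index */

	static void post_timer_event(void)
	{
		/* lock-prefixed bts: atomic against concurrent updates from other CPUs */
		sync_set_bit(EVT_TIMER, &pending_events);
	}

	static int fetch_timer_event(void)
	{
		/* atomically clear the bit and report whether it was previously set */
		return sync_test_and_clear_bit(EVT_TIMER, &pending_events);
	}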