* Copyright 1992, Linus Torvalds.
*/
+#ifndef _LINUX_BITOPS_H
+#error only <linux/bitops.h> can be included directly
+#endif
+
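+/*
+ * Illustrative note (not part of the original change): users should write
+ * #include <linux/bitops.h>, which pulls in this header; a direct
+ * #include <asm/bitops.h> now fails at preprocessing time via the
+ * #error above.
+ */
+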
#include <asm/alternative.h>
#if __GNUC__ < 4 || (__GNUC__ == 4 && __GNUC_MINOR__ < 1)
* Note that @nr may be almost arbitrarily large; this function is not
* restricted to acting on a single-word quantity.
*/
-static __inline__ void set_bit(int nr, volatile void * addr)
+static inline void set_bit(int nr, volatile void *addr)
{
__asm__ __volatile__( LOCK_PREFIX
"btsl %1,%0"
* If it's called on the same region of memory simultaneously, the effect
* may be that only one operation succeeds.
*/
-static __inline__ void __set_bit(int nr, volatile void * addr)
+static inline void __set_bit(int nr, volatile void *addr)
{
__asm__ volatile(
"btsl %1,%0"
* you should call smp_mb__before_clear_bit() and/or smp_mb__after_clear_bit()
* in order to ensure changes are visible on other processors.
*/
-static __inline__ void clear_bit(int nr, volatile void * addr)
+static inline void clear_bit(int nr, volatile void *addr)
{
__asm__ __volatile__( LOCK_PREFIX
"btrl %1,%0"
:"dIr" (nr));
}
-static __inline__ void __clear_bit(int nr, volatile void * addr)
+/**
+ * clear_bit_unlock - Clears a bit in memory
+ * @nr: Bit to clear
+ * @addr: Address to start counting from
+ *
+ * clear_bit_unlock() is atomic and implies release semantics before the
+ * memory operation. It can be used for an unlock.
+ */
+static inline void clear_bit_unlock(unsigned long nr, volatile unsigned long *addr)
+{
+ barrier();
+ clear_bit(nr, addr);
+}
+
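+/*
+ * Release-side usage sketch (illustrative; the lock bit and flags word
+ * are hypothetical):
+ *
+ *	clear_bit_unlock(MY_LOCK_BIT, &my_flags);
+ *
+ * This pairs with test_and_set_bit_lock() below on the acquire side.
+ */
+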
+static inline void __clear_bit(int nr, volatile void *addr)
{
__asm__ __volatile__(
"btrl %1,%0"
:"dIr" (nr));
}
+/**
+ * __clear_bit_unlock - Clears a bit in memory
+ * @nr: Bit to clear
+ * @addr: Address to start counting from
+ *
+ * __clear_bit_unlock() is non-atomic and implies release semantics before
+ * the memory operation. It can be used for an unlock if no other CPUs can
+ * concurrently modify other bits in the word.
+ *
+ * No memory barrier is required here, because x86 cannot reorder stores past
+ * older loads. Same principle as spin_unlock.
+ */
+static inline void __clear_bit_unlock(unsigned long nr, volatile unsigned long *addr)
+{
+ barrier();
+ __clear_bit(nr, addr);
+}
+
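+/*
+ * Illustrative sketch (hypothetical names): the non-atomic variant is
+ * safe when every writer to the word takes the lock bit first, so the
+ * plain read-modify-write cannot lose a concurrent update:
+ *
+ *	__set_bit(MY_STATE_BIT, &my_flags);	... still holding the lock
+ *	__clear_bit_unlock(MY_LOCK_BIT, &my_flags);
+ */
+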
#define smp_mb__before_clear_bit() barrier()
#define smp_mb__after_clear_bit() barrier()
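+
+/*
+ * Typical pattern (sketch; names hypothetical, wake_up_bit() assumed from
+ * the generic wait-bit code):
+ *
+ *	clear_bit(MY_BUSY_BIT, &my_flags);
+ *	smp_mb__after_clear_bit();
+ *	wake_up_bit(&my_flags, MY_BUSY_BIT);
+ *
+ * The barrier keeps the clear ordered before the waiter wakeup check.
+ */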
* If it's called on the same region of memory simultaneously, the effect
* may be that only one operation succeeds.
*/
-static __inline__ void __change_bit(int nr, volatile void * addr)
+static inline void __change_bit(int nr, volatile void *addr)
{
__asm__ __volatile__(
"btcl %1,%0"
* Note that @nr may be almost arbitrarily large; this function is not
* restricted to acting on a single-word quantity.
*/
-static __inline__ void change_bit(int nr, volatile void * addr)
+static inline void change_bit(int nr, volatile void *addr)
{
__asm__ __volatile__( LOCK_PREFIX
"btcl %1,%0"
* This operation is atomic and cannot be reordered.
* It also implies a memory barrier.
*/
-static __inline__ int test_and_set_bit(int nr, volatile void * addr)
+static inline int test_and_set_bit(int nr, volatile void *addr)
{
int oldbit;
return oldbit;
}
+/**
+ * test_and_set_bit_lock - Set a bit and return its old value for lock
+ * @nr: Bit to set
+ * @addr: Address to count from
+ *
+ * This is the same as test_and_set_bit() on x86: the lock prefix already
+ * implies a full memory barrier, so acquire semantics need no extra code.
+ */
+static inline int test_and_set_bit_lock(int nr, volatile void *addr)
+{
+ return test_and_set_bit(nr, addr);
+}
+
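+/*
+ * Acquire-side sketch pairing with clear_bit_unlock() above (hypothetical
+ * names):
+ *
+ *	while (test_and_set_bit_lock(MY_LOCK_BIT, &my_flags))
+ *		cpu_relax();
+ *	... critical section ...
+ *	clear_bit_unlock(MY_LOCK_BIT, &my_flags);
+ */
+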
/**
* __test_and_set_bit - Set a bit and return its old value
* @nr: Bit to set
-* If two examples of this operation race, one can appear to succeed
+* If two instances of this operation race, one can appear to succeed
* but actually fail. You must protect multiple accesses with a lock.
*/
-static __inline__ int __test_and_set_bit(int nr, volatile void * addr)
+static inline int __test_and_set_bit(int nr, volatile void *addr)
{
int oldbit;
* This operation is atomic and cannot be reordered.
* It also implies a memory barrier.
*/
-static __inline__ int test_and_clear_bit(int nr, volatile void * addr)
+static inline int test_and_clear_bit(int nr, volatile void *addr)
{
int oldbit;
-* If two examples of this operation race, one can appear to succeed
+* If two instances of this operation race, one can appear to succeed
* but actually fail. You must protect multiple accesses with a lock.
*/
-static __inline__ int __test_and_clear_bit(int nr, volatile void * addr)
+static inline int __test_and_clear_bit(int nr, volatile void *addr)
{
int oldbit;
}
-/* WARNING: non atomic and it can be reordered! */
+/* WARNING: non-atomic and it can be reordered! */
-static __inline__ int __test_and_change_bit(int nr, volatile void * addr)
+static inline int __test_and_change_bit(int nr, volatile void *addr)
{
int oldbit;
* This operation is atomic and cannot be reordered.
* It also implies a memory barrier.
*/
-static __inline__ int test_and_change_bit(int nr, volatile void * addr)
+static inline int test_and_change_bit(int nr, volatile void *addr)
{
int oldbit;
* @nr: bit number to test
* @addr: Address to start counting from
*/
-static int test_bit(int nr, const volatile void * addr);
+static int test_bit(int nr, const volatile void *addr);
#endif
-static __inline__ int constant_test_bit(int nr, const volatile void * addr)
+static inline int constant_test_bit(int nr, const volatile void *addr)
{
return ((1UL << (nr & 31)) & (((const volatile unsigned int *) addr)[nr >> 5])) != 0;
}
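+
+/*
+ * Worked example (illustrative): for nr = 37, the bit lives in 32-bit
+ * word 37 >> 5 == 1, at position 37 & 31 == 5 within that word.
+ */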
-static __inline__ int variable_test_bit(int nr, volatile const void * addr)
+static inline int variable_test_bit(int nr, volatile const void *addr)
{
int oldbit;
#undef ADDR
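+
+/*
+ * For reference (sketch, not part of this change): the constant/variable
+ * split above is typically selected at compile time along these lines:
+ *
+ *	#define test_bit(nr, addr)			\
+ *		(__builtin_constant_p(nr) ?		\
+ *		 constant_test_bit((nr), (addr)) :	\
+ *		 variable_test_bit((nr), (addr)))
+ */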
-extern long find_first_zero_bit(const unsigned long * addr, unsigned long size);
-extern long find_next_zero_bit (const unsigned long * addr, long size, long offset);
-extern long find_first_bit(const unsigned long * addr, unsigned long size);
-extern long find_next_bit(const unsigned long * addr, long size, long offset);
+extern long find_first_zero_bit(const unsigned long *addr, unsigned long size);
+extern long find_next_zero_bit(const unsigned long *addr, long size, long offset);
+extern long find_first_bit(const unsigned long *addr, unsigned long size);
+extern long find_next_bit(const unsigned long *addr, long size, long offset);
-/* return index of first bet set in val or max when no bit is set */
+/* return index of first bit set in val or max when no bit is set */
-static inline unsigned long __scanbit(unsigned long val, unsigned long max)
+static inline long __scanbit(unsigned long val, unsigned long max)
{
asm("bsfq %1,%0 ; cmovz %2,%0" : "=&r" (val) : "r" (val), "r" (max));
return val;
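+
+/*
+ * Illustrative behaviour: __scanbit(0x8UL, 64) returns 3 (bsfq finds the
+ * lowest set bit), while __scanbit(0UL, 64) returns 64, because bsfq
+ * leaves ZF set for a zero input and cmovz then substitutes max.
+ */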
*
* Undefined if no zero exists, so code should check against ~0UL first.
*/
-static __inline__ unsigned long ffz(unsigned long word)
+static inline unsigned long ffz(unsigned long word)
{
__asm__("bsfq %1,%0"
:"=r" (word)
*
* Undefined if no bit exists, so code should check against 0 first.
*/
-static __inline__ unsigned long __ffs(unsigned long word)
+static inline unsigned long __ffs(unsigned long word)
{
__asm__("bsfq %1,%0"
:"=r" (word)
*
-* Undefined if no zero exists, so code should check against ~0UL first.
+* Undefined if no set bit exists, so code should check against 0 first.
*/
-static __inline__ unsigned long __fls(unsigned long word)
+static inline unsigned long __fls(unsigned long word)
{
__asm__("bsrq %1,%0"
:"=r" (word)
* the libc and compiler builtin ffs routines, therefore
* differs in spirit from the above ffz (man ffs).
*/
-static __inline__ int ffs(int x)
+static inline int ffs(int x)
{
int r;
*
* This is defined the same way as fls.
*/
-static __inline__ int fls64(__u64 x)
+static inline int fls64(__u64 x)
{
if (x == 0)
return 0;
*
* This is defined the same way as ffs.
*/
-static __inline__ int fls(int x)
+static inline int fls(int x)
{
int r;