bitops: introduce lock ops
author		Nick Piggin <npiggin@suse.de>
		Thu, 18 Oct 2007 10:06:39 +0000 (03:06 -0700)
committer	Linus Torvalds <torvalds@woody.linux-foundation.org>
		Thu, 18 Oct 2007 21:37:29 +0000 (14:37 -0700)
Introduce test_and_set_bit_lock / clear_bit_unlock bitops with lock semantics.
Convert all architectures to use the generic implementation.

Signed-off-by: Nick Piggin <npiggin@suse.de>
Acked-By: David Howells <dhowells@redhat.com>
Cc: Richard Henderson <rth@twiddle.net>
Cc: Ivan Kokshaysky <ink@jurassic.park.msu.ru>
Cc: Russell King <rmk@arm.linux.org.uk>
Cc: Haavard Skinnemoen <hskinnemoen@atmel.com>
Cc: Bryan Wu <bryan.wu@analog.com>
Cc: Mikael Starvik <starvik@axis.com>
Cc: David Howells <dhowells@redhat.com>
Cc: Yoshinori Sato <ysato@users.sourceforge.jp>
Cc: "Luck, Tony" <tony.luck@intel.com>
Cc: Hirokazu Takata <takata@linux-m32r.org>
Cc: Geert Uytterhoeven <geert@linux-m68k.org>
Cc: Roman Zippel <zippel@linux-m68k.org>
Cc: Greg Ungerer <gerg@uclinux.org>
Cc: Ralf Baechle <ralf@linux-mips.org>
Cc: Kyle McMartin <kyle@mcmartin.ca>
Cc: Matthew Wilcox <willy@debian.org>
Cc: Paul Mackerras <paulus@samba.org>
Cc: Benjamin Herrenschmidt <benh@kernel.crashing.org>
Cc: Heiko Carstens <heiko.carstens@de.ibm.com>
Cc: Martin Schwidefsky <schwidefsky@de.ibm.com>
Cc: Paul Mundt <lethal@linux-sh.org>
Cc: Kazumoto Kojima <kkojima@rr.iij4u.or.jp>
Cc: Richard Curnow <rc@rc0.org.uk>
Cc: William Lee Irwin III <wli@holomorphy.com>
Cc: "David S. Miller" <davem@davemloft.net>
Cc: Jeff Dike <jdike@addtoit.com>
Cc: Paolo 'Blaisorblade' Giarrusso <blaisorblade@yahoo.it>
Cc: Miles Bader <uclinux-v850@lsi.nec.co.jp>
Cc: Andi Kleen <ak@muc.de>
Cc: Chris Zankel <chris@zankel.net>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
27 files changed:
Documentation/atomic_ops.txt
Documentation/memory-barriers.txt
include/asm-alpha/bitops.h
include/asm-arm/bitops.h
include/asm-avr32/bitops.h
include/asm-blackfin/bitops.h
include/asm-cris/bitops.h
include/asm-frv/bitops.h
include/asm-generic/bitops.h
include/asm-generic/bitops/lock.h [new file with mode: 0644]
include/asm-h8300/bitops.h
include/asm-ia64/bitops.h
include/asm-m32r/bitops.h
include/asm-m68k/bitops.h
include/asm-m68knommu/bitops.h
include/asm-mips/bitops.h
include/asm-parisc/bitops.h
include/asm-powerpc/bitops.h
include/asm-s390/bitops.h
include/asm-sh/bitops.h
include/asm-sh64/bitops.h
include/asm-sparc/bitops.h
include/asm-sparc64/bitops.h
include/asm-v850/bitops.h
include/asm-x86/bitops_32.h
include/asm-x86/bitops_64.h
include/asm-xtensa/bitops.h

index d46306fea230cda4a21ee19da45337ab39898918..f20c10c2858fc7c60f3003a77c905bf108dfddd7 100644 (file)
@@ -418,6 +418,20 @@ brothers:
         */
         smp_mb__after_clear_bit();
 
+There are two special bitops with lock barrier semantics (acquire/release,
+same as spinlocks). These operate in the same way as their non-_lock/unlock
+postfixed variants, except that they are to provide acquire/release semantics,
+respectively. This means they can be used for bit_spin_trylock and
+bit_spin_unlock type operations without specifying any more barriers.
+
+       int test_and_set_bit_lock(unsigned long nr, unsigned long *addr);
+       void clear_bit_unlock(unsigned long nr, unsigned long *addr);
+       void __clear_bit_unlock(unsigned long nr, unsigned long *addr);
+
+The __clear_bit_unlock version is non-atomic, however it still implements
+unlock barrier semantics. This can be useful if the lock itself is protecting
+the other bits in the word.
+
 Finally, there are non-atomic versions of the bitmask operations
 provided.  They are used in contexts where some other higher-level SMP
 locking scheme is being used to protect the bitmask, and thus less
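
As a rough illustration of the acquire/release usage documented in the hunk above (a minimal sketch only; my_trylock, my_unlock and MY_LOCK_BIT are hypothetical names and not part of this patch), a bit-based trylock/unlock pair needs no additional barrier calls:

	#include <linux/bitops.h>

	#define MY_LOCK_BIT	0	/* hypothetical bit number used as the lock */

	/* Returns non-zero if the bit lock was taken. */
	static inline int my_trylock(unsigned long *word)
	{
		/* Acquire semantics: on success, loads and stores inside the
		 * critical section cannot be reordered before the set. */
		return !test_and_set_bit_lock(MY_LOCK_BIT, word);
	}

	static inline void my_unlock(unsigned long *word)
	{
		/* Release semantics: stores made while holding the lock are
		 * visible before the bit is observed clear. */
		clear_bit_unlock(MY_LOCK_BIT, word);
	}

No smp_mb__before_clear_bit() or other explicit barrier is needed around the unlock; that ordering is exactly what the _lock/_unlock variants provide.
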
index 650657c5473340dcd8f5ffe5f097c6776a754441..4e17beba237902908b33d38b764c285fec433724 100644 (file)
@@ -1479,7 +1479,8 @@ kernel.
 
 Any atomic operation that modifies some state in memory and returns information
 about the state (old or new) implies an SMP-conditional general memory barrier
-(smp_mb()) on each side of the actual operation.  These include:
+(smp_mb()) on each side of the actual operation (with the exception of
+explicit lock operations, described later).  These include:
 
        xchg();
        cmpxchg();
@@ -1536,10 +1537,19 @@ If they're used for constructing a lock of some description, then they probably
 do need memory barriers as a lock primitive generally has to do things in a
 specific order.
 
-
 Basically, each usage case has to be carefully considered as to whether memory
 barriers are needed or not.
 
+The following operations are special locking primitives:
+
+       test_and_set_bit_lock();
+       clear_bit_unlock();
+       __clear_bit_unlock();
+
+These implement LOCK-class and UNLOCK-class operations. These should be used in
+preference to other operations when implementing locking primitives, because
+their implementations can be optimised on many architectures.
+
 [!] Note that special memory barrier primitives are available for these
 situations because on some CPUs the atomic instructions used imply full memory
 barriers, and so barrier instructions are superfluous in conjunction with them,
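
Put differently, where an unlock previously had to spell out the release ordering by hand (an explicit barrier followed by a plain atomic clear), clear_bit_unlock() expresses the same thing and lets the architecture choose a cheaper sequence. A sketch, assuming LOCK_BIT names a hypothetical bit lock within *flags:

	#include <linux/bitops.h>

	#define LOCK_BIT	0	/* hypothetical lock bit */

	static void unlock_old_style(unsigned long *flags)
	{
		/* Release ordering written out explicitly. */
		smp_mb__before_clear_bit();
		clear_bit(LOCK_BIT, flags);
	}

	static void unlock_new_style(unsigned long *flags)
	{
		/* Same ordering guarantee; an architecture whose atomic
		 * instructions already imply it can avoid the full barrier. */
		clear_bit_unlock(LOCK_BIT, flags);
	}
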
index 9e71201000d58eb8943650bc14c938aca1850513..ca667d121898ff61806bc60d4abab4d277f742ba 100644 (file)
@@ -367,6 +367,7 @@ static inline unsigned int hweight8(unsigned int w)
 #else
 #include <asm-generic/bitops/hweight.h>
 #endif
+#include <asm-generic/bitops/lock.h>
 
 #endif /* __KERNEL__ */
 
index b41831b6432fb44f7967720b31e892e58005ed82..52fe05895debb77159a13f1eb68f05f907799aee 100644 (file)
@@ -286,6 +286,7 @@ static inline int constant_fls(int x)
 
 #include <asm-generic/bitops/sched.h>
 #include <asm-generic/bitops/hweight.h>
+#include <asm-generic/bitops/lock.h>
 
 /*
  * Ext2 is defined to use little-endian byte ordering.
index 5299f8c8e11dc71d99c068d66a2848fb1c73b7f3..f3faddfd46a8b924bd40ef7eb544797fc56bd229 100644 (file)
@@ -288,6 +288,7 @@ static inline int ffs(unsigned long word)
 #include <asm-generic/bitops/fls64.h>
 #include <asm-generic/bitops/sched.h>
 #include <asm-generic/bitops/hweight.h>
+#include <asm-generic/bitops/lock.h>
 
 #include <asm-generic/bitops/ext2-non-atomic.h>
 #include <asm-generic/bitops/ext2-atomic.h>
index 27c2d0e48e1b6b314e88f18b274dd15c41541017..03ecedc1f2a74089838768efc21ba495b62f2a67 100644 (file)
@@ -199,6 +199,7 @@ static __inline__ int __test_bit(int nr, const void *addr)
 
 #include <asm-generic/bitops/find.h>
 #include <asm-generic/bitops/hweight.h>
+#include <asm-generic/bitops/lock.h>
 
 #include <asm-generic/bitops/ext2-atomic.h>
 #include <asm-generic/bitops/ext2-non-atomic.h>
index a569065113d916671f7562c946a8c7690de60fd7..617151b9b72bb52cde9433c825ac0829b879ba7b 100644 (file)
@@ -154,6 +154,7 @@ static inline int test_and_change_bit(int nr, volatile unsigned long *addr)
 #include <asm-generic/bitops/fls64.h>
 #include <asm-generic/bitops/hweight.h>
 #include <asm-generic/bitops/find.h>
+#include <asm-generic/bitops/lock.h>
 
 #include <asm-generic/bitops/ext2-non-atomic.h>
 
index f8560edf59ff8f30859b78c7142f461803bee1ee..8dba74b1a2549e10a520ae9766151f4e613c77a6 100644 (file)
@@ -302,6 +302,7 @@ int __ilog2_u64(u64 n)
 
 #include <asm-generic/bitops/sched.h>
 #include <asm-generic/bitops/hweight.h>
+#include <asm-generic/bitops/lock.h>
 
 #include <asm-generic/bitops/ext2-non-atomic.h>
 
index 1f9d99193df8ef368c70d8abb18ddcf3a885f91a..e022a0f59e6bb8707268466922adcb849e9e284e 100644 (file)
@@ -22,6 +22,7 @@
 #include <asm-generic/bitops/sched.h>
 #include <asm-generic/bitops/ffs.h>
 #include <asm-generic/bitops/hweight.h>
+#include <asm-generic/bitops/lock.h>
 
 #include <asm-generic/bitops/ext2-non-atomic.h>
 #include <asm-generic/bitops/ext2-atomic.h>
diff --git a/include/asm-generic/bitops/lock.h b/include/asm-generic/bitops/lock.h
new file mode 100644 (file)
index 0000000..308a9e2
--- /dev/null
@@ -0,0 +1,45 @@
+#ifndef _ASM_GENERIC_BITOPS_LOCK_H_
+#define _ASM_GENERIC_BITOPS_LOCK_H_
+
+/**
+ * test_and_set_bit_lock - Set a bit and return its old value, for lock
+ * @nr: Bit to set
+ * @addr: Address to count from
+ *
+ * This operation is atomic and provides acquire barrier semantics.
+ * It can be used to implement bit locks.
+ */
+#define test_and_set_bit_lock(nr, addr)        test_and_set_bit(nr, addr)
+
+/**
+ * clear_bit_unlock - Clear a bit in memory, for unlock
+ * @nr: the bit to set
+ * @addr: the address to start counting from
+ *
+ * This operation is atomic and provides release barrier semantics.
+ */
+#define clear_bit_unlock(nr, addr)     \
+do {                                   \
+       smp_mb__before_clear_bit();     \
+       clear_bit(nr, addr);            \
+} while (0)
+
+/**
+ * __clear_bit_unlock - Clear a bit in memory, for unlock
+ * @nr: the bit to set
+ * @addr: the address to start counting from
+ *
+ * This operation is like clear_bit_unlock, however it is not atomic.
+ * It does provide release barrier semantics so it can be used to unlock
+ * a bit lock, however it would only be used if no other CPU can modify
+ * any bits in the memory until the lock is released (a good example is
+ * if the bit lock itself protects access to the other bits in the word).
+ */
+#define __clear_bit_unlock(nr, addr)   \
+do {                                   \
+       smp_mb();                       \
+       __clear_bit(nr, addr);          \
+} while (0)
+
+#endif /* _ASM_GENERIC_BITOPS_LOCK_H_ */
+
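
The generic fallbacks above simply build the lock variants out of the existing atomics, so any architecture can adopt them unchanged. The kernel-doc comment on __clear_bit_unlock describes the one situation in which the non-atomic form is safe; a sketch of that case (STATE_LOCK, STATE_DIRTY and mark_dirty_and_unlock are hypothetical, assuming bit 0 is a lock that protects every other bit in the same word):

	#include <linux/bitops.h>

	#define STATE_LOCK	0	/* bit lock protecting the whole word */
	#define STATE_DIRTY	1	/* only ever modified by the lock holder */

	static void mark_dirty_and_unlock(unsigned long *state)
	{
		/* A non-atomic RMW is fine here: no other CPU modifies any
		 * bit in *state while STATE_LOCK is held. */
		__set_bit(STATE_DIRTY, state);

		/* Non-atomic clear with release semantics: the DIRTY update
		 * is ordered before the lock bit is seen clear. */
		__clear_bit_unlock(STATE_LOCK, state);
	}
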
index d76299c98b817ad0c492a276f79262bb22fea8d8..e64ad315656dfea57dbe90bfb14eb775b25657a2 100644 (file)
@@ -194,6 +194,7 @@ static __inline__ unsigned long __ffs(unsigned long word)
 #include <asm-generic/bitops/find.h>
 #include <asm-generic/bitops/sched.h>
 #include <asm-generic/bitops/hweight.h>
+#include <asm-generic/bitops/lock.h>
 #include <asm-generic/bitops/ext2-non-atomic.h>
 #include <asm-generic/bitops/ext2-atomic.h>
 #include <asm-generic/bitops/minix.h>
index 6cc517e212a92ee94e461c2e1944f3fe9af0c627..569dd62fe1924f80568b88f3d6887f8e74b4060a 100644 (file)
@@ -371,6 +371,8 @@ hweight64 (unsigned long x)
 #define hweight16(x)   (unsigned int) hweight64((x) & 0xfffful)
 #define hweight8(x)    (unsigned int) hweight64((x) & 0xfful)
 
+#include <asm-generic/bitops/lock.h>
+
 #endif /* __KERNEL__ */
 
 #include <asm-generic/bitops/find.h>
index 66ab672162cdc42b8f2b5725d64508e07aea1fb6..313a02c4a88923cbd1003833835df45ee980993d 100644 (file)
@@ -255,6 +255,7 @@ static __inline__ int test_and_change_bit(int nr, volatile void * addr)
 #include <asm-generic/bitops/find.h>
 #include <asm-generic/bitops/ffs.h>
 #include <asm-generic/bitops/hweight.h>
+#include <asm-generic/bitops/lock.h>
 
 #endif /* __KERNEL__ */
 
index 1a61fdb56aaf671dc82e669d009d6fb6f3e64324..da151f70cdc6389a245590245863029129e12075 100644 (file)
@@ -314,6 +314,7 @@ static inline int fls(int x)
 #include <asm-generic/bitops/fls64.h>
 #include <asm-generic/bitops/sched.h>
 #include <asm-generic/bitops/hweight.h>
+#include <asm-generic/bitops/lock.h>
 
 /* Bitmap functions for the minix filesystem */
 
index 7d6075d9b5cb8998faa785d68a8392dd2115ab00..b8b2770d6870965de40edcf112b405aadd2f881e 100644 (file)
@@ -160,6 +160,7 @@ static __inline__ int __test_bit(int nr, const volatile unsigned long * addr)
 
 #include <asm-generic/bitops/find.h>
 #include <asm-generic/bitops/hweight.h>
+#include <asm-generic/bitops/lock.h>
 
 static __inline__ int ext2_set_bit(int nr, volatile void * addr)
 {
index 899357a72ac4e15faab61c77a89ae20e34e9dcc5..0d3373f649203f14cf51d1e249de35ef2261f223 100644 (file)
@@ -556,6 +556,7 @@ static inline int ffs(int word)
 
 #include <asm-generic/bitops/sched.h>
 #include <asm-generic/bitops/hweight.h>
+#include <asm-generic/bitops/lock.h>
 #include <asm-generic/bitops/ext2-non-atomic.h>
 #include <asm-generic/bitops/ext2-atomic.h>
 #include <asm-generic/bitops/minix.h>
index 015cb0d379bd991d42c719d32bd2111f9573b63f..03ae287baf89ccd6bd81d10692d14616f9e18ca5 100644 (file)
@@ -208,6 +208,7 @@ static __inline__ int fls(int x)
 
 #include <asm-generic/bitops/fls64.h>
 #include <asm-generic/bitops/hweight.h>
+#include <asm-generic/bitops/lock.h>
 #include <asm-generic/bitops/sched.h>
 
 #endif /* __KERNEL__ */
index 8144a2788db67bc101e47b20545d8fecf03ded35..1d4c16613d2faa06203495fa1ba918962d15131c 100644 (file)
@@ -266,6 +266,7 @@ static __inline__ int fls(unsigned int x)
 #include <asm-generic/bitops/fls64.h>
 
 #include <asm-generic/bitops/hweight.h>
+#include <asm-generic/bitops/lock.h>
 
 #define find_first_zero_bit(addr, size) find_next_zero_bit((addr), (size), 0)
 unsigned long find_next_zero_bit(const unsigned long *addr,
index f79c9b792af1d88d0d54ed7f5762bc232c96b51d..d756b34d25f309555d91631d1aead2a0a3e13578 100644 (file)
@@ -746,6 +746,7 @@ static inline int sched_find_first_bit(unsigned long *b)
 #include <asm-generic/bitops/fls64.h>
 
 #include <asm-generic/bitops/hweight.h>
+#include <asm-generic/bitops/lock.h>
 
 /*
  * ATTENTION: intel byte ordering convention for ext2 and minix !!
index 1c16792cee1dbaf239948ac8971dd2ad7290865e..9d7021723a25363c0828f4b3422ef9fc1b52d145 100644 (file)
@@ -137,6 +137,7 @@ static inline unsigned long __ffs(unsigned long word)
 #include <asm-generic/bitops/find.h>
 #include <asm-generic/bitops/ffs.h>
 #include <asm-generic/bitops/hweight.h>
+#include <asm-generic/bitops/lock.h>
 #include <asm-generic/bitops/sched.h>
 #include <asm-generic/bitops/ext2-non-atomic.h>
 #include <asm-generic/bitops/ext2-atomic.h>
index f3bdcdb5d0463089205783e876db06f73e096409..444d5ea92ce9452c8f70abc46539330c8c9f2fd1 100644 (file)
@@ -136,6 +136,7 @@ static __inline__ unsigned long ffz(unsigned long word)
 #include <asm-generic/bitops/__ffs.h>
 #include <asm-generic/bitops/find.h>
 #include <asm-generic/bitops/hweight.h>
+#include <asm-generic/bitops/lock.h>
 #include <asm-generic/bitops/sched.h>
 #include <asm-generic/bitops/ffs.h>
 #include <asm-generic/bitops/ext2-non-atomic.h>
index 329e696e77516c8511ec0a04972aeefafabf16d3..00bd0a679d702f0a07737cd66f766442cb70f09d 100644 (file)
@@ -96,6 +96,7 @@ static inline void change_bit(unsigned long nr, volatile unsigned long *addr)
 #include <asm-generic/bitops/fls.h>
 #include <asm-generic/bitops/fls64.h>
 #include <asm-generic/bitops/hweight.h>
+#include <asm-generic/bitops/lock.h>
 #include <asm-generic/bitops/find.h>
 #include <asm-generic/bitops/ext2-non-atomic.h>
 #include <asm-generic/bitops/ext2-atomic.h>
index 3d5e1af84723bade56ba80e46e98944d14d0e4d7..dd4bfe993b619160ba8e7e9629cacc6342a8930b 100644 (file)
@@ -81,6 +81,7 @@ static inline unsigned int hweight8(unsigned int w)
 #include <asm-generic/bitops/hweight.h>
 
 #endif
+#include <asm-generic/bitops/lock.h>
 #endif /* __KERNEL__ */
 
 #include <asm-generic/bitops/find.h>
index 1fa99baf4e25bbaf2f82daaf1960537bb59bfde8..8eafdb1c08ba96b3939b30fdd2b38b250c3584ff 100644 (file)
@@ -145,6 +145,7 @@ static inline int __test_bit (int nr, const void *addr)
 #include <asm-generic/bitops/find.h>
 #include <asm-generic/bitops/sched.h>
 #include <asm-generic/bitops/hweight.h>
+#include <asm-generic/bitops/lock.h>
 
 #include <asm-generic/bitops/ext2-non-atomic.h>
 #define ext2_set_bit_atomic(l,n,a)      test_and_set_bit(n,a)
index a20fe9822f6002db96607331fd7ae50f0a95ee58..c96641f7502298fca9aa60c5f511c9e8df50dfb3 100644 (file)
@@ -402,6 +402,7 @@ static inline int fls(int x)
 }
 
 #include <asm-generic/bitops/hweight.h>
+#include <asm-generic/bitops/lock.h>
 
 #endif /* __KERNEL__ */
 
index 1d7d9b4bcacb0846f44f72282008552f4582dba5..525edf2ce5c263454158fb8a724f9b75083f63f5 100644 (file)
@@ -408,6 +408,7 @@ static __inline__ int fls(int x)
 #define ARCH_HAS_FAST_MULTIPLIER 1
 
 #include <asm-generic/bitops/hweight.h>
+#include <asm-generic/bitops/lock.h>
 
 #endif /* __KERNEL__ */
 
index 1c1e0d933eea8311db44907231d94a62b025c334..78db04cf6e4859bf24d57ea6ed854760a7aa1201 100644 (file)
@@ -108,6 +108,7 @@ static inline int fls (unsigned int x)
 #endif
 
 #include <asm-generic/bitops/hweight.h>
+#include <asm-generic/bitops/lock.h>
 #include <asm-generic/bitops/sched.h>
 #include <asm-generic/bitops/minix.h>