X-Git-Url: http://pilppa.org/gitweb/?a=blobdiff_plain;f=include%2Flinux%2Fspinlock.h;h=e0c0fccced46c4b09d654121423f76357407bc95;hb=7ef9964e6d1b911b78709f144000aacadd0ebc21;hp=61fef376ed2e6161299719f8b3a7cfaf4f246b33;hpb=54c66f6d781e03dc0b23956234963c4911e6d1c0;p=linux-2.6-omap-h63xx.git

diff --git a/include/linux/spinlock.h b/include/linux/spinlock.h
index 61fef376ed2..e0c0fccced4 100644
--- a/include/linux/spinlock.h
+++ b/include/linux/spinlock.h
@@ -46,6 +46,7 @@
  * linux/spinlock.h: builds the final spin_*() APIs.
  */
 
+#include <linux/typecheck.h>
 #include <linux/preempt.h>
 #include <linux/linkage.h>
 #include <linux/compiler.h>
@@ -71,7 +72,7 @@
 #define LOCK_SECTION_END \
 	".previous\n\t"
 
-#define __lockfunc fastcall __attribute__((section(".spinlock.text")))
+#define __lockfunc __attribute__((section(".spinlock.text")))
 
 /*
  * Pull the raw_spinlock_t and raw_rwlock_t definitions:
@@ -120,6 +121,12 @@ do { \
 
 #define spin_is_locked(lock)	__raw_spin_is_locked(&(lock)->raw_lock)
 
+#ifdef CONFIG_GENERIC_LOCKBREAK
+#define spin_is_contended(lock) ((lock)->break_lock)
+#else
+#define spin_is_contended(lock)	__raw_spin_is_contended(&(lock)->raw_lock)
+#endif
+
 /**
  * spin_unlock_wait - wait until the spinlock gets unlocked
  * @lock: the spinlock in question.
@@ -176,8 +183,14 @@ do { \
 
 #ifdef CONFIG_DEBUG_LOCK_ALLOC
 # define spin_lock_nested(lock, subclass) _spin_lock_nested(lock, subclass)
+# define spin_lock_nest_lock(lock, nest_lock)				\
+	 do {								\
+		 typecheck(struct lockdep_map *, &(nest_lock)->dep_map);\
+		 _spin_lock_nest_lock(lock, &(nest_lock)->dep_map);	\
+	 } while (0)
 #else
 # define spin_lock_nested(lock, subclass) _spin_lock(lock)
+# define spin_lock_nest_lock(lock, nest_lock) _spin_lock(lock)
 #endif
 
 #define write_lock(lock)		_write_lock(lock)
@@ -185,23 +198,53 @@ do { \
 
 #if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK)
 
-#define spin_lock_irqsave(lock, flags)	flags = _spin_lock_irqsave(lock)
-#define read_lock_irqsave(lock, flags)	flags = _read_lock_irqsave(lock)
-#define write_lock_irqsave(lock, flags)	flags = _write_lock_irqsave(lock)
+#define spin_lock_irqsave(lock, flags)			\
+	do {						\
+		typecheck(unsigned long, flags);	\
+		flags = _spin_lock_irqsave(lock);	\
+	} while (0)
+#define read_lock_irqsave(lock, flags)			\
+	do {						\
+		typecheck(unsigned long, flags);	\
+		flags = _read_lock_irqsave(lock);	\
+	} while (0)
+#define write_lock_irqsave(lock, flags)			\
+	do {						\
+		typecheck(unsigned long, flags);	\
+		flags = _write_lock_irqsave(lock);	\
+	} while (0)
 
 #ifdef CONFIG_DEBUG_LOCK_ALLOC
-#define spin_lock_irqsave_nested(lock, flags, subclass) \
-	flags = _spin_lock_irqsave_nested(lock, subclass)
+#define spin_lock_irqsave_nested(lock, flags, subclass)		\
+	do {							\
+		typecheck(unsigned long, flags);		\
+		flags = _spin_lock_irqsave_nested(lock, subclass);	\
+	} while (0)
 #else
-#define spin_lock_irqsave_nested(lock, flags, subclass) \
-	flags = _spin_lock_irqsave(lock)
+#define spin_lock_irqsave_nested(lock, flags, subclass)		\
+	do {							\
+		typecheck(unsigned long, flags);		\
+		flags = _spin_lock_irqsave(lock);		\
+	} while (0)
 #endif
 
 #else
 
-#define spin_lock_irqsave(lock, flags)	_spin_lock_irqsave(lock, flags)
-#define read_lock_irqsave(lock, flags)	_read_lock_irqsave(lock, flags)
-#define write_lock_irqsave(lock, flags)	_write_lock_irqsave(lock, flags)
+#define spin_lock_irqsave(lock, flags)			\
+	do {						\
+		typecheck(unsigned long, flags);	\
+		_spin_lock_irqsave(lock, flags);	\
+	} while (0)
+#define read_lock_irqsave(lock, flags)			\
+	do {						\
+		typecheck(unsigned long, flags);	\
+		_read_lock_irqsave(lock, flags);	\
+	} while (0)
+#define write_lock_irqsave(lock, flags)			\
+	do {						\
+		typecheck(unsigned long, flags);	\
+		_write_lock_irqsave(lock, flags);	\
+	} while (0)
 
 #define spin_lock_irqsave_nested(lock, flags, subclass)	\
 	spin_lock_irqsave(lock, flags)
@@ -254,16 +297,25 @@ do { \
 } while (0)
 #endif
 
-#define spin_unlock_irqrestore(lock, flags) \
-					_spin_unlock_irqrestore(lock, flags)
+#define spin_unlock_irqrestore(lock, flags)		\
+	do {						\
+		typecheck(unsigned long, flags);	\
+		_spin_unlock_irqrestore(lock, flags);	\
+	} while (0)
 #define spin_unlock_bh(lock)		_spin_unlock_bh(lock)
 
-#define read_unlock_irqrestore(lock, flags) \
-					_read_unlock_irqrestore(lock, flags)
+#define read_unlock_irqrestore(lock, flags)		\
+	do {						\
+		typecheck(unsigned long, flags);	\
+		_read_unlock_irqrestore(lock, flags);	\
+	} while (0)
 #define read_unlock_bh(lock)		_read_unlock_bh(lock)
 
-#define write_unlock_irqrestore(lock, flags) \
-					_write_unlock_irqrestore(lock, flags)
+#define write_unlock_irqrestore(lock, flags)		\
+	do {						\
+		typecheck(unsigned long, flags);	\
+		_write_unlock_irqrestore(lock, flags);	\
+	} while (0)
 #define write_unlock_bh(lock)		_write_unlock_bh(lock)
 
 #define spin_trylock_bh(lock)	__cond_lock(lock, _spin_trylock_bh(lock))
@@ -282,6 +334,13 @@ do { \
 	1 : ({ local_irq_restore(flags); 0; }); \
 })
 
+#define write_trylock_irqsave(lock, flags) \
+({ \
+	local_irq_save(flags); \
+	write_trylock(lock) ? \
+	1 : ({ local_irq_restore(flags); 0; }); \
+})
+
 /*
  * Pull the atomic_t declaration:
  * (asm-mips/atomic.h needs above definitions)
@@ -291,6 +350,9 @@ do { \
  * atomic_dec_and_lock - lock on reaching reference count zero
  * @atomic: the atomic counter
  * @lock: the spinlock in question
+ *
+ * Decrements @atomic by 1.  If the result is 0, returns true and locks
+ * @lock.  Returns false for all other cases.
  */
 extern int _atomic_dec_and_lock(atomic_t *atomic, spinlock_t *lock);
 #define atomic_dec_and_lock(atomic, lock) \
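
Usage sketch (not part of the patch): the snippet below is a minimal illustration of what the typecheck()-based wrappers now enforce at the call site and of the atomic_dec_and_lock() behaviour documented above. It assumes ordinary kernel code built against a tree with this patch; the identifiers my_lock, my_list, struct my_obj, my_obj_put and my_irq_safe_section are made-up names for illustration only, not kernel APIs.

/*
 * Illustrative caller; my_lock, my_list, struct my_obj, my_obj_put and
 * my_irq_safe_section are hypothetical names.
 */
#include <linux/spinlock.h>
#include <linux/list.h>
#include <linux/slab.h>

static DEFINE_SPINLOCK(my_lock);
static LIST_HEAD(my_list);

struct my_obj {
	atomic_t		refcnt;
	struct list_head	node;
};

/* Drop a reference; my_lock is taken only for the final put. */
static void my_obj_put(struct my_obj *obj)
{
	/*
	 * atomic_dec_and_lock() decrements refcnt and, only if it reached
	 * zero, returns true with my_lock held, so unlinking and freeing
	 * the object happen under the lock.
	 */
	if (atomic_dec_and_lock(&obj->refcnt, &my_lock)) {
		list_del(&obj->node);
		spin_unlock(&my_lock);
		kfree(obj);
	}
}

static void my_irq_safe_section(void)
{
	unsigned long flags;	/* must be unsigned long: with this patch,
				 * typecheck() flags e.g. "int flags" at
				 * compile time */

	spin_lock_irqsave(&my_lock, flags);
	/* ... critical section with local interrupts disabled ... */
	spin_unlock_irqrestore(&my_lock, flags);
}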