X-Git-Url: http://pilppa.org/gitweb/gitweb.cgi?a=blobdiff_plain;f=include%2Fasm-generic%2Fmutex-dec.h;h=f104af7cf4375f045af902af4315834ae41460c2;hb=1f6d6e8ebe73ba9d9d4c693f7f6f50f661dbd6e4;hp=29c6ac34e236f48ab1704bfda30350b453a3c6af;hpb=a8b59e79ed55de97949ff1ca7d933786b95b39bd;p=linux-2.6-omap-h63xx.git

diff --git a/include/asm-generic/mutex-dec.h b/include/asm-generic/mutex-dec.h
index 29c6ac34e23..f104af7cf43 100644
--- a/include/asm-generic/mutex-dec.h
+++ b/include/asm-generic/mutex-dec.h
@@ -1,5 +1,5 @@
 /*
- * asm-generic/mutex-dec.h
+ * include/asm-generic/mutex-dec.h
  *
  * Generic implementation of the mutex fastpath, based on atomic
  * decrement/increment.
@@ -18,12 +18,10 @@
  * 1 even when the "1" assertion wasn't true.
  */
 static inline void
-__mutex_fastpath_lock(atomic_t *count, fastcall void (*fail_fn)(atomic_t *))
+__mutex_fastpath_lock(atomic_t *count, void (*fail_fn)(atomic_t *))
 {
 	if (unlikely(atomic_dec_return(count) < 0))
 		fail_fn(count);
-	else
-		smp_mb();
 }
 
 /**
@@ -37,14 +35,11 @@ __mutex_fastpath_lock(atomic_t *count, fastcall void (*fail_fn)(atomic_t *))
  * or anything the slow path function returns.
  */
 static inline int
-__mutex_fastpath_lock_retval(atomic_t *count, fastcall int (*fail_fn)(atomic_t *))
+__mutex_fastpath_lock_retval(atomic_t *count, int (*fail_fn)(atomic_t *))
 {
 	if (unlikely(atomic_dec_return(count) < 0))
 		return fail_fn(count);
-	else {
-		smp_mb();
-		return 0;
-	}
+	return 0;
 }
 
 /**
@@ -61,9 +56,8 @@ __mutex_fastpath_lock_retval(atomic_t *count, fastcall int (*fail_fn)(atomic_t
  * to return 0 otherwise.
  */
 static inline void
-__mutex_fastpath_unlock(atomic_t *count, fastcall void (*fail_fn)(atomic_t *))
+__mutex_fastpath_unlock(atomic_t *count, void (*fail_fn)(atomic_t *))
 {
-	smp_mb();
 	if (unlikely(atomic_inc_return(count) <= 0))
 		fail_fn(count);
 }
@@ -88,25 +82,9 @@ __mutex_fastpath_unlock(atomic_t *count, fastcall void (*fail_fn)(atomic_t *))
 static inline int
 __mutex_fastpath_trylock(atomic_t *count, int (*fail_fn)(atomic_t *))
 {
-	/*
-	 * We have two variants here. The cmpxchg based one is the best one
-	 * because it never induce a false contention state. It is included
-	 * here because architectures using the inc/dec algorithms over the
-	 * xchg ones are much more likely to support cmpxchg natively.
-	 *
-	 * If not we fall back to the spinlock based variant - that is
-	 * just as efficient (and simpler) as a 'destructive' probing of
-	 * the mutex state would be.
-	 */
-#ifdef __HAVE_ARCH_CMPXCHG
-	if (likely(atomic_cmpxchg(count, 1, 0) == 1)) {
-		smp_mb();
+	if (likely(atomic_cmpxchg(count, 1, 0) == 1))
 		return 1;
-	}
 	return 0;
-#else
-	return fail_fn(count);
-#endif
 }
 
 #endif
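
Context for the hunks above: the fastcall annotation had become a no-op by this point in the 2.6 series and was being removed tree-wide, and the explicit smp_mb() calls are redundant because the kernel's value-returning atomic primitives (atomic_dec_return(), atomic_inc_return(), atomic_cmpxchg()) are documented to imply a full memory barrier on both sides of the operation. The __HAVE_ARCH_CMPXCHG fallback in the trylock path goes away on the apparent assumption that every architecture now supplies a usable atomic_cmpxchg().

What follows is a minimal, self-contained user-space sketch of the counter protocol these fastpaths implement (count == 1 means unlocked, 0 means locked and uncontended, negative means locked with possible waiters), written with C11 atomics instead of the kernel's atomic_t. It is an illustrative analogue, not kernel code: the spinning lock_slowpath()/wakeup stand-ins and the demo_* names are assumptions made for the example, replacing the kernel's sleeping slowpath functions.

	/*
	 * User-space analogue of the mutex-dec fastpath protocol (illustrative,
	 * not kernel code). C11 seq_cst read-modify-write operations play the
	 * role of atomic_dec_return()/atomic_inc_return()/atomic_cmpxchg(),
	 * which is why no separate memory barriers appear here either.
	 */
	#include <stdatomic.h>
	#include <stdio.h>

	static atomic_int count = 1;	/* 1 == unlocked */

	/* Stand-in for __mutex_lock_slowpath(): spins instead of sleeping. */
	static void lock_slowpath(atomic_int *cnt)
	{
		while (atomic_exchange(cnt, -1) != 1)
			;	/* a real mutex would sleep on a wait queue here */
	}

	static void demo_lock(void)
	{
		/* Fastpath: the 1 -> 0 transition, as in __mutex_fastpath_lock(). */
		if (atomic_fetch_sub(&count, 1) - 1 < 0)
			lock_slowpath(&count);
	}

	static int demo_trylock(void)
	{
		/* Mirrors __mutex_fastpath_trylock(): succeeds only on 1 -> 0. */
		int expected = 1;
		return atomic_compare_exchange_strong(&count, &expected, 0);
	}

	static void demo_unlock(void)
	{
		/* Fastpath: the 0 -> 1 transition, as in __mutex_fastpath_unlock(). */
		if (atomic_fetch_add(&count, 1) + 1 <= 0)
			atomic_store(&count, 1);	/* stand-in for waking a waiter */
	}

	int main(void)
	{
		demo_lock();
		printf("trylock while held: %d (expect 0)\n", demo_trylock());
		demo_unlock();
		printf("trylock while free: %d (expect 1)\n", demo_trylock());
		demo_unlock();
		return 0;
	}

Built with any C11 compiler (e.g. cc -std=c11 demo.c), this prints 0 then 1. The kernel relies on the same ordering guarantee via the rule that atomic operations returning a value act as full barriers, which is exactly what makes the dropped smp_mb() calls unnecessary on conforming architectures.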