X-Git-Url: http://pilppa.org/gitweb/gitweb.cgi?a=blobdiff_plain;f=include%2Fasm-i386%2Fatomic.h;h=437aac80171131b18f68721c0ecc716f6de13598;hb=03491c92932b8d2dab1bb298e0122ff6d39680af;hp=4dd272331361c7eb747305366810fc92c28b3b08;hpb=e0cc09e295f346b7921e921f385fe5213472316a;p=linux-2.6-omap-h63xx.git

diff --git a/include/asm-i386/atomic.h b/include/asm-i386/atomic.h
index 4dd27233136..437aac80171 100644
--- a/include/asm-i386/atomic.h
+++ b/include/asm-i386/atomic.h
@@ -3,6 +3,7 @@
 
 #include <linux/compiler.h>
 #include <asm/processor.h>
+#include <asm/cmpxchg.h>
 
 /*
  * Atomic operations that C can't guarantee us.  Useful for
@@ -51,7 +52,7 @@ static __inline__ void atomic_add(int i, atomic_t *v)
 }
 
 /**
- * atomic_sub - subtract the atomic variable
+ * atomic_sub - subtract integer from atomic variable
  * @i: integer value to subtract
  * @v: pointer of type atomic_t
  *
@@ -170,7 +171,7 @@ static __inline__ int atomic_add_negative(int i, atomic_t *v)
 }
 
 /**
- * atomic_add_return - add and return
+ * atomic_add_return - add integer and return
  * @v: pointer of type atomic_t
  * @i: integer value to add
  *
@@ -181,7 +182,7 @@ static __inline__ int atomic_add_return(int i, atomic_t *v)
 	int __i;
 #ifdef CONFIG_M386
 	unsigned long flags;
-	if(unlikely(boot_cpu_data.x86==3))
+	if(unlikely(boot_cpu_data.x86 <= 3))
 		goto no_xadd;
 #endif
 	/* Modern 486+ processor */
@@ -202,13 +203,20 @@ no_xadd: /* Legacy 386 processor */
 #endif
 }
 
+/**
+ * atomic_sub_return - subtract integer and return
+ * @v: pointer of type atomic_t
+ * @i: integer value to subtract
+ *
+ * Atomically subtracts @i from @v and returns @v - @i
+ */
 static __inline__ int atomic_sub_return(int i, atomic_t *v)
 {
 	return atomic_add_return(-i,v);
 }
 
-#define atomic_cmpxchg(v, old, new) ((int)cmpxchg(&((v)->counter), old, new))
-#define atomic_xchg(v, new) (xchg(&((v)->counter), new))
+#define atomic_cmpxchg(v, old, new) (cmpxchg(&((v)->counter), (old), (new)))
+#define atomic_xchg(v, new) (xchg(&((v)->counter), (new)))
 
 /**
  * atomic_add_unless - add unless the number is already a given value
@@ -219,20 +227,21 @@ static __inline__ int atomic_sub_return(int i, atomic_t *v)
  * Atomically adds @a to @v, so long as @v was not already @u.
  * Returns non-zero if @v was not @u, and zero otherwise.
  */
-#define atomic_add_unless(v, a, u)				\
-({								\
-	int c, old;						\
-	c = atomic_read(v);					\
-	for (;;) {						\
-		if (unlikely(c == (u)))				\
-			break;					\
-		old = atomic_cmpxchg((v), c, c + (a));		\
-		if (likely(old == c))				\
-			break;					\
-		c = old;					\
-	}							\
-	c != (u);						\
-})
+static __inline__ int atomic_add_unless(atomic_t *v, int a, int u)
+{
+	int c, old;
+	c = atomic_read(v);
+	for (;;) {
+		if (unlikely(c == (u)))
+			break;
+		old = atomic_cmpxchg((v), c, c + (a));
+		if (likely(old == c))
+			break;
+		c = old;
+	}
+	return c != (u);
+}
+
 #define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0)
 
 #define atomic_inc_return(v)  (atomic_add_return(1,v))
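
For readers following the atomic_add_unless() change above: the compare-and-swap retry loop it implements can be sketched outside the kernel with C11 <stdatomic.h>. The snippet below is only an illustrative userspace analogue, not kernel code; the names add_unless() and inc_not_zero() are invented here, and atomic_compare_exchange_weak() stands in for the LOCK-prefixed cmpxchg that the real atomic_cmpxchg() macro expands to.

/*
 * Userspace sketch (illustration only): the same CAS retry loop as the
 * new inline atomic_add_unless(), written with C11 atomics.
 */
#include <stdatomic.h>
#include <stdio.h>

/* Add 'a' to *v unless *v is already 'u'; return non-zero if it was not 'u'. */
static int add_unless(atomic_int *v, int a, int u)
{
	int c = atomic_load(v);
	for (;;) {
		if (c == u)
			break;
		/* On failure, compare_exchange reloads 'c' with the current
		 * value, so the loop retries with fresh data, just like the
		 * atomic_cmpxchg() loop in the hunk above. */
		if (atomic_compare_exchange_weak(v, &c, c + a))
			break;
	}
	return c != u;
}

/* Refcount-style helper mirroring atomic_inc_not_zero(). */
static int inc_not_zero(atomic_int *v)
{
	return add_unless(v, 1, 0);
}

int main(void)
{
	atomic_int refs = 1;

	printf("%d\n", inc_not_zero(&refs));	/* 1: count was non-zero, now 2 */
	atomic_store(&refs, 0);
	printf("%d\n", inc_not_zero(&refs));	/* 0: count was zero, left untouched */
	return 0;
}

The loop mirrors the semantics documented in the kernel-doc comment: @a is added only while the current value is not @u, and the return value is non-zero exactly when the add took place.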