i386: fix early usage of atomic_add_return and local_add_return on real i386
[linux-2.6-omap-h63xx.git] / include/asm-i386/atomic.h
index 4dd272331361c7eb747305366810fc92c28b3b08..437aac80171131b18f68721c0ecc716f6de13598 100644
--- a/include/asm-i386/atomic.h
+++ b/include/asm-i386/atomic.h
@@ -3,6 +3,7 @@
 
 #include <linux/compiler.h>
 #include <asm/processor.h>
+#include <asm/cmpxchg.h>
 
 /*
  * Atomic operations that C can't guarantee us.  Useful for
@@ -51,7 +52,7 @@ static __inline__ void atomic_add(int i, atomic_t *v)
 }
 
 /**
- * atomic_sub - subtract the atomic variable
+ * atomic_sub - subtract integer from atomic variable
  * @i: integer value to subtract
  * @v: pointer of type atomic_t
  * 
@@ -170,7 +171,7 @@ static __inline__ int atomic_add_negative(int i, atomic_t *v)
 }
 
 /**
- * atomic_add_return - add and return
+ * atomic_add_return - add integer and return
  * @v: pointer of type atomic_t
  * @i: integer value to add
  *
@@ -181,7 +182,7 @@ static __inline__ int atomic_add_return(int i, atomic_t *v)
        int __i;
 #ifdef CONFIG_M386
        unsigned long flags;
-       if(unlikely(boot_cpu_data.x86==3))
+       if(unlikely(boot_cpu_data.x86 <= 3))
                goto no_xadd;
 #endif
        /* Modern 486+ processor */
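
The condition change above is the substance of the fix: boot_cpu_data.x86 holds the CPU family, but it is still zero before CPU identification has run, so the old "== 3" test let early callers fall through to the xadd path even on a real 386, which has no XADD instruction. Testing "<= 3" keeps the interrupt-disable fallback until the CPU is known to be a 486 or later. For orientation, the whole helper reads roughly as follows with this hunk applied; this is a sketch pieced together from the context lines of the diff and the asm-i386 header of that era, not a verbatim copy (LOCK_PREFIX, unlikely(), local_irq_save() and friends come from other kernel headers):

static __inline__ int atomic_add_return(int i, atomic_t *v)
{
	int __i;
#ifdef CONFIG_M386
	unsigned long flags;
	if(unlikely(boot_cpu_data.x86 <= 3))
		goto no_xadd;
#endif
	/* Modern 486+ processor: XADD adds i to v->counter and leaves the
	 * counter's old value in i, so i + __i is the new value. */
	__i = i;
	__asm__ __volatile__(
		LOCK_PREFIX "xaddl %0, %1"
		: "+r" (i), "+m" (v->counter)
		: : "memory");
	return i + __i;

#ifdef CONFIG_M386
no_xadd: /* Legacy 386 processor: no XADD, emulate it with interrupts off */
	local_irq_save(flags);
	__i = atomic_read(v);
	atomic_set(v, i + __i);
	local_irq_restore(flags);
	return i + __i;
#endif
}
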
@@ -202,13 +203,20 @@ no_xadd: /* Legacy 386 processor */
 #endif
 }
 
+/**
+ * atomic_sub_return - subtract integer and return
+ * @v: pointer of type atomic_t
+ * @i: integer value to subtract
+ *
+ * Atomically subtracts @i from @v and returns @v - @i
+ */
 static __inline__ int atomic_sub_return(int i, atomic_t *v)
 {
        return atomic_add_return(-i,v);
 }
 
-#define atomic_cmpxchg(v, old, new) ((int)cmpxchg(&((v)->counter), old, new))
-#define atomic_xchg(v, new) (xchg(&((v)->counter), new))
+#define atomic_cmpxchg(v, old, new) (cmpxchg(&((v)->counter), (old), (new)))
+#define atomic_xchg(v, new) (xchg(&((v)->counter), (new)))
 
 /**
  * atomic_add_unless - add unless the number is already a given value
@@ -219,20 +227,21 @@ static __inline__ int atomic_sub_return(int i, atomic_t *v)
  * Atomically adds @a to @v, so long as @v was not already @u.
  * Returns non-zero if @v was not @u, and zero otherwise.
  */
-#define atomic_add_unless(v, a, u)                             \
-({                                                             \
-       int c, old;                                             \
-       c = atomic_read(v);                                     \
-       for (;;) {                                              \
-               if (unlikely(c == (u)))                         \
-                       break;                                  \
-               old = atomic_cmpxchg((v), c, c + (a));          \
-               if (likely(old == c))                           \
-                       break;                                  \
-               c = old;                                        \
-       }                                                       \
-       c != (u);                                               \
-})
+static __inline__ int atomic_add_unless(atomic_t *v, int a, int u)
+{
+       int c, old;
+       c = atomic_read(v);
+       for (;;) {
+               if (unlikely(c == (u)))
+                       break;
+               old = atomic_cmpxchg((v), c, c + (a));
+               if (likely(old == c))
+                       break;
+               c = old;
+       }
+       return c != (u);
+}
+
 #define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0)
 
 #define atomic_inc_return(v)  (atomic_add_return(1,v))
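
The macro-to-inline conversion of atomic_add_unless() keeps the same compare-and-swap retry loop but gives it real parameter types and a proper return statement. Its most common use is through the atomic_inc_not_zero() wrapper defined above, for example taking a reference on an object only while its refcount is still nonzero; the struct and function names below are illustrative only, not taken from this file:

/* Illustrative caller, not part of this header: take a reference on an
 * object only if it has not already dropped to zero references.
 * atomic_inc_not_zero() returns nonzero iff the increment happened. */
struct obj {
	atomic_t refcount;
};

static struct obj *obj_get(struct obj *o)
{
	if (!atomic_inc_not_zero(&o->refcount))
		return NULL;	/* object is already being torn down */
	return o;
}
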