Merge master.kernel.org:/pub/scm/linux/kernel/git/jejb/scsi-misc-2.6
diff --git a/include/asm-ia64/atomic.h b/include/asm-ia64/atomic.h
index b16ad235c7ee797da4b4704f7793b02e1fe2b045..50c2b83fd5a04cccf72996add92fae7b2ba0b407 100644
--- a/include/asm-ia64/atomic.h
+++ b/include/asm-ia64/atomic.h
@@ -15,6 +15,7 @@
 #include <linux/types.h>
 
 #include <asm/intrinsics.h>
+#include <asm/system.h>
 
 /*
  * On IA-64, counter must always be volatile to ensure that the
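
Review note: the new #include <asm/system.h> presumably makes this header
self-contained with respect to definitions it uses — on IA-64 of this vintage,
asm/system.h carried the memory-barrier and related low-level machinery —
rather than relying on some other header pulling it in indirectly. The hunk
itself does not show the consumer, so this reading is an inference from context.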
@@ -54,7 +55,7 @@ ia64_atomic64_add (__s64 i, atomic64_t *v)
 
        do {
                CMPXCHG_BUGCHECK(v);
-               old = atomic_read(v);
+               old = atomic64_read(v);
                new = old + i;
        } while (ia64_cmpxchg(acq, v, old, new, sizeof(atomic64_t)) != old);
        return new;
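
Review note on the atomic_read() -> atomic64_read() change here (and the
identical one in ia64_atomic64_sub() below): with the classic macro
definitions both accessors expand to ((v)->counter), so the old code happened
to read the full 64-bit counter and was only type-unclean. The fix states the
intent and stays correct if the accessors ever become typed inline functions,
as this standalone sketch (mock types, not the kernel's definitions) shows:

    /* Sketch only: why atomic_read() on an atomic64_t was fragile.  With the
     * macro form, atomic_read(v) expands to ((v)->counter) and happens to
     * read the full 64-bit counter; with typed inline accessors, the old
     * call would not even compile.  Mock types for illustration. */
    typedef struct { volatile int counter; } atomic_t;       /* 32-bit */
    typedef struct { volatile long counter; } atomic64_t;    /* 64-bit */

    static inline int atomic_read(const atomic_t *v)      { return v->counter; }
    static inline long atomic64_read(const atomic64_t *v) { return v->counter; }

    static long read_counter(atomic64_t *v)
    {
        /* atomic_read(v) would be a pointer-type error here, catching the
         * mismatch at compile time; the untyped macro silently accepted it. */
        return atomic64_read(v);
    }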
@@ -82,7 +83,7 @@ ia64_atomic64_sub (__s64 i, atomic64_t *v)
 
        do {
                CMPXCHG_BUGCHECK(v);
-               old = atomic_read(v);
+               old = atomic64_read(v);
                new = old - i;
        } while (ia64_cmpxchg(acq, v, old, new, sizeof(atomic64_t)) != old);
        return new;
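
Both corrected loops are the standard compare-and-swap retry pattern: snapshot
the counter, compute the new value, and retry if another CPU raced in between.
A portable sketch of the same shape, using a GCC builtin purely as a stand-in
for the ia64_cmpxchg() intrinsic:

    /* Generic CAS retry loop with the same shape as ia64_atomic64_add():
     * snapshot, compute, publish only if the counter is still unchanged.
     * __sync_bool_compare_and_swap() is a portable GCC builtin standing in
     * for ia64_cmpxchg(acq, ...); a sketch, not the kernel's implementation. */
    static long add_return_sketch(long i, volatile long *counter)
    {
        long old, new;

        do {
            old = *counter;        /* may be stale by the next line */
            new = old + i;
        } while (!__sync_bool_compare_and_swap(counter, old, new));

        return new;                /* the value this CPU installed */
    }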
@@ -95,36 +96,38 @@ ia64_atomic64_sub (__s64 i, atomic64_t *v)
        (cmpxchg(&((v)->counter), old, new))
 #define atomic64_xchg(v, new) (xchg(&((v)->counter), new))
 
-#define atomic_add_unless(v, a, u)                             \
-({                                                             \
-       __typeof__(v->counter) c, old;                          \
-       c = atomic_read(v);                                     \
-       for (;;) {                                              \
-               if (unlikely(c == (u)))                         \
-                       break;                                  \
-               old = atomic_cmpxchg((v), c, c + (a));          \
-               if (likely(old == c))                           \
-                       break;                                  \
-               c = old;                                        \
-       }                                                       \
-       c != (u);                                               \
-})
+static __inline__ int atomic_add_unless(atomic_t *v, int a, int u)
+{
+       int c, old;
+       c = atomic_read(v);
+       for (;;) {
+               if (unlikely(c == (u)))
+                       break;
+               old = atomic_cmpxchg((v), c, c + (a));
+               if (likely(old == c))
+                       break;
+               c = old;
+       }
+       return c != (u);
+}
+
 #define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0)
 
-#define atomic64_add_unless(v, a, u)                           \
-({                                                             \
-       __typeof__(v->counter) c, old;                          \
-       c = atomic64_read(v);                                   \
-       for (;;) {                                              \
-               if (unlikely(c == (u)))                         \
-                       break;                                  \
-               old = atomic64_cmpxchg((v), c, c + (a));        \
-               if (likely(old == c))                           \
-                       break;                                  \
-               c = old;                                        \
-       }                                                       \
-       c != (u);                                               \
-})
+static __inline__ int atomic64_add_unless(atomic64_t *v, long a, long u)
+{
+       long c, old;
+       c = atomic64_read(v);
+       for (;;) {
+               if (unlikely(c == (u)))
+                       break;
+               old = atomic64_cmpxchg((v), c, c + (a));
+               if (likely(old == c))
+                       break;
+               c = old;
+       }
+       return c != (u);
+}
+
 #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0)
 
 #define atomic_add_return(i,v)                                         \
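
Converting atomic_add_unless()/atomic64_add_unless() from statement-expression
macros to static __inline__ functions changes no behavior: it adds real type
checking of v, a and u, guarantees each argument is evaluated exactly once,
and gives debuggers a symbol to work with. A kernel-style usage sketch for the
atomic_inc_not_zero() wrapper defined above (struct obj and obj_get() are
illustrative names, not part of this patch):

    /* Kernel-style usage sketch: take a reference only while the object is
     * still alive. */
    struct obj {
        atomic_t refcount;
    };

    static struct obj *obj_get(struct obj *o)
    {
        /* atomic_inc_not_zero(): increment unless the count is already 0;
         * returns non-zero iff the increment actually happened. */
        if (!atomic_inc_not_zero(&o->refcount))
            return NULL;           /* object is already being torn down */
        return o;
    }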