diff --git a/include/asm-parisc/atomic.h b/include/asm-parisc/atomic.h
index 7d57d34fcca8a969e520e4817b04cfe641506857..57fcc4a5ebb4e93188fa338f786f49bcf509be35 100644
--- a/include/asm-parisc/atomic.h
+++ b/include/asm-parisc/atomic.h
@@ -122,6 +122,39 @@ __cmpxchg(volatile void *ptr, unsigned long old, unsigned long new_, int size)
                                    (unsigned long)_n_, sizeof(*(ptr))); \
   })
 
+#include <asm-generic/cmpxchg-local.h>
+
+static inline unsigned long __cmpxchg_local(volatile void *ptr,
+                                     unsigned long old,
+                                     unsigned long new_, int size)
+{
+       switch (size) {
+#ifdef CONFIG_64BIT
+       case 8: return __cmpxchg_u64((unsigned long *)ptr, old, new_);
+#endif
+       case 4: return __cmpxchg_u32(ptr, old, new_);
+       default:
+               return __cmpxchg_local_generic(ptr, old, new_, size);
+       }
+}
+
+/*
+ * cmpxchg_local and cmpxchg64_local are atomic wrt current CPU. Always make
+ * them available.
+ */
+#define cmpxchg_local(ptr, o, n)                                       \
+       ((__typeof__(*(ptr)))__cmpxchg_local((ptr), (unsigned long)(o), \
+                       (unsigned long)(n), sizeof(*(ptr))))
+#ifdef CONFIG_64BIT
+#define cmpxchg64_local(ptr, o, n)                                     \
+  ({                                                                   \
+       BUILD_BUG_ON(sizeof(*(ptr)) != 8);                              \
+       cmpxchg_local((ptr), (o), (n));                                 \
+  })
+#else
+#define cmpxchg64_local(ptr, o, n) __cmpxchg64_local_generic((ptr), (o), (n))
+#endif
+
 /* Note that we need not lock read accesses - aligned word writes/reads
  * are atomic, so a reader never sees unconsistent values.
  *
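Context (not part of the patch): cmpxchg_local() is intended for data that is only ever modified from the local CPU, such as per-CPU counters, where the fully SMP-safe cmpxchg() would be needlessly heavy. A minimal sketch of a caller, assuming a hypothetical per-CPU counter named sample_count:

        /* Illustrative only -- not from this patch. */
        #include <linux/percpu.h>
        #include <asm/system.h>

        static DEFINE_PER_CPU(unsigned long, sample_count);

        static void bump_local_count(void)
        {
                unsigned long *p = &get_cpu_var(sample_count);
                unsigned long old, new;

                do {
                        old = *p;
                        new = old + 1;
                        /* atomic wrt this CPU only; no cross-CPU ordering implied */
                } while (cmpxchg_local(p, old, new) != old);

                put_cpu_var(sample_count);
        }

On parisc the 4-byte (and, on 64-bit builds, 8-byte) cases reuse the same __cmpxchg_u32/__cmpxchg_u64 primitives as cmpxchg(); any other size falls back to __cmpxchg_local_generic() from asm-generic/cmpxchg-local.h, which does a plain read-modify-write with local interrupts disabled.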
@@ -163,7 +196,7 @@ static __inline__ int atomic_read(const atomic_t *v)
 }
 
 /* exported interface */
-#define atomic_cmpxchg(v, o, n) ((int)cmpxchg(&((v)->counter), (o), (n)))
+#define atomic_cmpxchg(v, o, n) (cmpxchg(&((v)->counter), (o), (n)))
 #define atomic_xchg(v, new) (xchg(&((v)->counter), new))
 
 /**
@@ -175,14 +208,21 @@ static __inline__ int atomic_read(const atomic_t *v)
  * Atomically adds @a to @v, so long as it was not @u.
  * Returns non-zero if @v was not @u, and zero otherwise.
  */
-#define atomic_add_unless(v, a, u)                             \
-({                                                             \
-       int c, old;                                             \
-       c = atomic_read(v);                                     \
-       while (c != (u) && (old = atomic_cmpxchg((v), c, c + (a))) != c) \
-               c = old;                                        \
-       c != (u);                                               \
-})
+static __inline__ int atomic_add_unless(atomic_t *v, int a, int u)
+{
+       int c, old;
+       c = atomic_read(v);
+       for (;;) {
+               if (unlikely(c == (u)))
+                       break;
+               old = atomic_cmpxchg((v), c, c + (a));
+               if (likely(old == c))
+                       break;
+               c = old;
+       }
+       return c != (u);
+}
+
 #define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0)
 
 #define atomic_add(i,v)        ((void)(__atomic_add_return( ((int)i),(v))))
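Context (not part of the patch): turning atomic_add_unless() from a macro into an inline function gives it real type checking and the likely()/unlikely() hints, without changing its compare-and-swap retry semantics. Its most common user is atomic_inc_not_zero(), the "take a reference unless it has already dropped to zero" idiom. A minimal sketch of that caller pattern, with hypothetical names:

        /* Illustrative only -- not from this patch. */
        #include <asm/atomic.h>

        struct sample_obj {
                atomic_t refcnt;
        };

        /*
         * Take a reference only if the object still holds one, i.e. it is
         * not already on its way to being freed.  Returns non-zero on
         * success, zero if the refcount was already zero.
         */
        static int sample_obj_get(struct sample_obj *obj)
        {
                return atomic_inc_not_zero(&obj->refcnt);
        }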
@@ -270,6 +310,37 @@ atomic64_read(const atomic64_t *v)
 #define atomic64_dec_and_test(v)       (atomic64_dec_return(v) == 0)
 #define atomic64_sub_and_test(i,v)     (atomic64_sub_return((i),(v)) == 0)
 
+/* exported interface */
+#define atomic64_cmpxchg(v, o, n) \
+       ((__typeof__((v)->counter))cmpxchg(&((v)->counter), (o), (n)))
+#define atomic64_xchg(v, new) (xchg(&((v)->counter), new))
+
+/**
+ * atomic64_add_unless - add unless the number is a given value
+ * @v: pointer of type atomic64_t
+ * @a: the amount to add to v...
+ * @u: ...unless v is equal to u.
+ *
+ * Atomically adds @a to @v, so long as it was not @u.
+ * Returns non-zero if @v was not @u, and zero otherwise.
+ */
+static __inline__ int atomic64_add_unless(atomic64_t *v, long a, long u)
+{
+       long c, old;
+       c = atomic64_read(v);
+       for (;;) {
+               if (unlikely(c == (u)))
+                       break;
+               old = atomic64_cmpxchg((v), c, c + (a));
+               if (likely(old == c))
+                       break;
+               c = old;
+       }
+       return c != (u);
+}
+
+#define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0)
+
 #endif /* CONFIG_64BIT */
 
 #include <asm-generic/atomic.h>
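Context (not part of the patch): with atomic64_cmpxchg()/atomic64_xchg() exported, 64-bit builds can compose arbitrary read-modify-write operations on atomic64_t outside this header, using the same retry loop as atomic64_add_unless() above. A minimal sketch, built around the hypothetical helper atomic64_add_below():

        /* Illustrative only -- not from this patch; 64-bit builds only. */
        #include <asm/atomic.h>

        /*
         * Add @a to @v unless the result would exceed @limit.  Returns
         * non-zero if the add was applied, zero otherwise.  Same
         * compare-and-swap retry loop as atomic64_add_unless().
         */
        static int atomic64_add_below(atomic64_t *v, long a, long limit)
        {
                long c, old;

                c = atomic64_read(v);
                for (;;) {
                        if (c + a > limit)
                                return 0;
                        old = atomic64_cmpxchg(v, c, c + a);
                        if (old == c)
                                return 1;
                        c = old;
                }
        }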