diff --git a/include/asm-alpha/system.h b/include/asm-alpha/system.h
index 03e9c0e5ed7451d99161959d0d6f716e6789984c..ed221d6408fc308acbf737d84ba51adb9899dc28 100644
--- a/include/asm-alpha/system.h
+++ b/include/asm-alpha/system.h
@@ -48,6 +48,7 @@
 
 #ifndef __ASSEMBLY__
 #include <linux/kernel.h>
+#define AT_VECTOR_SIZE_ARCH 4 /* entries in ARCH_DLINFO */
 
 /*
  * This is the logout header that should be common to all platforms
@@ -139,16 +140,6 @@ extern void halt(void) __attribute__((noreturn));
 struct task_struct;
 extern struct task_struct *alpha_switch_to(unsigned long, struct task_struct*);
 
-/*
- * On SMP systems, when the scheduler does migration-cost autodetection,
- * it needs a way to flush as much of the CPU's caches as possible.
- *
- * TODO: fill this in!
- */
-static inline void sched_cacheflush(void)
-{
-}
-
 #define imb() \
 __asm__ __volatile__ ("call_pal %0 #imb" : : "i" (PAL_imb) : "memory")
 
@@ -443,8 +434,110 @@ extern void __xchg_called_with_bad_pointer(void);
      (__typeof__(*(ptr))) __xchg((ptr), (unsigned long)_x_, sizeof(*(ptr))); \
   })
 
-#define tas(ptr) (xchg((ptr),1))
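+/*
+ * xchg_local() family: like xchg(), but atomic only with respect to
+ * the current CPU, and no memory barrier is issued.  Alpha has no
+ * byte or word store-conditional, so the 8- and 16-bit versions work
+ * on the containing aligned quadword: insbl/inswl shifts the new
+ * value into its lane, mskbl/mskwl clears that lane in the quadword
+ * loaded by ldq_l, and the OR of the two is written back with stq_c,
+ * retrying on failure.
+ */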
+static inline unsigned long
+__xchg_u8_local(volatile char *m, unsigned long val)
+{
+       unsigned long ret, tmp, addr64;
+
+       __asm__ __volatile__(
+       "       andnot  %4,7,%3\n"
+       "       insbl   %1,%4,%1\n"
+       "1:     ldq_l   %2,0(%3)\n"
+       "       extbl   %2,%4,%0\n"
+       "       mskbl   %2,%4,%2\n"
+       "       or      %1,%2,%2\n"
+       "       stq_c   %2,0(%3)\n"
+       "       beq     %2,2f\n"
+       ".subsection 2\n"
+       "2:     br      1b\n"
+       ".previous"
+       : "=&r" (ret), "=&r" (val), "=&r" (tmp), "=&r" (addr64)
+       : "r" ((long)m), "1" (val) : "memory");
+
+       return ret;
+}
+
+static inline unsigned long
+__xchg_u16_local(volatile short *m, unsigned long val)
+{
+       unsigned long ret, tmp, addr64;
+
+       __asm__ __volatile__(
+       "       andnot  %4,7,%3\n"
+       "       inswl   %1,%4,%1\n"
+       "1:     ldq_l   %2,0(%3)\n"
+       "       extwl   %2,%4,%0\n"
+       "       mskwl   %2,%4,%2\n"
+       "       or      %1,%2,%2\n"
+       "       stq_c   %2,0(%3)\n"
+       "       beq     %2,2f\n"
+       ".subsection 2\n"
+       "2:     br      1b\n"
+       ".previous"
+       : "=&r" (ret), "=&r" (val), "=&r" (tmp), "=&r" (addr64)
+       : "r" ((long)m), "1" (val) : "memory");
+
+       return ret;
+}
+
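+/*
+ * The 32- and 64-bit exchanges map directly onto the native
+ * ldl_l/stl_c and ldq_l/stq_c pairs; no lane splicing is needed.
+ */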
+static inline unsigned long
+__xchg_u32_local(volatile int *m, unsigned long val)
+{
+       unsigned long dummy;
+
+       __asm__ __volatile__(
+       "1:     ldl_l %0,%4\n"
+       "       bis $31,%3,%1\n"
+       "       stl_c %1,%2\n"
+       "       beq %1,2f\n"
+       ".subsection 2\n"
+       "2:     br 1b\n"
+       ".previous"
+       : "=&r" (val), "=&r" (dummy), "=m" (*m)
+       : "rI" (val), "m" (*m) : "memory");
 
+       return val;
+}
+
+static inline unsigned long
+__xchg_u64_local(volatile long *m, unsigned long val)
+{
+       unsigned long dummy;
+
+       __asm__ __volatile__(
+       "1:     ldq_l %0,%4\n"
+       "       bis $31,%3,%1\n"
+       "       stq_c %1,%2\n"
+       "       beq %1,2f\n"
+       ".subsection 2\n"
+       "2:     br 1b\n"
+       ".previous"
+       : "=&r" (val), "=&r" (dummy), "=m" (*m)
+       : "rI" (val), "m" (*m) : "memory");
+
+       return val;
+}
+
+#define __xchg_local(ptr, x, size) \
+({ \
+       unsigned long __xchg__res; \
+       volatile void *__xchg__ptr = (ptr); \
+       switch (size) { \
+               case 1: __xchg__res = __xchg_u8_local(__xchg__ptr, x); break; \
+               case 2: __xchg__res = __xchg_u16_local(__xchg__ptr, x); break; \
+               case 4: __xchg__res = __xchg_u32_local(__xchg__ptr, x); break; \
+               case 8: __xchg__res = __xchg_u64_local(__xchg__ptr, x); break; \
+               default: __xchg_called_with_bad_pointer(); __xchg__res = x; \
+       } \
+       __xchg__res; \
+})
+
+#define xchg_local(ptr,x)                                                   \
+  ({                                                                        \
+     __typeof__(*(ptr)) _x_ = (x);                                          \
+     (__typeof__(*(ptr))) __xchg_local((ptr), (unsigned long)_x_,           \
+               sizeof(*(ptr))); \
+  })
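+/*
+ * Example (hypothetical caller): publish a new per-cpu state word
+ * and pick up the previous one, without paying for a barrier:
+ *
+ *	old = xchg_local(&__get_cpu_var(pcpu_state), new_state);
+ */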
 
 /* 
  * Atomic compare and exchange.  Compare OLD with MEM, if identical,
@@ -588,13 +681,146 @@ __cmpxchg(volatile void *ptr, unsigned long old, unsigned long new, int size)
        return old;
 }
 
-#define cmpxchg(ptr,o,n)                                                \
+#define cmpxchg(ptr, o, n)                                              \
   ({                                                                    \
      __typeof__(*(ptr)) _o_ = (o);                                      \
      __typeof__(*(ptr)) _n_ = (n);                                      \
      (__typeof__(*(ptr))) __cmpxchg((ptr), (unsigned long)_o_,          \
                                    (unsigned long)_n_, sizeof(*(ptr))); \
   })
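+/*
+ * Alpha is 64-bit, so cmpxchg64() is just cmpxchg() plus a
+ * compile-time check that the operand really is 8 bytes wide.
+ */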
+#define cmpxchg64(ptr, o, n)                                            \
+  ({                                                                    \
+       BUILD_BUG_ON(sizeof(*(ptr)) != 8);                               \
+       cmpxchg((ptr), (o), (n));                                        \
+  })
+
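+/*
+ * cmpxchg_local() family: the same LL/SC retry loops as the _local
+ * exchange functions above, except that the store-conditional is
+ * only attempted when the value loaded by ldq_l/ldl_l equals 'old';
+ * on a mismatch the loop exits early and the caller sees the value
+ * that was actually found.
+ */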
+static inline unsigned long
+__cmpxchg_u8_local(volatile char *m, long old, long new)
+{
+       unsigned long prev, tmp, cmp, addr64;
+
+       __asm__ __volatile__(
+       "       andnot  %5,7,%4\n"
+       "       insbl   %1,%5,%1\n"
+       "1:     ldq_l   %2,0(%4)\n"
+       "       extbl   %2,%5,%0\n"
+       "       cmpeq   %0,%6,%3\n"
+       "       beq     %3,2f\n"
+       "       mskbl   %2,%5,%2\n"
+       "       or      %1,%2,%2\n"
+       "       stq_c   %2,0(%4)\n"
+       "       beq     %2,3f\n"
+       "2:\n"
+       ".subsection 2\n"
+       "3:     br      1b\n"
+       ".previous"
+       : "=&r" (prev), "=&r" (new), "=&r" (tmp), "=&r" (cmp), "=&r" (addr64)
+       : "r" ((long)m), "Ir" (old), "1" (new) : "memory");
+
+       return prev;
+}
+
+static inline unsigned long
+__cmpxchg_u16_local(volatile short *m, long old, long new)
+{
+       unsigned long prev, tmp, cmp, addr64;
+
+       __asm__ __volatile__(
+       "       andnot  %5,7,%4\n"
+       "       inswl   %1,%5,%1\n"
+       "1:     ldq_l   %2,0(%4)\n"
+       "       extwl   %2,%5,%0\n"
+       "       cmpeq   %0,%6,%3\n"
+       "       beq     %3,2f\n"
+       "       mskwl   %2,%5,%2\n"
+       "       or      %1,%2,%2\n"
+       "       stq_c   %2,0(%4)\n"
+       "       beq     %2,3f\n"
+       "2:\n"
+       ".subsection 2\n"
+       "3:     br      1b\n"
+       ".previous"
+       : "=&r" (prev), "=&r" (new), "=&r" (tmp), "=&r" (cmp), "=&r" (addr64)
+       : "r" ((long)m), "Ir" (old), "1" (new) : "memory");
+
+       return prev;
+}
+
+static inline unsigned long
+__cmpxchg_u32_local(volatile int *m, int old, int new)
+{
+       unsigned long prev, cmp;
+
+       __asm__ __volatile__(
+       "1:     ldl_l %0,%5\n"
+       "       cmpeq %0,%3,%1\n"
+       "       beq %1,2f\n"
+       "       mov %4,%1\n"
+       "       stl_c %1,%2\n"
+       "       beq %1,3f\n"
+       "2:\n"
+       ".subsection 2\n"
+       "3:     br 1b\n"
+       ".previous"
+       : "=&r"(prev), "=&r"(cmp), "=m"(*m)
+       : "r"((long) old), "r"(new), "m"(*m) : "memory");
+
+       return prev;
+}
+
+static inline unsigned long
+__cmpxchg_u64_local(volatile long *m, unsigned long old, unsigned long new)
+{
+       unsigned long prev, cmp;
+
+       __asm__ __volatile__(
+       "1:     ldq_l %0,%5\n"
+       "       cmpeq %0,%3,%1\n"
+       "       beq %1,2f\n"
+       "       mov %4,%1\n"
+       "       stq_c %1,%2\n"
+       "       beq %1,3f\n"
+       "2:\n"
+       ".subsection 2\n"
+       "3:     br 1b\n"
+       ".previous"
+       : "=&r"(prev), "=&r"(cmp), "=m"(*m)
+       : "r"((long) old), "r"(new), "m"(*m) : "memory");
+
+       return prev;
+}
+
+static __always_inline unsigned long
+__cmpxchg_local(volatile void *ptr, unsigned long old, unsigned long new,
+               int size)
+{
+       switch (size) {
+               case 1:
+                       return __cmpxchg_u8_local(ptr, old, new);
+               case 2:
+                       return __cmpxchg_u16_local(ptr, old, new);
+               case 4:
+                       return __cmpxchg_u32_local(ptr, old, new);
+               case 8:
+                       return __cmpxchg_u64_local(ptr, old, new);
+       }
+       __cmpxchg_called_with_bad_pointer();
+       return old;
+}
+
+#define cmpxchg_local(ptr, o, n)                                        \
+  ({                                                                    \
+     __typeof__(*(ptr)) _o_ = (o);                                      \
+     __typeof__(*(ptr)) _n_ = (n);                                      \
+     (__typeof__(*(ptr))) __cmpxchg_local((ptr), (unsigned long)_o_,    \
+                                   (unsigned long)_n_, sizeof(*(ptr))); \
+  })
+#define cmpxchg64_local(ptr, o, n)                                      \
+  ({                                                                    \
+       BUILD_BUG_ON(sizeof(*(ptr)) != 8);                               \
+       cmpxchg_local((ptr), (o), (n));                                  \
+  })
+
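+/*
+ * Example (hypothetical caller): open-coded lock-free increment of a
+ * per-cpu counter, safe against interrupts on this CPU:
+ *
+ *	do {
+ *		old = v->counter;
+ *	} while (cmpxchg_local(&v->counter, old, old + 1) != old);
+ */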
 
 #endif /* __ASSEMBLY__ */