Merge branch 'linus' into x86/ptemask
diff --git a/include/asm-sparc64/system.h b/include/asm-sparc64/system.h
index 99a669c190c776a030ddc2be595693982a1c3fe0..6897ac31be4100baf733c45043294a36d4ed66bb 100644
--- a/include/asm-sparc64/system.h
+++ b/include/asm-sparc64/system.h
@@ -8,6 +8,7 @@
 #ifndef __ASSEMBLY__
 
 #include <linux/irqflags.h>
+#include <asm-generic/cmpxchg-local.h>
 
 /*
  * Sparc (general) CPU types
@@ -29,6 +30,8 @@ enum sparc_cpu {
 #define ARCH_SUN4C_SUN4 0
 #define ARCH_SUN4 0
 
+extern char reboot_command[];
+
 /* These are here in an effort to more fully work around Spitfire Errata
  * #51.  Essentially, if a memory barrier occurs soon after a mispredicted
  * branch, the chip can stop executing instructions until a trap occurs.
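Note: the workaround this comment refers to is the membar_safe() macro whose first line is visible as context in the hunk header below: the membar is placed in the delay slot of an unconditional branch, so it can never directly follow a mispredicted branch. A minimal sketch of that pattern, reconstructed from the surrounding context rather than part of this patch:

	#define membar_safe(type) \
	do {	__asm__ __volatile__("ba,pt	%%xcc, 1f\n\t" \
				     " membar	" type "\n" \
				     "1:\n" \
				     : : : "memory"); \
	} while (0)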
@@ -116,6 +119,7 @@ do {        __asm__ __volatile__("ba,pt     %%xcc, 1f\n\t" \
 extern void sun_do_break(void);
 extern int stop_a_enabled;
 
+extern void fault_in_user_windows(void);
 extern void synchronize_user_stack(void);
 
 extern void __flushw_user(void);
@@ -176,12 +180,13 @@ do {      if (test_thread_flag(TIF_PERFCTR)) {                            \
        "ldx    [%%sp + 2047 + 0x70], %%i6\n\t"                         \
        "ldx    [%%sp + 2047 + 0x78], %%i7\n\t"                         \
        "ldx    [%%g6 + %9], %%g4\n\t"                                  \
-       "brz,pt %%o7, 1f\n\t"                                           \
+       "brz,pt %%o7, switch_to_pc\n\t"                                 \
        " mov   %%g7, %0\n\t"                                           \
        "sethi  %%hi(ret_from_syscall), %%g1\n\t"                       \
        "jmpl   %%g1 + %%lo(ret_from_syscall), %%g0\n\t"                \
        " nop\n\t"                                                      \
-       "1:\n\t"                                                        \
+       ".globl switch_to_pc\n\t"                                       \
+       "switch_to_pc:\n\t"                                             \
        : "=&r" (last), "=r" (current), "=r" (current_thread_info_reg), \
          "=r" (__local_per_cpu_offset)                                 \
        : "0" (task_thread_info(next)),                                 \
@@ -315,6 +320,34 @@ __cmpxchg(volatile void *ptr, unsigned long old, unsigned long new, int size)
                                    (unsigned long)_n_, sizeof(*(ptr))); \
   })
 
+/*
+ * cmpxchg_local and cmpxchg64_local are atomic wrt current CPU. Always make
+ * them available.
+ */
+
+static inline unsigned long __cmpxchg_local(volatile void *ptr,
+                                     unsigned long old,
+                                     unsigned long new, int size)
+{
+       switch (size) {
+       case 4:
+       case 8: return __cmpxchg(ptr, old, new, size);
+       default:
+               return __cmpxchg_local_generic(ptr, old, new, size);
+       }
+
+       return old;
+}
+
+#define cmpxchg_local(ptr, o, n)                                       \
+       ((__typeof__(*(ptr)))__cmpxchg_local((ptr), (unsigned long)(o), \
+                       (unsigned long)(n), sizeof(*(ptr))))
+#define cmpxchg64_local(ptr, o, n)                                     \
+  ({                                                                   \
+       BUILD_BUG_ON(sizeof(*(ptr)) != 8);                              \
+       cmpxchg_local((ptr), (o), (n));                                 \
+  })
+
 #endif /* !(__ASSEMBLY__) */
 
 #define arch_align_stack(x) (x)
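As the new comment says, cmpxchg_local() is atomic only with respect to the current CPU, which makes it a cheap primitive for data that never crosses processors. A hypothetical usage sketch (not part of this patch; the per-CPU variable and function names are illustrative):

	#include <linux/percpu.h>

	static DEFINE_PER_CPU(unsigned long, pkt_count);

	static void count_packet(void)
	{
		/* get_cpu_var() disables preemption, which cmpxchg_local()
		 * relies on: no other task on this CPU can race with us,
		 * and no other CPU ever touches this counter. */
		unsigned long *cnt = &get_cpu_var(pkt_count);
		unsigned long old;

		do {
			old = *cnt;
		} while (cmpxchg_local(cnt, old, old + 1) != old);

		put_cpu_var(pkt_count);
	}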