Merge master.kernel.org:/pub/scm/linux/kernel/git/davem/net-2.6
[linux-2.6-omap-h63xx.git] / include/asm-i386/system.h
index 098bcee94e389a615b3b2327fa2be03f117c28c3..c3a58c08c49506dcd120e92fa73d0fd3e95ca602 100644 (file)
@@ -88,68 +88,103 @@ __asm__ __volatile__ ("movw %%dx,%1\n\t" \
 #define savesegment(seg, value) \
        asm volatile("mov %%" #seg ",%0":"=rm" (value))
 
-#define read_cr0() ({ \
-       unsigned int __dummy; \
-       __asm__ __volatile__( \
-               "movl %%cr0,%0\n\t" \
-               :"=r" (__dummy)); \
-       __dummy; \
-})
-#define write_cr0(x) \
-       __asm__ __volatile__("movl %0,%%cr0": :"r" (x))
-
-#define read_cr2() ({ \
-       unsigned int __dummy; \
-       __asm__ __volatile__( \
-               "movl %%cr2,%0\n\t" \
-               :"=r" (__dummy)); \
-       __dummy; \
-})
-#define write_cr2(x) \
-       __asm__ __volatile__("movl %0,%%cr2": :"r" (x))
-
-#define read_cr3() ({ \
-       unsigned int __dummy; \
-       __asm__ ( \
-               "movl %%cr3,%0\n\t" \
-               :"=r" (__dummy)); \
-       __dummy; \
-})
-#define write_cr3(x) \
-       __asm__ __volatile__("movl %0,%%cr3": :"r" (x))
-
-#define read_cr4() ({ \
-       unsigned int __dummy; \
-       __asm__( \
-               "movl %%cr4,%0\n\t" \
-               :"=r" (__dummy)); \
-       __dummy; \
-})
-#define read_cr4_safe() ({                           \
-       unsigned int __dummy;                         \
-       /* This could fault if %cr4 does not exist */ \
-       __asm__("1: movl %%cr4, %0              \n"   \
-               "2:                             \n"   \
-               ".section __ex_table,\"a\"      \n"   \
-               ".long 1b,2b                    \n"   \
-               ".previous                      \n"   \
-               : "=r" (__dummy): "0" (0));           \
-       __dummy;                                      \
-})
-#define write_cr4(x) \
-       __asm__ __volatile__("movl %0,%%cr4": :"r" (x))
 
-/*
- * Clear and set 'TS' bit respectively
- */
-#define clts() __asm__ __volatile__ ("clts")
+static inline void native_clts(void)
+{
+       asm volatile ("clts");
+}
+
+static inline unsigned long native_read_cr0(void)
+{
+       unsigned long val;
+       asm volatile("movl %%cr0,%0\n\t" :"=r" (val));
+       return val;
+}
+
+static inline void native_write_cr0(unsigned long val)
+{
+       asm volatile("movl %0,%%cr0": :"r" (val));
+}
+
+static inline unsigned long native_read_cr2(void)
+{
+       unsigned long val;
+       asm volatile("movl %%cr2,%0\n\t" :"=r" (val));
+       return val;
+}
+
+static inline void native_write_cr2(unsigned long val)
+{
+       asm volatile("movl %0,%%cr2": :"r" (val));
+}
+
+static inline unsigned long native_read_cr3(void)
+{
+       unsigned long val;
+       asm volatile("movl %%cr3,%0\n\t" :"=r" (val));
+       return val;
+}
+
+static inline void native_write_cr3(unsigned long val)
+{
+       asm volatile("movl %0,%%cr3": :"r" (val));
+}
+
+static inline unsigned long native_read_cr4(void)
+{
+       unsigned long val;
+       asm volatile("movl %%cr4,%0\n\t" :"=r" (val));
+       return val;
+}
+
+static inline unsigned long native_read_cr4_safe(void)
+{
+       unsigned long val;
+       /* This could fault if %cr4 does not exist */
+       asm("1: movl %%cr4, %0          \n"
+               "2:                             \n"
+               ".section __ex_table,\"a\"      \n"
+               ".long 1b,2b                    \n"
+               ".previous                      \n"
+               : "=r" (val): "0" (0));
+       return val;
+}
+
+static inline void native_write_cr4(unsigned long val)
+{
+       asm volatile("movl %0,%%cr4": :"r" (val));
+}
+
+static inline void native_wbinvd(void)
+{
+       asm volatile("wbinvd": : :"memory");
+}
+
+
+#ifdef CONFIG_PARAVIRT
+#include <asm/paravirt.h>
+#else
+#define read_cr0()     (native_read_cr0())
+#define write_cr0(x)   (native_write_cr0(x))
+#define read_cr2()     (native_read_cr2())
+#define write_cr2(x)   (native_write_cr2(x))
+#define read_cr3()     (native_read_cr3())
+#define write_cr3(x)   (native_write_cr3(x))
+#define read_cr4()     (native_read_cr4())
+#define read_cr4_safe()        (native_read_cr4_safe())
+#define write_cr4(x)   (native_write_cr4(x))
+#define wbinvd()       (native_wbinvd())
+
+/* Clear the 'TS' bit */
+#define clts()         (native_clts())
+
+#endif /* CONFIG_PARAVIRT */
+
+/* Set the 'TS' bit */
 #define stts() write_cr0(8 | read_cr0())
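
The block above replaces the old control-register macros with native_* inline helpers and, when CONFIG_PARAVIRT is set, resolves read_cr*/write_cr*, clts() and wbinvd() through <asm/paravirt.h> instead of the native versions. Below is a minimal user-space sketch of that dispatch pattern; the names pv_demo_ops, fake_cr0 and the demo_* helpers are hypothetical illustrations only, not the kernel's actual paravirt interface:

#include <stdio.h>

static unsigned long fake_cr0;  /* stand-in for the real %cr0 */

static unsigned long native_demo_read_cr0(void)
{
        return fake_cr0;
}

static void native_demo_write_cr0(unsigned long val)
{
        fake_cr0 = val;
}

/* Hypothetical ops table; the real <asm/paravirt.h> layout differs. */
struct pv_demo_ops {
        unsigned long (*read_cr0)(void);
        void (*write_cr0)(unsigned long val);
};

/* Defaults point at the native helpers; a hypervisor backend would
 * install its own hooks here instead of patching every call site. */
static struct pv_demo_ops pv_demo = {
        .read_cr0  = native_demo_read_cr0,
        .write_cr0 = native_demo_write_cr0,
};

#define demo_read_cr0()         (pv_demo.read_cr0())
#define demo_write_cr0(x)       (pv_demo.write_cr0(x))

/* stts()/clts() analogues: set or clear bit 3 (TS) via the accessors. */
#define demo_stts()     demo_write_cr0(8 | demo_read_cr0())
#define demo_clts()     demo_write_cr0(~8UL & demo_read_cr0())

int main(void)
{
        demo_stts();
        printf("cr0 after stts: %#lx\n", demo_read_cr0());
        demo_clts();
        printf("cr0 after clts: %#lx\n", demo_read_cr0());
        return 0;
}

The stts()/clts() analogues show why the accessor split matters: the generic stts() above is just write_cr0(8 | read_cr0()), so redirecting read_cr0()/write_cr0() through a table is enough to make every caller hypervisor-aware.
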
 
 #endif /* __KERNEL__ */
 
-#define wbinvd() \
-       __asm__ __volatile__ ("wbinvd": : :"memory")
-
 static inline unsigned long get_limit(unsigned long segment)
 {
        unsigned long __limit;
@@ -267,6 +302,9 @@ static inline unsigned long __xchg(unsigned long x, volatile void * ptr, int siz
 #define cmpxchg(ptr,o,n)\
        ((__typeof__(*(ptr)))__cmpxchg((ptr),(unsigned long)(o),\
                                        (unsigned long)(n),sizeof(*(ptr))))
+#define sync_cmpxchg(ptr,o,n)\
+       ((__typeof__(*(ptr)))__sync_cmpxchg((ptr),(unsigned long)(o),\
+                                       (unsigned long)(n),sizeof(*(ptr))))
 #endif
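
sync_cmpxchg() added above is the variant of cmpxchg() that always asserts the bus lock (its helper __sync_cmpxchg() appears in the next hunk). Compare-and-swap primitives like this are normally used in a read/compare/retry loop; here is a user-space sketch of that pattern, with the GCC builtin __sync_val_compare_and_swap standing in for the kernel macro:

#include <stdio.h>

/* e.g. an index in a page shared with the hypervisor or another guest */
static volatile unsigned long shared_word;

static void shared_add(volatile unsigned long *p, unsigned long delta)
{
        unsigned long old, prev;

        do {
                old = *p;
                /* Returns the value actually found in *p; equals 'old' on success. */
                prev = __sync_val_compare_and_swap(p, old, old + delta);
        } while (prev != old);
}

int main(void)
{
        shared_add(&shared_word, 3);
        printf("shared_word = %lu\n", shared_word);
        return 0;
}
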
 
 static inline unsigned long __cmpxchg(volatile void *ptr, unsigned long old,
@@ -296,6 +334,39 @@ static inline unsigned long __cmpxchg(volatile void *ptr, unsigned long old,
        return old;
 }
 
+/*
+ * Always use locked operations when touching memory shared with a
+ * hypervisor, since the system may be SMP even if the guest kernel
+ * isn't.
+ */
+static inline unsigned long __sync_cmpxchg(volatile void *ptr,
+                                           unsigned long old,
+                                           unsigned long new, int size)
+{
+       unsigned long prev;
+       switch (size) {
+       case 1:
+               __asm__ __volatile__("lock; cmpxchgb %b1,%2"
+                                    : "=a"(prev)
+                                    : "q"(new), "m"(*__xg(ptr)), "0"(old)
+                                    : "memory");
+               return prev;
+       case 2:
+               __asm__ __volatile__("lock; cmpxchgw %w1,%2"
+                                    : "=a"(prev)
+                                    : "r"(new), "m"(*__xg(ptr)), "0"(old)
+                                    : "memory");
+               return prev;
+       case 4:
+               __asm__ __volatile__("lock; cmpxchgl %1,%2"
+                                    : "=a"(prev)
+                                    : "r"(new), "m"(*__xg(ptr)), "0"(old)
+                                    : "memory");
+               return prev;
+       }
+       return old;
+}
+
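
As the comment above notes, __sync_cmpxchg() always emits lock; cmpxchg so the compare-and-swap stays atomic against other processors (or the hypervisor) even in a uniprocessor guest kernel built without CONFIG_SMP. The 4-byte case can be exercised from user space on x86; demo_sync_cmpxchg4() below is a hypothetical stand-alone rewrite for illustration, not kernel code:

#include <stdio.h>

static inline unsigned int demo_sync_cmpxchg4(volatile unsigned int *ptr,
                                              unsigned int old,
                                              unsigned int new)
{
        unsigned int prev;

        /* %eax is preloaded with 'old' (the "0" constraint); cmpxchgl stores
         * 'new' only if *ptr still equals 'old', and always returns the
         * previous contents of *ptr in %eax. */
        asm volatile("lock; cmpxchgl %1,%2"
                     : "=a" (prev)
                     : "r" (new), "m" (*ptr), "0" (old)
                     : "memory");
        return prev;
}

int main(void)
{
        volatile unsigned int word = 5;
        unsigned int prev;

        /* Succeeds: word was 5, so it becomes 7 and 5 is returned. */
        prev = demo_sync_cmpxchg4(&word, 5, 7);
        printf("prev = %u, word = %u\n", prev, word);

        /* Fails: word is now 7, not 5, so it is left unchanged. */
        prev = demo_sync_cmpxchg4(&word, 5, 9);
        printf("prev = %u, word = %u\n", prev, word);
        return 0;
}
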
 #ifndef CONFIG_X86_CMPXCHG
 /*
  * Building a kernel capable of running on an 80386. It may be necessary to