X-Git-Url: http://pilppa.org/gitweb/?a=blobdiff_plain;f=include%2Fasm-x86_64%2Fsystem.h;h=b7b8021e8c43c871d81044df9f84c92485143c58;hb=4750def52cb2c21732dda9aa1d43a07db37b0186;hp=6bf170bceae1b4a7d114305c340240670698a032;hpb=9b4f2e9576658c4e52d95dc8d309f51b2e2db096;p=linux-2.6-omap-h63xx.git

diff --git a/include/asm-x86_64/system.h b/include/asm-x86_64/system.h
index 6bf170bceae..b7b8021e8c4 100644
--- a/include/asm-x86_64/system.h
+++ b/include/asm-x86_64/system.h
@@ -3,7 +3,7 @@
 
 #include <linux/kernel.h>
 #include <asm/segment.h>
-#include <asm/alternative.h>
+#include <asm/cmpxchg.h>
 
 #ifdef __KERNEL__
 
@@ -14,12 +14,13 @@
 #define __RESTORE(reg,offset) "movq (14-" #offset ")*8(%%rsp),%%" #reg "\n\t"
 
 /* frame pointer must be last for get_wchan */
-#define SAVE_CONTEXT    "pushq %%rbp ; movq %%rsi,%%rbp\n\t"
-#define RESTORE_CONTEXT "movq %%rbp,%%rsi ; popq %%rbp\n\t"
+#define SAVE_CONTEXT    "pushf ; pushq %%rbp ; movq %%rsi,%%rbp\n\t"
+#define RESTORE_CONTEXT "movq %%rbp,%%rsi ; popq %%rbp ; popf\t"
 
 #define __EXTRA_CLOBBER \
 	,"rcx","rbx","rdx","r8","r9","r10","r11","r12","r13","r14","r15"
 
+/* Save restore flags to clear handle leaking NT */
 #define switch_to(prev,next,last) \
 	asm volatile(SAVE_CONTEXT					  \
 		     "movq %%rsp,%P[threadrsp](%[prev])\n\t" /* save RSP */	  \
@@ -88,6 +89,11 @@ static inline unsigned long read_cr3(void)
 	return cr3;
 }
 
+static inline void write_cr3(unsigned long val)
+{
+	asm volatile("movq %0,%%cr3" :: "r" (val) : "memory");
+}
+
 static inline unsigned long read_cr4(void)
 {
 	unsigned long cr4;
@@ -97,7 +103,7 @@ static inline unsigned long read_cr4(void)
 
 static inline void write_cr4(unsigned long val)
 {
-	asm volatile("movq %0,%%cr4" :: "r" (val));
+	asm volatile("movq %0,%%cr4" :: "r" (val) : "memory");
 }
 
 #define stts() write_cr0(8 | read_cr0())
@@ -118,100 +124,6 @@ static inline void sched_cacheflush(void)
 
 #define nop() __asm__ __volatile__ ("nop")
 
-#define xchg(ptr,v) ((__typeof__(*(ptr)))__xchg((unsigned long)(v),(ptr),sizeof(*(ptr))))
-
-#define tas(ptr) (xchg((ptr),1))
-
-#define __xg(x) ((volatile long *)(x))
-
-static inline void set_64bit(volatile unsigned long *ptr, unsigned long val)
-{
-	*ptr = val;
-}
-
-#define _set_64bit set_64bit
-
-/*
- * Note: no "lock" prefix even on SMP: xchg always implies lock anyway
- * Note 2: xchg has side effect, so that attribute volatile is necessary,
- * but generally the primitive is invalid, *ptr is output argument. --ANK
- */
-static inline unsigned long __xchg(unsigned long x, volatile void * ptr, int size)
-{
-	switch (size) {
-	case 1:
-		__asm__ __volatile__("xchgb %b0,%1"
-			:"=q" (x)
-			:"m" (*__xg(ptr)), "0" (x)
-			:"memory");
-		break;
-	case 2:
-		__asm__ __volatile__("xchgw %w0,%1"
-			:"=r" (x)
-			:"m" (*__xg(ptr)), "0" (x)
-			:"memory");
-		break;
-	case 4:
-		__asm__ __volatile__("xchgl %k0,%1"
-			:"=r" (x)
-			:"m" (*__xg(ptr)), "0" (x)
-			:"memory");
-		break;
-	case 8:
-		__asm__ __volatile__("xchgq %0,%1"
-			:"=r" (x)
-			:"m" (*__xg(ptr)), "0" (x)
-			:"memory");
-		break;
-	}
-	return x;
-}
-
-/*
- * Atomic compare and exchange.  Compare OLD with MEM, if identical,
- * store NEW in MEM.  Return the initial value in MEM.  Success is
- * indicated by comparing RETURN with OLD.
- */
-
-#define __HAVE_ARCH_CMPXCHG 1
-
-static inline unsigned long __cmpxchg(volatile void *ptr, unsigned long old,
-				      unsigned long new, int size)
-{
-	unsigned long prev;
-	switch (size) {
-	case 1:
-		__asm__ __volatile__(LOCK_PREFIX "cmpxchgb %b1,%2"
-				     : "=a"(prev)
-				     : "q"(new), "m"(*__xg(ptr)), "0"(old)
-				     : "memory");
-		return prev;
-	case 2:
-		__asm__ __volatile__(LOCK_PREFIX "cmpxchgw %w1,%2"
-				     : "=a"(prev)
-				     : "r"(new), "m"(*__xg(ptr)), "0"(old)
-				     : "memory");
-		return prev;
-	case 4:
-		__asm__ __volatile__(LOCK_PREFIX "cmpxchgl %k1,%2"
-				     : "=a"(prev)
-				     : "r"(new), "m"(*__xg(ptr)), "0"(old)
-				     : "memory");
-		return prev;
-	case 8:
-		__asm__ __volatile__(LOCK_PREFIX "cmpxchgq %1,%2"
-				     : "=a"(prev)
-				     : "r"(new), "m"(*__xg(ptr)), "0"(old)
-				     : "memory");
-		return prev;
-	}
-	return old;
-}
-
-#define cmpxchg(ptr,o,n)\
-	((__typeof__(*(ptr)))__cmpxchg((ptr),(unsigned long)(o),\
-					(unsigned long)(n),sizeof(*(ptr))))
-
 #ifdef CONFIG_SMP
 #define smp_mb()	mb()
 #define smp_rmb()	rmb()
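
For context: the xchg()/cmpxchg() block deleted above is moved, not dropped -- it now reaches system.h through the new asm/cmpxchg.h include at the top of this diff. cmpxchg() compares OLD with *MEM and, only if they match, stores NEW in *MEM, returning the initial value of *MEM either way. A minimal sketch of the usual retry-loop pattern built on it follows; atomic_add_sketch() is a hypothetical name for illustration, not a kernel API.

/*
 * Sketch of the classic cmpxchg() retry loop.  cmpxchg() returns the
 * value that was in *p before the attempt, so the swap succeeded iff
 * that value equals the snapshot the update was computed from.
 * atomic_add_sketch() is hypothetical, not kernel API.
 */
static inline unsigned long atomic_add_sketch(volatile unsigned long *p,
					      unsigned long delta)
{
	unsigned long old, prev;

	do {
		old = *p;			/* snapshot current value */
		prev = cmpxchg(p, old, old + delta);
	} while (prev != old);			/* lost a race: retry */

	return old + delta;
}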
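
xchg() unconditionally swaps a value with memory and is the building block behind the deleted tas() (test-and-set) macro; as the removed comment notes, no "lock" prefix is needed because xchg always implies one. A test-and-set spinlock sketch on top of it, with hypothetical names (the kernel's real spinlocks live in asm/spinlock.h and are more elaborate):

/*
 * Test-and-set spinlock sketch built on xchg().  xchg() atomically
 * stores 1 and returns the previous value, so a non-zero return means
 * another CPU already holds the lock.  sketch_lock()/sketch_unlock()
 * are hypothetical helpers, illustrative only.
 */
static inline void sketch_lock(volatile long *lock)
{
	while (xchg(lock, 1))		/* try to grab; non-zero => taken */
		while (*lock)		/* spin read-only until it looks free */
			cpu_relax();
}

static inline void sketch_unlock(volatile long *lock)
{
	barrier();			/* keep the critical section above */
	*lock = 0;			/* plain store releases on x86 (TSO) */
}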
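
The new write_cr3() also motivates its "memory" clobber (and the one added to write_cr4()): reloading CR3 flushes all non-global TLB entries, so the compiler must not carry cached memory contents across the write. A full TLB flush is then just a CR3 reload, roughly what the kernel's __flush_tlb() helper in asm/tlbflush.h amounts to; flush_tlb_sketch() below is a hypothetical name for illustration.

/*
 * Sketch: full (non-global) TLB flush via CR3 reload, using the
 * read_cr3()/write_cr3() pair from this header.
 */
static inline void flush_tlb_sketch(void)
{
	write_cr3(read_cr3());
}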