#include <linux/kernel.h>
#include <asm/segment.h>
#include <asm/cpufeature.h>
#include <linux/bitops.h> /* for LOCK_PREFIX */
#ifdef __KERNEL__

struct task_struct;	/* one of the stranger aspects of C forward declarations.. */
extern struct task_struct * FASTCALL(__switch_to(struct task_struct *prev, struct task_struct *next));

/*
 * Saving eflags is important. It switches not only IOPL between tasks,
 * it also protects other tasks from NT leaking through sysenter etc.
 */
#define switch_to(prev,next,last) do {					\
	unsigned long esi,edi;						\
	asm volatile("pushfl\n\t"		/* Save flags */	\
		     "pushl %%ebp\n\t"					\
		     "movl %%esp,%0\n\t"	/* save ESP */		\
		     "movl %5,%%esp\n\t"	/* restore ESP */	\
		     "movl $1f,%1\n\t"		/* save EIP */		\
		     "pushl %6\n\t"		/* restore EIP */	\
		     "jmp __switch_to\n"				\
		     "1:\t"						\
		     "popl %%ebp\n\t"					\
		     "popfl"						\
		     :"=m" (prev->thread.esp),"=m" (prev->thread.eip),	\
		      "=a" (last),"=S" (esi),"=D" (edi)			\
		     :"m" (next->thread.esp),"m" (next->thread.eip),	\
		      "2" (prev), "d" (next));				\
} while (0)
#define _set_base(addr,base) do { unsigned long __pr; \
__asm__ __volatile__ ("movw %%dx,%1\n\t" \
	"rorl $16,%%edx\n\t" \
#define _set_limit(addr,limit) do { unsigned long __lr; \
__asm__ __volatile__ ("movw %%dx,%1\n\t" \
	"rorl $16,%%edx\n\t" \
	"andb $0xf0,%%dh\n\t" \
#define set_base(ldt,base) _set_base( ((char *)&(ldt)) , (base) )
#define set_limit(ldt,limit) _set_limit( ((char *)&(ldt)) , ((limit)-1) )
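/*
 * Illustrative sketch (not used anywhere in the kernel; the function name
 * is made up): a plain-C picture of what the _set_base() asm above does,
 * i.e. where the 32-bit base lands inside an 8-byte segment descriptor:
 * bits 15:0 go to bytes 2-3, bits 23:16 to byte 4, bits 31:24 to byte 7.
 */
static inline void __example_pack_descriptor_base(unsigned char *desc,
						   unsigned long base)
{
	desc[2] = base & 0xff;			/* base  7:0  */
	desc[3] = (base >> 8) & 0xff;		/* base 15:8  */
	desc[4] = (base >> 16) & 0xff;		/* base 23:16 */
	desc[7] = (base >> 24) & 0xff;		/* base 31:24 */
}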
/*
 * Load a segment. Fall back on loading the zero
 * segment if something goes wrong..
 */
#define loadsegment(seg,value)			\
		"mov %0,%%" #seg "\n"		\
		".section .fixup,\"ax\"\n"	\
		"popl %%" #seg "\n\t"		\
		".section __ex_table,\"a\"\n\t"	\
/*
 * Save a segment register away
 */
#define savesegment(seg, value) \
	asm volatile("mov %%" #seg ",%0":"=rm" (value))
static inline void native_clts(void)
{
	asm volatile ("clts");
}

static inline unsigned long native_read_cr0(void)
{
	unsigned long val;
	asm volatile("movl %%cr0,%0\n\t" :"=r" (val));
	return val;
}

static inline void native_write_cr0(unsigned long val)
{
	asm volatile("movl %0,%%cr0": :"r" (val));
}

static inline unsigned long native_read_cr2(void)
{
	unsigned long val;
	asm volatile("movl %%cr2,%0\n\t" :"=r" (val));
	return val;
}

static inline void native_write_cr2(unsigned long val)
{
	asm volatile("movl %0,%%cr2": :"r" (val));
}

static inline unsigned long native_read_cr3(void)
{
	unsigned long val;
	asm volatile("movl %%cr3,%0\n\t" :"=r" (val));
	return val;
}

static inline void native_write_cr3(unsigned long val)
{
	asm volatile("movl %0,%%cr3": :"r" (val));
}

static inline unsigned long native_read_cr4(void)
{
	unsigned long val;
	asm volatile("movl %%cr4,%0\n\t" :"=r" (val));
	return val;
}

static inline unsigned long native_read_cr4_safe(void)
{
	unsigned long val;
	/* This could fault if %cr4 does not exist */
	asm("1: movl %%cr4, %0		\n"
	    "2:				\n"
	    ".section __ex_table,\"a\"	\n"
	    ".long 1b,2b		\n"
	    ".previous			\n"
	    : "=r" (val): "0" (0));
	return val;
}

static inline void native_write_cr4(unsigned long val)
{
	asm volatile("movl %0,%%cr4": :"r" (val));
}

static inline void native_wbinvd(void)
{
	asm volatile("wbinvd": : :"memory");
}
#ifdef CONFIG_PARAVIRT
#include <asm/paravirt.h>
#else
#define read_cr0()	(native_read_cr0())
#define write_cr0(x)	(native_write_cr0(x))
#define read_cr2()	(native_read_cr2())
#define write_cr2(x)	(native_write_cr2(x))
#define read_cr3()	(native_read_cr3())
#define write_cr3(x)	(native_write_cr3(x))
#define read_cr4()	(native_read_cr4())
#define read_cr4_safe()	(native_read_cr4_safe())
#define write_cr4(x)	(native_write_cr4(x))
#define wbinvd()	(native_wbinvd())

/* Clear the 'TS' bit */
#define clts()		(native_clts())

#endif /* CONFIG_PARAVIRT */
/* Set the 'TS' bit */
#define stts() write_cr0(8 | read_cr0())
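/*
 * Illustrative helper (not part of the original header): CR0.TS is bit 3,
 * which is why stts() above ORs in the constant 8. With TS set, the next
 * FPU/SSE instruction raises a device-not-available fault, which the
 * kernel uses for lazy FPU context switching.
 */
static inline int __example_cr0_ts_is_set(void)
{
	return (read_cr0() & 8) != 0;	/* test CR0.TS (1 << 3) */
}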
#endif	/* __KERNEL__ */
static inline unsigned long get_limit(unsigned long segment)
{
	unsigned long __limit;
	__asm__("lsll %1,%0"
		:"=r" (__limit):"r" (segment));
	return __limit+1;
}
#define nop() __asm__ __volatile__ ("nop")

#define xchg(ptr,v) ((__typeof__(*(ptr)))__xchg((unsigned long)(v),(ptr),sizeof(*(ptr))))

#define tas(ptr) (xchg((ptr),1))
struct __xchg_dummy { unsigned long a[100]; };
#define __xg(x) ((struct __xchg_dummy *)(x))
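/*
 * Note (added for clarity): casting ptr to a pointer to this oversized
 * dummy struct in the "m" constraints below tells gcc that the asm may
 * touch a large object at that address, not just a single word, so it
 * will not keep the pointed-to memory cached in registers across the asm.
 */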
#ifdef CONFIG_X86_CMPXCHG64
/*
 * The semantics of CMPXCHG8B are a bit strange, this is why
 * there is a loop and the loading of %%eax and %%edx has to
 * be inside. This inlines well in most cases, the cached
 * cost is around 38 cycles. (in the future we might want
 * to do a SIMD/3DNOW!/MMX/FPU 64-bit store here, but that
 * might have an implicit FPU-save as a cost, so it's not
 * clear which path to go.)
 *
 * cmpxchg8b must be used with the lock prefix here to allow
 * the instruction to be executed atomically, see page 3-102
 * of the instruction set reference 24319102.pdf. We need
 * the reader side to see a coherent 64-bit value.
 */
static inline void __set_64bit (unsigned long long * ptr,
		unsigned int low, unsigned int high)
	__asm__ __volatile__ (
		"movl (%0), %%eax\n\t"
		"movl 4(%0), %%edx\n\t"
		"lock cmpxchg8b (%0)\n\t"
		: "ax","dx","memory");
static inline void __set_64bit_constant (unsigned long long *ptr,
					 unsigned long long value)
{
	__set_64bit(ptr,(unsigned int)(value), (unsigned int)((value)>>32ULL));
}
#define ll_low(x)	*(((unsigned int*)&(x))+0)
#define ll_high(x)	*(((unsigned int*)&(x))+1)
static inline void __set_64bit_var (unsigned long long *ptr,
			 unsigned long long value)
{
	__set_64bit(ptr,ll_low(value), ll_high(value));
}
#define set_64bit(ptr,value) \
(__builtin_constant_p(value) ? \
 __set_64bit_constant(ptr, value) : \
 __set_64bit_var(ptr, value) )
#define _set_64bit(ptr,value) \
(__builtin_constant_p(value) ? \
 __set_64bit(ptr, (unsigned int)(value), (unsigned int)((value)>>32ULL) ) : \
 __set_64bit(ptr, ll_low(value), ll_high(value)) )
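/*
 * Usage sketch (illustrative only; "stamp" is a made-up variable): publish
 * a 64-bit value so that a concurrent reader using a single 64-bit access
 * (or cmpxchg8b) never sees a torn, half-updated result.
 */
static inline void __example_publish_stamp(unsigned long long *stamp,
					    unsigned long long now)
{
	set_64bit(stamp, now);	/* one atomic 64-bit store via cmpxchg8b */
}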
#endif

/*
 * Note: no "lock" prefix even on SMP: xchg always implies lock anyway
 * Note 2: xchg has a side effect, so the volatile attribute is necessary,
 *	   but strictly speaking the asm is invalid as written, since *ptr
 *	   is really an output argument. --ANK
 */
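/*
 * Usage sketch (illustrative only; real code should use the spinlock API).
 * tas() above atomically stores 1 and hands back the previous value, so a
 * minimal test-and-set lock looks like:
 *
 *	static unsigned int my_lock;		(hypothetical variable)
 *
 *	while (tas(&my_lock))
 *		cpu_relax();			(old value was 1: lock is held)
 *	... critical section ...
 *	barrier();
 *	my_lock = 0;				(unlock)
 */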
static inline unsigned long __xchg(unsigned long x, volatile void * ptr, int size)
			__asm__ __volatile__("xchgb %b0,%1"
				:"m" (*__xg(ptr)), "0" (x)
			__asm__ __volatile__("xchgw %w0,%1"
				:"m" (*__xg(ptr)), "0" (x)
			__asm__ __volatile__("xchgl %0,%1"
				:"m" (*__xg(ptr)), "0" (x)
/*
 * Atomic compare and exchange.  Compare OLD with MEM, if identical,
 * store NEW in MEM.  Return the initial value in MEM.  Success is
 * indicated by comparing RETURN with OLD.
 */
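/*
 * Usage sketch (illustrative only; "v" is a made-up variable): the usual
 * cmpxchg() retry loop for a lock-free increment. The store only succeeds
 * if nobody modified *v in between; on failure cmpxchg() returns the value
 * that is actually there, and we retry from that.
 *
 *	unsigned int *v, old, prev;
 *
 *	old = *v;
 *	for (;;) {
 *		prev = cmpxchg(v, old, old + 1);
 *		if (prev == old)
 *			break;			(our increment went in)
 *		old = prev;			(lost a race, try again)
 *	}
 */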
#ifdef CONFIG_X86_CMPXCHG
#define __HAVE_ARCH_CMPXCHG 1
#define cmpxchg(ptr,o,n)\
	((__typeof__(*(ptr)))__cmpxchg((ptr),(unsigned long)(o),\
					(unsigned long)(n),sizeof(*(ptr))))
#define sync_cmpxchg(ptr,o,n)\
	((__typeof__(*(ptr)))__sync_cmpxchg((ptr),(unsigned long)(o),\
					(unsigned long)(n),sizeof(*(ptr))))
#define cmpxchg_local(ptr,o,n)\
	((__typeof__(*(ptr)))__cmpxchg_local((ptr),(unsigned long)(o),\
					(unsigned long)(n),sizeof(*(ptr))))
#endif
static inline unsigned long __cmpxchg(volatile void *ptr, unsigned long old,
				      unsigned long new, int size)
		__asm__ __volatile__(LOCK_PREFIX "cmpxchgb %b1,%2"
				     : "q"(new), "m"(*__xg(ptr)), "0"(old)
		__asm__ __volatile__(LOCK_PREFIX "cmpxchgw %w1,%2"
				     : "r"(new), "m"(*__xg(ptr)), "0"(old)
		__asm__ __volatile__(LOCK_PREFIX "cmpxchgl %1,%2"
				     : "r"(new), "m"(*__xg(ptr)), "0"(old)
/*
 * Always use locked operations when touching memory shared with a
 * hypervisor, since the system may be SMP even if the guest kernel
 * isn't.
 */
static inline unsigned long __sync_cmpxchg(volatile void *ptr,
					    unsigned long old,
					    unsigned long new, int size)
		__asm__ __volatile__("lock; cmpxchgb %b1,%2"
				     : "q"(new), "m"(*__xg(ptr)), "0"(old)
		__asm__ __volatile__("lock; cmpxchgw %w1,%2"
				     : "r"(new), "m"(*__xg(ptr)), "0"(old)
		__asm__ __volatile__("lock; cmpxchgl %1,%2"
				     : "r"(new), "m"(*__xg(ptr)), "0"(old)
static inline unsigned long __cmpxchg_local(volatile void *ptr,
			unsigned long old, unsigned long new, int size)
		__asm__ __volatile__("cmpxchgb %b1,%2"
				     : "q"(new), "m"(*__xg(ptr)), "0"(old)
		__asm__ __volatile__("cmpxchgw %w1,%2"
				     : "r"(new), "m"(*__xg(ptr)), "0"(old)
		__asm__ __volatile__("cmpxchgl %1,%2"
				     : "r"(new), "m"(*__xg(ptr)), "0"(old)
#ifndef CONFIG_X86_CMPXCHG
/*
 * Building a kernel capable of running on 80386. It may be necessary to
 * simulate the cmpxchg on the 80386 CPU. For that purpose we define
 * a function for each of the sizes we support.
 */
extern unsigned long cmpxchg_386_u8(volatile void *, u8, u8);
extern unsigned long cmpxchg_386_u16(volatile void *, u16, u16);
extern unsigned long cmpxchg_386_u32(volatile void *, u32, u32);
static inline unsigned long cmpxchg_386(volatile void *ptr, unsigned long old,
				      unsigned long new, int size)
		return cmpxchg_386_u8(ptr, old, new);
		return cmpxchg_386_u16(ptr, old, new);
		return cmpxchg_386_u32(ptr, old, new);
#define cmpxchg(ptr,o,n)						\
({									\
	__typeof__(*(ptr)) __ret;					\
	if (likely(boot_cpu_data.x86 > 3))				\
		__ret = __cmpxchg((ptr), (unsigned long)(o),		\
				(unsigned long)(n), sizeof(*(ptr)));	\
	else								\
		__ret = cmpxchg_386((ptr), (unsigned long)(o),		\
				(unsigned long)(n), sizeof(*(ptr)));	\
	__ret;								\
})
#define cmpxchg_local(ptr,o,n)						\
({									\
	__typeof__(*(ptr)) __ret;					\
	if (likely(boot_cpu_data.x86 > 3))				\
		__ret = __cmpxchg_local((ptr), (unsigned long)(o),	\
				(unsigned long)(n), sizeof(*(ptr)));	\
	else								\
		__ret = cmpxchg_386((ptr), (unsigned long)(o),		\
				(unsigned long)(n), sizeof(*(ptr)));	\
	__ret;								\
})
#endif
#ifdef CONFIG_X86_CMPXCHG64
static inline unsigned long long __cmpxchg64(volatile void *ptr, unsigned long long old,
				      unsigned long long new)
	unsigned long long prev;
	__asm__ __volatile__(LOCK_PREFIX "cmpxchg8b %3"
			     : "b"((unsigned long)new),
			       "c"((unsigned long)(new >> 32)),
static inline unsigned long long __cmpxchg64_local(volatile void *ptr,
			unsigned long long old, unsigned long long new)
	unsigned long long prev;
	__asm__ __volatile__("cmpxchg8b %3"
			     : "b"((unsigned long)new),
			       "c"((unsigned long)(new >> 32)),
#define cmpxchg64(ptr,o,n)\
	((__typeof__(*(ptr)))__cmpxchg64((ptr),(unsigned long long)(o),\
					(unsigned long long)(n)))
#define cmpxchg64_local(ptr,o,n)\
	((__typeof__(*(ptr)))__cmpxchg64_local((ptr),(unsigned long long)(o),\
					(unsigned long long)(n)))
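/*
 * Usage sketch (illustrative only, not part of the original header): add to
 * a 64-bit counter atomically on a 32-bit CPU by retrying cmpxchg64() until
 * no other CPU has changed the value in between. The initial plain read may
 * be torn, but the first failed cmpxchg64() returns the real value and the
 * loop recovers from it.
 */
static inline void __example_add_u64(unsigned long long *ctr,
				      unsigned long long delta)
{
	unsigned long long old = *ctr, prev;

	for (;;) {
		prev = cmpxchg64(ctr, old, old + delta);
		if (prev == old)
			break;		/* our update went in */
		old = prev;		/* somebody raced us, retry with the fresh value */
	}
}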
#endif

/*
 * Force strict CPU ordering.
 * And yes, this is required on UP too when we're talking
 * to devices.
 *
 * For now, "wmb()" doesn't actually do anything, as all
 * Intel CPU's follow what Intel calls a *Processor Order*,
 * in which all writes are seen in the program order even
 * outside the CPU.
 *
 * I expect future Intel CPU's to have a weaker ordering,
 * but I'd also expect them to finally get their act together
 * and add some real memory barriers if so.
 *
 * Some non-Intel clones support out-of-order stores. wmb() ceases to be a
 * nop for these.
 */

/*
 * Actually only lfence would be needed for mb() because all stores done
 * by the kernel should be already ordered. But keep a full barrier for now.
 */
#define mb() alternative("lock; addl $0,0(%%esp)", "mfence", X86_FEATURE_XMM2)
#define rmb() alternative("lock; addl $0,0(%%esp)", "lfence", X86_FEATURE_XMM2)
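/*
 * Usage sketch (illustrative only; "data", "flag" and use() are made up):
 * the flag/data publication pattern these barriers exist for. The writer
 * makes the payload visible before the flag, the reader checks the flag
 * before touching the payload:
 *
 *	writer				reader
 *
 *	data = 42;			if (flag) {
 *	wmb();					rmb();
 *	flag = 1;				use(data);
 *					}
 *
 * Real kernel code would normally use the smp_wmb()/smp_rmb() variants
 * defined further down.
 */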
/**
 * read_barrier_depends - Flush all pending reads that subsequent reads
 * depend on.
 *
 * No data-dependent reads from memory-like regions are ever reordered
 * over this barrier.  All reads preceding this primitive are guaranteed
 * to access memory (but not necessarily other CPUs' caches) before any
 * reads following this primitive that depend on the data returned by
 * any of the preceding reads.  This primitive is much lighter weight than
 * rmb() on most CPUs, and is never heavier weight than is
 * rmb().
 *
 * These ordering constraints are respected by both the local CPU
 * and the compiler.
 *
 * Ordering is not guaranteed by anything other than these primitives,
 * not even by data dependencies.  See the documentation for
 * memory_barrier() for examples and URLs to more information.
 *
 * For example, the following code would force ordering (the initial
 * value of "a" is zero, "b" is one, and "p" is "&a"):
 *
 * <programlisting>
 *	CPU 0				CPU 1
 *
 *	b = 2;
 *	memory_barrier();
 *	p = &b;				q = p;
 *					read_barrier_depends();
 *					d = *q;
 * </programlisting>
 *
 * because the read of "*q" depends on the read of "p" and these
 * two reads are separated by a read_barrier_depends().  However,
 * the following code, with the same initial values for "a" and "b":
 *
 * <programlisting>
 *	CPU 0				CPU 1
 *
 *	a = 2;
 *	memory_barrier();
 *	b = 3;				y = b;
 *					read_barrier_depends();
 *					x = a;
 * </programlisting>
 *
 * does not enforce ordering, since there is no data dependency between
 * the read of "a" and the read of "b".  Therefore, on some CPUs, such
 * as Alpha, "y" could be set to 3 and "x" to 0.  Use rmb()
 * in cases like this where there are no data dependencies.
 **/
#define read_barrier_depends()	do { } while(0)
#ifdef CONFIG_X86_OOSTORE
/* Actually there are no OOO store capable CPUs for now that do SSE,
   but make it already a possibility. */
#define wmb() alternative("lock; addl $0,0(%%esp)", "sfence", X86_FEATURE_XMM)
#else
#define wmb()	__asm__ __volatile__ ("": : :"memory")
#endif
#ifdef CONFIG_SMP
#define smp_mb()	mb()
#define smp_rmb()	rmb()
#define smp_wmb()	wmb()
#define smp_read_barrier_depends()	read_barrier_depends()
#define set_mb(var, value) do { (void) xchg(&var, value); } while (0)
#else
#define smp_mb()	barrier()
#define smp_rmb()	barrier()
#define smp_wmb()	barrier()
#define smp_read_barrier_depends()	do { } while(0)
#define set_mb(var, value) do { var = value; barrier(); } while (0)
#endif
#include <linux/irqflags.h>
/*
 * disable hlt during certain critical i/o operations
 */
#define HAVE_DISABLE_HLT
void disable_hlt(void);
void enable_hlt(void);
extern int es7000_plat;
void cpu_idle_wait(void);
/*
 * On SMP systems, when the scheduler does migration-cost autodetection,
 * it needs a way to flush as much of the CPU's caches as possible:
 */
static inline void sched_cacheflush(void)
{
	wbinvd();
}
extern unsigned long arch_align_stack(unsigned long sp);
extern void free_init_pages(char *what, unsigned long begin, unsigned long end);

void default_idle(void);