[PATCH] i386: PARAVIRT: add common patching machinery
include/asm-i386/paravirt.h

#ifndef __ASM_PARAVIRT_H
#define __ASM_PARAVIRT_H
/* Various instructions on x86 need to be replaced for
 * para-virtualization: those hooks are defined here. */

#ifdef CONFIG_PARAVIRT
#include <asm/page.h>

/* Bitmask of what can be clobbered: usually at least eax. */
#define CLBR_NONE 0x0
#define CLBR_EAX 0x1
#define CLBR_ECX 0x2
#define CLBR_EDX 0x4
#define CLBR_ANY 0x7

#ifndef __ASSEMBLY__
#include <linux/types.h>

struct thread_struct;
struct Xgt_desc_struct;
struct tss_struct;
struct mm_struct;
struct desc_struct;

/* Lazy mode for batching updates / context switch */
enum paravirt_lazy_mode {
        PARAVIRT_LAZY_NONE = 0,
        PARAVIRT_LAZY_MMU = 1,
        PARAVIRT_LAZY_CPU = 2,
        PARAVIRT_LAZY_FLUSH = 3,
};

struct paravirt_ops
{
        unsigned int kernel_rpl;
        int shared_kernel_pmd;
        int paravirt_enabled;
        const char *name;

        /*
         * Patch may replace one of the defined code sequences with arbitrary
         * code, subject to the same register constraints.  This generally
         * means the code is not free to clobber any registers other than EAX.
         * The patch function should return the number of bytes of code
         * generated, as we nop-pad the rest in generic code.
         */
        unsigned (*patch)(u8 type, u16 clobber, void *firstinsn, unsigned len);

        /* Basic arch-specific setup */
        void (*arch_setup)(void);
        char *(*memory_setup)(void);
        void (*init_IRQ)(void);
        void (*time_init)(void);

        /*
         * Called before/after init_mm pagetable setup. setup_start
         * may reset %cr3, and may pre-install parts of the pagetable;
         * pagetable setup is expected to preserve any existing
         * mapping.
         */
        void (*pagetable_setup_start)(pgd_t *pgd_base);
        void (*pagetable_setup_done)(pgd_t *pgd_base);

        /* Print a banner to identify the environment */
        void (*banner)(void);

        /* Get and set time of day */
        unsigned long (*get_wallclock)(void);
        int (*set_wallclock)(unsigned long);

        /* cpuid emulation, mostly so that caps bits can be disabled */
        void (*cpuid)(unsigned int *eax, unsigned int *ebx,
                      unsigned int *ecx, unsigned int *edx);

        /* hooks for various privileged instructions */
        unsigned long (*get_debugreg)(int regno);
        void (*set_debugreg)(int regno, unsigned long value);

        void (*clts)(void);

        unsigned long (*read_cr0)(void);
        void (*write_cr0)(unsigned long);

        unsigned long (*read_cr2)(void);
        void (*write_cr2)(unsigned long);

        unsigned long (*read_cr3)(void);
        void (*write_cr3)(unsigned long);

        unsigned long (*read_cr4_safe)(void);
        unsigned long (*read_cr4)(void);
        void (*write_cr4)(unsigned long);

        /*
         * Get/set interrupt state.  save_fl and restore_fl are only
         * expected to use X86_EFLAGS_IF; all other bits
         * returned from save_fl are undefined, and may be ignored by
         * restore_fl.
         */
        unsigned long (*save_fl)(void);
        void (*restore_fl)(unsigned long);
        void (*irq_disable)(void);
        void (*irq_enable)(void);
        void (*safe_halt)(void);
        void (*halt)(void);

        void (*wbinvd)(void);

        /* MSR, PMC and TSC operations.
           err = 0/-EFAULT.  wrmsr returns 0/-EFAULT. */
        u64 (*read_msr)(unsigned int msr, int *err);
        int (*write_msr)(unsigned int msr, u64 val);

        u64 (*read_tsc)(void);
        u64 (*read_pmc)(void);
        u64 (*get_scheduled_cycles)(void);
        unsigned long (*get_cpu_khz)(void);

        /* Segment descriptor handling */
        void (*load_tr_desc)(void);
        void (*load_gdt)(const struct Xgt_desc_struct *);
        void (*load_idt)(const struct Xgt_desc_struct *);
        void (*store_gdt)(struct Xgt_desc_struct *);
        void (*store_idt)(struct Xgt_desc_struct *);
        void (*set_ldt)(const void *desc, unsigned entries);
        unsigned long (*store_tr)(void);
        void (*load_tls)(struct thread_struct *t, unsigned int cpu);
        void (*write_ldt_entry)(struct desc_struct *,
                                int entrynum, u32 low, u32 high);
        void (*write_gdt_entry)(struct desc_struct *,
                                int entrynum, u32 low, u32 high);
        void (*write_idt_entry)(struct desc_struct *,
                                int entrynum, u32 low, u32 high);
        void (*load_esp0)(struct tss_struct *tss, struct thread_struct *t);

        void (*set_iopl_mask)(unsigned mask);
        void (*io_delay)(void);

        /*
         * Hooks for intercepting the creation/use/destruction of an
         * mm_struct.
         */
        void (*activate_mm)(struct mm_struct *prev,
                            struct mm_struct *next);
        void (*dup_mmap)(struct mm_struct *oldmm,
                         struct mm_struct *mm);
        void (*exit_mmap)(struct mm_struct *mm);

#ifdef CONFIG_X86_LOCAL_APIC
        /*
         * Direct APIC operations, principally for VMI.  Ideally
         * these shouldn't be in this interface.
         */
        void (*apic_write)(unsigned long reg, unsigned long v);
        void (*apic_write_atomic)(unsigned long reg, unsigned long v);
        unsigned long (*apic_read)(unsigned long reg);
        void (*setup_boot_clock)(void);
        void (*setup_secondary_clock)(void);

        void (*startup_ipi_hook)(int phys_apicid,
                                 unsigned long start_eip,
                                 unsigned long start_esp);
#endif

        /* TLB operations */
        void (*flush_tlb_user)(void);
        void (*flush_tlb_kernel)(void);
        void (*flush_tlb_single)(unsigned long addr);

        void (*map_pt_hook)(int type, pte_t *va, u32 pfn);

        /* Hooks for allocating/releasing pagetable pages */
        void (*alloc_pt)(u32 pfn);
        void (*alloc_pd)(u32 pfn);
        void (*alloc_pd_clone)(u32 pfn, u32 clonepfn, u32 start, u32 count);
        void (*release_pt)(u32 pfn);
        void (*release_pd)(u32 pfn);

        /* Pagetable manipulation functions */
        void (*set_pte)(pte_t *ptep, pte_t pteval);
        void (*set_pte_at)(struct mm_struct *mm, unsigned long addr,
                           pte_t *ptep, pte_t pteval);
        void (*set_pmd)(pmd_t *pmdp, pmd_t pmdval);
        void (*pte_update)(struct mm_struct *mm, unsigned long addr, pte_t *ptep);
        void (*pte_update_defer)(struct mm_struct *mm,
                                 unsigned long addr, pte_t *ptep);

        pte_t (*ptep_get_and_clear)(pte_t *ptep);

#ifdef CONFIG_X86_PAE
        void (*set_pte_atomic)(pte_t *ptep, pte_t pteval);
        void (*set_pte_present)(struct mm_struct *mm, unsigned long addr, pte_t *ptep, pte_t pte);
        void (*set_pud)(pud_t *pudp, pud_t pudval);
        void (*pte_clear)(struct mm_struct *mm, unsigned long addr, pte_t *ptep);
        void (*pmd_clear)(pmd_t *pmdp);

        unsigned long long (*pte_val)(pte_t);
        unsigned long long (*pmd_val)(pmd_t);
        unsigned long long (*pgd_val)(pgd_t);

        pte_t (*make_pte)(unsigned long long pte);
        pmd_t (*make_pmd)(unsigned long long pmd);
        pgd_t (*make_pgd)(unsigned long long pgd);
#else
        unsigned long (*pte_val)(pte_t);
        unsigned long (*pgd_val)(pgd_t);

        pte_t (*make_pte)(unsigned long pte);
        pgd_t (*make_pgd)(unsigned long pgd);
#endif

        /* Set deferred update mode, used for batching operations. */
        void (*set_lazy_mode)(enum paravirt_lazy_mode mode);

        /* These two are jumped to, not actually called. */
        void (*irq_enable_sysexit)(void);
        void (*iret)(void);
};
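
/*
 * A backend fills in a paravirt_ops instance at boot.  A minimal sketch,
 * assuming a hypothetical "demo" hypervisor (demo_shared_page and all
 * demo_* names below are illustrative, not part of this interface):
 *
 *	static unsigned long demo_save_fl(void)
 *	{
 *		return demo_shared_page->flags & X86_EFLAGS_IF;
 *	}
 *
 *	static void __init demo_init(void)
 *	{
 *		paravirt_ops.name = "demo";
 *		paravirt_ops.paravirt_enabled = 1;
 *		paravirt_ops.save_fl = demo_save_fl;
 *		paravirt_ops.patch = demo_patch;	// see sketch below
 *	}
 *
 * Unpatched callers then reach demo_save_fl() through the indirect
 * PARAVIRT_CALL sites defined later in this header.
 */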

/* Mark a paravirt probe function. */
#define paravirt_probe(fn)                                              \
        static asmlinkage void (*__paravirtprobe_##fn)(void) __attribute_used__ \
                __attribute__((__section__(".paravirtprobe"))) = fn

extern struct paravirt_ops paravirt_ops;

#define PARAVIRT_PATCH(x)                                       \
        (offsetof(struct paravirt_ops, x) / sizeof(void *))

#define paravirt_type(type)                                     \
        [paravirt_typenum] "i" (PARAVIRT_PATCH(type))
#define paravirt_clobber(clobber)               \
        [paravirt_clobber] "i" (clobber)
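
/*
 * Worked example: PARAVIRT_PATCH(irq_disable) yields the word index of
 * the irq_disable member, i.e. offsetof(struct paravirt_ops,
 * irq_disable) / sizeof(void *).  Multiplying a type number by
 * sizeof(void *) therefore recovers the structure offset, which is how
 * PARAVIRT_CALL below locates the function pointer at the call site.
 */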

/*
 * Generate some code, and mark it as patchable by the
 * apply_paravirt() alternate instruction patcher.
 */
#define _paravirt_alt(insn_string, type, clobber)       \
        "771:\n\t" insn_string "\n" "772:\n"            \
        ".pushsection .parainstructions,\"a\"\n"        \
        "  .long 771b\n"                                \
        "  .byte " type "\n"                            \
        "  .byte 772b-771b\n"                           \
        "  .short " clobber "\n"                        \
        ".popsection\n"

/* Generate patchable code, with the default asm parameters. */
#define paravirt_alt(insn_string)                                       \
        _paravirt_alt(insn_string, "%c[paravirt_typenum]", "%c[paravirt_clobber]")

unsigned paravirt_patch_nop(void);
unsigned paravirt_patch_ignore(unsigned len);
unsigned paravirt_patch_call(void *target, u16 tgt_clobbers,
                             void *site, u16 site_clobbers,
                             unsigned len);
unsigned paravirt_patch_jmp(void *target, void *site, unsigned len);
unsigned paravirt_patch_default(u8 type, u16 clobbers, void *site, unsigned len);

unsigned paravirt_patch_insns(void *site, unsigned len,
                              const char *start, const char *end);
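
/*
 * A backend's patch hook can special-case hot operations and fall back
 * to paravirt_patch_default() for the rest.  A minimal sketch, assuming
 * hypothetical start/end labels bracketing the backend's irq_disable
 * replacement code (demo_cli_insns/demo_cli_insns_end are illustrative
 * names):
 *
 *	static unsigned demo_patch(u8 type, u16 clobbers,
 *				   void *insns, unsigned len)
 *	{
 *		switch (type) {
 *		case PARAVIRT_PATCH(irq_disable):
 *			return paravirt_patch_insns(insns, len,
 *						    demo_cli_insns,
 *						    demo_cli_insns_end);
 *		default:
 *			return paravirt_patch_default(type, clobbers,
 *						      insns, len);
 *		}
 *	}
 *
 * The return value is the number of bytes emitted; the generic code
 * nop-pads the remainder of the site.
 */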


/*
 * This generates an indirect call based on the operation type number.
 * The type number, computed in PARAVIRT_PATCH, is derived from the
 * offset into the paravirt_ops structure, and can therefore be freely
 * converted back into a structure offset.
 */
#define PARAVIRT_CALL   "call *(paravirt_ops+%c[paravirt_typenum]*4);"
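
/*
 * For illustration, a save_fl call site generated via
 * paravirt_alt(PARAVIRT_CALL) expands to roughly:
 *
 *	771:	call *(paravirt_ops+PARAVIRT_PATCH(save_fl)*4)
 *	772:
 *	.pushsection .parainstructions, "a"
 *	 .long 771b			# site address
 *	 .byte PARAVIRT_PATCH(save_fl)	# operation type number
 *	 .byte 772b-771b		# site length
 *	 .short CLBR_EAX		# registers the site may clobber
 *	.popsection
 *
 * i.e. an ordinary indirect call, plus enough metadata for
 * apply_paravirt() to rewrite the site later.
 */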

/*
 * These macros are intended to wrap calls into a paravirt_ops
 * operation, so that they can be later identified and patched at
 * runtime.
 *
 * Normally, a call to a pv_op function is a simple indirect call:
 * (paravirt_ops.operations)(args...).
 *
 * Unfortunately, this is a relatively slow operation for modern CPUs,
 * because the CPU cannot necessarily predict the destination
 * address.  In this case, the address is a runtime constant, so at
 * the very least we can patch the call to be a simple direct call, or
 * ideally, patch an inline implementation into the callsite.  (Direct
 * calls are essentially free, because the call and return addresses
 * are completely predictable.)
 *
 * These macros rely on the standard gcc "regparm(3)" calling
 * convention, in which the first three arguments are placed in %eax,
 * %edx, %ecx (in that order), and the remaining arguments are placed
 * on the stack.  All caller-save registers (eax,edx,ecx) are expected
 * to be modified (either clobbered or used for return values).
 *
 * The call instruction itself is marked by placing its start address
 * and size into the .parainstructions section, so that
 * apply_paravirt() in arch/i386/kernel/alternative.c can do the
 * appropriate patching under the control of the backend paravirt_ops
 * implementation.
 *
 * Unfortunately there's no way to get gcc to generate the args setup
 * for the call, and then allow the call itself to be generated by an
 * inline asm.  Because of this, we must do the complete arg setup and
 * return value handling from within these macros.  This is fairly
 * cumbersome.
 *
 * There are 5 sets of PVOP_* macros for dealing with 0-4 arguments.
 * They could be extended to more arguments, but there would be little
 * to be gained from that.  For each number of arguments, there are
 * the two VCALL and CALL variants for void and non-void functions.
 *
 * When there is a return value, the invoker of the macro must specify
 * the return type.  The macro then uses sizeof() on that type to
 * determine whether it's a 32- or 64-bit value, and places the return
 * in the right register(s) (just %eax for 32-bit, and %edx:%eax for
 * 64-bit).
 *
 * 64-bit arguments are passed as a pair of adjacent 32-bit arguments
 * in low,high order.
 *
 * Small structures are passed and returned in registers.  The macro
 * calling convention can't directly deal with this, so the wrapper
 * functions must do this.
 *
 * These PVOP_* macros are only defined within this header.  This
 * means that all uses must be wrapped in inline functions.  This also
 * makes sure the incoming and outgoing types are always correct.
 */
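
/*
 * Example of the resulting convention: for a 64-bit result such as
 * read_msr, PVOP_CALL2(u64, read_msr, msr, err) puts msr in %eax and
 * err in %edx, emits the patchable indirect call, and collects the
 * result from %edx:%eax via the "=A" constraint; %ecx is listed as an
 * output only so gcc knows the call may clobber it.
 */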
#define PVOP_CALL0(__rettype, __op)                                     \
        ({                                                              \
                __rettype __ret;                                        \
                if (sizeof(__rettype) > sizeof(unsigned long)) {        \
                        unsigned long long __tmp;                       \
                        unsigned long __ecx;                            \
                        asm volatile(paravirt_alt(PARAVIRT_CALL)        \
                                     : "=A" (__tmp), "=c" (__ecx)       \
                                     : paravirt_type(__op),             \
                                       paravirt_clobber(CLBR_ANY)       \
                                     : "memory", "cc");                 \
                        __ret = (__rettype)__tmp;                       \
                } else {                                                \
                        unsigned long __tmp, __edx, __ecx;              \
                        asm volatile(paravirt_alt(PARAVIRT_CALL)        \
                                     : "=a" (__tmp), "=d" (__edx),      \
                                       "=c" (__ecx)                     \
                                     : paravirt_type(__op),             \
                                       paravirt_clobber(CLBR_ANY)       \
                                     : "memory", "cc");                 \
                        __ret = (__rettype)__tmp;                       \
                }                                                       \
                __ret;                                                  \
        })
#define PVOP_VCALL0(__op)                                               \
        ({                                                              \
                unsigned long __eax, __edx, __ecx;                      \
                asm volatile(paravirt_alt(PARAVIRT_CALL)                \
                             : "=a" (__eax), "=d" (__edx), "=c" (__ecx) \
                             : paravirt_type(__op),                     \
                               paravirt_clobber(CLBR_ANY)               \
                             : "memory", "cc");                         \
        })

#define PVOP_CALL1(__rettype, __op, arg1)                               \
        ({                                                              \
                __rettype __ret;                                        \
                if (sizeof(__rettype) > sizeof(unsigned long)) {        \
                        unsigned long long __tmp;                       \
                        unsigned long __ecx;                            \
                        asm volatile(paravirt_alt(PARAVIRT_CALL)        \
                                     : "=A" (__tmp), "=c" (__ecx)       \
                                     : "a" ((u32)(arg1)),               \
                                       paravirt_type(__op),             \
                                       paravirt_clobber(CLBR_ANY)       \
                                     : "memory", "cc");                 \
                        __ret = (__rettype)__tmp;                       \
                } else {                                                \
                        unsigned long __tmp, __edx, __ecx;              \
                        asm volatile(paravirt_alt(PARAVIRT_CALL)        \
                                     : "=a" (__tmp), "=d" (__edx),      \
                                       "=c" (__ecx)                     \
                                     : "0" ((u32)(arg1)),               \
                                       paravirt_type(__op),             \
                                       paravirt_clobber(CLBR_ANY)       \
                                     : "memory", "cc");                 \
                        __ret = (__rettype)__tmp;                       \
                }                                                       \
                __ret;                                                  \
        })
#define PVOP_VCALL1(__op, arg1)                                         \
        ({                                                              \
                unsigned long __eax, __edx, __ecx;                      \
                asm volatile(paravirt_alt(PARAVIRT_CALL)                \
                             : "=a" (__eax), "=d" (__edx), "=c" (__ecx) \
                             : "0" ((u32)(arg1)),                       \
                               paravirt_type(__op),                     \
                               paravirt_clobber(CLBR_ANY)               \
                             : "memory", "cc");                         \
        })

#define PVOP_CALL2(__rettype, __op, arg1, arg2)                         \
        ({                                                              \
                __rettype __ret;                                        \
                if (sizeof(__rettype) > sizeof(unsigned long)) {        \
                        unsigned long long __tmp;                       \
                        unsigned long __ecx;                            \
                        asm volatile(paravirt_alt(PARAVIRT_CALL)        \
                                     : "=A" (__tmp), "=c" (__ecx)       \
                                     : "a" ((u32)(arg1)),               \
                                       "d" ((u32)(arg2)),               \
                                       paravirt_type(__op),             \
                                       paravirt_clobber(CLBR_ANY)       \
                                     : "memory", "cc");                 \
                        __ret = (__rettype)__tmp;                       \
                } else {                                                \
                        unsigned long __tmp, __edx, __ecx;              \
                        asm volatile(paravirt_alt(PARAVIRT_CALL)        \
                                     : "=a" (__tmp), "=d" (__edx),      \
                                       "=c" (__ecx)                     \
                                     : "0" ((u32)(arg1)),               \
                                       "1" ((u32)(arg2)),               \
                                       paravirt_type(__op),             \
                                       paravirt_clobber(CLBR_ANY)       \
                                     : "memory", "cc");                 \
                        __ret = (__rettype)__tmp;                       \
                }                                                       \
                __ret;                                                  \
        })
#define PVOP_VCALL2(__op, arg1, arg2)                                   \
        ({                                                              \
                unsigned long __eax, __edx, __ecx;                      \
                asm volatile(paravirt_alt(PARAVIRT_CALL)                \
                             : "=a" (__eax), "=d" (__edx), "=c" (__ecx) \
                             : "0" ((u32)(arg1)),                       \
                               "1" ((u32)(arg2)),                       \
                               paravirt_type(__op),                     \
                               paravirt_clobber(CLBR_ANY)               \
                             : "memory", "cc");                         \
        })

#define PVOP_CALL3(__rettype, __op, arg1, arg2, arg3)                   \
        ({                                                              \
                __rettype __ret;                                        \
                if (sizeof(__rettype) > sizeof(unsigned long)) {        \
                        unsigned long long __tmp;                       \
                        unsigned long __ecx;                            \
                        asm volatile(paravirt_alt(PARAVIRT_CALL)        \
                                     : "=A" (__tmp), "=c" (__ecx)       \
                                     : "a" ((u32)(arg1)),               \
                                       "d" ((u32)(arg2)),               \
                                       "1" ((u32)(arg3)),               \
                                       paravirt_type(__op),             \
                                       paravirt_clobber(CLBR_ANY)       \
                                     : "memory", "cc");                 \
                        __ret = (__rettype)__tmp;                       \
                } else {                                                \
                        unsigned long __tmp, __edx, __ecx;              \
                        asm volatile(paravirt_alt(PARAVIRT_CALL)        \
                                     : "=a" (__tmp), "=d" (__edx),      \
                                       "=c" (__ecx)                     \
                                     : "0" ((u32)(arg1)),               \
                                       "1" ((u32)(arg2)),               \
                                       "2" ((u32)(arg3)),               \
                                       paravirt_type(__op),             \
                                       paravirt_clobber(CLBR_ANY)       \
                                     : "memory", "cc");                 \
                        __ret = (__rettype)__tmp;                       \
                }                                                       \
                __ret;                                                  \
        })
#define PVOP_VCALL3(__op, arg1, arg2, arg3)                             \
        ({                                                              \
                unsigned long __eax, __edx, __ecx;                      \
                asm volatile(paravirt_alt(PARAVIRT_CALL)                \
                             : "=a" (__eax), "=d" (__edx), "=c" (__ecx) \
                             : "0" ((u32)(arg1)),                       \
                               "1" ((u32)(arg2)),                       \
                               "2" ((u32)(arg3)),                       \
                               paravirt_type(__op),                     \
                               paravirt_clobber(CLBR_ANY)               \
                             : "memory", "cc");                         \
        })

#define PVOP_CALL4(__rettype, __op, arg1, arg2, arg3, arg4)             \
        ({                                                              \
                __rettype __ret;                                        \
                if (sizeof(__rettype) > sizeof(unsigned long)) {        \
                        unsigned long long __tmp;                       \
                        unsigned long __ecx;                            \
                        asm volatile("push %[_arg4]; "                  \
                                     paravirt_alt(PARAVIRT_CALL)        \
                                     "lea 4(%%esp),%%esp"               \
                                     : "=A" (__tmp), "=c" (__ecx)       \
                                     : "a" ((u32)(arg1)),               \
                                       "d" ((u32)(arg2)),               \
                                       "1" ((u32)(arg3)),               \
                                       [_arg4] "mr" ((u32)(arg4)),      \
                                       paravirt_type(__op),             \
                                       paravirt_clobber(CLBR_ANY)       \
                                     : "memory", "cc");                 \
                        __ret = (__rettype)__tmp;                       \
                } else {                                                \
                        unsigned long __tmp, __edx, __ecx;              \
                        asm volatile("push %[_arg4]; "                  \
                                     paravirt_alt(PARAVIRT_CALL)        \
                                     "lea 4(%%esp),%%esp"               \
                                     : "=a" (__tmp), "=d" (__edx), "=c" (__ecx) \
                                     : "0" ((u32)(arg1)),               \
                                       "1" ((u32)(arg2)),               \
                                       "2" ((u32)(arg3)),               \
                                       [_arg4] "mr" ((u32)(arg4)),      \
                                       paravirt_type(__op),             \
                                       paravirt_clobber(CLBR_ANY)       \
                                     : "memory", "cc");                 \
                        __ret = (__rettype)__tmp;                       \
                }                                                       \
                __ret;                                                  \
        })
#define PVOP_VCALL4(__op, arg1, arg2, arg3, arg4)                       \
        ({                                                              \
                unsigned long __eax, __edx, __ecx;                      \
                asm volatile("push %[_arg4]; "                          \
                             paravirt_alt(PARAVIRT_CALL)                \
                             "lea 4(%%esp),%%esp"                       \
                             : "=a" (__eax), "=d" (__edx), "=c" (__ecx) \
                             : "0" ((u32)(arg1)),                       \
                               "1" ((u32)(arg2)),                       \
                               "2" ((u32)(arg3)),                       \
                               [_arg4] "mr" ((u32)(arg4)),              \
                               paravirt_type(__op),                     \
                               paravirt_clobber(CLBR_ANY)               \
                             : "memory", "cc");                         \
        })

static inline int paravirt_enabled(void)
{
        return paravirt_ops.paravirt_enabled;
}

static inline void load_esp0(struct tss_struct *tss,
                             struct thread_struct *thread)
{
        PVOP_VCALL2(load_esp0, tss, thread);
}

#define ARCH_SETUP                      paravirt_ops.arch_setup();
static inline unsigned long get_wallclock(void)
{
        return PVOP_CALL0(unsigned long, get_wallclock);
}

static inline int set_wallclock(unsigned long nowtime)
{
        return PVOP_CALL1(int, set_wallclock, nowtime);
}

static inline void (*choose_time_init(void))(void)
{
        return paravirt_ops.time_init;
}

/* The paravirtualized CPUID instruction. */
static inline void __cpuid(unsigned int *eax, unsigned int *ebx,
                           unsigned int *ecx, unsigned int *edx)
{
        PVOP_VCALL4(cpuid, eax, ebx, ecx, edx);
}

/*
 * These special macros can be used to get or set a debugging register
 */
static inline unsigned long paravirt_get_debugreg(int reg)
{
        return PVOP_CALL1(unsigned long, get_debugreg, reg);
}
#define get_debugreg(var, reg) var = paravirt_get_debugreg(reg)
static inline void set_debugreg(unsigned long val, int reg)
{
        PVOP_VCALL2(set_debugreg, reg, val);
}

static inline void clts(void)
{
        PVOP_VCALL0(clts);
}

static inline unsigned long read_cr0(void)
{
        return PVOP_CALL0(unsigned long, read_cr0);
}

static inline void write_cr0(unsigned long x)
{
        PVOP_VCALL1(write_cr0, x);
}

static inline unsigned long read_cr2(void)
{
        return PVOP_CALL0(unsigned long, read_cr2);
}

static inline void write_cr2(unsigned long x)
{
        PVOP_VCALL1(write_cr2, x);
}

static inline unsigned long read_cr3(void)
{
        return PVOP_CALL0(unsigned long, read_cr3);
}

static inline void write_cr3(unsigned long x)
{
        PVOP_VCALL1(write_cr3, x);
}

static inline unsigned long read_cr4(void)
{
        return PVOP_CALL0(unsigned long, read_cr4);
}
static inline unsigned long read_cr4_safe(void)
{
        return PVOP_CALL0(unsigned long, read_cr4_safe);
}

static inline void write_cr4(unsigned long x)
{
        PVOP_VCALL1(write_cr4, x);
}

static inline void raw_safe_halt(void)
{
        PVOP_VCALL0(safe_halt);
}

static inline void halt(void)
{
        PVOP_VCALL0(halt);
}

static inline void wbinvd(void)
{
        PVOP_VCALL0(wbinvd);
}

#define get_kernel_rpl()  (paravirt_ops.kernel_rpl)

static inline u64 paravirt_read_msr(unsigned msr, int *err)
{
        return PVOP_CALL2(u64, read_msr, msr, err);
}
static inline int paravirt_write_msr(unsigned msr, unsigned low, unsigned high)
{
        return PVOP_CALL3(int, write_msr, msr, low, high);
}

/* These should all do BUG_ON(_err), but our headers are too tangled. */
#define rdmsr(msr,val1,val2) do {               \
        int _err;                               \
        u64 _l = paravirt_read_msr(msr, &_err); \
        val1 = (u32)_l;                         \
        val2 = _l >> 32;                        \
} while(0)

#define wrmsr(msr,val1,val2) do {               \
        paravirt_write_msr(msr, val1, val2);    \
} while(0)

#define rdmsrl(msr,val) do {                    \
        int _err;                               \
        val = paravirt_read_msr(msr, &_err);    \
} while(0)

#define wrmsrl(msr,val)         ((void)paravirt_write_msr(msr, (u32)((u64)(val)), (u32)(((u64)(val)) >> 32)))
#define wrmsr_safe(msr,a,b)     paravirt_write_msr(msr, a, b)

/* rdmsr with exception handling */
#define rdmsr_safe(msr,a,b) ({                  \
        int _err;                               \
        u64 _l = paravirt_read_msr(msr, &_err); \
        (*a) = (u32)_l;                         \
        (*b) = _l >> 32;                        \
        _err; })


static inline u64 paravirt_read_tsc(void)
{
        return PVOP_CALL0(u64, read_tsc);
}
#define rdtsc(low,high) do {                    \
        u64 _l = paravirt_read_tsc();           \
        low = (u32)_l;                          \
        high = _l >> 32;                        \
} while(0)

#define rdtscl(low) do {                        \
        u64 _l = paravirt_read_tsc();           \
        low = (int)_l;                          \
} while(0)

#define rdtscll(val) (val = paravirt_read_tsc())

#define get_scheduled_cycles(val) (val = paravirt_ops.get_scheduled_cycles())
#define calculate_cpu_khz() (paravirt_ops.get_cpu_khz())

#define write_tsc(val1,val2) wrmsr(0x10, val1, val2)    /* MSR 0x10 is IA32_TSC */

static inline unsigned long long paravirt_read_pmc(int counter)
{
        return PVOP_CALL1(u64, read_pmc, counter);
}

#define rdpmc(counter,low,high) do {            \
        u64 _l = paravirt_read_pmc(counter);    \
        low = (u32)_l;                          \
        high = _l >> 32;                        \
} while(0)

static inline void load_TR_desc(void)
{
        PVOP_VCALL0(load_tr_desc);
}
static inline void load_gdt(const struct Xgt_desc_struct *dtr)
{
        PVOP_VCALL1(load_gdt, dtr);
}
static inline void load_idt(const struct Xgt_desc_struct *dtr)
{
        PVOP_VCALL1(load_idt, dtr);
}
static inline void set_ldt(const void *addr, unsigned entries)
{
        PVOP_VCALL2(set_ldt, addr, entries);
}
static inline void store_gdt(struct Xgt_desc_struct *dtr)
{
        PVOP_VCALL1(store_gdt, dtr);
}
static inline void store_idt(struct Xgt_desc_struct *dtr)
{
        PVOP_VCALL1(store_idt, dtr);
}
static inline unsigned long paravirt_store_tr(void)
{
        return PVOP_CALL0(unsigned long, store_tr);
}
#define store_tr(tr)    ((tr) = paravirt_store_tr())
static inline void load_TLS(struct thread_struct *t, unsigned cpu)
{
        PVOP_VCALL2(load_tls, t, cpu);
}
static inline void write_ldt_entry(void *dt, int entry, u32 low, u32 high)
{
        PVOP_VCALL4(write_ldt_entry, dt, entry, low, high);
}
static inline void write_gdt_entry(void *dt, int entry, u32 low, u32 high)
{
        PVOP_VCALL4(write_gdt_entry, dt, entry, low, high);
}
static inline void write_idt_entry(void *dt, int entry, u32 low, u32 high)
{
        PVOP_VCALL4(write_idt_entry, dt, entry, low, high);
}
static inline void set_iopl_mask(unsigned mask)
{
        PVOP_VCALL1(set_iopl_mask, mask);
}

/* The paravirtualized I/O functions */
static inline void slow_down_io(void)
{
        paravirt_ops.io_delay();
#ifdef REALLY_SLOW_IO
        paravirt_ops.io_delay();
        paravirt_ops.io_delay();
        paravirt_ops.io_delay();
#endif
}

#ifdef CONFIG_X86_LOCAL_APIC
/*
 * Basic functions accessing APICs.
 */
static inline void apic_write(unsigned long reg, unsigned long v)
{
        PVOP_VCALL2(apic_write, reg, v);
}

static inline void apic_write_atomic(unsigned long reg, unsigned long v)
{
        PVOP_VCALL2(apic_write_atomic, reg, v);
}

static inline unsigned long apic_read(unsigned long reg)
{
        return PVOP_CALL1(unsigned long, apic_read, reg);
}

static inline void setup_boot_clock(void)
{
        PVOP_VCALL0(setup_boot_clock);
}

static inline void setup_secondary_clock(void)
{
        PVOP_VCALL0(setup_secondary_clock);
}
#endif

static inline void paravirt_pagetable_setup_start(pgd_t *base)
{
        if (paravirt_ops.pagetable_setup_start)
                (*paravirt_ops.pagetable_setup_start)(base);
}

static inline void paravirt_pagetable_setup_done(pgd_t *base)
{
        if (paravirt_ops.pagetable_setup_done)
                (*paravirt_ops.pagetable_setup_done)(base);
}

#ifdef CONFIG_SMP
static inline void startup_ipi_hook(int phys_apicid, unsigned long start_eip,
                                    unsigned long start_esp)
{
        PVOP_VCALL3(startup_ipi_hook, phys_apicid, start_eip, start_esp);
}
#endif

static inline void paravirt_activate_mm(struct mm_struct *prev,
                                        struct mm_struct *next)
{
        PVOP_VCALL2(activate_mm, prev, next);
}

static inline void arch_dup_mmap(struct mm_struct *oldmm,
                                 struct mm_struct *mm)
{
        PVOP_VCALL2(dup_mmap, oldmm, mm);
}

static inline void arch_exit_mmap(struct mm_struct *mm)
{
        PVOP_VCALL1(exit_mmap, mm);
}

static inline void __flush_tlb(void)
{
        PVOP_VCALL0(flush_tlb_user);
}
static inline void __flush_tlb_global(void)
{
        PVOP_VCALL0(flush_tlb_kernel);
}
static inline void __flush_tlb_single(unsigned long addr)
{
        PVOP_VCALL1(flush_tlb_single, addr);
}

static inline void paravirt_map_pt_hook(int type, pte_t *va, u32 pfn)
{
        PVOP_VCALL3(map_pt_hook, type, va, pfn);
}

static inline void paravirt_alloc_pt(unsigned pfn)
{
        PVOP_VCALL1(alloc_pt, pfn);
}
static inline void paravirt_release_pt(unsigned pfn)
{
        PVOP_VCALL1(release_pt, pfn);
}

static inline void paravirt_alloc_pd(unsigned pfn)
{
        PVOP_VCALL1(alloc_pd, pfn);
}

static inline void paravirt_alloc_pd_clone(unsigned pfn, unsigned clonepfn,
                                           unsigned start, unsigned count)
{
        PVOP_VCALL4(alloc_pd_clone, pfn, clonepfn, start, count);
}
static inline void paravirt_release_pd(unsigned pfn)
{
        PVOP_VCALL1(release_pd, pfn);
}

static inline void pte_update(struct mm_struct *mm, unsigned long addr,
                              pte_t *ptep)
{
        PVOP_VCALL3(pte_update, mm, addr, ptep);
}

static inline void pte_update_defer(struct mm_struct *mm, unsigned long addr,
                                    pte_t *ptep)
{
        PVOP_VCALL3(pte_update_defer, mm, addr, ptep);
}

#ifdef CONFIG_X86_PAE
static inline pte_t __pte(unsigned long long val)
{
        unsigned long long ret = PVOP_CALL2(unsigned long long, make_pte,
                                            val, val >> 32);
        return (pte_t) { ret, ret >> 32 };
}

static inline pmd_t __pmd(unsigned long long val)
{
        return (pmd_t) { PVOP_CALL2(unsigned long long, make_pmd, val, val >> 32) };
}

static inline pgd_t __pgd(unsigned long long val)
{
        return (pgd_t) { PVOP_CALL2(unsigned long long, make_pgd, val, val >> 32) };
}

static inline unsigned long long pte_val(pte_t x)
{
        return PVOP_CALL2(unsigned long long, pte_val, x.pte_low, x.pte_high);
}

static inline unsigned long long pmd_val(pmd_t x)
{
        return PVOP_CALL2(unsigned long long, pmd_val, x.pmd, x.pmd >> 32);
}

static inline unsigned long long pgd_val(pgd_t x)
{
        return PVOP_CALL2(unsigned long long, pgd_val, x.pgd, x.pgd >> 32);
}

static inline void set_pte(pte_t *ptep, pte_t pteval)
{
        PVOP_VCALL3(set_pte, ptep, pteval.pte_low, pteval.pte_high);
}

static inline void set_pte_at(struct mm_struct *mm, unsigned long addr,
                              pte_t *ptep, pte_t pteval)
{
        /* 5 arg words: too many for the 4-arg PVOP macros, so call directly */
        paravirt_ops.set_pte_at(mm, addr, ptep, pteval);
}

static inline void set_pte_atomic(pte_t *ptep, pte_t pteval)
{
        PVOP_VCALL3(set_pte_atomic, ptep, pteval.pte_low, pteval.pte_high);
}

static inline void set_pte_present(struct mm_struct *mm, unsigned long addr,
                                   pte_t *ptep, pte_t pte)
{
        /* 5 arg words: too many for the 4-arg PVOP macros, so call directly */
        paravirt_ops.set_pte_present(mm, addr, ptep, pte);
}

static inline void set_pmd(pmd_t *pmdp, pmd_t pmdval)
{
        PVOP_VCALL3(set_pmd, pmdp, pmdval.pmd, pmdval.pmd >> 32);
}

static inline void set_pud(pud_t *pudp, pud_t pudval)
{
        PVOP_VCALL3(set_pud, pudp, pudval.pgd.pgd, pudval.pgd.pgd >> 32);
}

static inline void pte_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
{
        PVOP_VCALL3(pte_clear, mm, addr, ptep);
}

static inline void pmd_clear(pmd_t *pmdp)
{
        PVOP_VCALL1(pmd_clear, pmdp);
}

static inline pte_t raw_ptep_get_and_clear(pte_t *p)
{
        unsigned long long val = PVOP_CALL1(unsigned long long, ptep_get_and_clear, p);
        return (pte_t) { val, val >> 32 };
}
#else  /* !CONFIG_X86_PAE */
static inline pte_t __pte(unsigned long val)
{
        return (pte_t) { PVOP_CALL1(unsigned long, make_pte, val) };
}

static inline pgd_t __pgd(unsigned long val)
{
        return (pgd_t) { PVOP_CALL1(unsigned long, make_pgd, val) };
}

static inline unsigned long pte_val(pte_t x)
{
        return PVOP_CALL1(unsigned long, pte_val, x.pte_low);
}

static inline unsigned long pgd_val(pgd_t x)
{
        return PVOP_CALL1(unsigned long, pgd_val, x.pgd);
}

static inline void set_pte(pte_t *ptep, pte_t pteval)
{
        PVOP_VCALL2(set_pte, ptep, pteval.pte_low);
}

static inline void set_pte_at(struct mm_struct *mm, unsigned long addr,
                              pte_t *ptep, pte_t pteval)
{
        PVOP_VCALL4(set_pte_at, mm, addr, ptep, pteval.pte_low);
}

static inline void set_pmd(pmd_t *pmdp, pmd_t pmdval)
{
        PVOP_VCALL2(set_pmd, pmdp, pmdval.pud.pgd.pgd);
}

static inline pte_t raw_ptep_get_and_clear(pte_t *p)
{
        return (pte_t) { PVOP_CALL1(unsigned long, ptep_get_and_clear, p) };
}
#endif  /* CONFIG_X86_PAE */

#define  __HAVE_ARCH_ENTER_LAZY_CPU_MODE
static inline void arch_enter_lazy_cpu_mode(void)
{
        PVOP_VCALL1(set_lazy_mode, PARAVIRT_LAZY_CPU);
}

static inline void arch_leave_lazy_cpu_mode(void)
{
        PVOP_VCALL1(set_lazy_mode, PARAVIRT_LAZY_NONE);
}

static inline void arch_flush_lazy_cpu_mode(void)
{
        PVOP_VCALL1(set_lazy_mode, PARAVIRT_LAZY_FLUSH);
}


#define  __HAVE_ARCH_ENTER_LAZY_MMU_MODE
static inline void arch_enter_lazy_mmu_mode(void)
{
        PVOP_VCALL1(set_lazy_mode, PARAVIRT_LAZY_MMU);
}

static inline void arch_leave_lazy_mmu_mode(void)
{
        PVOP_VCALL1(set_lazy_mode, PARAVIRT_LAZY_NONE);
}

static inline void arch_flush_lazy_mmu_mode(void)
{
        PVOP_VCALL1(set_lazy_mode, PARAVIRT_LAZY_FLUSH);
}
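
/*
 * Illustrative use of lazy MMU mode (generic mm code does batched
 * pagetable updates this way; sketch only, local variables assumed):
 *
 *	arch_enter_lazy_mmu_mode();
 *	for (addr = start; addr != end; addr += PAGE_SIZE, ptep++)
 *		set_pte_at(mm, addr, ptep, mk_pte(page, prot));
 *	arch_leave_lazy_mmu_mode();
 *
 * A hypervisor backend may queue the set_pte_at() calls while lazy
 * mode is active and issue them as one batch on leave/flush.
 */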

void _paravirt_nop(void);
#define paravirt_nop    ((void *)_paravirt_nop)

/* These all sit in the .parainstructions section to tell us what to patch. */
struct paravirt_patch_site {
        u8 *instr;              /* original instructions */
        u8 instrtype;           /* type of this instruction */
        u8 len;                 /* length of original instruction */
        u16 clobbers;           /* what registers you may clobber */
};
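
/*
 * apply_paravirt() in arch/i386/kernel/alternative.c walks these
 * records; schematically (a sketch, not the verbatim implementation,
 * and nop_out() is an assumed name for the nop filler there):
 *
 *	struct paravirt_patch_site *p;
 *	for (p = start; p < end; p++) {
 *		unsigned used = paravirt_ops.patch(p->instrtype, p->clobbers,
 *						   p->instr, p->len);
 *		// nop-pad whatever the backend did not rewrite
 *		nop_out(p->instr + used, p->len - used);
 *	}
 *
 * The point is that patch() emits at most p->len bytes and reports how
 * many it used.
 */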

extern struct paravirt_patch_site __parainstructions[],
        __parainstructions_end[];

static inline unsigned long __raw_local_save_flags(void)
{
        unsigned long f;

        asm volatile(paravirt_alt("pushl %%ecx; pushl %%edx;"
                                  PARAVIRT_CALL
                                  "popl %%edx; popl %%ecx")
                     : "=a"(f)
                     : paravirt_type(save_fl),
                       paravirt_clobber(CLBR_EAX)
                     : "memory", "cc");
        return f;
}

static inline void raw_local_irq_restore(unsigned long f)
{
        asm volatile(paravirt_alt("pushl %%ecx; pushl %%edx;"
                                  PARAVIRT_CALL
                                  "popl %%edx; popl %%ecx")
                     : "=a"(f)
                     : "0"(f),
                       paravirt_type(restore_fl),
                       paravirt_clobber(CLBR_EAX)
                     : "memory", "cc");
}

static inline void raw_local_irq_disable(void)
{
        asm volatile(paravirt_alt("pushl %%ecx; pushl %%edx;"
                                  PARAVIRT_CALL
                                  "popl %%edx; popl %%ecx")
                     :
                     : paravirt_type(irq_disable),
                       paravirt_clobber(CLBR_EAX)
                     : "memory", "eax", "cc");
}

static inline void raw_local_irq_enable(void)
{
        asm volatile(paravirt_alt("pushl %%ecx; pushl %%edx;"
                                  PARAVIRT_CALL
                                  "popl %%edx; popl %%ecx")
                     :
                     : paravirt_type(irq_enable),
                       paravirt_clobber(CLBR_EAX)
                     : "memory", "eax", "cc");
}

static inline unsigned long __raw_local_irq_save(void)
{
        unsigned long f;

        f = __raw_local_save_flags();
        raw_local_irq_disable();
        return f;
}

#define CLI_STRING                                                      \
        _paravirt_alt("pushl %%ecx; pushl %%edx;"                       \
                      "call *paravirt_ops+%c[paravirt_cli_type]*4;"     \
                      "popl %%edx; popl %%ecx",                         \
                      "%c[paravirt_cli_type]", "%c[paravirt_clobber]")

#define STI_STRING                                                      \
        _paravirt_alt("pushl %%ecx; pushl %%edx;"                       \
                      "call *paravirt_ops+%c[paravirt_sti_type]*4;"     \
                      "popl %%edx; popl %%ecx",                         \
                      "%c[paravirt_sti_type]", "%c[paravirt_clobber]")

#define CLI_STI_CLOBBERS , "%eax"
#define CLI_STI_INPUT_ARGS                                              \
        ,                                                               \
        [paravirt_cli_type] "i" (PARAVIRT_PATCH(irq_disable)),          \
        [paravirt_sti_type] "i" (PARAVIRT_PATCH(irq_enable)),           \
        paravirt_clobber(CLBR_EAX)

/* Make sure as little as possible of this mess escapes. */
#undef PARAVIRT_CALL
#undef PVOP_VCALL0
#undef PVOP_CALL0
#undef PVOP_VCALL1
#undef PVOP_CALL1
#undef PVOP_VCALL2
#undef PVOP_CALL2
#undef PVOP_VCALL3
#undef PVOP_CALL3
#undef PVOP_VCALL4
#undef PVOP_CALL4

#else  /* __ASSEMBLY__ */

#define PARA_PATCH(off) ((off) / 4)

#define PARA_SITE(ptype, clobbers, ops)         \
771:;                                           \
        ops;                                    \
772:;                                           \
        .pushsection .parainstructions,"a";     \
         .long 771b;                            \
         .byte ptype;                           \
         .byte 772b-771b;                       \
         .short clobbers;                       \
        .popsection

#define INTERRUPT_RETURN                                        \
        PARA_SITE(PARA_PATCH(PARAVIRT_iret), CLBR_NONE,         \
                  jmp *%cs:paravirt_ops+PARAVIRT_iret)

#define DISABLE_INTERRUPTS(clobbers)                                    \
        PARA_SITE(PARA_PATCH(PARAVIRT_irq_disable), clobbers,           \
                  pushl %eax; pushl %ecx; pushl %edx;                   \
                  call *%cs:paravirt_ops+PARAVIRT_irq_disable;          \
                  popl %edx; popl %ecx; popl %eax)

#define ENABLE_INTERRUPTS(clobbers)                                     \
        PARA_SITE(PARA_PATCH(PARAVIRT_irq_enable), clobbers,            \
                  pushl %eax; pushl %ecx; pushl %edx;                   \
                  call *%cs:paravirt_ops+PARAVIRT_irq_enable;           \
                  popl %edx; popl %ecx; popl %eax)

#define ENABLE_INTERRUPTS_SYSEXIT                                       \
        PARA_SITE(PARA_PATCH(PARAVIRT_irq_enable_sysexit), CLBR_NONE,   \
                  jmp *%cs:paravirt_ops+PARAVIRT_irq_enable_sysexit)

#define GET_CR0_INTO_EAX                        \
        push %ecx; push %edx;                   \
        call *paravirt_ops+PARAVIRT_read_cr0;   \
        pop %edx; pop %ecx

#endif /* __ASSEMBLY__ */
#endif /* CONFIG_PARAVIRT */
#endif  /* __ASM_PARAVIRT_H */