unsigned long (*read_cr4)(void);
void (*write_cr4)(unsigned long);
+#ifdef CONFIG_X86_64
+ unsigned long (*read_cr8)(void);
+ void (*write_cr8)(unsigned long);
+#endif
+
/* Segment descriptor handling */
void (*load_tr_desc)(void);
void (*load_gdt)(const struct desc_ptr *);
unsigned long va);
/* Hooks for allocating/releasing pagetable pages */
- void (*alloc_pt)(struct mm_struct *mm, u32 pfn);
- void (*alloc_pd)(u32 pfn);
- void (*alloc_pd_clone)(u32 pfn, u32 clonepfn, u32 start, u32 count);
- void (*release_pt)(u32 pfn);
- void (*release_pd)(u32 pfn);
+ void (*alloc_pte)(struct mm_struct *mm, u32 pfn);
+ void (*alloc_pmd)(struct mm_struct *mm, u32 pfn);
+ void (*alloc_pmd_clone)(u32 pfn, u32 clonepfn, u32 start, u32 count);
+ void (*alloc_pud)(struct mm_struct *mm, u32 pfn);
+ void (*release_pte)(u32 pfn);
+ void (*release_pmd)(u32 pfn);
+ void (*release_pud)(u32 pfn);
/* Pagetable manipulation functions */
void (*set_pte)(pte_t *ptep, pte_t pteval);
void (*set_pte_at)(struct mm_struct *mm, unsigned long addr,
pte_t *ptep, pte_t pteval);
void (*set_pmd)(pmd_t *pmdp, pmd_t pmdval);
- void (*pte_update)(struct mm_struct *mm, unsigned long addr, pte_t *ptep);
+ void (*pte_update)(struct mm_struct *mm, unsigned long addr,
+ pte_t *ptep);
void (*pte_update_defer)(struct mm_struct *mm,
unsigned long addr, pte_t *ptep);
void (*set_pte_atomic)(pte_t *ptep, pte_t pteval);
void (*set_pte_present)(struct mm_struct *mm, unsigned long addr,
pte_t *ptep, pte_t pte);
- void (*pte_clear)(struct mm_struct *mm, unsigned long addr, pte_t *ptep);
+ void (*pte_clear)(struct mm_struct *mm, unsigned long addr,
+ pte_t *ptep);
void (*pmd_clear)(pmd_t *pmdp);
#endif /* CONFIG_X86_PAE */
#if PAGETABLE_LEVELS == 4
pudval_t (*pud_val)(pud_t);
pud_t (*make_pud)(pudval_t pud);
+
+	void (*set_pgd)(pgd_t *pgdp, pgd_t pgdval);
#endif /* PAGETABLE_LEVELS == 4 */
#endif /* PAGETABLE_LEVELS >= 3 */
/* This contains all the paravirt structures: we get a convenient
* number for each function using the offset which we use to indicate
* what to patch. */
-struct paravirt_patch_template
-{
+struct paravirt_patch_template {
struct pv_init_ops pv_init_ops;
struct pv_time_ops pv_time_ops;
struct pv_cpu_ops pv_cpu_ops;
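For reference, the "convenient number" mentioned in the comment above is just the slot index of an op within this template, counted in pointer-sized entries; roughly, the macro that computes it (it lives alongside this structure in paravirt.h) looks like:

	/* Index of an op within the patch template, in pointer-sized slots. */
	#define PARAVIRT_PATCH(x)					\
		(offsetof(struct paravirt_patch_template, x) / sizeof(void *))

Each PVOP call site records this number in a .parainstructions entry, which apply_paravirt() later uses to decide how to patch that site.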
PVOP_VCALL1(pv_cpu_ops.write_cr4, x);
}
+#ifdef CONFIG_X86_64
+static inline unsigned long read_cr8(void)
+{
+ return PVOP_CALL0(unsigned long, pv_cpu_ops.read_cr8);
+}
+
+static inline void write_cr8(unsigned long x)
+{
+ PVOP_VCALL1(pv_cpu_ops.write_cr8, x);
+}
+#endif
+
static inline void raw_safe_halt(void)
{
PVOP_VCALL0(pv_irq_ops.safe_halt);
}
/* These should all do BUG_ON(_err), but our headers are too tangled. */
-#define rdmsr(msr,val1,val2) do { \
+#define rdmsr(msr, val1, val2) \
+do { \
int _err; \
u64 _l = paravirt_read_msr(msr, &_err); \
val1 = (u32)_l; \
val2 = _l >> 32; \
-} while(0)
+} while (0)
-#define wrmsr(msr,val1,val2) do { \
+#define wrmsr(msr, val1, val2) \
+do { \
paravirt_write_msr(msr, val1, val2); \
-} while(0)
+} while (0)
-#define rdmsrl(msr,val) do { \
+#define rdmsrl(msr, val) \
+do { \
int _err; \
val = paravirt_read_msr(msr, &_err); \
-} while(0)
+} while (0)
-#define wrmsrl(msr,val) wrmsr(msr, (u32)((u64)(val)), ((u64)(val))>>32)
-#define wrmsr_safe(msr,a,b) paravirt_write_msr(msr, a, b)
+#define wrmsrl(msr, val) wrmsr(msr, (u32)((u64)(val)), ((u64)(val)) >> 32)
+#define wrmsr_safe(msr, a, b) paravirt_write_msr(msr, a, b)
/* rdmsr with exception handling */
-#define rdmsr_safe(msr,a,b) ({ \
+#define rdmsr_safe(msr, a, b) \
+({ \
int _err; \
u64 _l = paravirt_read_msr(msr, &_err); \
(*a) = (u32)_l; \
(*b) = _l >> 32; \
- _err; })
+ _err; \
+})
+
+static inline int rdmsrl_safe(unsigned msr, unsigned long long *p)
+{
+ int err;
+ *p = paravirt_read_msr(msr, &err);
+ return err;
+}
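The new rdmsrl_safe() is the 64-bit counterpart of rdmsr_safe(): it returns non-zero instead of faulting when the MSR is not implemented. A minimal usage sketch (the MSR chosen here is only illustrative):

	unsigned long long value;

	if (rdmsrl_safe(MSR_IA32_PLATFORM_ID, &value))
		return;		/* MSR not present on this CPU, skip */
	pr_debug("platform id: %llx\n", value);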
static inline u64 paravirt_read_tsc(void)
{
return PVOP_CALL0(u64, pv_cpu_ops.read_tsc);
}
-#define rdtscl(low) do { \
+#define rdtscl(low) \
+do { \
u64 _l = paravirt_read_tsc(); \
low = (int)_l; \
-} while(0)
+} while (0)
#define rdtscll(val) (val = paravirt_read_tsc())
return PVOP_CALL1(u64, pv_cpu_ops.read_pmc, counter);
}
-#define rdpmc(counter,low,high) do { \
+#define rdpmc(counter, low, high) \
+do { \
u64 _l = paravirt_read_pmc(counter); \
low = (u32)_l; \
high = _l >> 32; \
-} while(0)
+} while (0)
static inline unsigned long long paravirt_rdtscp(unsigned int *aux)
{
}
/* The paravirtualized I/O functions */
-static inline void slow_down_io(void) {
+static inline void slow_down_io(void)
+{
pv_cpu_ops.io_delay();
#ifdef REALLY_SLOW_IO
pv_cpu_ops.io_delay();
PVOP_VCALL3(pv_mmu_ops.flush_tlb_others, &cpumask, mm, va);
}
-static inline void paravirt_alloc_pt(struct mm_struct *mm, unsigned pfn)
+static inline void paravirt_alloc_pte(struct mm_struct *mm, unsigned pfn)
{
- PVOP_VCALL2(pv_mmu_ops.alloc_pt, mm, pfn);
+ PVOP_VCALL2(pv_mmu_ops.alloc_pte, mm, pfn);
}
-static inline void paravirt_release_pt(unsigned pfn)
+static inline void paravirt_release_pte(unsigned pfn)
{
- PVOP_VCALL1(pv_mmu_ops.release_pt, pfn);
+ PVOP_VCALL1(pv_mmu_ops.release_pte, pfn);
}
-static inline void paravirt_alloc_pd(unsigned pfn)
+static inline void paravirt_alloc_pmd(struct mm_struct *mm, unsigned pfn)
{
- PVOP_VCALL1(pv_mmu_ops.alloc_pd, pfn);
+ PVOP_VCALL2(pv_mmu_ops.alloc_pmd, mm, pfn);
}
-static inline void paravirt_alloc_pd_clone(unsigned pfn, unsigned clonepfn,
- unsigned start, unsigned count)
+static inline void paravirt_alloc_pmd_clone(unsigned pfn, unsigned clonepfn,
+ unsigned start, unsigned count)
{
- PVOP_VCALL4(pv_mmu_ops.alloc_pd_clone, pfn, clonepfn, start, count);
+ PVOP_VCALL4(pv_mmu_ops.alloc_pmd_clone, pfn, clonepfn, start, count);
}
-static inline void paravirt_release_pd(unsigned pfn)
+static inline void paravirt_release_pmd(unsigned pfn)
{
- PVOP_VCALL1(pv_mmu_ops.release_pd, pfn);
+ PVOP_VCALL1(pv_mmu_ops.release_pmd, pfn);
+}
+
+static inline void paravirt_alloc_pud(struct mm_struct *mm, unsigned pfn)
+{
+ PVOP_VCALL2(pv_mmu_ops.alloc_pud, mm, pfn);
+}
+static inline void paravirt_release_pud(unsigned pfn)
+{
+ PVOP_VCALL1(pv_mmu_ops.release_pud, pfn);
}
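These wrappers are what the page-table allocation paths are expected to call whenever a table page is installed or released, so that a hypervisor can track (and, if needed, pin) the page. A simplified sketch of how a populate helper in pgalloc.h ends up using them, under these assumptions:

	static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
	{
		/* Tell the hypervisor a new pmd page is about to go live. */
		paravirt_alloc_pmd(mm, __pa(pmd) >> PAGE_SHIFT);
		set_pud(pud, __pud(_PAGE_TABLE | __pa(pmd)));
	}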
#ifdef CONFIG_HIGHPTE
PVOP_VCALL2(pv_mmu_ops.set_pmd, pmdp, val);
}
+#if PAGETABLE_LEVELS >= 3
+static inline pmd_t __pmd(pmdval_t val)
+{
+ pmdval_t ret;
+
+ if (sizeof(pmdval_t) > sizeof(long))
+ ret = PVOP_CALL2(pmdval_t, pv_mmu_ops.make_pmd,
+ val, (u64)val >> 32);
+ else
+ ret = PVOP_CALL1(pmdval_t, pv_mmu_ops.make_pmd,
+ val);
+
+ return (pmd_t) { ret };
+}
+
+static inline pmdval_t pmd_val(pmd_t pmd)
+{
+ pmdval_t ret;
+
+ if (sizeof(pmdval_t) > sizeof(long))
+ ret = PVOP_CALL2(pmdval_t, pv_mmu_ops.pmd_val,
+ pmd.pmd, (u64)pmd.pmd >> 32);
+ else
+ ret = PVOP_CALL1(pmdval_t, pv_mmu_ops.pmd_val,
+ pmd.pmd);
+
+ return ret;
+}
+
+static inline void set_pud(pud_t *pudp, pud_t pud)
+{
+ pudval_t val = native_pud_val(pud);
+
+ if (sizeof(pudval_t) > sizeof(long))
+ PVOP_VCALL3(pv_mmu_ops.set_pud, pudp,
+ val, (u64)val >> 32);
+ else
+ PVOP_VCALL2(pv_mmu_ops.set_pud, pudp,
+ val);
+}
+#if PAGETABLE_LEVELS == 4
+static inline pud_t __pud(pudval_t val)
+{
+ pudval_t ret;
+
+ if (sizeof(pudval_t) > sizeof(long))
+ ret = PVOP_CALL2(pudval_t, pv_mmu_ops.make_pud,
+ val, (u64)val >> 32);
+ else
+ ret = PVOP_CALL1(pudval_t, pv_mmu_ops.make_pud,
+ val);
+
+ return (pud_t) { ret };
+}
+
+static inline pudval_t pud_val(pud_t pud)
+{
+ pudval_t ret;
+
+ if (sizeof(pudval_t) > sizeof(long))
+ ret = PVOP_CALL2(pudval_t, pv_mmu_ops.pud_val,
+ pud.pud, (u64)pud.pud >> 32);
+ else
+ ret = PVOP_CALL1(pudval_t, pv_mmu_ops.pud_val,
+ pud.pud);
+
+ return ret;
+}
+
+static inline void set_pgd(pgd_t *pgdp, pgd_t pgd)
+{
+ pgdval_t val = native_pgd_val(pgd);
+
+ if (sizeof(pgdval_t) > sizeof(long))
+ PVOP_VCALL3(pv_mmu_ops.set_pgd, pgdp,
+ val, (u64)val >> 32);
+ else
+ PVOP_VCALL2(pv_mmu_ops.set_pgd, pgdp,
+ val);
+}
+
+static inline void pgd_clear(pgd_t *pgdp)
+{
+ set_pgd(pgdp, __pgd(0));
+}
+
+static inline void pud_clear(pud_t *pudp)
+{
+ set_pud(pudp, __pud(0));
+}
+
+#endif /* PAGETABLE_LEVELS == 4 */
+
+#endif /* PAGETABLE_LEVELS >= 3 */
+
#ifdef CONFIG_X86_PAE
/* Special-case pte-setting operations for PAE, which can't update a
64-bit pte atomically */
}
#endif /* CONFIG_X86_PAE */
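The comment above refers to the fact that a 32-bit CPU cannot store a 64-bit PAE pte in a single write; the native fallback (roughly as in pgtable-3level.h) therefore writes the high word first, so the entry is never transiently valid with a stale high half:

	static inline void native_set_pte(pte_t *ptep, pte_t pte)
	{
		ptep->pte_high = pte.pte_high;
		smp_wmb();
		ptep->pte_low = pte.pte_low;
	}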
-#if PAGETABLE_LEVELS >= 3
-static inline pmd_t __pmd(pmdval_t val)
-{
- pmdval_t ret;
-
- if (sizeof(pmdval_t) > sizeof(long))
- ret = PVOP_CALL2(pmdval_t, pv_mmu_ops.make_pmd,
- val, (u64)val >> 32);
- else
- ret = PVOP_CALL1(pmdval_t, pv_mmu_ops.make_pmd,
- val);
-
- return (pmd_t) { ret };
-}
-
-static inline pmdval_t pmd_val(pmd_t pmd)
-{
- pmdval_t ret;
-
- if (sizeof(pmdval_t) > sizeof(long))
- ret = PVOP_CALL2(pmdval_t, pv_mmu_ops.pmd_val,
- pmd.pmd, (u64)pmd.pmd >> 32);
- else
- ret = PVOP_CALL1(pmdval_t, pv_mmu_ops.pmd_val,
- pmd.pmd);
-
- return ret;
-}
-
-static inline void set_pud(pud_t *pudp, pud_t pud)
-{
- pudval_t val = native_pud_val(pud);
-
- if (sizeof(pudval_t) > sizeof(long))
- PVOP_VCALL3(pv_mmu_ops.set_pud, pudp,
- val, (u64)val >> 32);
- else
- PVOP_VCALL2(pv_mmu_ops.set_pud, pudp,
- val);
-}
-#endif /* PAGETABLE_LEVELS >= 3 */
-
/* Lazy mode for batching updates / context switch */
enum paravirt_lazy_mode {
PARAVIRT_LAZY_NONE,