*/
#include "kvm.h"
+#include "x86_emulate.h"
#include "vmx.h"
+#include "segment_descriptor.h"
+
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/highmem.h>
#include <linux/profile.h>
#include <linux/sched.h>
+
#include <asm/io.h>
#include <asm/desc.h>
-#include "segment_descriptor.h"
-
MODULE_AUTHOR("Qumranet");
MODULE_LICENSE("GPL");
-static DEFINE_PER_CPU(struct vmcs *, vmxarea);
-static DEFINE_PER_CPU(struct vmcs *, current_vmcs);
+struct vmcs {
+ u32 revision_id;
+ u32 abort;
+ char data[0];
+};
+struct vcpu_vmx {
+ struct kvm_vcpu vcpu;
+ int launched;
+ struct kvm_msr_entry *guest_msrs;
+ struct kvm_msr_entry *host_msrs;
+ int nmsrs;
+ int save_nmsrs;
+ int msr_offset_efer;
#ifdef CONFIG_X86_64
-#define HOST_IS_64 1
-#else
-#define HOST_IS_64 0
+ int msr_offset_kernel_gs_base;
#endif
+ struct vmcs *vmcs;
+ struct {
+ int loaded;
+ u16 fs_sel, gs_sel, ldt_sel;
+ int gs_ldt_reload_needed;
+ int fs_reload_needed;
+	} host_state;
-static struct vmcs_descriptor {
+};
+
+static inline struct vcpu_vmx *to_vmx(struct kvm_vcpu *vcpu)
+{
+ return container_of(vcpu, struct vcpu_vmx, vcpu);
+}
+
+static int init_rmode_tss(struct kvm *kvm);
+
+static DEFINE_PER_CPU(struct vmcs *, vmxarea);
+static DEFINE_PER_CPU(struct vmcs *, current_vmcs);
+
+static struct page *vmx_io_bitmap_a;
+static struct page *vmx_io_bitmap_b;
+
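+/*
+ * EFER bits that may legitimately differ between guest and host and so
+ * must be switched on guest entry/exit; currently only SCE.
+ */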
+#define EFER_SAVE_RESTORE_BITS ((u64)EFER_SCE)
+
+static struct vmcs_config {
int size;
int order;
u32 revision_id;
-} vmcs_descriptor;
+ u32 pin_based_exec_ctrl;
+ u32 cpu_based_exec_ctrl;
+ u32 vmexit_ctrl;
+ u32 vmentry_ctrl;
+} vmcs_config;
#define VMX_SEGMENT_FIELD(seg) \
[VCPU_SREG_##seg] = { \
};
#define NR_VMX_MSR ARRAY_SIZE(vmx_msr_index)
-#ifdef CONFIG_X86_64
-static unsigned msr_offset_kernel_gs_base;
-#define NR_64BIT_MSRS 4
-/*
- * avoid save/load MSR_SYSCALL_MASK and MSR_LSTAR by std vt
- * mechanism (cpu bug AA24)
- */
-#define NR_BAD_MSRS 2
-#else
-#define NR_64BIT_MSRS 0
-#define NR_BAD_MSRS 0
-#endif
+static void load_msrs(struct kvm_msr_entry *e, int n)
+{
+ int i;
+
+ for (i = 0; i < n; ++i)
+ wrmsrl(e[i].index, e[i].data);
+}
+
+static void save_msrs(struct kvm_msr_entry *e, int n)
+{
+ int i;
+
+ for (i = 0; i < n; ++i)
+ rdmsrl(e[i].index, e[i].data);
+}
+
+static inline u64 msr_efer_save_restore_bits(struct kvm_msr_entry msr)
+{
+ return (u64)msr.data & EFER_SAVE_RESTORE_BITS;
+}
+
+static inline int msr_efer_need_save_restore(struct vcpu_vmx *vmx)
+{
+ int efer_offset = vmx->msr_offset_efer;
+ return msr_efer_save_restore_bits(vmx->host_msrs[efer_offset]) !=
+ msr_efer_save_restore_bits(vmx->guest_msrs[efer_offset]);
+}
static inline int is_page_fault(u32 intr_info)
{
	return (intr_info & (INTR_INFO_INTR_TYPE_MASK | INTR_INFO_VECTOR_MASK |
			     INTR_INFO_VALID_MASK)) ==
		(INTR_TYPE_EXCEPTION | PF_VECTOR | INTR_INFO_VALID_MASK);
}
-static struct vmx_msr_entry *find_msr_entry(struct kvm_vcpu *vcpu, u32 msr)
+static int __find_msr_index(struct vcpu_vmx *vmx, u32 msr)
+{
+ int i;
+
+ for (i = 0; i < vmx->nmsrs; ++i)
+ if (vmx->guest_msrs[i].index == msr)
+ return i;
+ return -1;
+}
+
+static struct kvm_msr_entry *find_msr_entry(struct vcpu_vmx *vmx, u32 msr)
{
int i;
- for (i = 0; i < vcpu->nmsrs; ++i)
- if (vcpu->guest_msrs[i].index == msr)
- return &vcpu->guest_msrs[i];
+ i = __find_msr_index(vmx, msr);
+ if (i >= 0)
+ return &vmx->guest_msrs[i];
return NULL;
}
static void __vcpu_clear(void *arg)
{
- struct kvm_vcpu *vcpu = arg;
+ struct vcpu_vmx *vmx = arg;
int cpu = raw_smp_processor_id();
- if (vcpu->cpu == cpu)
- vmcs_clear(vcpu->vmcs);
- if (per_cpu(current_vmcs, cpu) == vcpu->vmcs)
+ if (vmx->vcpu.cpu == cpu)
+ vmcs_clear(vmx->vmcs);
+ if (per_cpu(current_vmcs, cpu) == vmx->vmcs)
per_cpu(current_vmcs, cpu) = NULL;
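+	/*
+	 * Record this cpu's TSC so that vmx_vcpu_load() can keep the
+	 * guest TSC monotonic when the vcpu moves to another cpu.
+	 */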
+ rdtscll(vmx->vcpu.host_tsc);
}
-static void vcpu_clear(struct kvm_vcpu *vcpu)
+static void vcpu_clear(struct vcpu_vmx *vmx)
{
- if (vcpu->cpu != raw_smp_processor_id() && vcpu->cpu != -1)
- smp_call_function_single(vcpu->cpu, __vcpu_clear, vcpu, 0, 1);
+ if (vmx->vcpu.cpu != raw_smp_processor_id() && vmx->vcpu.cpu != -1)
+ smp_call_function_single(vmx->vcpu.cpu, __vcpu_clear,
+ vmx, 0, 1);
else
- __vcpu_clear(vcpu);
- vcpu->launched = 0;
+ __vcpu_clear(vmx);
+ vmx->launched = 0;
}
static unsigned long vmcs_readl(unsigned long field)
vmcs_writel(field, vmcs_readl(field) | mask);
}
+static void update_exception_bitmap(struct kvm_vcpu *vcpu)
+{
+ u32 eb;
+
+ eb = 1u << PF_VECTOR;
+ if (!vcpu->fpu_active)
+ eb |= 1u << NM_VECTOR;
+ if (vcpu->guest_debug.enabled)
+		eb |= 1u << 1;	/* trap debug exceptions (#DB) */
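+	/*
+	 * In real mode (vm86) every exception is intercepted so it can be
+	 * fixed up or emulated on the guest's behalf.
+	 */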
+ if (vcpu->rmode.active)
+ eb = ~0;
+ vmcs_write32(EXCEPTION_BITMAP, eb);
+}
+
+static void reload_tss(void)
+{
+#ifndef CONFIG_X86_64
+
+ /*
+ * VT restores TR but not its size. Useless.
+ */
+ struct descriptor_table gdt;
+ struct segment_descriptor *descs;
+
+ get_gdt(&gdt);
+ descs = (void *)gdt.base;
+ descs[GDT_ENTRY_TSS].type = 9; /* available TSS */
+ load_TR_desc();
+#endif
+}
+
+static void load_transition_efer(struct vcpu_vmx *vmx)
+{
+ u64 trans_efer;
+ int efer_offset = vmx->msr_offset_efer;
+
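+	/*
+	 * Run on the host's EFER with only the SCE bit taken from the
+	 * guest, so just the bits that really differ get switched.
+	 */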
+ trans_efer = vmx->host_msrs[efer_offset].data;
+ trans_efer &= ~EFER_SAVE_RESTORE_BITS;
+ trans_efer |= msr_efer_save_restore_bits(vmx->guest_msrs[efer_offset]);
+ wrmsrl(MSR_EFER, trans_efer);
+ vmx->vcpu.stat.efer_reload++;
+}
+
+static void vmx_save_host_state(struct vcpu_vmx *vmx)
+{
+ if (vmx->host_state.loaded)
+ return;
+
+ vmx->host_state.loaded = 1;
+ /*
+ * Set host fs and gs selectors. Unfortunately, 22.2.3 does not
+ * allow segment selectors with cpl > 0 or ti == 1.
+ */
+ vmx->host_state.ldt_sel = read_ldt();
+ vmx->host_state.gs_ldt_reload_needed = vmx->host_state.ldt_sel;
+ vmx->host_state.fs_sel = read_fs();
+ if (!(vmx->host_state.fs_sel & 7)) {
+ vmcs_write16(HOST_FS_SELECTOR, vmx->host_state.fs_sel);
+ vmx->host_state.fs_reload_needed = 0;
+ } else {
+ vmcs_write16(HOST_FS_SELECTOR, 0);
+ vmx->host_state.fs_reload_needed = 1;
+ }
+ vmx->host_state.gs_sel = read_gs();
+ if (!(vmx->host_state.gs_sel & 7))
+ vmcs_write16(HOST_GS_SELECTOR, vmx->host_state.gs_sel);
+ else {
+ vmcs_write16(HOST_GS_SELECTOR, 0);
+ vmx->host_state.gs_ldt_reload_needed = 1;
+ }
+
+#ifdef CONFIG_X86_64
+ vmcs_writel(HOST_FS_BASE, read_msr(MSR_FS_BASE));
+ vmcs_writel(HOST_GS_BASE, read_msr(MSR_GS_BASE));
+#else
+ vmcs_writel(HOST_FS_BASE, segment_base(vmx->host_state.fs_sel));
+ vmcs_writel(HOST_GS_BASE, segment_base(vmx->host_state.gs_sel));
+#endif
+
+#ifdef CONFIG_X86_64
+ if (is_long_mode(&vmx->vcpu)) {
+ save_msrs(vmx->host_msrs +
+ vmx->msr_offset_kernel_gs_base, 1);
+ }
+#endif
+ load_msrs(vmx->guest_msrs, vmx->save_nmsrs);
+ if (msr_efer_need_save_restore(vmx))
+ load_transition_efer(vmx);
+}
+
+static void vmx_load_host_state(struct vcpu_vmx *vmx)
+{
+ unsigned long flags;
+
+ if (!vmx->host_state.loaded)
+ return;
+
+ vmx->host_state.loaded = 0;
+ if (vmx->host_state.fs_reload_needed)
+ load_fs(vmx->host_state.fs_sel);
+ if (vmx->host_state.gs_ldt_reload_needed) {
+ load_ldt(vmx->host_state.ldt_sel);
+ /*
+ * If we have to reload gs, we must take care to
+ * preserve our gs base.
+ */
+ local_irq_save(flags);
+ load_gs(vmx->host_state.gs_sel);
+#ifdef CONFIG_X86_64
+ wrmsrl(MSR_GS_BASE, vmcs_readl(HOST_GS_BASE));
+#endif
+ local_irq_restore(flags);
+ }
+ reload_tss();
+ save_msrs(vmx->guest_msrs, vmx->save_nmsrs);
+ load_msrs(vmx->host_msrs, vmx->save_nmsrs);
+ if (msr_efer_need_save_restore(vmx))
+ load_msrs(vmx->host_msrs + vmx->msr_offset_efer, 1);
+}
+
/*
* Switches to specified vcpu, until a matching vcpu_put(), but assumes
* vcpu mutex is already taken.
*/
-static void vmx_vcpu_load(struct kvm_vcpu *vcpu)
+static void vmx_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
{
- u64 phys_addr = __pa(vcpu->vmcs);
- int cpu;
-
- cpu = get_cpu();
+ struct vcpu_vmx *vmx = to_vmx(vcpu);
+ u64 phys_addr = __pa(vmx->vmcs);
+ u64 tsc_this, delta;
if (vcpu->cpu != cpu)
- vcpu_clear(vcpu);
+ vcpu_clear(vmx);
- if (per_cpu(current_vmcs, cpu) != vcpu->vmcs) {
+ if (per_cpu(current_vmcs, cpu) != vmx->vmcs) {
u8 error;
- per_cpu(current_vmcs, cpu) = vcpu->vmcs;
+ per_cpu(current_vmcs, cpu) = vmx->vmcs;
asm volatile (ASM_VMX_VMPTRLD_RAX "; setna %0"
: "=g"(error) : "a"(&phys_addr), "m"(phys_addr)
: "cc");
if (error)
printk(KERN_ERR "kvm: vmptrld %p/%llx fail\n",
- vcpu->vmcs, phys_addr);
+ vmx->vmcs, phys_addr);
}
if (vcpu->cpu != cpu) {
rdmsrl(MSR_IA32_SYSENTER_ESP, sysenter_esp);
vmcs_writel(HOST_IA32_SYSENTER_ESP, sysenter_esp); /* 22.2.3 */
+
+ /*
+		 * Make sure the time stamp counter is monotonic.
+ */
+ rdtscll(tsc_this);
+ delta = vcpu->host_tsc - tsc_this;
+ vmcs_write64(TSC_OFFSET, vmcs_read64(TSC_OFFSET) + delta);
}
}
static void vmx_vcpu_put(struct kvm_vcpu *vcpu)
{
+ vmx_load_host_state(to_vmx(vcpu));
kvm_put_guest_fpu(vcpu);
- put_cpu();
+}
+
+static void vmx_fpu_activate(struct kvm_vcpu *vcpu)
+{
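+	/*
+	 * Lazy FPU: give the guest the real FPU.  Clear CR0.TS in hardware
+	 * (unless the guest itself set TS) and stop intercepting #NM via
+	 * update_exception_bitmap().
+	 */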
+ if (vcpu->fpu_active)
+ return;
+ vcpu->fpu_active = 1;
+ vmcs_clear_bits(GUEST_CR0, X86_CR0_TS);
+ if (vcpu->cr0 & X86_CR0_TS)
+ vmcs_set_bits(GUEST_CR0, X86_CR0_TS);
+ update_exception_bitmap(vcpu);
+}
+
+static void vmx_fpu_deactivate(struct kvm_vcpu *vcpu)
+{
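+	/* Reclaim the FPU: set CR0.TS so the guest's next FPU use faults with #NM. */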
+ if (!vcpu->fpu_active)
+ return;
+ vcpu->fpu_active = 0;
+ vmcs_set_bits(GUEST_CR0, X86_CR0_TS);
+ update_exception_bitmap(vcpu);
}
static void vmx_vcpu_decache(struct kvm_vcpu *vcpu)
{
- vcpu_clear(vcpu);
+ vcpu_clear(to_vmx(vcpu));
}
static unsigned long vmx_get_rflags(struct kvm_vcpu *vcpu)
INTR_INFO_VALID_MASK);
}
+/*
+ * Swap the MSR entries at 'from' and 'to' in both the guest and host arrays.
+ */
+#ifdef CONFIG_X86_64
+static void move_msr_up(struct vcpu_vmx *vmx, int from, int to)
+{
+ struct kvm_msr_entry tmp;
+
+ tmp = vmx->guest_msrs[to];
+ vmx->guest_msrs[to] = vmx->guest_msrs[from];
+ vmx->guest_msrs[from] = tmp;
+ tmp = vmx->host_msrs[to];
+ vmx->host_msrs[to] = vmx->host_msrs[from];
+ vmx->host_msrs[from] = tmp;
+}
+#endif
+
/*
* Set up the vmcs to automatically save and restore system
* msrs. Don't touch the 64-bit msrs if the guest is in legacy
* mode, as fiddling with msrs is very expensive.
*/
-static void setup_msrs(struct kvm_vcpu *vcpu)
+static void setup_msrs(struct vcpu_vmx *vmx)
{
- int nr_skip, nr_good_msrs;
-
- if (is_long_mode(vcpu))
- nr_skip = NR_BAD_MSRS;
- else
- nr_skip = NR_64BIT_MSRS;
- nr_good_msrs = vcpu->nmsrs - nr_skip;
+ int save_nmsrs;
- /*
- * MSR_K6_STAR is only needed on long mode guests, and only
- * if efer.sce is enabled.
- */
- if (find_msr_entry(vcpu, MSR_K6_STAR)) {
- --nr_good_msrs;
+ save_nmsrs = 0;
#ifdef CONFIG_X86_64
- if (is_long_mode(vcpu) && (vcpu->shadow_efer & EFER_SCE))
- ++nr_good_msrs;
-#endif
+ if (is_long_mode(&vmx->vcpu)) {
+ int index;
+
+ index = __find_msr_index(vmx, MSR_SYSCALL_MASK);
+ if (index >= 0)
+ move_msr_up(vmx, index, save_nmsrs++);
+ index = __find_msr_index(vmx, MSR_LSTAR);
+ if (index >= 0)
+ move_msr_up(vmx, index, save_nmsrs++);
+ index = __find_msr_index(vmx, MSR_CSTAR);
+ if (index >= 0)
+ move_msr_up(vmx, index, save_nmsrs++);
+ index = __find_msr_index(vmx, MSR_KERNEL_GS_BASE);
+ if (index >= 0)
+ move_msr_up(vmx, index, save_nmsrs++);
+ /*
+ * MSR_K6_STAR is only needed on long mode guests, and only
+ * if efer.sce is enabled.
+ */
+ index = __find_msr_index(vmx, MSR_K6_STAR);
+ if ((index >= 0) && (vmx->vcpu.shadow_efer & EFER_SCE))
+ move_msr_up(vmx, index, save_nmsrs++);
}
+#endif
+ vmx->save_nmsrs = save_nmsrs;
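+	/*
+	 * Entries [0, save_nmsrs) are now the MSRs switched by hand on
+	 * each heavyweight exit; see vmx_save_host_state() and
+	 * vmx_load_host_state().
+	 */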
- vmcs_writel(VM_ENTRY_MSR_LOAD_ADDR,
- virt_to_phys(vcpu->guest_msrs + nr_skip));
- vmcs_writel(VM_EXIT_MSR_STORE_ADDR,
- virt_to_phys(vcpu->guest_msrs + nr_skip));
- vmcs_writel(VM_EXIT_MSR_LOAD_ADDR,
- virt_to_phys(vcpu->host_msrs + nr_skip));
- vmcs_write32(VM_EXIT_MSR_STORE_COUNT, nr_good_msrs); /* 22.2.2 */
- vmcs_write32(VM_EXIT_MSR_LOAD_COUNT, nr_good_msrs); /* 22.2.2 */
- vmcs_write32(VM_ENTRY_MSR_LOAD_COUNT, nr_good_msrs); /* 22.2.2 */
+#ifdef CONFIG_X86_64
+ vmx->msr_offset_kernel_gs_base =
+ __find_msr_index(vmx, MSR_KERNEL_GS_BASE);
+#endif
+ vmx->msr_offset_efer = __find_msr_index(vmx, MSR_EFER);
}
/*
vmcs_write64(TSC_OFFSET, guest_tsc - host_tsc);
}
-static void reload_tss(void)
-{
-#ifndef CONFIG_X86_64
-
- /*
- * VT restores TR but not its size. Useless.
- */
- struct descriptor_table gdt;
- struct segment_descriptor *descs;
-
- get_gdt(&gdt);
- descs = (void *)gdt.base;
- descs[GDT_ENTRY_TSS].type = 9; /* available TSS */
- load_TR_desc();
-#endif
-}
-
/*
* Reads an msr value (of 'msr_index') into 'pdata'.
* Returns 0 on success, non-0 otherwise.
static int vmx_get_msr(struct kvm_vcpu *vcpu, u32 msr_index, u64 *pdata)
{
u64 data;
- struct vmx_msr_entry *msr;
+ struct kvm_msr_entry *msr;
if (!pdata) {
printk(KERN_ERR "BUG: get_msr called with NULL pdata\n");
data = vmcs_readl(GUEST_SYSENTER_ESP);
break;
default:
- msr = find_msr_entry(vcpu, msr_index);
+ msr = find_msr_entry(to_vmx(vcpu), msr_index);
if (msr) {
data = msr->data;
break;
*/
static int vmx_set_msr(struct kvm_vcpu *vcpu, u32 msr_index, u64 data)
{
- struct vmx_msr_entry *msr;
+ struct vcpu_vmx *vmx = to_vmx(vcpu);
+ struct kvm_msr_entry *msr;
+ int ret = 0;
+
switch (msr_index) {
#ifdef CONFIG_X86_64
case MSR_EFER:
- return kvm_set_msr_common(vcpu, msr_index, data);
+ ret = kvm_set_msr_common(vcpu, msr_index, data);
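+		/*
+		 * If guest MSR state is live on this cpu, push the new
+		 * EFER into hardware immediately.
+		 */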
+ if (vmx->host_state.loaded)
+ load_transition_efer(vmx);
+ break;
case MSR_FS_BASE:
vmcs_writel(GUEST_FS_BASE, data);
break;
guest_write_tsc(data);
break;
default:
- msr = find_msr_entry(vcpu, msr_index);
+ msr = find_msr_entry(vmx, msr_index);
if (msr) {
msr->data = data;
+ if (vmx->host_state.loaded)
+ load_msrs(vmx->guest_msrs, vmx->save_nmsrs);
break;
}
- return kvm_set_msr_common(vcpu, msr_index, data);
- msr->data = data;
- break;
+ ret = kvm_set_msr_common(vcpu, msr_index, data);
}
- return 0;
+ return ret;
}
/*
static int set_guest_debug(struct kvm_vcpu *vcpu, struct kvm_debug_guest *dbg)
{
unsigned long dr7 = 0x400;
- u32 exception_bitmap;
int old_singlestep;
- exception_bitmap = vmcs_read32(EXCEPTION_BITMAP);
old_singlestep = vcpu->guest_debug.singlestep;
vcpu->guest_debug.enabled = dbg->enabled;
dr7 |= 0 << (i*4+16); /* execution breakpoint */
}
- exception_bitmap |= (1u << 1); /* Trap debug exceptions */
-
vcpu->guest_debug.singlestep = dbg->singlestep;
- } else {
- exception_bitmap &= ~(1u << 1); /* Ignore debug exceptions */
+ } else
vcpu->guest_debug.singlestep = 0;
- }
if (old_singlestep && !vcpu->guest_debug.singlestep) {
unsigned long flags;
vmcs_writel(GUEST_RFLAGS, flags);
}
- vmcs_write32(EXCEPTION_BITMAP, exception_bitmap);
+ update_exception_bitmap(vcpu);
vmcs_writel(GUEST_DR7, dr7);
return 0;
u64 msr;
rdmsrl(MSR_IA32_FEATURE_CONTROL, msr);
- return (msr & 5) == 1; /* locked but not enabled */
+ return (msr & (MSR_IA32_FEATURE_CONTROL_LOCKED |
+ MSR_IA32_FEATURE_CONTROL_VMXON_ENABLED))
+ == MSR_IA32_FEATURE_CONTROL_LOCKED;
+ /* locked but not enabled */
}
static void hardware_enable(void *garbage)
u64 old;
rdmsrl(MSR_IA32_FEATURE_CONTROL, old);
- if ((old & 5) != 5)
+ if ((old & (MSR_IA32_FEATURE_CONTROL_LOCKED |
+ MSR_IA32_FEATURE_CONTROL_VMXON_ENABLED))
+ != (MSR_IA32_FEATURE_CONTROL_LOCKED |
+ MSR_IA32_FEATURE_CONTROL_VMXON_ENABLED))
/* enable and lock */
- wrmsrl(MSR_IA32_FEATURE_CONTROL, old | 5);
- write_cr4(read_cr4() | CR4_VMXE); /* FIXME: not cpu hotplug safe */
+ wrmsrl(MSR_IA32_FEATURE_CONTROL, old |
+ MSR_IA32_FEATURE_CONTROL_LOCKED |
+ MSR_IA32_FEATURE_CONTROL_VMXON_ENABLED);
+ write_cr4(read_cr4() | X86_CR4_VMXE); /* FIXME: not cpu hotplug safe */
asm volatile (ASM_VMX_VMXON_RAX : : "a"(&phys_addr), "m"(phys_addr)
: "memory", "cc");
}
asm volatile (ASM_VMX_VMXOFF : : : "cc");
}
-static __init void setup_vmcs_descriptor(void)
+static __init int adjust_vmx_controls(u32 ctl_min, u32 ctl_opt,
+				      u32 msr, u32 *result)
+{
+ u32 vmx_msr_low, vmx_msr_high;
+ u32 ctl = ctl_min | ctl_opt;
+
+ rdmsr(msr, vmx_msr_low, vmx_msr_high);
+
+ ctl &= vmx_msr_high; /* bit == 0 in high word ==> must be zero */
+ ctl |= vmx_msr_low; /* bit == 1 in low word ==> must be one */
+
+ /* Ensure minimum (required) set of control bits are supported. */
+ if (ctl_min & ~ctl)
+ return -EIO;
+
+ *result = ctl;
+ return 0;
+}
+
+static __init int setup_vmcs_config(struct vmcs_config *vmcs_conf)
{
u32 vmx_msr_low, vmx_msr_high;
+ u32 min, opt;
+ u32 _pin_based_exec_control = 0;
+ u32 _cpu_based_exec_control = 0;
+ u32 _vmexit_control = 0;
+ u32 _vmentry_control = 0;
+
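+	/* min: controls we require; opt: controls we use when available. */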
+ min = PIN_BASED_EXT_INTR_MASK | PIN_BASED_NMI_EXITING;
+ opt = 0;
+ if (adjust_vmx_controls(min, opt, MSR_IA32_VMX_PINBASED_CTLS,
+ &_pin_based_exec_control) < 0)
+ return -EIO;
+
+ min = CPU_BASED_HLT_EXITING |
+#ifdef CONFIG_X86_64
+ CPU_BASED_CR8_LOAD_EXITING |
+ CPU_BASED_CR8_STORE_EXITING |
+#endif
+ CPU_BASED_USE_IO_BITMAPS |
+ CPU_BASED_MOV_DR_EXITING |
+ CPU_BASED_USE_TSC_OFFSETING;
+ opt = 0;
+ if (adjust_vmx_controls(min, opt, MSR_IA32_VMX_PROCBASED_CTLS,
+ &_cpu_based_exec_control) < 0)
+ return -EIO;
+
+ min = 0;
+#ifdef CONFIG_X86_64
+ min |= VM_EXIT_HOST_ADDR_SPACE_SIZE;
+#endif
+ opt = 0;
+ if (adjust_vmx_controls(min, opt, MSR_IA32_VMX_EXIT_CTLS,
+ &_vmexit_control) < 0)
+ return -EIO;
+
+ min = opt = 0;
+ if (adjust_vmx_controls(min, opt, MSR_IA32_VMX_ENTRY_CTLS,
+ &_vmentry_control) < 0)
+ return -EIO;
rdmsr(MSR_IA32_VMX_BASIC, vmx_msr_low, vmx_msr_high);
- vmcs_descriptor.size = vmx_msr_high & 0x1fff;
- vmcs_descriptor.order = get_order(vmcs_descriptor.size);
- vmcs_descriptor.revision_id = vmx_msr_low;
+
+ /* IA-32 SDM Vol 3B: VMCS size is never greater than 4kB. */
+ if ((vmx_msr_high & 0x1fff) > PAGE_SIZE)
+ return -EIO;
+
+#ifdef CONFIG_X86_64
+ /* IA-32 SDM Vol 3B: 64-bit CPUs always have VMX_BASIC_MSR[48]==0. */
+ if (vmx_msr_high & (1u<<16))
+ return -EIO;
+#endif
+
+ /* Require Write-Back (WB) memory type for VMCS accesses. */
+ if (((vmx_msr_high >> 18) & 15) != 6)
+ return -EIO;
+
+ vmcs_conf->size = vmx_msr_high & 0x1fff;
+	vmcs_conf->order = get_order(vmcs_conf->size);
+ vmcs_conf->revision_id = vmx_msr_low;
+
+ vmcs_conf->pin_based_exec_ctrl = _pin_based_exec_control;
+ vmcs_conf->cpu_based_exec_ctrl = _cpu_based_exec_control;
+ vmcs_conf->vmexit_ctrl = _vmexit_control;
+ vmcs_conf->vmentry_ctrl = _vmentry_control;
+
+ return 0;
}
static struct vmcs *alloc_vmcs_cpu(int cpu)
struct page *pages;
struct vmcs *vmcs;
- pages = alloc_pages_node(node, GFP_KERNEL, vmcs_descriptor.order);
+ pages = alloc_pages_node(node, GFP_KERNEL, vmcs_config.order);
if (!pages)
return NULL;
vmcs = page_address(pages);
- memset(vmcs, 0, vmcs_descriptor.size);
- vmcs->revision_id = vmcs_descriptor.revision_id; /* vmcs revision id */
+ memset(vmcs, 0, vmcs_config.size);
+ vmcs->revision_id = vmcs_config.revision_id; /* vmcs revision id */
return vmcs;
}
static void free_vmcs(struct vmcs *vmcs)
{
- free_pages((unsigned long)vmcs, vmcs_descriptor.order);
+ free_pages((unsigned long)vmcs, vmcs_config.order);
}
static void free_kvm_area(void)
free_vmcs(per_cpu(vmxarea, cpu));
}
-extern struct vmcs *alloc_vmcs_cpu(int cpu);
-
static __init int alloc_kvm_area(void)
{
int cpu;
static __init int hardware_setup(void)
{
- setup_vmcs_descriptor();
+ if (setup_vmcs_config(&vmcs_config) < 0)
+ return -EIO;
return alloc_kvm_area();
}
free_kvm_area();
}
-static void update_exception_bitmap(struct kvm_vcpu *vcpu)
-{
- if (vcpu->rmode.active)
- vmcs_write32(EXCEPTION_BITMAP, ~0);
- else
- vmcs_write32(EXCEPTION_BITMAP, 1 << PF_VECTOR);
-}
-
static void fix_pmode_dataseg(int seg, struct kvm_save_segment *save)
{
struct kvm_vmx_segment_field *sf = &kvm_vmx_segment_fields[seg];
flags |= (vcpu->rmode.save_iopl << IOPL_SHIFT);
vmcs_writel(GUEST_RFLAGS, flags);
- vmcs_writel(GUEST_CR4, (vmcs_readl(GUEST_CR4) & ~CR4_VME_MASK) |
- (vmcs_readl(CR4_READ_SHADOW) & CR4_VME_MASK));
+ vmcs_writel(GUEST_CR4, (vmcs_readl(GUEST_CR4) & ~X86_CR4_VME) |
+ (vmcs_readl(CR4_READ_SHADOW) & X86_CR4_VME));
update_exception_bitmap(vcpu);
vmcs_write32(GUEST_CS_AR_BYTES, 0x9b);
}
-static int rmode_tss_base(struct kvm* kvm)
+static gva_t rmode_tss_base(struct kvm *kvm)
{
gfn_t base_gfn = kvm->memslots[0].base_gfn + kvm->memslots[0].npages - 3;
return base_gfn << PAGE_SHIFT;
flags |= IOPL_MASK | X86_EFLAGS_VM;
vmcs_writel(GUEST_RFLAGS, flags);
- vmcs_writel(GUEST_CR4, vmcs_readl(GUEST_CR4) | CR4_VME_MASK);
+ vmcs_writel(GUEST_CR4, vmcs_readl(GUEST_CR4) | X86_CR4_VME);
update_exception_bitmap(vcpu);
vmcs_write16(GUEST_SS_SELECTOR, vmcs_readl(GUEST_SS_BASE) >> 4);
fix_rmode_seg(VCPU_SREG_DS, &vcpu->rmode.ds);
fix_rmode_seg(VCPU_SREG_GS, &vcpu->rmode.gs);
fix_rmode_seg(VCPU_SREG_FS, &vcpu->rmode.fs);
+
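+	/*
+	 * Rebuild the real-mode TSS; the vm86 task relies on its I/O and
+	 * interrupt-redirection bitmaps.
+	 */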
+ init_rmode_tss(vcpu->kvm);
}
#ifdef CONFIG_X86_64
vcpu->shadow_efer |= EFER_LMA;
- find_msr_entry(vcpu, MSR_EFER)->data |= EFER_LMA | EFER_LME;
+ find_msr_entry(to_vmx(vcpu), MSR_EFER)->data |= EFER_LMA | EFER_LME;
vmcs_write32(VM_ENTRY_CONTROLS,
vmcs_read32(VM_ENTRY_CONTROLS)
- | VM_ENTRY_CONTROLS_IA32E_MASK);
+ | VM_ENTRY_IA32E_MODE);
}
static void exit_lmode(struct kvm_vcpu *vcpu)
vmcs_write32(VM_ENTRY_CONTROLS,
vmcs_read32(VM_ENTRY_CONTROLS)
- & ~VM_ENTRY_CONTROLS_IA32E_MASK);
+ & ~VM_ENTRY_IA32E_MODE);
}
#endif
static void vmx_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0)
{
- if (vcpu->rmode.active && (cr0 & CR0_PE_MASK))
+ vmx_fpu_deactivate(vcpu);
+
+ if (vcpu->rmode.active && (cr0 & X86_CR0_PE))
enter_pmode(vcpu);
- if (!vcpu->rmode.active && !(cr0 & CR0_PE_MASK))
+ if (!vcpu->rmode.active && !(cr0 & X86_CR0_PE))
enter_rmode(vcpu);
#ifdef CONFIG_X86_64
if (vcpu->shadow_efer & EFER_LME) {
- if (!is_paging(vcpu) && (cr0 & CR0_PG_MASK))
+ if (!is_paging(vcpu) && (cr0 & X86_CR0_PG))
enter_lmode(vcpu);
- if (is_paging(vcpu) && !(cr0 & CR0_PG_MASK))
+ if (is_paging(vcpu) && !(cr0 & X86_CR0_PG))
exit_lmode(vcpu);
}
#endif
- if (!(cr0 & CR0_TS_MASK)) {
- vcpu->fpu_active = 1;
- vmcs_clear_bits(EXCEPTION_BITMAP, CR0_TS_MASK);
- }
-
vmcs_writel(CR0_READ_SHADOW, cr0);
vmcs_writel(GUEST_CR0,
(cr0 & ~KVM_GUEST_CR0_MASK) | KVM_VM_CR0_ALWAYS_ON);
vcpu->cr0 = cr0;
+
+ if (!(cr0 & X86_CR0_TS) || !(cr0 & X86_CR0_PE))
+ vmx_fpu_activate(vcpu);
}
static void vmx_set_cr3(struct kvm_vcpu *vcpu, unsigned long cr3)
{
vmcs_writel(GUEST_CR3, cr3);
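+	/*
+	 * A cr3 load usually means a guest task switch; drop FPU
+	 * ownership so the new task reloads it lazily through #NM.
+	 */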
-
- if (!(vcpu->cr0 & CR0_TS_MASK)) {
- vcpu->fpu_active = 0;
- vmcs_set_bits(GUEST_CR0, CR0_TS_MASK);
- vmcs_set_bits(EXCEPTION_BITMAP, 1 << NM_VECTOR);
- }
+ if (vcpu->cr0 & X86_CR0_PE)
+ vmx_fpu_deactivate(vcpu);
}
static void vmx_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4)
static void vmx_set_efer(struct kvm_vcpu *vcpu, u64 efer)
{
- struct vmx_msr_entry *msr = find_msr_entry(vcpu, MSR_EFER);
+ struct vcpu_vmx *vmx = to_vmx(vcpu);
+ struct kvm_msr_entry *msr = find_msr_entry(vmx, MSR_EFER);
vcpu->shadow_efer = efer;
if (efer & EFER_LMA) {
vmcs_write32(VM_ENTRY_CONTROLS,
vmcs_read32(VM_ENTRY_CONTROLS) |
- VM_ENTRY_CONTROLS_IA32E_MASK);
+ VM_ENTRY_IA32E_MODE);
msr->data = efer;
} else {
vmcs_write32(VM_ENTRY_CONTROLS,
vmcs_read32(VM_ENTRY_CONTROLS) &
- ~VM_ENTRY_CONTROLS_IA32E_MASK);
+ ~VM_ENTRY_IA32E_MODE);
msr->data = efer & ~EFER_LME;
}
- setup_msrs(vcpu);
+ setup_msrs(vmx);
}
#endif
var->unusable = (ar >> 16) & 1;
}
-static void vmx_set_segment(struct kvm_vcpu *vcpu,
- struct kvm_segment *var, int seg)
+static u32 vmx_segment_access_rights(struct kvm_segment *var)
{
- struct kvm_vmx_segment_field *sf = &kvm_vmx_segment_fields[seg];
u32 ar;
- vmcs_writel(sf->base, var->base);
- vmcs_write32(sf->limit, var->limit);
- vmcs_write16(sf->selector, var->selector);
- if (vcpu->rmode.active && var->s) {
- /*
- * Hack real-mode segments into vm86 compatibility.
- */
- if (var->base == 0xffff0000 && var->selector == 0xf000)
- vmcs_writel(sf->base, 0xf0000);
- ar = 0xf3;
- } else if (var->unusable)
+ if (var->unusable)
ar = 1 << 16;
else {
ar = var->type & 15;
}
if (ar == 0) /* a 0 value means unusable */
ar = AR_UNUSABLE_MASK;
+
+ return ar;
+}
+
+static void vmx_set_segment(struct kvm_vcpu *vcpu,
+ struct kvm_segment *var, int seg)
+{
+ struct kvm_vmx_segment_field *sf = &kvm_vmx_segment_fields[seg];
+ u32 ar;
+
+ if (vcpu->rmode.active && seg == VCPU_SREG_TR) {
+ vcpu->rmode.tr.selector = var->selector;
+ vcpu->rmode.tr.base = var->base;
+ vcpu->rmode.tr.limit = var->limit;
+ vcpu->rmode.tr.ar = vmx_segment_access_rights(var);
+ return;
+ }
+ vmcs_writel(sf->base, var->base);
+ vmcs_write32(sf->limit, var->limit);
+ vmcs_write16(sf->selector, var->selector);
+ if (vcpu->rmode.active && var->s) {
+ /*
+ * Hack real-mode segments into vm86 compatibility.
+ */
+ if (var->base == 0xffff0000 && var->selector == 0xf000)
+ vmcs_writel(sf->base, 0xf0000);
+ ar = 0xf3;
+ } else
+ ar = vmx_segment_access_rights(var);
vmcs_write32(sf->ar_bytes, ar);
}
}
page = kmap_atomic(p1, KM_USER0);
- memset(page, 0, PAGE_SIZE);
+ clear_page(page);
*(u16*)(page + 0x66) = TSS_BASE_SIZE + TSS_REDIRECTION_SIZE;
kunmap_atomic(page, KM_USER0);
page = kmap_atomic(p2, KM_USER0);
- memset(page, 0, PAGE_SIZE);
+ clear_page(page);
kunmap_atomic(page, KM_USER0);
page = kmap_atomic(p3, KM_USER0);
- memset(page, 0, PAGE_SIZE);
+ clear_page(page);
*(page + RMODE_TSS_SIZE - 2 * PAGE_SIZE - 1) = ~0;
kunmap_atomic(page, KM_USER0);
return 1;
}
-static void vmcs_write32_fixedbits(u32 msr, u32 vmcs_field, u32 val)
-{
- u32 msr_high, msr_low;
-
- rdmsr(msr, msr_low, msr_high);
-
- val &= msr_high;
- val |= msr_low;
- vmcs_write32(vmcs_field, val);
-}
-
static void seg_setup(int seg)
{
struct kvm_vmx_segment_field *sf = &kvm_vmx_segment_fields[seg];
/*
* Sets up the vmcs for emulated real mode.
*/
-static int vmx_vcpu_setup(struct kvm_vcpu *vcpu)
+static int vmx_vcpu_setup(struct vcpu_vmx *vmx)
{
u32 host_sysenter_cs;
u32 junk;
struct descriptor_table dt;
int i;
int ret = 0;
- extern asmlinkage void kvm_vmx_return(void);
+ unsigned long kvm_vmx_return;
- if (!init_rmode_tss(vcpu->kvm)) {
+ if (!init_rmode_tss(vmx->vcpu.kvm)) {
ret = -ENOMEM;
goto out;
}
- memset(vcpu->regs, 0, sizeof(vcpu->regs));
- vcpu->regs[VCPU_REGS_RDX] = get_rdx_init_val();
- vcpu->cr8 = 0;
- vcpu->apic_base = 0xfee00000 |
- /*for vcpu 0*/ MSR_IA32_APICBASE_BSP |
- MSR_IA32_APICBASE_ENABLE;
+ vmx->vcpu.regs[VCPU_REGS_RDX] = get_rdx_init_val();
+ vmx->vcpu.cr8 = 0;
+ vmx->vcpu.apic_base = 0xfee00000 | MSR_IA32_APICBASE_ENABLE;
+ if (vmx->vcpu.vcpu_id == 0)
+ vmx->vcpu.apic_base |= MSR_IA32_APICBASE_BSP;
- fx_init(vcpu);
+ fx_init(&vmx->vcpu);
/*
* GUEST_CS_BASE should really be 0xffff0000, but VT vm86 mode
vmcs_write32(GUEST_PENDING_DBG_EXCEPTIONS, 0);
/* I/O */
- vmcs_write64(IO_BITMAP_A, 0);
- vmcs_write64(IO_BITMAP_B, 0);
+ vmcs_write64(IO_BITMAP_A, page_to_phys(vmx_io_bitmap_a));
+ vmcs_write64(IO_BITMAP_B, page_to_phys(vmx_io_bitmap_b));
guest_write_tsc(0);
vmcs_write64(GUEST_IA32_DEBUGCTL, 0);
/* Control */
- vmcs_write32_fixedbits(MSR_IA32_VMX_PINBASED_CTLS,
- PIN_BASED_VM_EXEC_CONTROL,
- PIN_BASED_EXT_INTR_MASK /* 20.6.1 */
- | PIN_BASED_NMI_EXITING /* 20.6.1 */
- );
- vmcs_write32_fixedbits(MSR_IA32_VMX_PROCBASED_CTLS,
- CPU_BASED_VM_EXEC_CONTROL,
- CPU_BASED_HLT_EXITING /* 20.6.2 */
- | CPU_BASED_CR8_LOAD_EXITING /* 20.6.2 */
- | CPU_BASED_CR8_STORE_EXITING /* 20.6.2 */
- | CPU_BASED_UNCOND_IO_EXITING /* 20.6.2 */
- | CPU_BASED_MOV_DR_EXITING
- | CPU_BASED_USE_TSC_OFFSETING /* 21.3 */
- );
-
- vmcs_write32(EXCEPTION_BITMAP, 1 << PF_VECTOR);
+ vmcs_write32(PIN_BASED_VM_EXEC_CONTROL,
+ vmcs_config.pin_based_exec_ctrl);
+ vmcs_write32(CPU_BASED_VM_EXEC_CONTROL,
+ vmcs_config.cpu_based_exec_ctrl);
+
vmcs_write32(PAGE_FAULT_ERROR_CODE_MASK, 0);
vmcs_write32(PAGE_FAULT_ERROR_CODE_MATCH, 0);
vmcs_write32(CR3_TARGET_COUNT, 0); /* 22.2.1 */
get_idt(&dt);
vmcs_writel(HOST_IDTR_BASE, dt.base); /* 22.2.4 */
-
- vmcs_writel(HOST_RIP, (unsigned long)kvm_vmx_return); /* 22.2.5 */
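+	/*
+	 * HOST_RIP is the vmexit landing pad; the label is now local to
+	 * vmx_vcpu_run()'s asm block, so take its address at run time.
+	 */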
+ asm ("mov $.Lkvm_vmx_return, %0" : "=r"(kvm_vmx_return));
+ vmcs_writel(HOST_RIP, kvm_vmx_return); /* 22.2.5 */
+ vmcs_write32(VM_EXIT_MSR_STORE_COUNT, 0);
+ vmcs_write32(VM_EXIT_MSR_LOAD_COUNT, 0);
+ vmcs_write32(VM_ENTRY_MSR_LOAD_COUNT, 0);
rdmsr(MSR_IA32_SYSENTER_CS, host_sysenter_cs, junk);
vmcs_write32(HOST_IA32_SYSENTER_CS, host_sysenter_cs);
u32 index = vmx_msr_index[i];
u32 data_low, data_high;
u64 data;
- int j = vcpu->nmsrs;
+ int j = vmx->nmsrs;
if (rdmsr_safe(index, &data_low, &data_high) < 0)
continue;
if (wrmsr_safe(index, data_low, data_high) < 0)
continue;
data = data_low | ((u64)data_high << 32);
- vcpu->host_msrs[j].index = index;
- vcpu->host_msrs[j].reserved = 0;
- vcpu->host_msrs[j].data = data;
- vcpu->guest_msrs[j] = vcpu->host_msrs[j];
-#ifdef CONFIG_X86_64
- if (index == MSR_KERNEL_GS_BASE)
- msr_offset_kernel_gs_base = j;
-#endif
- ++vcpu->nmsrs;
+ vmx->host_msrs[j].index = index;
+ vmx->host_msrs[j].reserved = 0;
+ vmx->host_msrs[j].data = data;
+ vmx->guest_msrs[j] = vmx->host_msrs[j];
+ ++vmx->nmsrs;
}
- setup_msrs(vcpu);
+ setup_msrs(vmx);
- vmcs_write32_fixedbits(MSR_IA32_VMX_EXIT_CTLS, VM_EXIT_CONTROLS,
- (HOST_IS_64 << 9)); /* 22.2,1, 20.7.1 */
+ vmcs_write32(VM_EXIT_CONTROLS, vmcs_config.vmexit_ctrl);
/* 22.2.1, 20.8.1 */
- vmcs_write32_fixedbits(MSR_IA32_VMX_ENTRY_CTLS,
- VM_ENTRY_CONTROLS, 0);
+ vmcs_write32(VM_ENTRY_CONTROLS, vmcs_config.vmentry_ctrl);
+
vmcs_write32(VM_ENTRY_INTR_INFO_FIELD, 0); /* 22.2.1 */
#ifdef CONFIG_X86_64
vmcs_writel(CR0_GUEST_HOST_MASK, ~0UL);
vmcs_writel(CR4_GUEST_HOST_MASK, KVM_GUEST_CR4_MASK);
- vcpu->cr0 = 0x60000010;
- vmx_set_cr0(vcpu, vcpu->cr0); // enter rmode
- vmx_set_cr4(vcpu, 0);
+ vmx->vcpu.cr0 = 0x60000010;
+	vmx_set_cr0(&vmx->vcpu, vmx->vcpu.cr0); /* enter rmode */
+ vmx_set_cr4(&vmx->vcpu, 0);
#ifdef CONFIG_X86_64
- vmx_set_efer(vcpu, 0);
+ vmx_set_efer(&vmx->vcpu, 0);
#endif
+ vmx_fpu_activate(&vmx->vcpu);
+ update_exception_bitmap(&vmx->vcpu);
return 0;
return;
}
- if (kvm_read_guest(vcpu, irq * sizeof(ent), sizeof(ent), &ent) !=
- sizeof(ent)) {
+ if (emulator_read_std(irq * sizeof(ent), &ent, sizeof(ent), vcpu) !=
+ X86EMUL_CONTINUE) {
vcpu_printf(vcpu, "%s: read guest err\n", __FUNCTION__);
return;
}
ip = vmcs_readl(GUEST_RIP);
- if (kvm_write_guest(vcpu, ss_base + sp - 2, 2, &flags) != 2 ||
- kvm_write_guest(vcpu, ss_base + sp - 4, 2, &cs) != 2 ||
- kvm_write_guest(vcpu, ss_base + sp - 6, 2, &ip) != 2) {
+ if (emulator_write_emulated(ss_base + sp - 2, &flags, 2, vcpu) != X86EMUL_CONTINUE ||
+ emulator_write_emulated(ss_base + sp - 4, &cs, 2, vcpu) != X86EMUL_CONTINUE ||
+ emulator_write_emulated(ss_base + sp - 6, &ip, 2, vcpu) != X86EMUL_CONTINUE) {
vcpu_printf(vcpu, "%s: write guest err\n", __FUNCTION__);
return;
}
if (!vcpu->rmode.active)
return 0;
- if (vec == GP_VECTOR && err_code == 0)
+	/*
+	 * An instruction with the address-size override prefix (opcode
+	 * 0x67) can cause a #SS fault with error code 0 in vm86 mode.
+	 */
+ if (((vec == GP_VECTOR) || (vec == SS_VECTOR)) && err_code == 0)
if (emulate_instruction(vcpu, NULL, 0, 0) == EMULATE_DONE)
return 1;
return 0;
}
if (is_no_device(intr_info)) {
- vcpu->fpu_active = 1;
- vmcs_clear_bits(EXCEPTION_BITMAP, 1 << NM_VECTOR);
- if (!(vcpu->cr0 & CR0_TS_MASK))
- vmcs_clear_bits(GUEST_CR0, CR0_TS_MASK);
+ vmx_fpu_activate(vcpu);
return 1;
}
if (is_page_fault(intr_info)) {
cr2 = vmcs_readl(EXIT_QUALIFICATION);
- spin_lock(&vcpu->kvm->lock);
+ mutex_lock(&vcpu->kvm->lock);
r = kvm_mmu_page_fault(vcpu, cr2, error_code);
if (r < 0) {
- spin_unlock(&vcpu->kvm->lock);
+ mutex_unlock(&vcpu->kvm->lock);
return r;
}
if (!r) {
- spin_unlock(&vcpu->kvm->lock);
+ mutex_unlock(&vcpu->kvm->lock);
return 1;
}
er = emulate_instruction(vcpu, kvm_run, cr2, error_code);
- spin_unlock(&vcpu->kvm->lock);
+ mutex_unlock(&vcpu->kvm->lock);
switch (er) {
case EMULATE_DONE:
return 1;
case EMULATE_DO_MMIO:
++vcpu->stat.mmio_exits;
- kvm_run->exit_reason = KVM_EXIT_MMIO;
return 0;
case EMULATE_FAIL:
vcpu_printf(vcpu, "%s: emulate fail\n", __FUNCTION__);
if (vcpu->rmode.active &&
handle_rmode_exception(vcpu, intr_info & INTR_INFO_VECTOR_MASK,
- error_code))
+ error_code)) {
+ if (vcpu->halt_request) {
+ vcpu->halt_request = 0;
+ return kvm_emulate_halt(vcpu);
+ }
return 1;
+ }
if ((intr_info & (INTR_INFO_INTR_TYPE_MASK | INTR_INFO_VECTOR_MASK)) == (INTR_TYPE_EXCEPTION | 1)) {
kvm_run->exit_reason = KVM_EXIT_DEBUG;
return 0;
}
-static int get_io_count(struct kvm_vcpu *vcpu, unsigned long *count)
-{
- u64 inst;
- gva_t rip;
- int countr_size;
- int i, n;
-
- if ((vmcs_readl(GUEST_RFLAGS) & X86_EFLAGS_VM)) {
- countr_size = 2;
- } else {
- u32 cs_ar = vmcs_read32(GUEST_CS_AR_BYTES);
-
- countr_size = (cs_ar & AR_L_MASK) ? 8:
- (cs_ar & AR_DB_MASK) ? 4: 2;
- }
-
- rip = vmcs_readl(GUEST_RIP);
- if (countr_size != 8)
- rip += vmcs_readl(GUEST_CS_BASE);
-
- n = kvm_read_guest(vcpu, rip, sizeof(inst), &inst);
-
- for (i = 0; i < n; i++) {
- switch (((u8*)&inst)[i]) {
- case 0xf0:
- case 0xf2:
- case 0xf3:
- case 0x2e:
- case 0x36:
- case 0x3e:
- case 0x26:
- case 0x64:
- case 0x65:
- case 0x66:
- break;
- case 0x67:
- countr_size = (countr_size == 2) ? 4: (countr_size >> 1);
- default:
- goto done;
- }
- }
- return 0;
-done:
- countr_size *= 8;
- *count = vcpu->regs[VCPU_REGS_RCX] & (~0ULL >> (64 - countr_size));
- //printk("cx: %lx\n", vcpu->regs[VCPU_REGS_RCX]);
- return 1;
-}
-
static int handle_io(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
{
u64 exit_qualification;
int size, down, in, string, rep;
unsigned port;
- unsigned long count;
- gva_t address;
++vcpu->stat.io_exits;
exit_qualification = vmcs_read64(EXIT_QUALIFICATION);
- in = (exit_qualification & 8) != 0;
- size = (exit_qualification & 7) + 1;
string = (exit_qualification & 16) != 0;
+
+ if (string) {
+ if (emulate_instruction(vcpu, kvm_run, 0, 0) == EMULATE_DO_MMIO)
+ return 0;
+ return 1;
+ }
+
+ size = (exit_qualification & 7) + 1;
+ in = (exit_qualification & 8) != 0;
down = (vmcs_readl(GUEST_RFLAGS) & X86_EFLAGS_DF) != 0;
- count = 1;
rep = (exit_qualification & 32) != 0;
port = exit_qualification >> 16;
- address = 0;
- if (string) {
- if (rep && !get_io_count(vcpu, &count))
- return 1;
- address = vmcs_readl(GUEST_LINEAR_ADDRESS);
- }
- return kvm_setup_pio(vcpu, kvm_run, in, size, count, string, down,
- address, rep, port);
+
+ return kvm_emulate_pio(vcpu, kvm_run, in, size, port);
}
static void
vcpu_load_rsp_rip(vcpu);
set_cr8(vcpu, vcpu->regs[reg]);
skip_emulated_instruction(vcpu);
- return 1;
+ kvm_run->exit_reason = KVM_EXIT_SET_TPR;
+ return 0;
};
break;
case 2: /* clts */
vcpu_load_rsp_rip(vcpu);
- vcpu->fpu_active = 1;
- vmcs_clear_bits(EXCEPTION_BITMAP, 1 << NM_VECTOR);
- vmcs_clear_bits(GUEST_CR0, CR0_TS_MASK);
- vcpu->cr0 &= ~CR0_TS_MASK;
+ vmx_fpu_deactivate(vcpu);
+ vcpu->cr0 &= ~X86_CR0_TS;
vmcs_writel(CR0_READ_SHADOW, vcpu->cr0);
+ vmx_fpu_activate(vcpu);
skip_emulated_instruction(vcpu);
return 1;
case 1: /*mov from cr*/
break;
}
kvm_run->exit_reason = 0;
- printk(KERN_ERR "kvm: unhandled control register: op %d cr %d\n",
+ pr_unimpl(vcpu, "unhandled control register: op %d cr %d\n",
(int)(exit_qualification >> 4) & 3, cr);
return 0;
}
static int handle_halt(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
{
skip_emulated_instruction(vcpu);
- if (vcpu->irq_summary)
- return 1;
-
- kvm_run->exit_reason = KVM_EXIT_HLT;
- ++vcpu->stat.halt_exits;
- return 0;
+ return kvm_emulate_halt(vcpu);
}
static int handle_vmcall(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
};
static const int kvm_vmx_max_exit_handlers =
- sizeof(kvm_vmx_exit_handlers) / sizeof(*kvm_vmx_exit_handlers);
+ ARRAY_SIZE(kvm_vmx_exit_handlers);
/*
* The guest has exited. See if we can fix it or if we need userspace
(vmcs_readl(GUEST_RFLAGS) & X86_EFLAGS_IF));
}
+static void vmx_flush_tlb(struct kvm_vcpu *vcpu)
+{
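+	/*
+	 * Nothing to do here: on these CPUs (no VPID) every VM entry/exit
+	 * already flushes the guest's TLB entries.
+	 */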
+}
+
static int vmx_vcpu_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
{
+ struct vcpu_vmx *vmx = to_vmx(vcpu);
u8 fail;
- u16 fs_sel, gs_sel, ldt_sel;
- int fs_gs_ldt_reload_needed;
int r;
-again:
- /*
- * Set host fs and gs selectors. Unfortunately, 22.2.3 does not
- * allow segment selectors with cpl > 0 or ti == 1.
- */
- fs_sel = read_fs();
- gs_sel = read_gs();
- ldt_sel = read_ldt();
- fs_gs_ldt_reload_needed = (fs_sel & 7) | (gs_sel & 7) | ldt_sel;
- if (!fs_gs_ldt_reload_needed) {
- vmcs_write16(HOST_FS_SELECTOR, fs_sel);
- vmcs_write16(HOST_GS_SELECTOR, gs_sel);
- } else {
- vmcs_write16(HOST_FS_SELECTOR, 0);
- vmcs_write16(HOST_GS_SELECTOR, 0);
- }
-
-#ifdef CONFIG_X86_64
- vmcs_writel(HOST_FS_BASE, read_msr(MSR_FS_BASE));
- vmcs_writel(HOST_GS_BASE, read_msr(MSR_GS_BASE));
-#else
- vmcs_writel(HOST_FS_BASE, segment_base(fs_sel));
- vmcs_writel(HOST_GS_BASE, segment_base(gs_sel));
-#endif
-
- if (!vcpu->mmio_read_completed)
- do_interrupt_requests(vcpu, kvm_run);
-
+preempted:
if (vcpu->guest_debug.enabled)
kvm_guest_debug_pre(vcpu);
+again:
+ r = kvm_mmu_reload(vcpu);
+ if (unlikely(r))
+ goto out;
+
+ preempt_disable();
+
+ vmx_save_host_state(vmx);
kvm_load_guest_fpu(vcpu);
/*
*/
vmcs_writel(HOST_CR0, read_cr0());
-#ifdef CONFIG_X86_64
- if (is_long_mode(vcpu)) {
- save_msrs(vcpu->host_msrs + msr_offset_kernel_gs_base, 1);
- load_msrs(vcpu->guest_msrs, NR_BAD_MSRS);
+ local_irq_disable();
+
+ if (signal_pending(current)) {
+ local_irq_enable();
+ preempt_enable();
+ r = -EINTR;
+ kvm_run->exit_reason = KVM_EXIT_INTR;
+ ++vcpu->stat.signal_exits;
+ goto out;
}
-#endif
+
+ if (!vcpu->mmio_read_completed)
+ do_interrupt_requests(vcpu, kvm_run);
+
+ vcpu->guest_mode = 1;
+ if (vcpu->requests)
+ if (test_and_clear_bit(KVM_TLB_FLUSH, &vcpu->requests))
+ vmx_flush_tlb(vcpu);
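+
+	/*
+	 * The world switch: load guest registers, VMLAUNCH/VMRESUME into
+	 * the guest, then on vmexit save guest and reload host registers.
+	 */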
asm (
/* Store host registers */
- "pushf \n\t"
#ifdef CONFIG_X86_64
"push %%rax; push %%rbx; push %%rdx;"
"push %%rsi; push %%rdi; push %%rbp;"
"mov %c[rcx](%3), %%ecx \n\t" /* kills %3 (ecx) */
#endif
/* Enter guest mode */
- "jne launched \n\t"
+ "jne .Llaunched \n\t"
ASM_VMX_VMLAUNCH "\n\t"
- "jmp kvm_vmx_return \n\t"
- "launched: " ASM_VMX_VMRESUME "\n\t"
- ".globl kvm_vmx_return \n\t"
- "kvm_vmx_return: "
+ "jmp .Lkvm_vmx_return \n\t"
+ ".Llaunched: " ASM_VMX_VMRESUME "\n\t"
+ ".Lkvm_vmx_return: "
/* Save guest registers, load host registers, keep flags */
#ifdef CONFIG_X86_64
"xchg %3, (%%rsp) \n\t"
"pop %%ecx; popa \n\t"
#endif
"setbe %0 \n\t"
- "popf \n\t"
: "=q" (fail)
- : "r"(vcpu->launched), "d"((unsigned long)HOST_RSP),
+ : "r"(vmx->launched), "d"((unsigned long)HOST_RSP),
"c"(vcpu),
[rax]"i"(offsetof(struct kvm_vcpu, regs[VCPU_REGS_RAX])),
[rbx]"i"(offsetof(struct kvm_vcpu, regs[VCPU_REGS_RBX])),
[cr2]"i"(offsetof(struct kvm_vcpu, cr2))
: "cc", "memory" );
- /*
- * Reload segment selectors ASAP. (it's needed for a functional
- * kernel: x86 relies on having __KERNEL_PDA in %fs and x86_64
- * relies on having 0 in %gs for the CPU PDA to work.)
- */
- if (fs_gs_ldt_reload_needed) {
- load_ldt(ldt_sel);
- load_fs(fs_sel);
- /*
- * If we have to reload gs, we must take care to
- * preserve our gs base.
- */
- local_irq_disable();
- load_gs(gs_sel);
-#ifdef CONFIG_X86_64
- wrmsrl(MSR_GS_BASE, vmcs_readl(HOST_GS_BASE));
-#endif
- local_irq_enable();
+ vcpu->guest_mode = 0;
+ local_irq_enable();
- reload_tss();
- }
++vcpu->stat.exits;
-#ifdef CONFIG_X86_64
- if (is_long_mode(vcpu)) {
- save_msrs(vcpu->guest_msrs, NR_BAD_MSRS);
- load_msrs(vcpu->host_msrs, NR_BAD_MSRS);
- }
-#endif
-
vcpu->interrupt_window_open = (vmcs_read32(GUEST_INTERRUPTIBILITY_INFO) & 3) == 0;
asm ("mov %0, %%ds; mov %0, %%es" : : "r"(__USER_DS));
+ vmx->launched = 1;
- if (fail) {
+ preempt_enable();
+
+ if (unlikely(fail)) {
kvm_run->exit_reason = KVM_EXIT_FAIL_ENTRY;
kvm_run->fail_entry.hardware_entry_failure_reason
= vmcs_read32(VM_INSTRUCTION_ERROR);
r = 0;
- } else {
- /*
- * Profile KVM exit RIPs:
- */
- if (unlikely(prof_on == KVM_PROFILING))
- profile_hit(KVM_PROFILING, (void *)vmcs_readl(GUEST_RIP));
-
- vcpu->launched = 1;
- r = kvm_handle_exit(kvm_run, vcpu);
- if (r > 0) {
- /* Give scheduler a change to reschedule. */
- if (signal_pending(current)) {
- ++vcpu->stat.signal_exits;
- post_kvm_run_save(vcpu, kvm_run);
- kvm_run->exit_reason = KVM_EXIT_INTR;
- return -EINTR;
- }
-
- if (dm_request_for_irq_injection(vcpu, kvm_run)) {
- ++vcpu->stat.request_irq_exits;
- post_kvm_run_save(vcpu, kvm_run);
- kvm_run->exit_reason = KVM_EXIT_INTR;
- return -EINTR;
- }
-
- kvm_resched(vcpu);
+ goto out;
+ }
+ /*
+ * Profile KVM exit RIPs:
+ */
+ if (unlikely(prof_on == KVM_PROFILING))
+ profile_hit(KVM_PROFILING, (void *)vmcs_readl(GUEST_RIP));
+
+ r = kvm_handle_exit(kvm_run, vcpu);
+ if (r > 0) {
+ if (dm_request_for_irq_injection(vcpu, kvm_run)) {
+ r = -EINTR;
+ kvm_run->exit_reason = KVM_EXIT_INTR;
+ ++vcpu->stat.request_irq_exits;
+ goto out;
+ }
+ if (!need_resched()) {
+ ++vcpu->stat.light_exits;
goto again;
}
}
+out:
+ if (r > 0) {
+ kvm_resched(vcpu);
+ goto preempted;
+ }
+
post_kvm_run_save(vcpu, kvm_run);
return r;
}
-static void vmx_flush_tlb(struct kvm_vcpu *vcpu)
-{
- vmcs_writel(GUEST_CR3, vmcs_readl(GUEST_CR3));
-}
-
static void vmx_inject_page_fault(struct kvm_vcpu *vcpu,
unsigned long addr,
u32 err_code)
static void vmx_free_vmcs(struct kvm_vcpu *vcpu)
{
- if (vcpu->vmcs) {
- on_each_cpu(__vcpu_clear, vcpu, 0, 1);
- free_vmcs(vcpu->vmcs);
- vcpu->vmcs = NULL;
+ struct vcpu_vmx *vmx = to_vmx(vcpu);
+
+ if (vmx->vmcs) {
+ on_each_cpu(__vcpu_clear, vmx, 0, 1);
+ free_vmcs(vmx->vmcs);
+ vmx->vmcs = NULL;
}
}
static void vmx_free_vcpu(struct kvm_vcpu *vcpu)
{
+ struct vcpu_vmx *vmx = to_vmx(vcpu);
+
vmx_free_vmcs(vcpu);
+ kfree(vmx->host_msrs);
+ kfree(vmx->guest_msrs);
+ kvm_vcpu_uninit(vcpu);
+ kmem_cache_free(kvm_vcpu_cache, vmx);
}
-static int vmx_create_vcpu(struct kvm_vcpu *vcpu)
+static struct kvm_vcpu *vmx_create_vcpu(struct kvm *kvm, unsigned int id)
{
- struct vmcs *vmcs;
-
- vcpu->guest_msrs = kmalloc(PAGE_SIZE, GFP_KERNEL);
- if (!vcpu->guest_msrs)
- return -ENOMEM;
+ int err;
+ struct vcpu_vmx *vmx = kmem_cache_zalloc(kvm_vcpu_cache, GFP_KERNEL);
+ int cpu;
- vcpu->host_msrs = kmalloc(PAGE_SIZE, GFP_KERNEL);
- if (!vcpu->host_msrs)
- goto out_free_guest_msrs;
+ if (!vmx)
+ return ERR_PTR(-ENOMEM);
- vmcs = alloc_vmcs();
- if (!vmcs)
- goto out_free_msrs;
+ err = kvm_vcpu_init(&vmx->vcpu, kvm, id);
+ if (err)
+ goto free_vcpu;
- vmcs_clear(vmcs);
- vcpu->vmcs = vmcs;
- vcpu->launched = 0;
- vcpu->fpu_active = 1;
+ vmx->guest_msrs = kmalloc(PAGE_SIZE, GFP_KERNEL);
+ if (!vmx->guest_msrs) {
+ err = -ENOMEM;
+ goto uninit_vcpu;
+ }
- return 0;
+ vmx->host_msrs = kmalloc(PAGE_SIZE, GFP_KERNEL);
+ if (!vmx->host_msrs)
+ goto free_guest_msrs;
-out_free_msrs:
- kfree(vcpu->host_msrs);
- vcpu->host_msrs = NULL;
+ vmx->vmcs = alloc_vmcs();
+ if (!vmx->vmcs)
+ goto free_msrs;
-out_free_guest_msrs:
- kfree(vcpu->guest_msrs);
- vcpu->guest_msrs = NULL;
+ vmcs_clear(vmx->vmcs);
- return -ENOMEM;
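+	/*
+	 * vmx_vcpu_setup() writes the VMCS, so the new VMCS must be loaded
+	 * and current on this cpu while it runs.
+	 */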
+ cpu = get_cpu();
+ vmx_vcpu_load(&vmx->vcpu, cpu);
+ err = vmx_vcpu_setup(vmx);
+ vmx_vcpu_put(&vmx->vcpu);
+ put_cpu();
+ if (err)
+ goto free_vmcs;
+
+ return &vmx->vcpu;
+
+free_vmcs:
+ free_vmcs(vmx->vmcs);
+free_msrs:
+ kfree(vmx->host_msrs);
+free_guest_msrs:
+ kfree(vmx->guest_msrs);
+uninit_vcpu:
+ kvm_vcpu_uninit(&vmx->vcpu);
+free_vcpu:
+ kmem_cache_free(kvm_vcpu_cache, vmx);
+ return ERR_PTR(err);
+}
+
+static void __init vmx_check_processor_compat(void *rtn)
+{
+ struct vmcs_config vmcs_conf;
+
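+	/*
+	 * Recompute the VMCS configuration on this cpu; it must match the
+	 * configuration computed on the boot cpu at setup time.
+	 */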
+ *(int *)rtn = 0;
+ if (setup_vmcs_config(&vmcs_conf) < 0)
+ *(int *)rtn = -EIO;
+ if (memcmp(&vmcs_config, &vmcs_conf, sizeof(struct vmcs_config)) != 0) {
+ printk(KERN_ERR "kvm: CPU %d feature inconsistency!\n",
+ smp_processor_id());
+ *(int *)rtn = -EIO;
+ }
}
static struct kvm_arch_ops vmx_arch_ops = {
.disabled_by_bios = vmx_disabled_by_bios,
.hardware_setup = hardware_setup,
.hardware_unsetup = hardware_unsetup,
+ .check_processor_compatibility = vmx_check_processor_compat,
.hardware_enable = hardware_enable,
.hardware_disable = hardware_disable,
.run = vmx_vcpu_run,
.skip_emulated_instruction = skip_emulated_instruction,
- .vcpu_setup = vmx_vcpu_setup,
.patch_hypercall = vmx_patch_hypercall,
};
static int __init vmx_init(void)
{
- return kvm_init_arch(&vmx_arch_ops, THIS_MODULE);
+ void *iova;
+ int r;
+
+ vmx_io_bitmap_a = alloc_page(GFP_KERNEL | __GFP_HIGHMEM);
+ if (!vmx_io_bitmap_a)
+ return -ENOMEM;
+
+ vmx_io_bitmap_b = alloc_page(GFP_KERNEL | __GFP_HIGHMEM);
+ if (!vmx_io_bitmap_b) {
+ r = -ENOMEM;
+ goto out;
+ }
+
+ /*
+ * Allow direct access to the PC debug port (it is often used for I/O
+ * delays, but the vmexits simply slow things down).
+ */
+ iova = kmap(vmx_io_bitmap_a);
+ memset(iova, 0xff, PAGE_SIZE);
+ clear_bit(0x80, iova);
+ kunmap(vmx_io_bitmap_a);
+
+ iova = kmap(vmx_io_bitmap_b);
+ memset(iova, 0xff, PAGE_SIZE);
+ kunmap(vmx_io_bitmap_b);
+
+ r = kvm_init_arch(&vmx_arch_ops, sizeof(struct vcpu_vmx), THIS_MODULE);
+ if (r)
+ goto out1;
+
+ return 0;
+
+out1:
+ __free_page(vmx_io_bitmap_b);
+out:
+ __free_page(vmx_io_bitmap_a);
+ return r;
}
static void __exit vmx_exit(void)
{
+ __free_page(vmx_io_bitmap_b);
+ __free_page(vmx_io_bitmap_a);
+
kvm_exit_arch();
}