#include <linux/mm.h>
#include <linux/highmem.h>
#include <linux/sched.h>
+#include <linux/moduleparam.h>
#include <asm/io.h>
#include <asm/desc.h>
MODULE_AUTHOR("Qumranet");
MODULE_LICENSE("GPL");
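+/*
+ * When set (the default), shadow page tables are built so that guest
+ * page faults on not-present pages are delivered straight to the guest
+ * instead of causing a vmexit; see the PAGE_FAULT_ERROR_CODE_MASK/MATCH
+ * setup and kvm_mmu_set_nonpresent_ptes() below.
+ */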
+static int bypass_guest_pf = 1;
+module_param(bypass_guest_pf, bool, 0);
+
struct vmcs {
u32 revision_id;
u32 abort;
u16 fs_sel, gs_sel, ldt_sel;
int gs_ldt_reload_needed;
int fs_reload_needed;
+ int guest_efer_loaded;
} host_state;
};
static struct page *vmx_io_bitmap_a;
static struct page *vmx_io_bitmap_b;
-#define EFER_SAVE_RESTORE_BITS ((u64)EFER_SCE)
-
static struct vmcs_config {
int size;
int order;
rdmsrl(e[i].index, e[i].data);
}
-static inline u64 msr_efer_save_restore_bits(struct kvm_msr_entry msr)
-{
- return (u64)msr.data & EFER_SAVE_RESTORE_BITS;
-}
-
-static inline int msr_efer_need_save_restore(struct vcpu_vmx *vmx)
-{
- int efer_offset = vmx->msr_offset_efer;
- return msr_efer_save_restore_bits(vmx->host_msrs[efer_offset]) !=
- msr_efer_save_restore_bits(vmx->guest_msrs[efer_offset]);
-}
-
static inline int is_page_fault(u32 intr_info)
{
	return (intr_info & (INTR_INFO_INTR_TYPE_MASK | INTR_INFO_VECTOR_MASK |
			     INTR_INFO_VALID_MASK)) ==
		(INTR_TYPE_EXCEPTION | PF_VECTOR | INTR_INFO_VALID_MASK);
}
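+/*
+ * #UD is intercepted so the faulting instruction can be handed to the
+ * emulator; if emulation fails, the #UD is re-injected into the guest.
+ */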
+static inline int is_invalid_opcode(u32 intr_info)
+{
+ return (intr_info & (INTR_INFO_INTR_TYPE_MASK | INTR_INFO_VECTOR_MASK |
+ INTR_INFO_VALID_MASK)) ==
+ (INTR_TYPE_EXCEPTION | UD_VECTOR | INTR_INFO_VALID_MASK);
+}
+
static inline int is_external_interrupt(u32 intr_info)
{
return (intr_info & (INTR_INFO_INTR_TYPE_MASK | INTR_INFO_VALID_MASK))
static void vcpu_clear(struct vcpu_vmx *vmx)
{
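+	/* smp_call_function_single() also handles calls on the local cpu */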
- if (vmx->vcpu.cpu != raw_smp_processor_id() && vmx->vcpu.cpu != -1)
- smp_call_function_single(vmx->vcpu.cpu, __vcpu_clear,
- vmx, 0, 1);
- else
- __vcpu_clear(vmx);
+ if (vmx->vcpu.cpu == -1)
+ return;
+ smp_call_function_single(vmx->vcpu.cpu, __vcpu_clear, vmx, 0, 1);
vmx->launched = 0;
}
{
u32 eb;
- eb = 1u << PF_VECTOR;
+ eb = (1u << PF_VECTOR) | (1u << UD_VECTOR);
if (!vcpu->fpu_active)
eb |= 1u << NM_VECTOR;
if (vcpu->guest_debug.enabled)
static void load_transition_efer(struct vcpu_vmx *vmx)
{
- u64 trans_efer;
int efer_offset = vmx->msr_offset_efer;
+	u64 host_efer;
+	u64 guest_efer;
+	u64 ignore_bits;
-	trans_efer = vmx->host_msrs[efer_offset].data;
-	trans_efer &= ~EFER_SAVE_RESTORE_BITS;
-	trans_efer |= msr_efer_save_restore_bits(vmx->guest_msrs[efer_offset]);
-	wrmsrl(MSR_EFER, trans_efer);
+	if (efer_offset < 0)
+		return;
+	host_efer = vmx->host_msrs[efer_offset].data;
+	guest_efer = vmx->guest_msrs[efer_offset].data;
+	/*
+	 * NX is emulated; LMA and LME handled by hardware; SCE meaningless
+	 * outside long mode
+	 */
+ ignore_bits = EFER_NX | EFER_SCE;
+#ifdef CONFIG_X86_64
+ ignore_bits |= EFER_LMA | EFER_LME;
+ /* SCE is meaningful only in long mode on Intel */
+ if (guest_efer & EFER_LMA)
+ ignore_bits &= ~(u64)EFER_SCE;
+#endif
+ if ((guest_efer & ~ignore_bits) == (host_efer & ~ignore_bits))
+ return;
+
+ vmx->host_state.guest_efer_loaded = 1;
+ guest_efer &= ~ignore_bits;
+ guest_efer |= host_efer & ignore_bits;
+ wrmsrl(MSR_EFER, guest_efer);
vmx->vcpu.stat.efer_reload++;
}
+static void reload_host_efer(struct vcpu_vmx *vmx)
+{
+ if (vmx->host_state.guest_efer_loaded) {
+ vmx->host_state.guest_efer_loaded = 0;
+ load_msrs(vmx->host_msrs + vmx->msr_offset_efer, 1);
+ }
+}
+
static void vmx_save_host_state(struct kvm_vcpu *vcpu)
{
struct vcpu_vmx *vmx = to_vmx(vcpu);
}
#endif
load_msrs(vmx->guest_msrs, vmx->save_nmsrs);
- if (msr_efer_need_save_restore(vmx))
- load_transition_efer(vmx);
+ load_transition_efer(vmx);
}
static void vmx_load_host_state(struct vcpu_vmx *vmx)
reload_tss();
save_msrs(vmx->guest_msrs, vmx->save_nmsrs);
load_msrs(vmx->host_msrs, vmx->save_nmsrs);
- if (msr_efer_need_save_restore(vmx))
- load_msrs(vmx->host_msrs + vmx->msr_offset_efer, 1);
+ reload_host_efer(vmx);
}
/*
static void vmx_set_rflags(struct kvm_vcpu *vcpu, unsigned long rflags)
{
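+	/* while rmode.active, the guest runs in vm86 and needs VM and IOPL set */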
+ if (vcpu->rmode.active)
+ rflags |= X86_EFLAGS_IOPL | X86_EFLAGS_VM;
vmcs_writel(GUEST_RFLAGS, rflags);
}
INTR_INFO_VALID_MASK);
}
+static void vmx_inject_ud(struct kvm_vcpu *vcpu)
+{
+ vmcs_write32(VM_ENTRY_INTR_INFO_FIELD,
+ UD_VECTOR |
+ INTR_TYPE_EXCEPTION |
+ INTR_INFO_VALID_MASK);
+}
+
/*
* Swap MSR entry in host/guest MSR entry array.
*/
#ifdef CONFIG_X86_64
case MSR_EFER:
ret = kvm_set_msr_common(vcpu, msr_index, data);
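+		/*
+		 * If the msrs are already live, drop the old transition
+		 * efer and load one matching the new guest value.
+		 */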
- if (vmx->host_state.loaded)
+ if (vmx->host_state.loaded) {
+ reload_host_efer(vmx);
load_transition_efer(vmx);
+ }
break;
case MSR_FS_BASE:
vmcs_writel(GUEST_FS_BASE, data);
vmcs_write32(GUEST_TR_AR_BYTES, vcpu->rmode.tr.ar);
flags = vmcs_readl(GUEST_RFLAGS);
- flags &= ~(IOPL_MASK | X86_EFLAGS_VM);
+ flags &= ~(X86_EFLAGS_IOPL | X86_EFLAGS_VM);
flags |= (vcpu->rmode.save_iopl << IOPL_SHIFT);
vmcs_writel(GUEST_RFLAGS, flags);
vmcs_write32(GUEST_TR_AR_BYTES, 0x008b);
flags = vmcs_readl(GUEST_RFLAGS);
- vcpu->rmode.save_iopl = (flags & IOPL_MASK) >> IOPL_SHIFT;
+ vcpu->rmode.save_iopl = (flags & X86_EFLAGS_IOPL) >> IOPL_SHIFT;
- flags |= IOPL_MASK | X86_EFLAGS_VM;
+ flags |= X86_EFLAGS_IOPL | X86_EFLAGS_VM;
vmcs_writel(GUEST_RFLAGS, flags);
vmcs_writel(GUEST_CR4, vmcs_readl(GUEST_CR4) | X86_CR4_VME);
fix_rmode_seg(VCPU_SREG_GS, &vcpu->rmode.gs);
fix_rmode_seg(VCPU_SREG_FS, &vcpu->rmode.fs);
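+	/* the cpu mode change affects the mmu role; rebuild the shadow context */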
+ kvm_mmu_reset_context(vcpu);
init_rmode_tss(vcpu->kvm);
}
static int init_rmode_tss(struct kvm* kvm)
{
- struct page *p1, *p2, *p3;
gfn_t fn = rmode_tss_base(kvm) >> PAGE_SHIFT;
- char *page;
-
- p1 = gfn_to_page(kvm, fn++);
- p2 = gfn_to_page(kvm, fn++);
- p3 = gfn_to_page(kvm, fn);
+ u16 data = 0;
+ int r;
- if (!p1 || !p2 || !p3) {
- kvm_printf(kvm,"%s: gfn_to_page failed\n", __FUNCTION__);
+ r = kvm_clear_guest_page(kvm, fn, 0, PAGE_SIZE);
+ if (r < 0)
+ return 0;
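+	/* offset 0x66 in the TSS is the I/O map base address field */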
+ data = TSS_BASE_SIZE + TSS_REDIRECTION_SIZE;
+ r = kvm_write_guest_page(kvm, fn++, &data, 0x66, sizeof(u16));
+ if (r < 0)
+ return 0;
+ r = kvm_clear_guest_page(kvm, fn++, 0, PAGE_SIZE);
+ if (r < 0)
+ return 0;
+ r = kvm_clear_guest_page(kvm, fn, 0, PAGE_SIZE);
+ if (r < 0)
+ return 0;
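+	/* the I/O permission bitmap is terminated with a byte of all ones */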
+ data = ~0;
+	r = kvm_write_guest_page(kvm, fn, &data,
+				 RMODE_TSS_SIZE - 2 * PAGE_SIZE - 1,
+				 sizeof(u8));
+ if (r < 0)
return 0;
- }
-
- page = kmap_atomic(p1, KM_USER0);
- clear_page(page);
- *(u16*)(page + 0x66) = TSS_BASE_SIZE + TSS_REDIRECTION_SIZE;
- kunmap_atomic(page, KM_USER0);
-
- page = kmap_atomic(p2, KM_USER0);
- clear_page(page);
- kunmap_atomic(page, KM_USER0);
-
- page = kmap_atomic(p3, KM_USER0);
- clear_page(page);
- *(page + RMODE_TSS_SIZE - 2 * PAGE_SIZE - 1) = ~0;
- kunmap_atomic(page, KM_USER0);
-
return 1;
}
}
vmcs_write32(CPU_BASED_VM_EXEC_CONTROL, exec_control);
- vmcs_write32(PAGE_FAULT_ERROR_CODE_MASK, 0);
- vmcs_write32(PAGE_FAULT_ERROR_CODE_MATCH, 0);
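+	/*
+	 * With mask = match = 1, a guest #PF causes a vmexit only when the
+	 * error code has the present bit set; not-present faults go straight
+	 * to the guest.  With bypass_guest_pf off, every #PF exits.
+	 */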
+ vmcs_write32(PAGE_FAULT_ERROR_CODE_MASK, !!bypass_guest_pf);
+ vmcs_write32(PAGE_FAULT_ERROR_CODE_MATCH, !!bypass_guest_pf);
vmcs_write32(CR3_TARGET_COUNT, 0); /* 22.2.1 */
vmcs_writel(HOST_CR0, read_cr0()); /* 22.2.3 */
* Cause the #SS fault with 0 error code in VM86 mode.
*/
if (((vec == GP_VECTOR) || (vec == SS_VECTOR)) && err_code == 0)
- if (emulate_instruction(vcpu, NULL, 0, 0) == EMULATE_DONE)
+ if (emulate_instruction(vcpu, NULL, 0, 0, 0) == EMULATE_DONE)
return 1;
return 0;
}
set_bit(irq / BITS_PER_LONG, &vcpu->irq_summary);
}
- if ((intr_info & INTR_INFO_INTR_TYPE_MASK) == 0x200) { /* nmi */
- asm ("int $2");
- return 1;
- }
+ if ((intr_info & INTR_INFO_INTR_TYPE_MASK) == 0x200) /* nmi */
+ return 1; /* already handled by vmx_vcpu_run() */
if (is_no_device(intr_info)) {
vmx_fpu_activate(vcpu);
return 1;
}
+ if (is_invalid_opcode(intr_info)) {
+ er = emulate_instruction(vcpu, kvm_run, 0, 0, 0);
+ if (er != EMULATE_DONE)
+ vmx_inject_ud(vcpu);
+
+ return 1;
+ }
+
error_code = 0;
rip = vmcs_readl(GUEST_RIP);
if (intr_info & INTR_INFO_DELIEVER_CODE_MASK)
return 1;
}
- er = emulate_instruction(vcpu, kvm_run, cr2, error_code);
+ er = emulate_instruction(vcpu, kvm_run, cr2, error_code, 0);
mutex_unlock(&vcpu->kvm->lock);
switch (er) {
string = (exit_qualification & 16) != 0;
if (string) {
- if (emulate_instruction(vcpu, kvm_run, 0, 0) == EMULATE_DO_MMIO)
+ if (emulate_instruction(vcpu,
+ kvm_run, 0, 0, 0) == EMULATE_DO_MMIO)
return 0;
return 1;
}
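+	/* the vmcall instruction: 0f 01 c1 */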
hypercall[0] = 0x0f;
hypercall[1] = 0x01;
hypercall[2] = 0xc1;
- hypercall[3] = 0xc3;
}
static int handle_cr(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
static int handle_vmcall(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
{
skip_emulated_instruction(vcpu);
- return kvm_hypercall(vcpu, kvm_run);
+ kvm_emulate_hypercall(vcpu);
+ return 1;
}
/*
static void vmx_vcpu_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
{
struct vcpu_vmx *vmx = to_vmx(vcpu);
+ u32 intr_info;
/*
* Loading guest fpu may have cleared host cr0.ts
asm ("mov %0, %%ds; mov %0, %%es" : : "r"(__USER_DS));
vmx->launched = 1;
+
+ intr_info = vmcs_read32(VM_EXIT_INTR_INFO);
+
+ /* We need to handle NMIs before interrupts are enabled */
+ if ((intr_info & INTR_INFO_INTR_TYPE_MASK) == 0x200) /* nmi */
+ asm("int $2");
}
static void vmx_inject_page_fault(struct kvm_vcpu *vcpu,
if (err)
goto free_vcpu;
- if (irqchip_in_kernel(kvm)) {
- err = kvm_create_lapic(&vmx->vcpu);
- if (err < 0)
- goto free_vcpu;
- }
-
vmx->guest_msrs = kmalloc(PAGE_SIZE, GFP_KERNEL);
if (!vmx->guest_msrs) {
err = -ENOMEM;
if (r)
goto out1;
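+	/*
+	 * Shadow ptes that must still trap get a present+reserved-bits
+	 * pattern (~0xffe), so the resulting #PF has the present bit set
+	 * and matches the error-code filter; a zero pte lets not-present
+	 * faults go straight to the guest.
+	 */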
+ if (bypass_guest_pf)
+ kvm_mmu_set_nonpresent_ptes(~0xffeull, 0ull);
+
return 0;
out1: