#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/highmem.h>
-#include <linux/profile.h>
#include <linux/sched.h>
+#include <linux/moduleparam.h>
#include <asm/io.h>
#include <asm/desc.h>
MODULE_AUTHOR("Qumranet");
MODULE_LICENSE("GPL");
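+
+/*
+ * When set, guest page faults whose error code has the P bit clear are
+ * delivered straight to the guest instead of causing a vmexit; see the
+ * PAGE_FAULT_ERROR_CODE_{MASK,MATCH} setup and vmx_init() below.
+ */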
+static int bypass_guest_pf = 1;
+module_param(bypass_guest_pf, bool, 0);
+
struct vmcs {
u32 revision_id;
u32 abort;
struct vcpu_vmx {
struct kvm_vcpu vcpu;
int launched;
+ u8 fail;
struct kvm_msr_entry *guest_msrs;
struct kvm_msr_entry *host_msrs;
int nmsrs;
u16 fs_sel, gs_sel, ldt_sel;
int gs_ldt_reload_needed;
int fs_reload_needed;
+ int guest_efer_loaded;
	} host_state;
};
static struct page *vmx_io_bitmap_a;
static struct page *vmx_io_bitmap_b;
-#define EFER_SAVE_RESTORE_BITS ((u64)EFER_SCE)
-
static struct vmcs_config {
int size;
int order;
rdmsrl(e[i].index, e[i].data);
}
-static inline u64 msr_efer_save_restore_bits(struct kvm_msr_entry msr)
-{
- return (u64)msr.data & EFER_SAVE_RESTORE_BITS;
-}
-
-static inline int msr_efer_need_save_restore(struct vcpu_vmx *vmx)
-{
- int efer_offset = vmx->msr_offset_efer;
- return msr_efer_save_restore_bits(vmx->host_msrs[efer_offset]) !=
- msr_efer_save_restore_bits(vmx->guest_msrs[efer_offset]);
-}
-
static inline int is_page_fault(u32 intr_info)
{
	return (intr_info & (INTR_INFO_INTR_TYPE_MASK | INTR_INFO_VECTOR_MASK |
			     INTR_INFO_VALID_MASK)) ==
		(INTR_TYPE_EXCEPTION | PF_VECTOR | INTR_INFO_VALID_MASK);
}
+static inline int is_invalid_opcode(u32 intr_info)
+{
+ return (intr_info & (INTR_INFO_INTR_TYPE_MASK | INTR_INFO_VECTOR_MASK |
+ INTR_INFO_VALID_MASK)) ==
+ (INTR_TYPE_EXCEPTION | UD_VECTOR | INTR_INFO_VALID_MASK);
+}
+
static inline int is_external_interrupt(u32 intr_info)
{
return (intr_info & (INTR_INFO_INTR_TYPE_MASK | INTR_INFO_VALID_MASK))
== (INTR_TYPE_EXT_INTR | INTR_INFO_VALID_MASK);
}
+static inline int cpu_has_vmx_tpr_shadow(void)
+{
+ return (vmcs_config.cpu_based_exec_ctrl & CPU_BASED_TPR_SHADOW);
+}
+
+static inline int vm_need_tpr_shadow(struct kvm *kvm)
+{
+	return cpu_has_vmx_tpr_shadow() && irqchip_in_kernel(kvm);
+}
+
static int __find_msr_index(struct vcpu_vmx *vmx, u32 msr)
{
int i;
static void vcpu_clear(struct vcpu_vmx *vmx)
{
- if (vmx->vcpu.cpu != raw_smp_processor_id() && vmx->vcpu.cpu != -1)
- smp_call_function_single(vmx->vcpu.cpu, __vcpu_clear,
- vmx, 0, 1);
- else
- __vcpu_clear(vmx);
+ if (vmx->vcpu.cpu == -1)
+ return;
+ smp_call_function_single(vmx->vcpu.cpu, __vcpu_clear, vmx, 0, 1);
vmx->launched = 0;
}
{
u32 eb;
- eb = 1u << PF_VECTOR;
+ eb = (1u << PF_VECTOR) | (1u << UD_VECTOR);
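+	/*
+	 * #UD is trapped so the emulator gets a chance at guest invalid
+	 * opcodes; if emulation fails, the fault is reinjected (see the
+	 * is_invalid_opcode() path in the exception handler).
+	 */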
if (!vcpu->fpu_active)
eb |= 1u << NM_VECTOR;
if (vcpu->guest_debug.enabled)
static void load_transition_efer(struct vcpu_vmx *vmx)
{
- u64 trans_efer;
int efer_offset = vmx->msr_offset_efer;
+ u64 host_efer = vmx->host_msrs[efer_offset].data;
+ u64 guest_efer = vmx->guest_msrs[efer_offset].data;
+ u64 ignore_bits;
+
+ if (efer_offset < 0)
+ return;
+ /*
+	 * NX is emulated; LMA and LME are handled by hardware; SCE is
+	 * meaningless outside long mode
+ */
+ ignore_bits = EFER_NX | EFER_SCE;
+#ifdef CONFIG_X86_64
+ ignore_bits |= EFER_LMA | EFER_LME;
+ /* SCE is meaningful only in long mode on Intel */
+ if (guest_efer & EFER_LMA)
+ ignore_bits &= ~(u64)EFER_SCE;
+#endif
+ if ((guest_efer & ~ignore_bits) == (host_efer & ~ignore_bits))
+ return;
- trans_efer = vmx->host_msrs[efer_offset].data;
- trans_efer &= ~EFER_SAVE_RESTORE_BITS;
- trans_efer |= msr_efer_save_restore_bits(vmx->guest_msrs[efer_offset]);
- wrmsrl(MSR_EFER, trans_efer);
+ vmx->host_state.guest_efer_loaded = 1;
+ guest_efer &= ~ignore_bits;
+ guest_efer |= host_efer & ignore_bits;
+ wrmsrl(MSR_EFER, guest_efer);
vmx->vcpu.stat.efer_reload++;
}
-static void vmx_save_host_state(struct vcpu_vmx *vmx)
+static void reload_host_efer(struct vcpu_vmx *vmx)
{
+ if (vmx->host_state.guest_efer_loaded) {
+ vmx->host_state.guest_efer_loaded = 0;
+ load_msrs(vmx->host_msrs + vmx->msr_offset_efer, 1);
+ }
+}
+
+static void vmx_save_host_state(struct kvm_vcpu *vcpu)
+{
+ struct vcpu_vmx *vmx = to_vmx(vcpu);
+
if (vmx->host_state.loaded)
return;
}
#endif
load_msrs(vmx->guest_msrs, vmx->save_nmsrs);
- if (msr_efer_need_save_restore(vmx))
- load_transition_efer(vmx);
+ load_transition_efer(vmx);
}
static void vmx_load_host_state(struct vcpu_vmx *vmx)
reload_tss();
save_msrs(vmx->guest_msrs, vmx->save_nmsrs);
load_msrs(vmx->host_msrs, vmx->save_nmsrs);
- if (msr_efer_need_save_restore(vmx))
- load_msrs(vmx->host_msrs + vmx->msr_offset_efer, 1);
+ reload_host_efer(vmx);
}
/*
u64 phys_addr = __pa(vmx->vmcs);
u64 tsc_this, delta;
- if (vcpu->cpu != cpu)
+ if (vcpu->cpu != cpu) {
vcpu_clear(vmx);
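+		/* the in-kernel APIC timer must follow the vcpu to its new cpu */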
+ kvm_migrate_apic_timer(vcpu);
+ }
if (per_cpu(current_vmcs, cpu) != vmx->vmcs) {
u8 error;
static void vmx_set_rflags(struct kvm_vcpu *vcpu, unsigned long rflags)
{
+ if (vcpu->rmode.active)
+ rflags |= X86_EFLAGS_IOPL | X86_EFLAGS_VM;
vmcs_writel(GUEST_RFLAGS, rflags);
}
INTR_INFO_VALID_MASK);
}
+static void vmx_inject_ud(struct kvm_vcpu *vcpu)
+{
+ vmcs_write32(VM_ENTRY_INTR_INFO_FIELD,
+ UD_VECTOR |
+ INTR_TYPE_EXCEPTION |
+ INTR_INFO_VALID_MASK);
+}
+
/*
* Swap MSR entry in host/guest MSR entry array.
*/
#ifdef CONFIG_X86_64
case MSR_EFER:
ret = kvm_set_msr_common(vcpu, msr_index, data);
- if (vmx->host_state.loaded)
+ if (vmx->host_state.loaded) {
+ reload_host_efer(vmx);
load_transition_efer(vmx);
+ }
break;
case MSR_FS_BASE:
vmcs_writel(GUEST_FS_BASE, data);
CPU_BASED_USE_IO_BITMAPS |
CPU_BASED_MOV_DR_EXITING |
CPU_BASED_USE_TSC_OFFSETING;
+#ifdef CONFIG_X86_64
+ opt = CPU_BASED_TPR_SHADOW;
+#else
opt = 0;
+#endif
if (adjust_vmx_controls(min, opt, MSR_IA32_VMX_PROCBASED_CTLS,
&_cpu_based_exec_control) < 0)
return -EIO;
+#ifdef CONFIG_X86_64
+	if (_cpu_based_exec_control & CPU_BASED_TPR_SHADOW)
+		_cpu_based_exec_control &= ~(CPU_BASED_CR8_LOAD_EXITING |
+					     CPU_BASED_CR8_STORE_EXITING);
+#endif
min = 0;
#ifdef CONFIG_X86_64
vmcs_write32(GUEST_TR_AR_BYTES, vcpu->rmode.tr.ar);
flags = vmcs_readl(GUEST_RFLAGS);
- flags &= ~(IOPL_MASK | X86_EFLAGS_VM);
+ flags &= ~(X86_EFLAGS_IOPL | X86_EFLAGS_VM);
flags |= (vcpu->rmode.save_iopl << IOPL_SHIFT);
vmcs_writel(GUEST_RFLAGS, flags);
vmcs_write32(GUEST_TR_AR_BYTES, 0x008b);
flags = vmcs_readl(GUEST_RFLAGS);
- vcpu->rmode.save_iopl = (flags & IOPL_MASK) >> IOPL_SHIFT;
+ vcpu->rmode.save_iopl = (flags & X86_EFLAGS_IOPL) >> IOPL_SHIFT;
- flags |= IOPL_MASK | X86_EFLAGS_VM;
+ flags |= X86_EFLAGS_IOPL | X86_EFLAGS_VM;
vmcs_writel(GUEST_RFLAGS, flags);
vmcs_writel(GUEST_CR4, vmcs_readl(GUEST_CR4) | X86_CR4_VME);
fix_rmode_seg(VCPU_SREG_GS, &vcpu->rmode.gs);
fix_rmode_seg(VCPU_SREG_FS, &vcpu->rmode.fs);
+ kvm_mmu_reset_context(vcpu);
init_rmode_tss(vcpu->kvm);
}
static int init_rmode_tss(struct kvm *kvm)
{
- struct page *p1, *p2, *p3;
gfn_t fn = rmode_tss_base(kvm) >> PAGE_SHIFT;
- char *page;
-
- p1 = gfn_to_page(kvm, fn++);
- p2 = gfn_to_page(kvm, fn++);
- p3 = gfn_to_page(kvm, fn);
+ u16 data = 0;
+ int r;
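+
+	/*
+	 * The real-mode TSS covers three guest pages: clear them, point the
+	 * I/O map base (offset 0x66) past the redirection bitmap, and set
+	 * the final 0xff byte that terminates the I/O permission bitmap.
+	 */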
- if (!p1 || !p2 || !p3) {
- kvm_printf(kvm,"%s: gfn_to_page failed\n", __FUNCTION__);
+ r = kvm_clear_guest_page(kvm, fn, 0, PAGE_SIZE);
+ if (r < 0)
+ return 0;
+ data = TSS_BASE_SIZE + TSS_REDIRECTION_SIZE;
+ r = kvm_write_guest_page(kvm, fn++, &data, 0x66, sizeof(u16));
+ if (r < 0)
+ return 0;
+ r = kvm_clear_guest_page(kvm, fn++, 0, PAGE_SIZE);
+ if (r < 0)
+ return 0;
+ r = kvm_clear_guest_page(kvm, fn, 0, PAGE_SIZE);
+ if (r < 0)
+ return 0;
+ data = ~0;
+ r = kvm_write_guest_page(kvm, fn, &data, RMODE_TSS_SIZE - 2 * PAGE_SIZE - 1,
+ sizeof(u8));
+ if (r < 0)
return 0;
- }
-
- page = kmap_atomic(p1, KM_USER0);
- clear_page(page);
- *(u16*)(page + 0x66) = TSS_BASE_SIZE + TSS_REDIRECTION_SIZE;
- kunmap_atomic(page, KM_USER0);
-
- page = kmap_atomic(p2, KM_USER0);
- clear_page(page);
- kunmap_atomic(page, KM_USER0);
-
- page = kmap_atomic(p3, KM_USER0);
- clear_page(page);
- *(page + RMODE_TSS_SIZE - 2 * PAGE_SIZE - 1) = ~0;
- kunmap_atomic(page, KM_USER0);
-
return 1;
}
int ret = 0;
unsigned long kvm_vmx_return;
u64 msr;
+ u32 exec_control;
if (!init_rmode_tss(vmx->vcpu.kvm)) {
ret = -ENOMEM;
goto out;
}
+ vmx->vcpu.rmode.active = 0;
+
vmx->vcpu.regs[VCPU_REGS_RDX] = get_rdx_init_val();
set_cr8(&vmx->vcpu, 0);
msr = 0xfee00000 | MSR_IA32_APICBASE_ENABLE;
* GUEST_CS_BASE should really be 0xffff0000, but VT vm86 mode
* insists on having GUEST_CS_BASE == GUEST_CS_SELECTOR << 4. Sigh.
*/
- vmcs_write16(GUEST_CS_SELECTOR, 0xf000);
- vmcs_writel(GUEST_CS_BASE, 0x000f0000);
+ if (vmx->vcpu.vcpu_id == 0) {
+ vmcs_write16(GUEST_CS_SELECTOR, 0xf000);
+ vmcs_writel(GUEST_CS_BASE, 0x000f0000);
+ } else {
+ vmcs_write16(GUEST_CS_SELECTOR, vmx->vcpu.sipi_vector << 8);
+ vmcs_writel(GUEST_CS_BASE, vmx->vcpu.sipi_vector << 12);
+ }
vmcs_write32(GUEST_CS_LIMIT, 0xffff);
vmcs_write32(GUEST_CS_AR_BYTES, 0x9b);
vmcs_writel(GUEST_SYSENTER_EIP, 0);
vmcs_writel(GUEST_RFLAGS, 0x02);
- vmcs_writel(GUEST_RIP, 0xfff0);
+ if (vmx->vcpu.vcpu_id == 0)
+ vmcs_writel(GUEST_RIP, 0xfff0);
+ else
+ vmcs_writel(GUEST_RIP, 0);
vmcs_writel(GUEST_RSP, 0);
//todo: dr0 = dr1 = dr2 = dr3 = 0; dr6 = 0xffff0ff0
/* Control */
vmcs_write32(PIN_BASED_VM_EXEC_CONTROL,
vmcs_config.pin_based_exec_ctrl);
- vmcs_write32(CPU_BASED_VM_EXEC_CONTROL,
- vmcs_config.cpu_based_exec_ctrl);
- vmcs_write32(PAGE_FAULT_ERROR_CODE_MASK, 0);
- vmcs_write32(PAGE_FAULT_ERROR_CODE_MATCH, 0);
+ exec_control = vmcs_config.cpu_based_exec_ctrl;
+ if (!vm_need_tpr_shadow(vmx->vcpu.kvm)) {
+ exec_control &= ~CPU_BASED_TPR_SHADOW;
+#ifdef CONFIG_X86_64
+ exec_control |= CPU_BASED_CR8_STORE_EXITING |
+ CPU_BASED_CR8_LOAD_EXITING;
+#endif
+ }
+ vmcs_write32(CPU_BASED_VM_EXEC_CONTROL, exec_control);
+
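+	/*
+	 * With bypass_guest_pf, only page faults whose error code has the
+	 * P bit set cause a vmexit (mask == match == 1); not-present faults
+	 * are reflected directly into the guest.  Otherwise every page
+	 * fault exits (mask == match == 0).
+	 */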
+ vmcs_write32(PAGE_FAULT_ERROR_CODE_MASK, !!bypass_guest_pf);
+ vmcs_write32(PAGE_FAULT_ERROR_CODE_MATCH, !!bypass_guest_pf);
vmcs_write32(CR3_TARGET_COUNT, 0); /* 22.2.1 */
vmcs_writel(HOST_CR0, read_cr0()); /* 22.2.3 */
vmcs_write32(VM_ENTRY_INTR_INFO_FIELD, 0); /* 22.2.1 */
#ifdef CONFIG_X86_64
- vmcs_writel(VIRTUAL_APIC_PAGE_ADDR, 0);
- vmcs_writel(TPR_THRESHOLD, 0);
+ vmcs_write64(VIRTUAL_APIC_PAGE_ADDR, 0);
+ if (vm_need_tpr_shadow(vmx->vcpu.kvm))
+ vmcs_write64(VIRTUAL_APIC_PAGE_ADDR,
+ page_to_phys(vmx->vcpu.apic->regs_page));
+ vmcs_write32(TPR_THRESHOLD, 0);
#endif
vmcs_writel(CR0_GUEST_HOST_MASK, ~0UL);
return ret;
}
+static void vmx_vcpu_reset(struct kvm_vcpu *vcpu)
+{
+ struct vcpu_vmx *vmx = to_vmx(vcpu);
+
+ vmx_vcpu_setup(vmx);
+}
+
static void inject_rmode_irq(struct kvm_vcpu *vcpu, int irq)
{
u16 ent[2];
* Cause the #SS fault with 0 error code in VM86 mode.
*/
if (((vec == GP_VECTOR) || (vec == SS_VECTOR)) && err_code == 0)
- if (emulate_instruction(vcpu, NULL, 0, 0) == EMULATE_DONE)
+ if (emulate_instruction(vcpu, NULL, 0, 0, 0) == EMULATE_DONE)
return 1;
return 0;
}
set_bit(irq / BITS_PER_LONG, &vcpu->irq_summary);
}
- if ((intr_info & INTR_INFO_INTR_TYPE_MASK) == 0x200) { /* nmi */
- asm ("int $2");
- return 1;
- }
+ if ((intr_info & INTR_INFO_INTR_TYPE_MASK) == 0x200) /* nmi */
+ return 1; /* already handled by vmx_vcpu_run() */
if (is_no_device(intr_info)) {
vmx_fpu_activate(vcpu);
return 1;
}
+ if (is_invalid_opcode(intr_info)) {
+ er = emulate_instruction(vcpu, kvm_run, 0, 0, 0);
+ if (er != EMULATE_DONE)
+ vmx_inject_ud(vcpu);
+
+ return 1;
+ }
+
error_code = 0;
rip = vmcs_readl(GUEST_RIP);
if (intr_info & INTR_INFO_DELIEVER_CODE_MASK)
return 1;
}
- er = emulate_instruction(vcpu, kvm_run, cr2, error_code);
+ er = emulate_instruction(vcpu, kvm_run, cr2, error_code, 0);
mutex_unlock(&vcpu->kvm->lock);
switch (er) {
++vcpu->stat.mmio_exits;
return 0;
case EMULATE_FAIL:
- vcpu_printf(vcpu, "%s: emulate fail\n", __FUNCTION__);
+ kvm_report_emulation_failure(vcpu, "pagetable");
break;
default:
BUG();
static int handle_io(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
{
- u64 exit_qualification;
+ unsigned long exit_qualification;
int size, down, in, string, rep;
unsigned port;
++vcpu->stat.io_exits;
- exit_qualification = vmcs_read64(EXIT_QUALIFICATION);
+ exit_qualification = vmcs_readl(EXIT_QUALIFICATION);
string = (exit_qualification & 16) != 0;
if (string) {
- if (emulate_instruction(vcpu, kvm_run, 0, 0) == EMULATE_DO_MMIO)
+ if (emulate_instruction(vcpu,
+ kvm_run, 0, 0, 0) == EMULATE_DO_MMIO)
return 0;
return 1;
}
hypercall[0] = 0x0f;
hypercall[1] = 0x01;
hypercall[2] = 0xc1;
- hypercall[3] = 0xc3;
}
static int handle_cr(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
{
- u64 exit_qualification;
+ unsigned long exit_qualification;
int cr;
int reg;
- exit_qualification = vmcs_read64(EXIT_QUALIFICATION);
+ exit_qualification = vmcs_readl(EXIT_QUALIFICATION);
cr = exit_qualification & 15;
reg = (exit_qualification >> 8) & 15;
switch ((exit_qualification >> 4) & 3) {
static int handle_dr(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
{
- u64 exit_qualification;
+ unsigned long exit_qualification;
unsigned long val;
int dr, reg;
* FIXME: this code assumes the host is debugging the guest.
* need to deal with guest debugging itself too.
*/
- exit_qualification = vmcs_read64(EXIT_QUALIFICATION);
+ exit_qualification = vmcs_readl(EXIT_QUALIFICATION);
dr = exit_qualification & 7;
reg = (exit_qualification >> 8) & 15;
vcpu_load_rsp_rip(vcpu);
return 1;
}
-static void post_kvm_run_save(struct kvm_vcpu *vcpu,
- struct kvm_run *kvm_run)
+static int handle_tpr_below_threshold(struct kvm_vcpu *vcpu,
+ struct kvm_run *kvm_run)
{
- kvm_run->if_flag = (vmcs_readl(GUEST_RFLAGS) & X86_EFLAGS_IF) != 0;
- kvm_run->cr8 = get_cr8(vcpu);
- kvm_run->apic_base = kvm_get_apic_base(vcpu);
- if (irqchip_in_kernel(vcpu->kvm))
- kvm_run->ready_for_interrupt_injection = 1;
- else
- kvm_run->ready_for_interrupt_injection =
- (vcpu->interrupt_window_open &&
- vcpu->irq_summary == 0);
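+	/*
+	 * Nothing to do here: the exit itself is sufficient, since the
+	 * threshold is recomputed by update_tpr_threshold() before the
+	 * next vmentry.
+	 */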
+ return 1;
}
static int handle_interrupt_window(struct kvm_vcpu *vcpu,
static int handle_vmcall(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
{
skip_emulated_instruction(vcpu);
- return kvm_hypercall(vcpu, kvm_run);
+ kvm_emulate_hypercall(vcpu);
+ return 1;
}
/*
[EXIT_REASON_PENDING_INTERRUPT] = handle_interrupt_window,
[EXIT_REASON_HLT] = handle_halt,
[EXIT_REASON_VMCALL] = handle_vmcall,
+	[EXIT_REASON_TPR_BELOW_THRESHOLD] = handle_tpr_below_threshold,
};
static const int kvm_vmx_max_exit_handlers =
{
u32 vectoring_info = vmcs_read32(IDT_VECTORING_INFO_FIELD);
u32 exit_reason = vmcs_read32(VM_EXIT_REASON);
+ struct vcpu_vmx *vmx = to_vmx(vcpu);
+
+ if (unlikely(vmx->fail)) {
+ kvm_run->exit_reason = KVM_EXIT_FAIL_ENTRY;
+ kvm_run->fail_entry.hardware_entry_failure_reason
+ = vmcs_read32(VM_INSTRUCTION_ERROR);
+ return 0;
+ }
	if ((vectoring_info & VECTORING_INFO_VALID_MASK) &&
	    exit_reason != EXIT_REASON_EXCEPTION_NMI)
return 0;
}
-/*
- * Check if userspace requested an interrupt window, and that the
- * interrupt window is open.
- *
- * No need to exit to userspace if we already have an interrupt queued.
- */
-static int dm_request_for_irq_injection(struct kvm_vcpu *vcpu,
- struct kvm_run *kvm_run)
+static void vmx_flush_tlb(struct kvm_vcpu *vcpu)
{
- return (!vcpu->irq_summary &&
- kvm_run->request_interrupt_window &&
- vcpu->interrupt_window_open &&
- (vmcs_readl(GUEST_RFLAGS) & X86_EFLAGS_IF));
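+	/* Nothing to do: a vmentry/vmexit pair already flushes the guest TLB. */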
}
-static void vmx_flush_tlb(struct kvm_vcpu *vcpu)
+static void update_tpr_threshold(struct kvm_vcpu *vcpu)
{
+ int max_irr, tpr;
+
+ if (!vm_need_tpr_shadow(vcpu->kvm))
+ return;
+
+ if (!kvm_lapic_enabled(vcpu) ||
+ ((max_irr = kvm_lapic_find_highest_irr(vcpu)) == -1)) {
+ vmcs_write32(TPR_THRESHOLD, 0);
+ return;
+ }
+
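+	/*
+	 * The threshold is a priority class (vector bits 7:4): exit when
+	 * the guest drops TPR below the priority of the highest pending
+	 * interrupt, but never raise the threshold above the current TPR.
+	 */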
+ tpr = (kvm_lapic_get_cr8(vcpu) & 0x0f) << 4;
+ vmcs_write32(TPR_THRESHOLD, (max_irr > tpr) ? tpr >> 4 : max_irr >> 4);
}
static void enable_irq_window(struct kvm_vcpu *vcpu)
{
u32 idtv_info_field, intr_info_field;
int has_ext_irq, interrupt_window_open;
+ int vector;
+
+ kvm_inject_pending_timer_irqs(vcpu);
+ update_tpr_threshold(vcpu);
has_ext_irq = kvm_cpu_has_interrupt(vcpu);
intr_info_field = vmcs_read32(VM_ENTRY_INTR_INFO_FIELD);
interrupt_window_open =
((vmcs_readl(GUEST_RFLAGS) & X86_EFLAGS_IF) &&
(vmcs_read32(GUEST_INTERRUPTIBILITY_INFO) & 3) == 0);
- if (interrupt_window_open)
- vmx_inject_irq(vcpu, kvm_cpu_get_interrupt(vcpu));
- else
+ if (interrupt_window_open) {
+ vector = kvm_cpu_get_interrupt(vcpu);
+ vmx_inject_irq(vcpu, vector);
+ kvm_timer_intr_post(vcpu, vector);
+ } else
enable_irq_window(vcpu);
}
-static int vmx_vcpu_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
+static void vmx_vcpu_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
{
struct vcpu_vmx *vmx = to_vmx(vcpu);
- u8 fail;
- int r;
-
-preempted:
- if (vcpu->guest_debug.enabled)
- kvm_guest_debug_pre(vcpu);
-
-again:
- r = kvm_mmu_reload(vcpu);
- if (unlikely(r))
- goto out;
-
- preempt_disable();
-
- vmx_save_host_state(vmx);
- kvm_load_guest_fpu(vcpu);
+ u32 intr_info;
/*
* Loading guest fpu may have cleared host cr0.ts
*/
vmcs_writel(HOST_CR0, read_cr0());
- local_irq_disable();
-
- if (signal_pending(current)) {
- local_irq_enable();
- preempt_enable();
- r = -EINTR;
- kvm_run->exit_reason = KVM_EXIT_INTR;
- ++vcpu->stat.signal_exits;
- goto out;
- }
-
- if (irqchip_in_kernel(vcpu->kvm))
- vmx_intr_assist(vcpu);
- else if (!vcpu->mmio_read_completed)
- do_interrupt_requests(vcpu, kvm_run);
-
- vcpu->guest_mode = 1;
- if (vcpu->requests)
- if (test_and_clear_bit(KVM_TLB_FLUSH, &vcpu->requests))
- vmx_flush_tlb(vcpu);
-
asm (
/* Store host registers */
#ifdef CONFIG_X86_64
"pop %%ecx; popa \n\t"
#endif
"setbe %0 \n\t"
- : "=q" (fail)
+ : "=q" (vmx->fail)
: "r"(vmx->launched), "d"((unsigned long)HOST_RSP),
"c"(vcpu),
[rax]"i"(offsetof(struct kvm_vcpu, regs[VCPU_REGS_RAX])),
[cr2]"i"(offsetof(struct kvm_vcpu, cr2))
: "cc", "memory" );
- vcpu->guest_mode = 0;
- local_irq_enable();
-
- ++vcpu->stat.exits;
-
	vcpu->interrupt_window_open =
		(vmcs_read32(GUEST_INTERRUPTIBILITY_INFO) & 3) == 0;
asm ("mov %0, %%ds; mov %0, %%es" : : "r"(__USER_DS));
vmx->launched = 1;
- preempt_enable();
-
- if (unlikely(fail)) {
- kvm_run->exit_reason = KVM_EXIT_FAIL_ENTRY;
- kvm_run->fail_entry.hardware_entry_failure_reason
- = vmcs_read32(VM_INSTRUCTION_ERROR);
- r = 0;
- goto out;
- }
- /*
- * Profile KVM exit RIPs:
- */
- if (unlikely(prof_on == KVM_PROFILING))
- profile_hit(KVM_PROFILING, (void *)vmcs_readl(GUEST_RIP));
-
- r = kvm_handle_exit(kvm_run, vcpu);
- if (r > 0) {
- if (dm_request_for_irq_injection(vcpu, kvm_run)) {
- r = -EINTR;
- kvm_run->exit_reason = KVM_EXIT_INTR;
- ++vcpu->stat.request_irq_exits;
- goto out;
- }
- if (!need_resched()) {
- ++vcpu->stat.light_exits;
- goto again;
- }
- }
-
-out:
- if (r > 0) {
- kvm_resched(vcpu);
- goto preempted;
- }
+ intr_info = vmcs_read32(VM_EXIT_INTR_INFO);
- post_kvm_run_save(vcpu, kvm_run);
- return r;
+ /* We need to handle NMIs before interrupts are enabled */
+ if ((intr_info & INTR_INFO_INTR_TYPE_MASK) == 0x200) /* nmi */
+ asm("int $2");
}
static void vmx_inject_page_fault(struct kvm_vcpu *vcpu,
if (err)
goto free_vcpu;
- if (irqchip_in_kernel(kvm)) {
- err = kvm_create_lapic(&vmx->vcpu);
- if (err < 0)
- goto free_vcpu;
- }
-
vmx->guest_msrs = kmalloc(PAGE_SIZE, GFP_KERNEL);
if (!vmx->guest_msrs) {
err = -ENOMEM;
}
}
-static struct kvm_arch_ops vmx_arch_ops = {
+static struct kvm_x86_ops vmx_x86_ops = {
.cpu_has_kvm_support = cpu_has_kvm_support,
.disabled_by_bios = vmx_disabled_by_bios,
.hardware_setup = hardware_setup,
.vcpu_create = vmx_create_vcpu,
.vcpu_free = vmx_free_vcpu,
+ .vcpu_reset = vmx_vcpu_reset,
+ .prepare_guest_switch = vmx_save_host_state,
.vcpu_load = vmx_vcpu_load,
.vcpu_put = vmx_vcpu_put,
.vcpu_decache = vmx_vcpu_decache,
.set_guest_debug = set_guest_debug,
+ .guest_debug_pre = kvm_guest_debug_pre,
.get_msr = vmx_get_msr,
.set_msr = vmx_set_msr,
.get_segment_base = vmx_get_segment_base,
.inject_gp = vmx_inject_gp,
.run = vmx_vcpu_run,
+ .handle_exit = kvm_handle_exit,
.skip_emulated_instruction = skip_emulated_instruction,
.patch_hypercall = vmx_patch_hypercall,
.get_irq = vmx_get_irq,
.set_irq = vmx_inject_irq,
+ .inject_pending_irq = vmx_intr_assist,
+ .inject_pending_vectors = do_interrupt_requests,
};
static int __init vmx_init(void)
memset(iova, 0xff, PAGE_SIZE);
kunmap(vmx_io_bitmap_b);
- r = kvm_init_arch(&vmx_arch_ops, sizeof(struct vcpu_vmx), THIS_MODULE);
+ r = kvm_init_x86(&vmx_x86_ops, sizeof(struct vcpu_vmx), THIS_MODULE);
if (r)
goto out1;
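+
+	/*
+	 * Shadow ptes that must trap get a present + reserved-bits pattern,
+	 * so the resulting fault carries P=1 in its error code and exits;
+	 * ordinary not-present sptes stay 0 and fault straight into the
+	 * guest (see PAGE_FAULT_ERROR_CODE_{MASK,MATCH}).
+	 */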
+ if (bypass_guest_pf)
+ kvm_mmu_set_nonpresent_ptes(~0xffeull, 0ull);
+
return 0;
out1:
__free_page(vmx_io_bitmap_b);
__free_page(vmx_io_bitmap_a);
- kvm_exit_arch();
+ kvm_exit_x86();
}
module_init(vmx_init)