diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
index a4018b01e1f973bad1619d6ca16e0e67e29aceab..6259d7467648e1209456abff3666ad9a4e4a3cb4 100644
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -16,7 +16,6 @@
  */
 
 #include "irq.h"
-#include "vmx.h"
 #include "mmu.h"
 
 #include <linux/kvm_host.h>
@@ -31,6 +30,8 @@
 
 #include <asm/io.h>
 #include <asm/desc.h>
+#include <asm/vmx.h>
+#include <asm/virtext.h>
 
 #define __ex(x) __kvm_handle_fault_on_reboot(x)
 
@@ -90,6 +91,11 @@ struct vcpu_vmx {
        } rmode;
        int vpid;
        bool emulation_required;
+
+       /* Support for vnmi-less CPUs */
+       int soft_vnmi_blocked;
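+       /* timestamp of the last VM entry while an NMI was soft-blocked */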
+       ktime_t entry_time;
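+       /* total guest time spent with an NMI soft-blocked, in nanoseconds */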
+       s64 vnmi_blocked_time;
 };
 
 static inline struct vcpu_vmx *to_vmx(struct kvm_vcpu *vcpu)
@@ -122,7 +128,7 @@ static struct vmcs_config {
        u32 vmentry_ctrl;
 } vmcs_config;
 
-struct vmx_capability {
+static struct vmx_capability {
        u32 ept;
        u32 vpid;
 } vmx_capability;
@@ -957,6 +963,13 @@ static int vmx_set_msr(struct kvm_vcpu *vcpu, u32 msr_index, u64 data)
                pr_unimpl(vcpu, "unimplemented perfctr wrmsr: 0x%x data 0x%llx\n", msr_index, data);
 
                break;
+       case MSR_IA32_CR_PAT:
+               if (vmcs_config.vmentry_ctrl & VM_ENTRY_LOAD_IA32_PAT) {
+                       vmcs_write64(GUEST_IA32_PAT, data);
+                       vcpu->arch.pat = data;
+                       break;
+               }
+               /* Otherwise falls through to kvm_set_msr_common */
        default:
                vmx_load_host_state(vmx);
                msr = find_msr_entry(vmx, msr_index);
@@ -1032,8 +1045,7 @@ static int vmx_get_irq(struct kvm_vcpu *vcpu)
 
 static __init int cpu_has_kvm_support(void)
 {
-       unsigned long ecx = cpuid_ecx(1);
-       return test_bit(5, &ecx); /* CPUID.1:ECX.VMX[bit 5] -> VT */
+       return cpu_has_vmx();
 }
 
 static __init int vmx_disabled_by_bios(void)
@@ -1079,13 +1091,22 @@ static void vmclear_local_vcpus(void)
                __vcpu_clear(vmx);
 }
 
-static void hardware_disable(void *garbage)
+
+/* Just like cpu_vmxoff(), but with the __kvm_handle_fault_on_reboot()
+ * tricks.
+ */
+static void kvm_cpu_vmxoff(void)
 {
-       vmclear_local_vcpus();
        asm volatile (__ex(ASM_VMX_VMXOFF) : : : "cc");
        write_cr4(read_cr4() & ~X86_CR4_VMXE);
 }
 
+static void hardware_disable(void *garbage)
+{
+       vmclear_local_vcpus();
+       kvm_cpu_vmxoff();
+}
+
 static __init int adjust_vmx_controls(u32 ctl_min, u32 ctl_opt,
                                      u32 msr, u32 *result)
 {
@@ -1176,12 +1197,13 @@ static __init int setup_vmcs_config(struct vmcs_config *vmcs_conf)
 #ifdef CONFIG_X86_64
        min |= VM_EXIT_HOST_ADDR_SPACE_SIZE;
 #endif
-       opt = 0;
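+       /* Saving/loading PAT across VM exits is optional; unsupported
+        * control bits are dropped by adjust_vmx_controls(). */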
+       opt = VM_EXIT_SAVE_IA32_PAT | VM_EXIT_LOAD_IA32_PAT;
        if (adjust_vmx_controls(min, opt, MSR_IA32_VMX_EXIT_CTLS,
                                &_vmexit_control) < 0)
                return -EIO;
 
-       min = opt = 0;
+       min = 0;
+       opt = VM_ENTRY_LOAD_IA32_PAT;
        if (adjust_vmx_controls(min, opt, MSR_IA32_VMX_ENTRY_CTLS,
                                &_vmentry_control) < 0)
                return -EIO;
@@ -2087,8 +2109,9 @@ static void vmx_disable_intercept_for_msr(struct page *msr_bitmap, u32 msr)
  */
 static int vmx_vcpu_setup(struct vcpu_vmx *vmx)
 {
-       u32 host_sysenter_cs;
+       u32 host_sysenter_cs, msr_low, msr_high;
        u32 junk;
+       u64 host_pat;
        unsigned long a;
        struct descriptor_table dt;
        int i;
@@ -2176,6 +2199,20 @@ static int vmx_vcpu_setup(struct vcpu_vmx *vmx)
        rdmsrl(MSR_IA32_SYSENTER_EIP, a);
        vmcs_writel(HOST_IA32_SYSENTER_EIP, a);   /* 22.2.3 */
 
+       if (vmcs_config.vmexit_ctrl & VM_EXIT_LOAD_IA32_PAT) {
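+               /* The PAT MSR is 64 bits wide; rdmsr() returns its low and
+                * high 32-bit halves separately. */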
+               rdmsr(MSR_IA32_CR_PAT, msr_low, msr_high);
+               host_pat = msr_low | ((u64) msr_high << 32);
+               vmcs_write64(HOST_IA32_PAT, host_pat);
+       }
+       if (vmcs_config.vmentry_ctrl & VM_ENTRY_LOAD_IA32_PAT) {
+               rdmsr(MSR_IA32_CR_PAT, msr_low, msr_high);
+               host_pat = msr_low | ((u64) msr_high << 32);
+               /* Write the default value following the host PAT */
+               vmcs_write64(GUEST_IA32_PAT, host_pat);
+               /* Keep arch.pat in sync with GUEST_IA32_PAT */
+               vmx->vcpu.arch.pat = host_pat;
+       }
+
        for (i = 0; i < NR_VMX_MSR; ++i) {
                u32 index = vmx_msr_index[i];
                u32 data_low, data_high;
@@ -2230,6 +2267,8 @@ static int vmx_vcpu_reset(struct kvm_vcpu *vcpu)
 
        vmx->vcpu.arch.rmode.active = 0;
 
+       vmx->soft_vnmi_blocked = 0;
+
        vmx->vcpu.arch.regs[VCPU_REGS_RDX] = get_rdx_init_val();
        kvm_set_cr8(&vmx->vcpu, 0);
        msr = 0xfee00000 | MSR_IA32_APICBASE_ENABLE;
@@ -2335,6 +2374,29 @@ out:
        return ret;
 }
 
+static void enable_irq_window(struct kvm_vcpu *vcpu)
+{
+       u32 cpu_based_vm_exec_control;
+
+       cpu_based_vm_exec_control = vmcs_read32(CPU_BASED_VM_EXEC_CONTROL);
+       cpu_based_vm_exec_control |= CPU_BASED_VIRTUAL_INTR_PENDING;
+       vmcs_write32(CPU_BASED_VM_EXEC_CONTROL, cpu_based_vm_exec_control);
+}
+
+static void enable_nmi_window(struct kvm_vcpu *vcpu)
+{
+       u32 cpu_based_vm_exec_control;
+
+       if (!cpu_has_virtual_nmis()) {
+               enable_irq_window(vcpu);
+               return;
+       }
+
+       cpu_based_vm_exec_control = vmcs_read32(CPU_BASED_VM_EXEC_CONTROL);
+       cpu_based_vm_exec_control |= CPU_BASED_VIRTUAL_NMI_PENDING;
+       vmcs_write32(CPU_BASED_VM_EXEC_CONTROL, cpu_based_vm_exec_control);
+}
+
 static void vmx_inject_irq(struct kvm_vcpu *vcpu, int irq)
 {
        struct vcpu_vmx *vmx = to_vmx(vcpu);
@@ -2358,10 +2420,54 @@ static void vmx_inject_irq(struct kvm_vcpu *vcpu, int irq)
 
 static void vmx_inject_nmi(struct kvm_vcpu *vcpu)
 {
+       struct vcpu_vmx *vmx = to_vmx(vcpu);
+
+       if (!cpu_has_virtual_nmis()) {
+               /*
+                * Tracking the NMI-blocked state in software is built upon
+                * finding the next open IRQ window. This, in turn, depends on
+                * well-behaved guests: they have to keep IRQs disabled at
+                * least as long as the NMI handler runs. Otherwise we may
+                * cause NMI nesting, maybe breaking the guest. But as this is
+                * highly unlikely, we can live with the residual risk.
+                */
+               vmx->soft_vnmi_blocked = 1;
+               vmx->vnmi_blocked_time = 0;
+       }
+
+       ++vcpu->stat.nmi_injections;
+       if (vcpu->arch.rmode.active) {
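+               /* In real mode, deliver the NMI as a soft interrupt using the
+                * same RIP-rewind trick as IRQ injection (see fixup_rmode_irq()). */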
+               vmx->rmode.irq.pending = true;
+               vmx->rmode.irq.vector = NMI_VECTOR;
+               vmx->rmode.irq.rip = kvm_rip_read(vcpu);
+               vmcs_write32(VM_ENTRY_INTR_INFO_FIELD,
+                            NMI_VECTOR | INTR_TYPE_SOFT_INTR |
+                            INTR_INFO_VALID_MASK);
+               vmcs_write32(VM_ENTRY_INSTRUCTION_LEN, 1);
+               kvm_rip_write(vcpu, vmx->rmode.irq.rip - 1);
+               return;
+       }
        vmcs_write32(VM_ENTRY_INTR_INFO_FIELD,
                        INTR_TYPE_NMI_INTR | INTR_INFO_VALID_MASK | NMI_VECTOR);
 }
 
+static void vmx_update_window_states(struct kvm_vcpu *vcpu)
+{
+       u32 guest_intr = vmcs_read32(GUEST_INTERRUPTIBILITY_INFO);
+
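+       /* NMIs are deliverable unless the guest is in an STI or MOV-SS shadow
+        * or is already blocking NMIs; IRQs additionally require RFLAGS.IF. */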
+       vcpu->arch.nmi_window_open =
+               !(guest_intr & (GUEST_INTR_STATE_STI |
+                               GUEST_INTR_STATE_MOV_SS |
+                               GUEST_INTR_STATE_NMI));
+       if (!cpu_has_virtual_nmis() && to_vmx(vcpu)->soft_vnmi_blocked)
+               vcpu->arch.nmi_window_open = 0;
+
+       vcpu->arch.interrupt_window_open =
+               ((vmcs_readl(GUEST_RFLAGS) & X86_EFLAGS_IF) &&
+                !(guest_intr & (GUEST_INTR_STATE_STI |
+                                GUEST_INTR_STATE_MOV_SS)));
+}
+
 static void kvm_do_inject_irq(struct kvm_vcpu *vcpu)
 {
        int word_index = __ffs(vcpu->arch.irq_summary);
@@ -2374,40 +2480,49 @@ static void kvm_do_inject_irq(struct kvm_vcpu *vcpu)
        kvm_queue_interrupt(vcpu, irq);
 }
 
-
 static void do_interrupt_requests(struct kvm_vcpu *vcpu,
                                       struct kvm_run *kvm_run)
 {
-       u32 cpu_based_vm_exec_control;
-
-       vcpu->arch.interrupt_window_open =
-               ((vmcs_readl(GUEST_RFLAGS) & X86_EFLAGS_IF) &&
-                (vmcs_read32(GUEST_INTERRUPTIBILITY_INFO) & 3) == 0);
+       vmx_update_window_states(vcpu);
 
-       if (vcpu->arch.interrupt_window_open &&
-           vcpu->arch.irq_summary && !vcpu->arch.interrupt.pending)
-               kvm_do_inject_irq(vcpu);
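+       /* Pending NMIs are handled before pending IRQs. */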
+       if (vcpu->arch.nmi_pending && !vcpu->arch.nmi_injected) {
+               if (vcpu->arch.interrupt.pending) {
+                       enable_nmi_window(vcpu);
+               } else if (vcpu->arch.nmi_window_open) {
+                       vcpu->arch.nmi_pending = false;
+                       vcpu->arch.nmi_injected = true;
+               } else {
+                       enable_nmi_window(vcpu);
+                       return;
+               }
+       }
+       if (vcpu->arch.nmi_injected) {
+               vmx_inject_nmi(vcpu);
+               if (vcpu->arch.nmi_pending)
+                       enable_nmi_window(vcpu);
+               else if (vcpu->arch.irq_summary
+                        || kvm_run->request_interrupt_window)
+                       enable_irq_window(vcpu);
+               return;
+       }
 
-       if (vcpu->arch.interrupt_window_open && vcpu->arch.interrupt.pending)
-               vmx_inject_irq(vcpu, vcpu->arch.interrupt.nr);
+       if (vcpu->arch.interrupt_window_open) {
+               if (vcpu->arch.irq_summary && !vcpu->arch.interrupt.pending)
+                       kvm_do_inject_irq(vcpu);
 
-       cpu_based_vm_exec_control = vmcs_read32(CPU_BASED_VM_EXEC_CONTROL);
+               if (vcpu->arch.interrupt.pending)
+                       vmx_inject_irq(vcpu, vcpu->arch.interrupt.nr);
+       }
        if (!vcpu->arch.interrupt_window_open &&
            (vcpu->arch.irq_summary || kvm_run->request_interrupt_window))
-               /*
-                * Interrupts blocked.  Wait for unblock.
-                */
-               cpu_based_vm_exec_control |= CPU_BASED_VIRTUAL_INTR_PENDING;
-       else
-               cpu_based_vm_exec_control &= ~CPU_BASED_VIRTUAL_INTR_PENDING;
-       vmcs_write32(CPU_BASED_VM_EXEC_CONTROL, cpu_based_vm_exec_control);
+               enable_irq_window(vcpu);
 }
 
 static int vmx_set_tss_addr(struct kvm *kvm, unsigned int addr)
 {
        int ret;
        struct kvm_userspace_memory_region tss_mem = {
-               .slot = 8,
+               .slot = TSS_PRIVATE_MEMSLOT,
                .guest_phys_addr = addr,
                .memory_size = PAGE_SIZE * 3,
                .flags = 0,
@@ -2492,7 +2607,7 @@ static int handle_exception(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
                set_bit(irq / BITS_PER_LONG, &vcpu->arch.irq_summary);
        }
 
-       if ((intr_info & INTR_INFO_INTR_TYPE_MASK) == 0x200) /* nmi */
+       if ((intr_info & INTR_INFO_INTR_TYPE_MASK) == INTR_TYPE_NMI_INTR)
                return 1;  /* already handled by vmx_vcpu_run() */
 
        if (is_no_device(intr_info)) {
@@ -2581,6 +2696,7 @@ static int handle_io(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
        rep = (exit_qualification & 32) != 0;
        port = exit_qualification >> 16;
 
+       skip_emulated_instruction(vcpu);
        return kvm_emulate_pio(vcpu, kvm_run, in, size, port);
 }
 
@@ -2767,6 +2883,7 @@ static int handle_interrupt_window(struct kvm_vcpu *vcpu,
        vmcs_write32(CPU_BASED_VM_EXEC_CONTROL, cpu_based_vm_exec_control);
 
        KVMTRACE_0D(PEND_INTR, vcpu, handler);
+       ++vcpu->stat.irq_window_exits;
 
        /*
         * If the user space waits to inject interrupts, exit as soon as
@@ -2775,7 +2892,6 @@ static int handle_interrupt_window(struct kvm_vcpu *vcpu,
        if (kvm_run->request_interrupt_window &&
            !vcpu->arch.irq_summary) {
                kvm_run->exit_reason = KVM_EXIT_IRQ_WINDOW_OPEN;
-               ++vcpu->stat.irq_window_exits;
                return 0;
        }
        return 1;
@@ -2832,6 +2948,7 @@ static int handle_apic_access(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
 
 static int handle_task_switch(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
 {
+       struct vcpu_vmx *vmx = to_vmx(vcpu);
        unsigned long exit_qualification;
        u16 tss_selector;
        int reason;
@@ -2839,6 +2956,15 @@ static int handle_task_switch(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
        exit_qualification = vmcs_readl(EXIT_QUALIFICATION);
 
        reason = (u32)exit_qualification >> 30;
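+       /* An NMI delivered through a task gate counts as taken: drop the
+        * re-injection flag and, with virtual NMIs, set the NMI-blocked state. */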
+       if (reason == TASK_SWITCH_GATE && vmx->vcpu.arch.nmi_injected &&
+           (vmx->idt_vectoring_info & VECTORING_INFO_VALID_MASK) &&
+           (vmx->idt_vectoring_info & VECTORING_INFO_TYPE_MASK)
+           == INTR_TYPE_NMI_INTR) {
+               vcpu->arch.nmi_injected = false;
+               if (cpu_has_virtual_nmis())
+                       vmcs_set_bits(GUEST_INTERRUPTIBILITY_INFO,
+                                     GUEST_INTR_STATE_NMI);
+       }
        tss_selector = exit_qualification;
 
        return kvm_task_switch(vcpu, tss_selector, reason);
@@ -2927,16 +3053,12 @@ static void handle_invalid_guest_state(struct kvm_vcpu *vcpu,
        while (!guest_state_valid(vcpu)) {
                err = emulate_instruction(vcpu, kvm_run, 0, 0, 0);
 
-               switch (err) {
-                       case EMULATE_DONE:
-                               break;
-                       case EMULATE_DO_MMIO:
-                               kvm_report_emulation_failure(vcpu, "mmio");
-                               /* TODO: Handle MMIO */
-                               return;
-                       default:
-                               kvm_report_emulation_failure(vcpu, "emulation failure");
-                               return;
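+               /* Break out and let the pending MMIO be completed through the
+                * normal exit path. */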
+               if (err == EMULATE_DO_MMIO)
+                       break;
+
+               if (err != EMULATE_DONE) {
+                       kvm_report_emulation_failure(vcpu, "emulation failure");
+                       return;
                }
 
                if (signal_pending(current))
@@ -2948,8 +3070,10 @@ static void handle_invalid_guest_state(struct kvm_vcpu *vcpu,
        local_irq_disable();
        preempt_disable();
 
-       /* Guest state should be valid now, no more emulation should be needed */
-       vmx->emulation_required = 0;
+       /* Guest state should be valid now, unless we still need to
+        * emulate an MMIO operation. */
+       if (guest_state_valid(vcpu))
+               vmx->emulation_required = 0;
 }
 
 /*
@@ -2996,6 +3120,11 @@ static int kvm_handle_exit(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
        KVMTRACE_3D(VMEXIT, vcpu, exit_reason, (u32)kvm_rip_read(vcpu),
                    (u32)((u64)kvm_rip_read(vcpu) >> 32), entryexit);
 
+       /* If we still need to emulate an MMIO from handle_invalid_guest_state(),
+        * just return 0. */
+       if (vmx->emulation_required && emulate_invalid_guest_state)
+               return 0;
+
        /* Accessing CR3 doesn't cause a VM exit in paging mode, so we need
         * to sync with the guest's real CR3. */
        if (vm_need_ept() && is_paging(vcpu)) {
@@ -3012,9 +3141,32 @@ static int kvm_handle_exit(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
 
        if ((vectoring_info & VECTORING_INFO_VALID_MASK) &&
                        (exit_reason != EXIT_REASON_EXCEPTION_NMI &&
-                       exit_reason != EXIT_REASON_EPT_VIOLATION))
-               printk(KERN_WARNING "%s: unexpected, valid vectoring info and "
-                      "exit reason is 0x%x\n", __func__, exit_reason);
+                       exit_reason != EXIT_REASON_EPT_VIOLATION &&
+                       exit_reason != EXIT_REASON_TASK_SWITCH))
+               printk(KERN_WARNING "%s: unexpected, valid vectoring info "
+                      "(0x%x) and exit reason is 0x%x\n",
+                      __func__, vectoring_info, exit_reason);
+
+       if (unlikely(!cpu_has_virtual_nmis() && vmx->soft_vnmi_blocked)) {
+               if (vcpu->arch.interrupt_window_open) {
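+                       /* A well-behaved guest re-enables IRQs only after its
+                        * NMI handler has finished, so an open IRQ window ends
+                        * the software NMI blocking. */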
+                       vmx->soft_vnmi_blocked = 0;
+                       vcpu->arch.nmi_window_open = 1;
+               } else if (vmx->vnmi_blocked_time > 1000000000LL &&
+                          vcpu->arch.nmi_pending) {
+                       /*
+                        * Without virtual NMI support, this CPU can't help us
+                        * find the end of an NMI-blocked window if the guest
+                        * runs with IRQs disabled. So we pull the trigger after
+                        * 1 s of futile waiting, but inform the user about this.
+                        */
+                       printk(KERN_WARNING "%s: Breaking out of NMI-blocked "
+                              "state on VCPU %d after 1 s timeout\n",
+                              __func__, vcpu->vcpu_id);
+                       vmx->soft_vnmi_blocked = 0;
+                       vmx->vcpu.arch.nmi_window_open = 1;
+               }
+       }
+
        if (exit_reason < kvm_vmx_max_exit_handlers
            && kvm_vmx_exit_handlers[exit_reason])
                return kvm_vmx_exit_handlers[exit_reason](vcpu, kvm_run);
@@ -3042,51 +3194,6 @@ static void update_tpr_threshold(struct kvm_vcpu *vcpu)
        vmcs_write32(TPR_THRESHOLD, (max_irr > tpr) ? tpr >> 4 : max_irr >> 4);
 }
 
-static void enable_irq_window(struct kvm_vcpu *vcpu)
-{
-       u32 cpu_based_vm_exec_control;
-
-       cpu_based_vm_exec_control = vmcs_read32(CPU_BASED_VM_EXEC_CONTROL);
-       cpu_based_vm_exec_control |= CPU_BASED_VIRTUAL_INTR_PENDING;
-       vmcs_write32(CPU_BASED_VM_EXEC_CONTROL, cpu_based_vm_exec_control);
-}
-
-static void enable_nmi_window(struct kvm_vcpu *vcpu)
-{
-       u32 cpu_based_vm_exec_control;
-
-       if (!cpu_has_virtual_nmis())
-               return;
-
-       cpu_based_vm_exec_control = vmcs_read32(CPU_BASED_VM_EXEC_CONTROL);
-       cpu_based_vm_exec_control |= CPU_BASED_VIRTUAL_NMI_PENDING;
-       vmcs_write32(CPU_BASED_VM_EXEC_CONTROL, cpu_based_vm_exec_control);
-}
-
-static int vmx_nmi_enabled(struct kvm_vcpu *vcpu)
-{
-       u32 guest_intr = vmcs_read32(GUEST_INTERRUPTIBILITY_INFO);
-       return !(guest_intr & (GUEST_INTR_STATE_NMI |
-                              GUEST_INTR_STATE_MOV_SS |
-                              GUEST_INTR_STATE_STI));
-}
-
-static int vmx_irq_enabled(struct kvm_vcpu *vcpu)
-{
-       u32 guest_intr = vmcs_read32(GUEST_INTERRUPTIBILITY_INFO);
-       return (!(guest_intr & (GUEST_INTR_STATE_MOV_SS |
-                              GUEST_INTR_STATE_STI)) &&
-               (vmcs_readl(GUEST_RFLAGS) & X86_EFLAGS_IF));
-}
-
-static void enable_intr_window(struct kvm_vcpu *vcpu)
-{
-       if (vcpu->arch.nmi_pending)
-               enable_nmi_window(vcpu);
-       else if (kvm_cpu_has_interrupt(vcpu))
-               enable_irq_window(vcpu);
-}
-
 static void vmx_complete_interrupts(struct vcpu_vmx *vmx)
 {
        u32 exit_intr_info;
@@ -3109,7 +3216,9 @@ static void vmx_complete_interrupts(struct vcpu_vmx *vmx)
                if (unblock_nmi && vector != DF_VECTOR)
                        vmcs_set_bits(GUEST_INTERRUPTIBILITY_INFO,
                                      GUEST_INTR_STATE_NMI);
-       }
+       } else if (unlikely(vmx->soft_vnmi_blocked))
+               vmx->vnmi_blocked_time +=
+                       ktime_to_ns(ktime_sub(ktime_get(), vmx->entry_time));
 
        idt_vectoring_info = vmx->idt_vectoring_info;
        idtv_info_valid = idt_vectoring_info & VECTORING_INFO_VALID_MASK;
@@ -3147,26 +3256,29 @@ static void vmx_intr_assist(struct kvm_vcpu *vcpu)
 {
        update_tpr_threshold(vcpu);
 
-       if (cpu_has_virtual_nmis()) {
-               if (vcpu->arch.nmi_pending && !vcpu->arch.nmi_injected) {
-                       if (vcpu->arch.interrupt.pending) {
-                               enable_nmi_window(vcpu);
-                       } else if (vmx_nmi_enabled(vcpu)) {
-                               vcpu->arch.nmi_pending = false;
-                               vcpu->arch.nmi_injected = true;
-                       } else {
-                               enable_intr_window(vcpu);
-                               return;
-                       }
-               }
-               if (vcpu->arch.nmi_injected) {
-                       vmx_inject_nmi(vcpu);
-                       enable_intr_window(vcpu);
+       vmx_update_window_states(vcpu);
+
+       if (vcpu->arch.nmi_pending && !vcpu->arch.nmi_injected) {
+               if (vcpu->arch.interrupt.pending) {
+                       enable_nmi_window(vcpu);
+               } else if (vcpu->arch.nmi_window_open) {
+                       vcpu->arch.nmi_pending = false;
+                       vcpu->arch.nmi_injected = true;
+               } else {
+                       enable_nmi_window(vcpu);
                        return;
                }
        }
+       if (vcpu->arch.nmi_injected) {
+               vmx_inject_nmi(vcpu);
+               if (vcpu->arch.nmi_pending)
+                       enable_nmi_window(vcpu);
+               else if (kvm_cpu_has_interrupt(vcpu))
+                       enable_irq_window(vcpu);
+               return;
+       }
        if (!vcpu->arch.interrupt.pending && kvm_cpu_has_interrupt(vcpu)) {
-               if (vmx_irq_enabled(vcpu))
+               if (vcpu->arch.interrupt_window_open)
                        kvm_queue_interrupt(vcpu, kvm_cpu_get_interrupt(vcpu));
                else
                        enable_irq_window(vcpu);
@@ -3174,6 +3286,8 @@ static void vmx_intr_assist(struct kvm_vcpu *vcpu)
        if (vcpu->arch.interrupt.pending) {
                vmx_inject_irq(vcpu, vcpu->arch.interrupt.nr);
                kvm_timer_intr_post(vcpu, vcpu->arch.interrupt.nr);
+               if (kvm_cpu_has_interrupt(vcpu))
+                       enable_irq_window(vcpu);
        }
 }
 
@@ -3213,6 +3327,10 @@ static void vmx_vcpu_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
        struct vcpu_vmx *vmx = to_vmx(vcpu);
        u32 intr_info;
 
+       /* Record the guest's net vcpu time for enforced NMI injections. */
+       if (unlikely(!cpu_has_virtual_nmis() && vmx->soft_vnmi_blocked))
+               vmx->entry_time = ktime_get();
+
        /* Handle invalid guest state instead of entering VMX */
        if (vmx->emulation_required && emulate_invalid_guest_state) {
                handle_invalid_guest_state(vcpu, kvm_run);
@@ -3327,9 +3445,7 @@ static void vmx_vcpu_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
        if (vmx->rmode.irq.pending)
                fixup_rmode_irq(vmx);
 
-       vcpu->arch.interrupt_window_open =
-               (vmcs_read32(GUEST_INTERRUPTIBILITY_INFO) &
-                (GUEST_INTR_STATE_STI | GUEST_INTR_STATE_MOV_SS)) == 0;
+       vmx_update_window_states(vcpu);
 
        asm("mov %0, %%ds; mov %0, %%es" : : "r"(__USER_DS));
        vmx->launched = 1;
@@ -3337,7 +3453,7 @@ static void vmx_vcpu_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
        intr_info = vmcs_read32(VM_EXIT_INTR_INFO);
 
        /* We need to handle NMIs before interrupts are enabled */
-       if ((intr_info & INTR_INFO_INTR_TYPE_MASK) == 0x200 &&
+       if ((intr_info & INTR_INFO_INTR_TYPE_MASK) == INTR_TYPE_NMI_INTR &&
            (intr_info & INTR_INFO_VALID_MASK)) {
                KVMTRACE_0D(NMI, vcpu, handler);
                asm("int $2");
@@ -3455,6 +3571,11 @@ static int get_ept_level(void)
        return VMX_EPT_DEFAULT_GAW + 1;
 }
 
+static int vmx_get_mt_mask_shift(void)
+{
+       return VMX_EPT_MT_EPTE_SHIFT;
+}
+
 static struct kvm_x86_ops vmx_x86_ops = {
        .cpu_has_kvm_support = cpu_has_kvm_support,
        .disabled_by_bios = vmx_disabled_by_bios,
@@ -3510,6 +3631,7 @@ static struct kvm_x86_ops vmx_x86_ops = {
 
        .set_tss_addr = vmx_set_tss_addr,
        .get_tdp_level = get_ept_level,
+       .get_mt_mask_shift = vmx_get_mt_mask_shift,
 };
 
 static int __init vmx_init(void)
@@ -3566,10 +3688,10 @@ static int __init vmx_init(void)
                bypass_guest_pf = 0;
                kvm_mmu_set_base_ptes(VMX_EPT_READABLE_MASK |
                        VMX_EPT_WRITABLE_MASK |
-                       VMX_EPT_DEFAULT_MT << VMX_EPT_MT_EPTE_SHIFT |
                        VMX_EPT_IGMT_BIT);
                kvm_mmu_set_mask_ptes(0ull, 0ull, 0ull, 0ull,
-                               VMX_EPT_EXECUTABLE_MASK);
+                               VMX_EPT_EXECUTABLE_MASK,
+                               VMX_EPT_DEFAULT_MT << VMX_EPT_MT_EPTE_SHIFT);
                kvm_enable_tdp();
        } else
                kvm_disable_tdp();