KVM: Handle cpuid in the kernel instead of punting to userspace
diff --git a/drivers/kvm/vmx.c b/drivers/kvm/vmx.c
index bfa0ce42ea92b35518345244b26604cc3855bd6d..71410a65bb90bd6974ea05bca3a3c868989a8a85 100644
--- a/drivers/kvm/vmx.c
+++ b/drivers/kvm/vmx.c
@@ -618,7 +618,7 @@ static void fix_pmode_dataseg(int seg, struct kvm_save_segment *save)
 {
        struct kvm_vmx_segment_field *sf = &kvm_vmx_segment_fields[seg];
 
-       if (vmcs_readl(sf->base) == save->base) {
+       if (vmcs_readl(sf->base) == save->base && (save->base & AR_S_MASK)) {
                vmcs_write16(sf->selector, save->selector);
                vmcs_writel(sf->base, save->base);
                vmcs_write32(sf->limit, save->limit);
@@ -1459,12 +1459,14 @@ static int handle_io(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
                = (vmcs_readl(GUEST_RFLAGS) & X86_EFLAGS_DF) != 0;
        kvm_run->io.rep = (exit_qualification & 32) != 0;
        kvm_run->io.port = exit_qualification >> 16;
+       kvm_run->io.count = 1;
        if (kvm_run->io.string) {
                if (!get_io_count(vcpu, &kvm_run->io.count))
                        return 1;
                kvm_run->io.address = vmcs_readl(GUEST_LINEAR_ADDRESS);
        } else
                kvm_run->io.value = vcpu->regs[VCPU_REGS_RAX]; /* rax */
+       vcpu->pio_pending = 1;
        return 0;
 }
 
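For a non-string port access the exit now reports a fixed count of 1, and pio_pending is raised so the kernel side knows, on the next KVM_RUN, that a port I/O is waiting to be completed rather than re-decoded. A minimal sketch of how that flag could be consumed is shown below; the helper name and the completion steps are assumptions for illustration, not code from this patch.

static void complete_pio_sketch(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
{
        if (!vcpu->pio_pending)
                return;

        /*
         * For a completed non-string IN, userspace has placed the result in
         * kvm_run->io.value; fold it back into the guest's RAX before the
         * vcpu is resumed.
         */
        if (kvm_run->io.direction == KVM_EXIT_IO_IN && !kvm_run->io.string)
                vcpu->regs[VCPU_REGS_RAX] = kvm_run->io.value;

        vcpu->pio_pending = 0;
}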
@@ -1583,8 +1585,8 @@ static int handle_dr(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
 
 static int handle_cpuid(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
 {
-       kvm_run->exit_reason = KVM_EXIT_CPUID;
-       return 0;
+       kvm_emulate_cpuid(vcpu);
+       return 1;
 }
 
 static int handle_rdmsr(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
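handle_cpuid() no longer bounces to userspace with KVM_EXIT_CPUID; it calls kvm_emulate_cpuid() and returns 1, meaning "handled in the kernel, keep the vcpu running". Roughly, the in-kernel handler looks up the leaf in RAX in the cpuid table userspace installed via KVM_SET_CPUID, loads EAX-EDX from the matching entry and advances past the instruction. The sketch below only illustrates that flow; the field names and the lookup are assumptions, not the body of the real kvm_emulate_cpuid().

static void emulate_cpuid_sketch(struct kvm_vcpu *vcpu)
{
        u32 function = vcpu->regs[VCPU_REGS_RAX];
        struct kvm_cpuid_entry *best = NULL;
        int i;

        /* Find the entry userspace provided for this cpuid leaf. */
        for (i = 0; i < vcpu->cpuid_nent; ++i)
                if (vcpu->cpuid_entries[i].function == function) {
                        best = &vcpu->cpuid_entries[i];
                        break;
                }

        if (best) {
                vcpu->regs[VCPU_REGS_RAX] = best->eax;
                vcpu->regs[VCPU_REGS_RBX] = best->ebx;
                vcpu->regs[VCPU_REGS_RCX] = best->ecx;
                vcpu->regs[VCPU_REGS_RDX] = best->edx;
        }
        /* The real handler then skips the guest's cpuid instruction. */
}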
@@ -1658,7 +1660,7 @@ static int handle_halt(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
 
 static int handle_vmcall(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
 {
-       vmcs_writel(GUEST_RIP, vmcs_readl(GUEST_RIP)+3);
+       skip_emulated_instruction(vcpu);
        return kvm_hypercall(vcpu, kvm_run);
 }
 
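Rather than hardcoding the 3-byte VMCALL encoding, handle_vmcall() now goes through skip_emulated_instruction(), which on VMX can use the instruction length the CPU recorded at exit time. A sketch of that idea, leaving out the interrupt-shadow bookkeeping the real function also performs:

static void skip_emulated_instruction_sketch(struct kvm_vcpu *vcpu)
{
        unsigned long rip = vmcs_readl(GUEST_RIP);

        /* VM_EXIT_INSTRUCTION_LEN holds the length of the exiting insn. */
        rip += vmcs_read32(VM_EXIT_INSTRUCTION_LEN);
        vmcs_writel(GUEST_RIP, rip);
}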
@@ -1888,6 +1890,27 @@ again:
                [cr2]"i"(offsetof(struct kvm_vcpu, cr2))
              : "cc", "memory" );
 
+       /*
+        * Reload segment selectors ASAP. (it's needed for a functional
+        * kernel: x86 relies on having __KERNEL_PDA in %fs and x86_64
+        * relies on having 0 in %gs for the CPU PDA to work.)
+        */
+       if (fs_gs_ldt_reload_needed) {
+               load_ldt(ldt_sel);
+               load_fs(fs_sel);
+               /*
+                * If we have to reload gs, we must take care to
+                * preserve our gs base.
+                */
+               local_irq_disable();
+               load_gs(gs_sel);
+#ifdef CONFIG_X86_64
+               wrmsrl(MSR_GS_BASE, vmcs_readl(HOST_GS_BASE));
+#endif
+               local_irq_enable();
+
+               reload_tss();
+       }
        ++kvm_stat.exits;
 
        save_msrs(vcpu->guest_msrs, NR_BAD_MSRS);
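This moves the %fs/%gs/LDT reload from the "VM entry succeeded" branch (removed below) to immediately after the exit, before any C code that goes through the PDA segments runs, and regardless of whether the entry failed. The fs_gs_ldt_reload_needed flag itself is computed before entry; the sketch below shows that save side with illustrative names and inline asm rather than the file's actual helpers.

static int segment_reload_needed_sketch(u16 *fs_sel, u16 *gs_sel, u16 *ldt_sel)
{
        asm("mov %%fs, %0" : "=rm"(*fs_sel));
        asm("mov %%gs, %0" : "=rm"(*gs_sel));
        asm("sldt %0" : "=rm"(*ldt_sel));

        /*
         * Anything other than plain RPL-0 GDT selectors, or a non-null LDT,
         * is not restored by the VMX host state and has to be reloaded by
         * hand after the exit -- the block added above.
         */
        return (*fs_sel & 7) || (*gs_sel & 7) || *ldt_sel;
}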
@@ -1905,22 +1928,6 @@ again:
                kvm_run->exit_reason = vmcs_read32(VM_INSTRUCTION_ERROR);
                r = 0;
        } else {
-               if (fs_gs_ldt_reload_needed) {
-                       load_ldt(ldt_sel);
-                       load_fs(fs_sel);
-                       /*
-                        * If we have to reload gs, we must take care to
-                        * preserve our gs base.
-                        */
-                       local_irq_disable();
-                       load_gs(gs_sel);
-#ifdef CONFIG_X86_64
-                       wrmsrl(MSR_GS_BASE, vmcs_readl(HOST_GS_BASE));
-#endif
-                       local_irq_enable();
-
-                       reload_tss();
-               }
                /*
                 * Profile KVM exit RIPs:
                 */