KVM: Hoist SVM's get_cs_db_l_bits into core code.
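The last hunk in this diff adds kvm_get_cs_db_l_bits() to kvm_main.c and exports it: the CS default-size (db) and long-mode (l) bits are now read through the generic get_segment() path instead of backend-private descriptor decoding. The svm.c half of the hoist touches a different file and is not shown on this page; presumably the backend drops its own helper and points its ops table at the shared one, roughly as in the hedged sketch below (the svm_arch_ops name and .get_cs_db_l_bits field are assumed from this tree, not visible in this blobdiff):

	/*
	 * Hypothetical svm.c hookup, NOT part of this blobdiff: the backend
	 * stops decoding SVM segment attributes itself and reuses the
	 * exported core helper for its get_cs_db_l_bits callback.
	 */
	static struct kvm_arch_ops svm_arch_ops = {
		.get_cs_db_l_bits = kvm_get_cs_db_l_bits,
		/* remaining callbacks unchanged */
	};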
diff --git a/drivers/kvm/kvm_main.c b/drivers/kvm/kvm_main.c
index 8f8bfc9160e32d42907f35812bd5fcee2aad4b87..9dffbbea46a77ae720127ecbe8ee8e4b90b9f4da 100644
--- a/drivers/kvm/kvm_main.c
+++ b/drivers/kvm/kvm_main.c
@@ -249,6 +249,10 @@ int kvm_vcpu_init(struct kvm_vcpu *vcpu, struct kvm *kvm, unsigned id)
        vcpu->mmu.root_hpa = INVALID_PAGE;
        vcpu->kvm = kvm;
        vcpu->vcpu_id = id;
+       if (!irqchip_in_kernel(kvm) || id == 0)
+               vcpu->mp_state = VCPU_MP_STATE_RUNNABLE;
+       else
+               vcpu->mp_state = VCPU_MP_STATE_UNINITIALIZED;
        init_waitqueue_head(&vcpu->wq);
 
        page = alloc_page(GFP_KERNEL | __GFP_ZERO);
@@ -539,6 +543,7 @@ void set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4)
                return;
        }
        kvm_arch_ops->set_cr4(vcpu, cr4);
+       vcpu->cr4 = cr4;
        mutex_lock(&vcpu->kvm->lock);
        kvm_mmu_reset_context(vcpu);
        mutex_unlock(&vcpu->kvm->lock);
@@ -651,6 +656,7 @@ void fx_init(struct kvm_vcpu *vcpu)
        fx_restore(&vcpu->host_fx_image);
        preempt_enable();
 
+       vcpu->cr0 |= X86_CR0_ET;
        after_mxcsr_mask = offsetof(struct i387_fxsave_struct, st_space);
        vcpu->guest_fx_image.mxcsr = 0x1f80;
        memset((void *)&vcpu->guest_fx_image + after_mxcsr_mask,
@@ -1233,10 +1239,8 @@ int emulate_invlpg(struct kvm_vcpu *vcpu, gva_t address)
 
 int emulate_clts(struct kvm_vcpu *vcpu)
 {
-       unsigned long cr0;
-
-       cr0 = vcpu->cr0 & ~X86_CR0_TS;
-       kvm_arch_ops->set_cr0(vcpu, cr0);
+       vcpu->cr0 &= ~X86_CR0_TS;
+       kvm_arch_ops->set_cr0(vcpu, vcpu->cr0);
        return X86EMUL_CONTINUE;
 }
 
@@ -1371,7 +1375,7 @@ EXPORT_SYMBOL_GPL(emulate_instruction);
 /*
  * The vCPU has executed a HLT instruction with in-kernel mode enabled.
  */
-static void kvm_vcpu_kernel_halt(struct kvm_vcpu *vcpu)
+static void kvm_vcpu_block(struct kvm_vcpu *vcpu)
 {
        DECLARE_WAITQUEUE(wait, current);
 
@@ -1380,24 +1384,28 @@ static void kvm_vcpu_kernel_halt(struct kvm_vcpu *vcpu)
        /*
         * We will block until either an interrupt or a signal wakes us up
         */
-       while(!(irqchip_in_kernel(vcpu->kvm) && kvm_cpu_has_interrupt(vcpu))
-             && !vcpu->irq_summary
-             && !signal_pending(current)) {
+       while (!kvm_cpu_has_interrupt(vcpu)
+              && !signal_pending(current)
+              && vcpu->mp_state != VCPU_MP_STATE_RUNNABLE
+              && vcpu->mp_state != VCPU_MP_STATE_SIPI_RECEIVED) {
                set_current_state(TASK_INTERRUPTIBLE);
                vcpu_put(vcpu);
                schedule();
                vcpu_load(vcpu);
        }
 
+       __set_current_state(TASK_RUNNING);
        remove_wait_queue(&vcpu->wq, &wait);
-       set_current_state(TASK_RUNNING);
 }
 
 int kvm_emulate_halt(struct kvm_vcpu *vcpu)
 {
        ++vcpu->stat.halt_exits;
        if (irqchip_in_kernel(vcpu->kvm)) {
-               kvm_vcpu_kernel_halt(vcpu);
+               vcpu->mp_state = VCPU_MP_STATE_HALTED;
+               kvm_vcpu_block(vcpu);
+               if (vcpu->mp_state != VCPU_MP_STATE_RUNNABLE)
+                       return -EINTR;
                return 1;
        } else {
                vcpu->run->exit_reason = KVM_EXIT_HLT;
@@ -2001,11 +2009,18 @@ static int kvm_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
 
        vcpu_load(vcpu);
 
+       if (unlikely(vcpu->mp_state == VCPU_MP_STATE_UNINITIALIZED)) {
+               kvm_vcpu_block(vcpu);
+               vcpu_put(vcpu);
+               return -EAGAIN;
+       }
+
        if (vcpu->sigset_active)
                sigprocmask(SIG_SETMASK, &vcpu->sigset, &sigsaved);
 
        /* re-sync apic's tpr */
-       set_cr8(vcpu, kvm_run->cr8);
+       if (!irqchip_in_kernel(vcpu->kvm))
+               set_cr8(vcpu, kvm_run->cr8);
 
        if (vcpu->pio.cur_count) {
                r = complete_pio(vcpu);
@@ -2210,6 +2225,7 @@ static int kvm_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
        kvm_arch_ops->decache_cr4_guest_bits(vcpu);
 
        mmu_reset_needed |= vcpu->cr0 != sregs->cr0;
+       vcpu->cr0 = sregs->cr0;
        kvm_arch_ops->set_cr0(vcpu, sregs->cr0);
 
        mmu_reset_needed |= vcpu->cr4 != sregs->cr4;
@@ -2254,6 +2270,16 @@ static int kvm_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
        return 0;
 }
 
+void kvm_get_cs_db_l_bits(struct kvm_vcpu *vcpu, int *db, int *l)
+{
+       struct kvm_segment cs;
+
+       get_segment(vcpu, &cs, VCPU_SREG_CS);
+       *db = cs.db;
+       *l = cs.l;
+}
+EXPORT_SYMBOL_GPL(kvm_get_cs_db_l_bits);
+
 /*
  * List of msr numbers which we expose to userspace through KVM_GET_MSRS
  * and KVM_SET_MSRS, and KVM_GET_MSR_INDEX_LIST.
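For reference, the new helper reports the classic CS.D/CS.L pair, so a caller can derive the guest's effective code-segment width from it. A hedged illustration follows; guest_cs_width() is hypothetical and not part of the patch:

	/*
	 * Hypothetical caller: map the CS.D/CS.L bits returned by
	 * kvm_get_cs_db_l_bits() to the guest's code width. CS.L set means
	 * a 64-bit long-mode segment; otherwise CS.D selects 32- vs 16-bit.
	 */
	static int guest_cs_width(struct kvm_vcpu *vcpu)
	{
		int db, l;

		kvm_get_cs_db_l_bits(vcpu, &db, &l);
		if (l)
			return 64;
		return db ? 32 : 16;
	}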