KVM: MMU: sync roots on mmu reload
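
If the guest reloads cr3 with the value it already holds, the shadow
page tables may still be stale with respect to the guest page tables,
so kvm_set_cr3() must sync the shadow roots before flushing the TLB
rather than flushing alone.

This blobdiff spans several commits, so it also carries related x86.c
changes:

- Assigned-device teardown is factored out into
  kvm_free_assigned_device(), so the assign-irq error paths can release
  the device instead of leaking it, and kvm_vm_ioctl_assign_device()
  can map the guest through the IOMMU (KVM_DEV_ASSIGN_ENABLE_IOMMU,
  advertised as KVM_CAP_IOMMU when an Intel IOMMU is present).

- The vcpu loop is split into vcpu_enter_guest(), which performs a
  single guest entry, and __vcpu_run(), which loops around it.  Halted
  vcpus now sleep in kvm_vcpu_block() and are resumed via
  KVM_REQ_UNHALT instead of returning -EINTR to userspace.

- Callers of gfn_to_page() no longer take current->mm->mmap_sem around
  the call; the page lookup no longer requires it.

- kvm_arch_vcpu_ioctl_set_sregs() unhalts vcpu 0 when userspace writes
  the architectural reset state (real mode, CS 0xf000 with base
  0xffff0000, RIP 0xfff0), so older userspace that does not set
  mp_state on reset keeps working.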
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 3f3cb7107c03179f8b46fc45488b72ce21075d66..88e6d9abbd2ba0f43525762a42925d60c1241dc6 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -35,6 +35,7 @@
 #include <linux/module.h>
 #include <linux/mman.h>
 #include <linux/highmem.h>
+#include <linux/intel-iommu.h>
 
 #include <asm/uaccess.h>
 #include <asm/msr.h>
@@ -165,6 +166,43 @@ static void kvm_assigned_dev_ack_irq(struct kvm_irq_ack_notifier *kian)
        enable_irq(dev->host_irq);
 }
 
+static void kvm_free_assigned_device(struct kvm *kvm,
+                                    struct kvm_assigned_dev_kernel
+                                    *assigned_dev)
+{
+       if (irqchip_in_kernel(kvm) && assigned_dev->irq_requested)
+               free_irq(assigned_dev->host_irq, (void *)assigned_dev);
+
+       kvm_unregister_irq_ack_notifier(kvm, &assigned_dev->ack_notifier);
+
+       if (cancel_work_sync(&assigned_dev->interrupt_work))
+               /* We had pending work. That means we will have to take
+                * care of kvm_put_kvm.
+                */
+               kvm_put_kvm(kvm);
+
+       pci_release_regions(assigned_dev->dev);
+       pci_disable_device(assigned_dev->dev);
+       pci_dev_put(assigned_dev->dev);
+
+       list_del(&assigned_dev->list);
+       kfree(assigned_dev);
+}
+
+static void kvm_free_all_assigned_devices(struct kvm *kvm)
+{
+       struct list_head *ptr, *ptr2;
+       struct kvm_assigned_dev_kernel *assigned_dev;
+
+       list_for_each_safe(ptr, ptr2, &kvm->arch.assigned_dev_head) {
+               assigned_dev = list_entry(ptr,
+                                         struct kvm_assigned_dev_kernel,
+                                         list);
+
+               kvm_free_assigned_device(kvm, assigned_dev);
+       }
+}
+
 static int kvm_vm_ioctl_assign_irq(struct kvm *kvm,
                                   struct kvm_assigned_irq
                                   *assigned_irq)
@@ -193,8 +231,8 @@ static int kvm_vm_ioctl_assign_irq(struct kvm *kvm,
 
        if (irqchip_in_kernel(kvm)) {
                if (!capable(CAP_SYS_RAWIO)) {
-                       return -EPERM;
-                       goto out;
+                       r = -EPERM;
+                       goto out_release;
                }
 
                if (assigned_irq->host_irq)
@@ -213,17 +251,18 @@ static int kvm_vm_ioctl_assign_irq(struct kvm *kvm,
                 */
                if (request_irq(match->host_irq, kvm_assigned_dev_intr, 0,
                                "kvm_assigned_device", (void *)match)) {
-                       printk(KERN_INFO "%s: couldn't allocate irq for pv "
-                              "device\n", __func__);
                        r = -EIO;
-                       goto out;
+                       goto out_release;
                }
        }
 
        match->irq_requested = true;
-out:
        mutex_unlock(&kvm->lock);
        return r;
+out_release:
+       mutex_unlock(&kvm->lock);
+       kvm_free_assigned_device(kvm, match);
+       return r;
 }
 
 static int kvm_vm_ioctl_assign_device(struct kvm *kvm,
@@ -277,9 +316,18 @@ static int kvm_vm_ioctl_assign_device(struct kvm *kvm,
 
        list_add(&match->list, &kvm->arch.assigned_dev_head);
 
+       if (assigned_dev->flags & KVM_DEV_ASSIGN_ENABLE_IOMMU) {
+               r = kvm_iommu_map_guest(kvm, match);
+               if (r)
+                       goto out_list_del;
+       }
+
 out:
        mutex_unlock(&kvm->lock);
        return r;
+out_list_del:
+       list_del(&match->list);
+       pci_release_regions(dev);
 out_disable:
        pci_disable_device(dev);
 out_put:
@@ -290,40 +338,6 @@ out_free:
        return r;
 }
 
-static void kvm_free_assigned_devices(struct kvm *kvm)
-{
-       struct list_head *ptr, *ptr2;
-       struct kvm_assigned_dev_kernel *assigned_dev;
-
-       list_for_each_safe(ptr, ptr2, &kvm->arch.assigned_dev_head) {
-               assigned_dev = list_entry(ptr,
-                                         struct kvm_assigned_dev_kernel,
-                                         list);
-
-               if (irqchip_in_kernel(kvm) && assigned_dev->irq_requested) {
-                       free_irq(assigned_dev->host_irq,
-                                (void *)assigned_dev);
-
-                       kvm_unregister_irq_ack_notifier(kvm,
-                                                       &assigned_dev->
-                                                       ack_notifier);
-               }
-
-               if (cancel_work_sync(&assigned_dev->interrupt_work))
-                       /* We had pending work. That means we will have to take
-                        * care of kvm_put_kvm.
-                        */
-                       kvm_put_kvm(kvm);
-
-               pci_release_regions(assigned_dev->dev);
-               pci_disable_device(assigned_dev->dev);
-               pci_dev_put(assigned_dev->dev);
-
-               list_del(&assigned_dev->list);
-               kfree(assigned_dev);
-       }
-}
-
 unsigned long segment_base(u16 selector)
 {
        struct descriptor_table gdt;
@@ -580,6 +594,7 @@ EXPORT_SYMBOL_GPL(kvm_set_cr4);
 void kvm_set_cr3(struct kvm_vcpu *vcpu, unsigned long cr3)
 {
        if (cr3 == vcpu->arch.cr3 && !pdptrs_changed(vcpu)) {
+               kvm_mmu_sync_roots(vcpu);
                kvm_mmu_flush_tlb(vcpu);
                return;
        }
@@ -932,10 +947,8 @@ int kvm_set_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 data)
                /* ...but clean it before doing the actual write */
                vcpu->arch.time_offset = data & ~(PAGE_MASK | 1);
 
-               down_read(&current->mm->mmap_sem);
                vcpu->arch.time_page =
                                gfn_to_page(vcpu->kvm, data >> PAGE_SHIFT);
-               up_read(&current->mm->mmap_sem);
 
                if (is_error_page(vcpu->arch.time_page)) {
                        kvm_release_page_clean(vcpu->arch.time_page);
@@ -1147,6 +1160,9 @@ int kvm_dev_ioctl_check_extension(long ext)
        case KVM_CAP_PV_MMU:
                r = !tdp_enabled;
                break;
+       case KVM_CAP_IOMMU:
+               r = intel_iommu_found();
+               break;
        default:
                r = 0;
                break;
@@ -2305,9 +2321,7 @@ static int emulator_cmpxchg_emulated(unsigned long addr,
 
                val = *(u64 *)new;
 
-               down_read(&current->mm->mmap_sem);
                page = gfn_to_page(vcpu->kvm, gpa >> PAGE_SHIFT);
-               up_read(&current->mm->mmap_sem);
 
                kaddr = kmap_atomic(page, KM_USER0);
                set_64bit((u64 *)(kaddr + offset_in_page(gpa)), val);
@@ -2798,11 +2812,6 @@ int kvm_emulate_halt(struct kvm_vcpu *vcpu)
        KVMTRACE_0D(HLT, vcpu, handler);
        if (irqchip_in_kernel(vcpu->kvm)) {
                vcpu->arch.mp_state = KVM_MP_STATE_HALTED;
-               up_read(&vcpu->kvm->slots_lock);
-               kvm_vcpu_block(vcpu);
-               down_read(&vcpu->kvm->slots_lock);
-               if (vcpu->arch.mp_state != KVM_MP_STATE_RUNNABLE)
-                       return -EINTR;
                return 1;
        } else {
                vcpu->run->exit_reason = KVM_EXIT_HLT;
@@ -3077,9 +3086,7 @@ static void vapic_enter(struct kvm_vcpu *vcpu)
        if (!apic || !apic->vapic_addr)
                return;
 
-       down_read(&current->mm->mmap_sem);
        page = gfn_to_page(vcpu->kvm, apic->vapic_addr >> PAGE_SHIFT);
-       up_read(&current->mm->mmap_sem);
 
        vcpu->arch.apic->vapic_page = page;
 }
@@ -3097,24 +3104,10 @@ static void vapic_exit(struct kvm_vcpu *vcpu)
        up_read(&vcpu->kvm->slots_lock);
 }
 
-static int __vcpu_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
+static int vcpu_enter_guest(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
 {
        int r;
 
-       if (unlikely(vcpu->arch.mp_state == KVM_MP_STATE_SIPI_RECEIVED)) {
-               pr_debug("vcpu %d received sipi with vector # %x\n",
-                      vcpu->vcpu_id, vcpu->arch.sipi_vector);
-               kvm_lapic_reset(vcpu);
-               r = kvm_x86_ops->vcpu_reset(vcpu);
-               if (r)
-                       return r;
-               vcpu->arch.mp_state = KVM_MP_STATE_RUNNABLE;
-       }
-
-       down_read(&vcpu->kvm->slots_lock);
-       vapic_enter(vcpu);
-
-again:
        if (vcpu->requests)
                if (test_and_clear_bit(KVM_REQ_MMU_RELOAD, &vcpu->requests))
                        kvm_mmu_unload(vcpu);
@@ -3151,22 +3144,13 @@ again:
 
        local_irq_disable();
 
-       if (vcpu->requests || need_resched()) {
+       if (vcpu->requests || need_resched() || signal_pending(current)) {
                local_irq_enable();
                preempt_enable();
                r = 1;
                goto out;
        }
 
-       if (signal_pending(current)) {
-               local_irq_enable();
-               preempt_enable();
-               r = -EINTR;
-               kvm_run->exit_reason = KVM_EXIT_INTR;
-               ++vcpu->stat.signal_exits;
-               goto out;
-       }
-
        if (vcpu->guest_debug.enabled)
                kvm_x86_ops->guest_debug_pre(vcpu);
 
@@ -3227,26 +3211,63 @@ again:
        kvm_lapic_sync_from_vapic(vcpu);
 
        r = kvm_x86_ops->handle_exit(kvm_run, vcpu);
+out:
+       return r;
+}
 
-       if (r > 0) {
-               if (dm_request_for_irq_injection(vcpu, kvm_run)) {
-                       r = -EINTR;
-                       kvm_run->exit_reason = KVM_EXIT_INTR;
-                       ++vcpu->stat.request_irq_exits;
-                       goto out;
-               }
-               if (!need_resched())
-                       goto again;
+static int __vcpu_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
+{
+       int r;
+
+       if (unlikely(vcpu->arch.mp_state == KVM_MP_STATE_SIPI_RECEIVED)) {
+               pr_debug("vcpu %d received sipi with vector # %x\n",
+                      vcpu->vcpu_id, vcpu->arch.sipi_vector);
+               kvm_lapic_reset(vcpu);
+               r = kvm_x86_ops->vcpu_reset(vcpu);
+               if (r)
+                       return r;
+               vcpu->arch.mp_state = KVM_MP_STATE_RUNNABLE;
        }
 
-out:
-       up_read(&vcpu->kvm->slots_lock);
-       if (r > 0) {
-               kvm_resched(vcpu);
-               down_read(&vcpu->kvm->slots_lock);
-               goto again;
+       down_read(&vcpu->kvm->slots_lock);
+       vapic_enter(vcpu);
+
+       r = 1;
+       while (r > 0) {
+               if (vcpu->arch.mp_state == KVM_MP_STATE_RUNNABLE)
+                       r = vcpu_enter_guest(vcpu, kvm_run);
+               else {
+                       up_read(&vcpu->kvm->slots_lock);
+                       kvm_vcpu_block(vcpu);
+                       down_read(&vcpu->kvm->slots_lock);
+                       if (test_and_clear_bit(KVM_REQ_UNHALT, &vcpu->requests))
+                               if (vcpu->arch.mp_state == KVM_MP_STATE_HALTED)
+                                       vcpu->arch.mp_state =
+                                                       KVM_MP_STATE_RUNNABLE;
+                       if (vcpu->arch.mp_state != KVM_MP_STATE_RUNNABLE)
+                               r = -EINTR;
+               }
+
+               if (r > 0) {
+                       if (dm_request_for_irq_injection(vcpu, kvm_run)) {
+                               r = -EINTR;
+                               kvm_run->exit_reason = KVM_EXIT_INTR;
+                               ++vcpu->stat.request_irq_exits;
+                       }
+                       if (signal_pending(current)) {
+                               r = -EINTR;
+                               kvm_run->exit_reason = KVM_EXIT_INTR;
+                               ++vcpu->stat.signal_exits;
+                       }
+                       if (need_resched()) {
+                               up_read(&vcpu->kvm->slots_lock);
+                               kvm_resched(vcpu);
+                               down_read(&vcpu->kvm->slots_lock);
+                       }
+               }
        }
 
+       up_read(&vcpu->kvm->slots_lock);
        post_kvm_run_save(vcpu, kvm_run);
 
        vapic_exit(vcpu);
@@ -3266,6 +3287,7 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
 
        if (unlikely(vcpu->arch.mp_state == KVM_MP_STATE_UNINITIALIZED)) {
                kvm_vcpu_block(vcpu);
+               clear_bit(KVM_REQ_UNHALT, &vcpu->requests);
                r = -EAGAIN;
                goto out;
        }
@@ -3590,7 +3612,7 @@ static int load_segment_descriptor_to_kvm_desct(struct kvm_vcpu *vcpu,
        return 0;
 }
 
-int kvm_load_realmode_segment(struct kvm_vcpu *vcpu, u16 selector, int seg)
+static int kvm_load_realmode_segment(struct kvm_vcpu *vcpu, u16 selector, int seg)
 {
        struct kvm_segment segvar = {
                .base = selector << 4,
@@ -3949,6 +3971,12 @@ int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
        kvm_set_segment(vcpu, &sregs->tr, VCPU_SREG_TR);
        kvm_set_segment(vcpu, &sregs->ldt, VCPU_SREG_LDTR);
 
+       /* Older userspace won't unhalt the vcpu on reset. */
+       if (vcpu->vcpu_id == 0 && kvm_rip_read(vcpu) == 0xfff0 &&
+           sregs->cs.selector == 0xf000 && sregs->cs.base == 0xffff0000 &&
+           !(vcpu->arch.cr0 & X86_CR0_PE))
+               vcpu->arch.mp_state = KVM_MP_STATE_RUNNABLE;
+
        vcpu_put(vcpu);
 
        return 0;
@@ -4266,7 +4294,8 @@ static void kvm_free_vcpus(struct kvm *kvm)
 
 void kvm_arch_destroy_vm(struct kvm *kvm)
 {
-       kvm_free_assigned_devices(kvm);
+       kvm_iommu_unmap_guest(kvm);
+       kvm_free_all_assigned_devices(kvm);
        kvm_free_pit(kvm);
        kfree(kvm->arch.vpic);
        kfree(kvm->arch.vioapic);
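
A minimal userspace sketch of the new assignment path, assuming the
KVM_ASSIGN_PCI_DEVICE ioctl and struct kvm_assigned_pci_dev layout from
include/linux/kvm.h in the same tree (neither is shown in this diff, so
verify the names and the assigned_dev_id convention against the header
before relying on this):

	/* Assign host PCI device 00:19.0 (hypothetical), requesting
	 * IOMMU protection only when the kernel advertises it
	 * (KVM_CAP_IOMMU above returns intel_iommu_found()). */
	#include <fcntl.h>
	#include <stdio.h>
	#include <sys/ioctl.h>
	#include <linux/kvm.h>

	int main(void)
	{
		int kvm_fd, vm_fd;
		struct kvm_assigned_pci_dev dev = {
			.assigned_dev_id = (0x19 << 3) | 0, /* userspace-chosen handle */
			.busnr = 0,
			.devfn = (0x19 << 3) | 0,           /* PCI_DEVFN(0x19, 0) */
		};

		kvm_fd = open("/dev/kvm", O_RDWR);
		if (kvm_fd < 0) {
			perror("/dev/kvm");
			return 1;
		}
		vm_fd = ioctl(kvm_fd, KVM_CREATE_VM, 0);
		if (vm_fd < 0) {
			perror("KVM_CREATE_VM");
			return 1;
		}

		if (ioctl(kvm_fd, KVM_CHECK_EXTENSION, KVM_CAP_IOMMU) > 0)
			dev.flags |= KVM_DEV_ASSIGN_ENABLE_IOMMU;

		if (ioctl(vm_fd, KVM_ASSIGN_PCI_DEVICE, &dev) < 0) {
			perror("KVM_ASSIGN_PCI_DEVICE");
			return 1;
		}
		return 0;
	}

A real user would follow this with KVM_ASSIGN_IRQ (the
kvm_vm_ioctl_assign_irq() path above) to route the device's host irq
into the guest.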