KVM: Portability: Make kvm_vcpu_ioctl_translate arch dependent
diff --git a/drivers/kvm/x86.c b/drivers/kvm/x86.c
index 394da6605364b0aa9bfa3dd1eb01fbd43e562cc3..f1746afab9e5f3e094a4a1d968a4c75647bd4744 100644
--- a/drivers/kvm/x86.c
+++ b/drivers/kvm/x86.c
@@ -16,6 +16,7 @@
 
 #include "kvm.h"
 #include "x86.h"
+#include "x86_emulate.h"
 #include "segment_descriptor.h"
 #include "irq.h"
 
@@ -25,6 +26,7 @@
 #include <linux/module.h>
 
 #include <asm/uaccess.h>
+#include <asm/msr.h>
 
 #define MAX_IO_MSRS 256
 #define CR0_RESERVED_BITS                                              \
@@ -42,6 +44,8 @@
 
 #define STAT_OFFSET(x) offsetof(struct kvm_vcpu, stat.x)
 
+struct kvm_x86_ops *kvm_x86_ops;
+
 struct kvm_stats_debugfs_item debugfs_entries[] = {
        { "pf_fixed", STAT_OFFSET(pf_fixed) },
        { "pf_guest", STAT_OFFSET(pf_guest) },
@@ -560,6 +564,61 @@ out:
        return r;
 }
 
+/*
+ * Make sure that a cpu that is being hot-unplugged does not have any vcpus
+ * cached on it.
+ */
+void decache_vcpus_on_cpu(int cpu)
+{
+       struct kvm *vm;
+       struct kvm_vcpu *vcpu;
+       int i;
+
+       spin_lock(&kvm_lock);
+       list_for_each_entry(vm, &vm_list, vm_list)
+               for (i = 0; i < KVM_MAX_VCPUS; ++i) {
+                       vcpu = vm->vcpus[i];
+                       if (!vcpu)
+                               continue;
+                       /*
+                        * If the vcpu is locked, then it is running on some
+                        * other cpu and therefore it is not cached on the
+                        * cpu in question.
+                        *
+                        * If it's not locked, check the last cpu it executed
+                        * on.
+                        */
+                       if (mutex_trylock(&vcpu->mutex)) {
+                               if (vcpu->cpu == cpu) {
+                                       kvm_x86_ops->vcpu_decache(vcpu);
+                                       vcpu->cpu = -1;
+                               }
+                               mutex_unlock(&vcpu->mutex);
+                       }
+               }
+       spin_unlock(&kvm_lock);
+}
+
+int kvm_dev_ioctl_check_extension(long ext)
+{
+       int r;
+
+       switch (ext) {
+       case KVM_CAP_IRQCHIP:
+       case KVM_CAP_HLT:
+       case KVM_CAP_MMU_SHADOW_CACHE_CONTROL:
+       case KVM_CAP_USER_MEMORY:
+       case KVM_CAP_SET_TSS_ADDR:
+               r = 1;
+               break;
+       default:
+               r = 0;
+               break;
+       }
+       return r;
+}
+
 long kvm_arch_dev_ioctl(struct file *filp,
                        unsigned int ioctl, unsigned long arg)
 {
@@ -1606,11 +1665,36 @@ int kvm_emulate_pio_string(struct kvm_vcpu *vcpu, struct kvm_run *run, int in,
 }
 EXPORT_SYMBOL_GPL(kvm_emulate_pio_string);
 
-__init void kvm_arch_init(void)
+int kvm_arch_init(void *opaque)
 {
+       struct kvm_x86_ops *ops = (struct kvm_x86_ops *)opaque;
+
        kvm_init_msr_list();
+
+       if (kvm_x86_ops) {
+               printk(KERN_ERR "kvm: already loaded the other module\n");
+               return -EEXIST;
+       }
+
+       if (!ops->cpu_has_kvm_support()) {
+               printk(KERN_ERR "kvm: no hardware support\n");
+               return -EOPNOTSUPP;
+       }
+       if (ops->disabled_by_bios()) {
+               printk(KERN_ERR "kvm: disabled by bios\n");
+               return -EOPNOTSUPP;
+       }
+
+       kvm_x86_ops = ops;
+
+       return 0;
 }
 
+void kvm_arch_exit(void)
+{
+       kvm_x86_ops = NULL;
+}
+
 int kvm_emulate_halt(struct kvm_vcpu *vcpu)
 {
        ++vcpu->stat.halt_exits;
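
kvm_arch_init() now receives the vendor ops table as an opaque pointer and
rejects a second registration with -EEXIST. A sketch of the intended flow,
assuming the generic kvm_init() forwards its opaque argument unchanged to
kvm_arch_init(); the kvm_init() signature and the names vmx_x86_ops,
vcpu_vmx and vmx_init are illustrative assumptions, not taken from this
patch:

        static struct kvm_x86_ops vmx_x86_ops = {
                /* .cpu_has_kvm_support, .disabled_by_bios, ... */
        };

        static int __init vmx_init(void)
        {
                /* Fails with -EEXIST if another backend registered first. */
                return kvm_init(&vmx_x86_ops, sizeof(struct vcpu_vmx),
                                THIS_MODULE);
        }
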
@@ -2234,6 +2318,28 @@ struct fxsave {
 #endif
 };
 
+/*
+ * Translate a guest virtual address to a guest physical address.
+ */
+int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu,
+                                   struct kvm_translation *tr)
+{
+       unsigned long vaddr = tr->linear_address;
+       gpa_t gpa;
+
+       vcpu_load(vcpu);
+       mutex_lock(&vcpu->kvm->lock);
+       gpa = vcpu->mmu.gva_to_gpa(vcpu, vaddr);
+       tr->physical_address = gpa;
+       tr->valid = gpa != UNMAPPED_GVA;
+       tr->writeable = 1;
+       tr->usermode = 0;
+       mutex_unlock(&vcpu->kvm->lock);
+       vcpu_put(vcpu);
+
+       return 0;
+}
+
 int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
 {
        struct fxsave *fxsave = (struct fxsave *)&vcpu->guest_fx_image;
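
kvm_arch_vcpu_ioctl_translate() is the arch-side landing point for the
KVM_TRANSLATE vcpu ioctl, which is what this patch moves. Minimal userspace
usage, as a sketch (vcpu_fd is assumed to be an already-created vcpu file
descriptor):

        struct kvm_translation tr = { .linear_address = 0x1000 };

        if (ioctl(vcpu_fd, KVM_TRANSLATE, &tr) == 0 && tr.valid)
                printf("gva 0x%llx -> gpa 0x%llx\n",
                       (unsigned long long)tr.linear_address,
                       (unsigned long long)tr.physical_address);
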
@@ -2315,3 +2421,125 @@ void kvm_put_guest_fpu(struct kvm_vcpu *vcpu)
        fx_restore(&vcpu->host_fx_image);
 }
 EXPORT_SYMBOL_GPL(kvm_put_guest_fpu);
+
+void kvm_arch_vcpu_free(struct kvm_vcpu *vcpu)
+{
+       kvm_x86_ops->vcpu_free(vcpu);
+}
+
+struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm,
+                                               unsigned int id)
+{
+       int r;
+       struct kvm_vcpu *vcpu = kvm_x86_ops->vcpu_create(kvm, id);
+
+       if (IS_ERR(vcpu)) {
+               r = -ENOMEM;
+               goto fail;
+       }
+
+       /* We do fxsave: this must be aligned. */
+       BUG_ON((unsigned long)&vcpu->host_fx_image & 0xF);
+
+       vcpu_load(vcpu);
+       r = kvm_arch_vcpu_reset(vcpu);
+       if (r == 0)
+               r = kvm_mmu_setup(vcpu);
+       vcpu_put(vcpu);
+       if (r < 0)
+               goto free_vcpu;
+
+       return vcpu;
+free_vcpu:
+       kvm_x86_ops->vcpu_free(vcpu);
+fail:
+       return ERR_PTR(r);
+}
+
+void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu)
+{
+       vcpu_load(vcpu);
+       kvm_mmu_unload(vcpu);
+       vcpu_put(vcpu);
+
+       kvm_x86_ops->vcpu_free(vcpu);
+}
+
+int kvm_arch_vcpu_reset(struct kvm_vcpu *vcpu)
+{
+       return kvm_x86_ops->vcpu_reset(vcpu);
+}
+
+void kvm_arch_hardware_enable(void *garbage)
+{
+       kvm_x86_ops->hardware_enable(garbage);
+}
+
+void kvm_arch_hardware_disable(void *garbage)
+{
+       kvm_x86_ops->hardware_disable(garbage);
+}
+
+int kvm_arch_hardware_setup(void)
+{
+       return kvm_x86_ops->hardware_setup();
+}
+
+void kvm_arch_hardware_unsetup(void)
+{
+       kvm_x86_ops->hardware_unsetup();
+}
+
+void kvm_arch_check_processor_compat(void *rtn)
+{
+       kvm_x86_ops->check_processor_compatibility(rtn);
+}
+
+int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
+{
+       struct page *page;
+       struct kvm *kvm;
+       int r;
+
+       BUG_ON(vcpu->kvm == NULL);
+       kvm = vcpu->kvm;
+
+       vcpu->mmu.root_hpa = INVALID_PAGE;
+       if (!irqchip_in_kernel(kvm) || vcpu->vcpu_id == 0)
+               vcpu->mp_state = VCPU_MP_STATE_RUNNABLE;
+       else
+               vcpu->mp_state = VCPU_MP_STATE_UNINITIALIZED;
+
+       page = alloc_page(GFP_KERNEL | __GFP_ZERO);
+       if (!page) {
+               r = -ENOMEM;
+               goto fail;
+       }
+       vcpu->pio_data = page_address(page);
+
+       r = kvm_mmu_create(vcpu);
+       if (r < 0)
+               goto fail_free_pio_data;
+
+       if (irqchip_in_kernel(kvm)) {
+               r = kvm_create_lapic(vcpu);
+               if (r < 0)
+                       goto fail_mmu_destroy;
+       }
+
+       return 0;
+
+fail_mmu_destroy:
+       kvm_mmu_destroy(vcpu);
+fail_free_pio_data:
+       free_page((unsigned long)vcpu->pio_data);
+fail:
+       return r;
+}
+
+void kvm_arch_vcpu_uninit(struct kvm_vcpu *vcpu)
+{
+       kvm_free_lapic(vcpu);
+       kvm_mmu_destroy(vcpu);
+       free_page((unsigned long)vcpu->pio_data);
+}
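
Taken together, the hooks above pin down the vcpu lifecycle that the generic
layer drives. The ordering below is inferred from this file; the
kvm_vcpu_init() step is assumed from the generic side rather than shown in
this patch:

        /*
         * create:  kvm_arch_vcpu_create()
         *            -> kvm_x86_ops->vcpu_create()  (reaches
         *               kvm_arch_vcpu_init(): pio_data page, MMU, lapic)
         *            -> kvm_arch_vcpu_reset()
         *            -> kvm_mmu_setup()
         * destroy: kvm_arch_vcpu_destroy()
         *            -> kvm_mmu_unload()
         *            -> kvm_x86_ops->vcpu_free()    (reaches
         *               kvm_arch_vcpu_uninit(), which frees the lapic, MMU
         *               state and pio_data page in reverse order of init)
         */
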