Merge ../linux-2.6

diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
index 218949cce1a0975eac7cd566bfa1cd75471a975d..b756e876dce391b222c6e4ec84c7dd6447e738de 100644
--- a/arch/x86/kvm/svm.c
+++ b/arch/x86/kvm/svm.c
@@ -27,6 +27,8 @@
 
 #include <asm/desc.h>
 
+#define __ex(x) __kvm_handle_fault_on_reboot(x)
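+/*
+ * __ex() emits its instruction through __kvm_handle_fault_on_reboot(), which
+ * adds an exception fixup so a fault taken after SVM has been disabled
+ * (e.g. during an emergency reboot) is caught rather than crashing the host.
+ */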
+
 MODULE_AUTHOR("Qumranet");
 MODULE_LICENSE("GPL");
 
@@ -129,17 +131,17 @@ static inline void push_irq(struct kvm_vcpu *vcpu, u8 irq)
 
 static inline void clgi(void)
 {
-       asm volatile (SVM_CLGI);
+       asm volatile (__ex(SVM_CLGI));
 }
 
 static inline void stgi(void)
 {
-       asm volatile (SVM_STGI);
+       asm volatile (__ex(SVM_STGI));
 }
 
 static inline void invlpga(unsigned long addr, u32 asid)
 {
-       asm volatile (SVM_INVLPGA :: "a"(addr), "c"(asid));
+       asm volatile (__ex(SVM_INVLPGA) :: "a"(addr), "c"(asid));
 }
 
 static inline unsigned long kvm_read_cr2(void)
@@ -270,19 +272,11 @@ static int has_svm(void)
 
 static void svm_hardware_disable(void *garbage)
 {
-       struct svm_cpu_data *svm_data
-               = per_cpu(svm_data, raw_smp_processor_id());
-
-       if (svm_data) {
-               uint64_t efer;
+       uint64_t efer;
 
-               wrmsrl(MSR_VM_HSAVE_PA, 0);
-               rdmsrl(MSR_EFER, efer);
-               wrmsrl(MSR_EFER, efer & ~MSR_EFER_SVME_MASK);
-               per_cpu(svm_data, raw_smp_processor_id()) = NULL;
-               __free_page(svm_data->save_area);
-               kfree(svm_data);
-       }
+       wrmsrl(MSR_VM_HSAVE_PA, 0);
+       rdmsrl(MSR_EFER, efer);
+       wrmsrl(MSR_EFER, efer & ~MSR_EFER_SVME_MASK);
 }
 
 static void svm_hardware_enable(void *garbage)
@@ -321,6 +315,19 @@ static void svm_hardware_enable(void *garbage)
               page_to_pfn(svm_data->save_area) << PAGE_SHIFT);
 }
 
+static void svm_cpu_uninit(int cpu)
+{
+       struct svm_cpu_data *svm_data
+               = per_cpu(svm_data, raw_smp_processor_id());
+
+       if (!svm_data)
+               return;
+
+       per_cpu(svm_data, raw_smp_processor_id()) = NULL;
+       __free_page(svm_data->save_area);
+       kfree(svm_data);
+}
+
 static int svm_cpu_init(int cpu)
 {
        struct svm_cpu_data *svm_data;
@@ -458,6 +465,11 @@ err:
 
 static __exit void svm_hardware_unsetup(void)
 {
+       int cpu;
+
+       for_each_online_cpu(cpu)
+               svm_cpu_uninit(cpu);
+
        __free_pages(pfn_to_page(iopm_base >> PAGE_SHIFT), IOPM_ALLOC_ORDER);
        iopm_base = 0;
 }
@@ -707,10 +719,6 @@ static void svm_vcpu_put(struct kvm_vcpu *vcpu)
        rdtscll(vcpu->arch.host_tsc);
 }
 
-static void svm_vcpu_decache(struct kvm_vcpu *vcpu)
-{
-}
-
 static void svm_cache_regs(struct kvm_vcpu *vcpu)
 {
        struct vcpu_svm *svm = to_svm(vcpu);
@@ -1312,16 +1320,19 @@ static int svm_set_msr(struct kvm_vcpu *vcpu, unsigned ecx, u64 data)
        case MSR_K7_EVNTSEL1:
        case MSR_K7_EVNTSEL2:
        case MSR_K7_EVNTSEL3:
+       case MSR_K7_PERFCTR0:
+       case MSR_K7_PERFCTR1:
+       case MSR_K7_PERFCTR2:
+       case MSR_K7_PERFCTR3:
                /*
-                * only support writing 0 to the performance counters for now
-                * to make Windows happy. Should be replaced by a real
-                * performance counter emulation later.
+                * Just discard all writes to the performance counters; this
+                * should keep both older Linux and Windows 64-bit guests
+                * happy.
                 */
-               if (data != 0)
-                       goto unhandled;
+               pr_unimpl(vcpu, "unimplemented perfctr wrmsr: 0x%x data 0x%llx\n", ecx, data);
+
                break;
        default:
-       unhandled:
                return kvm_set_msr_common(vcpu, ecx, data);
        }
        return 0;
@@ -1699,9 +1710,9 @@ static void svm_vcpu_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
        sync_lapic_to_cr8(vcpu);
 
        save_host_msrs(vcpu);
-       fs_selector = read_fs();
-       gs_selector = read_gs();
-       ldt_selector = read_ldt();
+       fs_selector = kvm_read_fs();
+       gs_selector = kvm_read_gs();
+       ldt_selector = kvm_read_ldt();
        svm->host_cr2 = kvm_read_cr2();
        svm->host_dr6 = read_dr6();
        svm->host_dr7 = read_dr7();
@@ -1755,17 +1766,17 @@ static void svm_vcpu_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
                /* Enter guest mode */
                "push %%rax \n\t"
                "mov %c[vmcb](%[svm]), %%rax \n\t"
-               SVM_VMLOAD "\n\t"
-               SVM_VMRUN "\n\t"
-               SVM_VMSAVE "\n\t"
+               __ex(SVM_VMLOAD) "\n\t"
+               __ex(SVM_VMRUN) "\n\t"
+               __ex(SVM_VMSAVE) "\n\t"
                "pop %%rax \n\t"
 #else
                /* Enter guest mode */
                "push %%eax \n\t"
                "mov %c[vmcb](%[svm]), %%eax \n\t"
-               SVM_VMLOAD "\n\t"
-               SVM_VMRUN "\n\t"
-               SVM_VMSAVE "\n\t"
+               __ex(SVM_VMLOAD) "\n\t"
+               __ex(SVM_VMRUN) "\n\t"
+               __ex(SVM_VMSAVE) "\n\t"
                "pop %%eax \n\t"
 #endif
 
@@ -1834,9 +1845,9 @@ static void svm_vcpu_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
        write_dr7(svm->host_dr7);
        kvm_write_cr2(svm->host_cr2);
 
-       load_fs(fs_selector);
-       load_gs(gs_selector);
-       load_ldt(ldt_selector);
+       kvm_load_fs(fs_selector);
+       kvm_load_gs(gs_selector);
+       kvm_load_ldt(ldt_selector);
        load_host_msrs(vcpu);
 
        reload_tss(vcpu);
@@ -1928,7 +1939,6 @@ static struct kvm_x86_ops svm_x86_ops = {
        .prepare_guest_switch = svm_prepare_guest_switch,
        .vcpu_load = svm_vcpu_load,
        .vcpu_put = svm_vcpu_put,
-       .vcpu_decache = svm_vcpu_decache,
 
        .set_guest_debug = svm_guest_debug,
        .get_msr = svm_get_msr,