KVM: MMU: make the __nonpaging_map function generic
diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
index de755cb1431dcef84617b04e29eacb5a06fc59d0..9e29a13136c44a0fd3f13ba0b1e0a6e2debcd1eb 100644
--- a/arch/x86/kvm/svm.c
+++ b/arch/x86/kvm/svm.c
@@ -47,6 +47,11 @@ MODULE_LICENSE("GPL");
 #define SVM_FEATURE_LBRV (1 << 1)
 #define SVM_FEATURE_SVML (1 << 2)
 
+static bool npt_enabled = false;
+static int npt = 1;
+
+module_param(npt, int, S_IRUGO);
+
 static void kvm_reput_irq(struct vcpu_svm *svm);
 
 static inline struct vcpu_svm *to_svm(struct kvm_vcpu *vcpu)
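The svm_has() checks used further down in this diff are a one-line mask against a cached copy of CPUID 0x8000000A EDX. As a minimal sketch of that plumbing (the helper and the NPT bit are defined elsewhere in svm.c; this is shown for orientation only, not part of this hunk):

#define SVM_FEATURE_NPT  (1 << 0)

static u32 svm_features;        /* CPUID 0x8000000A:EDX, cached at setup time */

static inline int svm_has(u32 feat)
{
        return svm_features & feat;
}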
@@ -302,7 +307,6 @@ static void svm_hardware_enable(void *garbage)
        svm_data->asid_generation = 1;
        svm_data->max_asid = cpuid_ebx(SVM_CPUID_FUNC) - 1;
        svm_data->next_asid = svm_data->max_asid + 1;
-       svm_features = cpuid_edx(SVM_CPUID_FUNC);
 
        asm volatile ("sgdt %0" : "=m"(gdt_descr));
        gdt = (struct desc_struct *)gdt_descr.address;
@@ -403,11 +407,30 @@ static __init int svm_hardware_setup(void)
        set_msr_interception(msrpm_va, MSR_IA32_SYSENTER_ESP, 1, 1);
        set_msr_interception(msrpm_va, MSR_IA32_SYSENTER_EIP, 1, 1);
 
+       if (boot_cpu_has(X86_FEATURE_NX))
+               kvm_enable_efer_bits(EFER_NX);
+
        for_each_online_cpu(cpu) {
                r = svm_cpu_init(cpu);
                if (r)
                        goto err_2;
        }
+
+       svm_features = cpuid_edx(SVM_CPUID_FUNC);
+
+       if (!svm_has(SVM_FEATURE_NPT))
+               npt_enabled = false;
+
+       if (npt_enabled && !npt) {
+               printk(KERN_INFO "kvm: Nested Paging disabled\n");
+               npt_enabled = false;
+       }
+
+       if (npt_enabled) {
+               printk(KERN_INFO "kvm: Nested Paging enabled\n");
+               kvm_enable_tdp();
+       }
+
        return 0;
 
 err_2:
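kvm_enable_tdp() is supplied by the MMU half of this patch series; roughly, it just flips the MMU's global two-dimensional-paging switch. A sketch of the mmu.c side, for context only:

bool tdp_enabled = false;

void kvm_enable_tdp(void)
{
        tdp_enabled = true;
}
EXPORT_SYMBOL_GPL(kvm_enable_tdp);

Note how the ladder above only ever clears npt_enabled: loading the module with npt=0 (e.g. modprobe kvm-amd npt=0) keeps KVM on shadow paging even on NPT-capable hardware.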
@@ -792,6 +815,10 @@ static void svm_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0)
        vcpu->arch.cr0 = cr0;
        cr0 |= X86_CR0_PG | X86_CR0_WP;
        cr0 &= ~(X86_CR0_CD | X86_CR0_NW);
+       if (!vcpu->fpu_active) {
+               svm->vmcb->control.intercept_exceptions |= (1 << NM_VECTOR);
+               cr0 |= X86_CR0_TS;
+       }
        svm->vmcb->save.cr0 = cr0;
 }
 
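The two added lines arm lazy FPU switching: TS stays set in the guest-visible CR0 and #NM is intercepted until the guest actually touches the FPU. The matching intercept handler lives elsewhere in svm.c; a rough sketch of its shape, using the file's naming conventions (illustration, not part of this diff):

static int nm_interception(struct vcpu_svm *svm, struct kvm_run *kvm_run)
{
        /* Guest touched the FPU: stop intercepting #NM and drop the
         * TS bit we injected, unless the guest set TS itself. */
        svm->vmcb->control.intercept_exceptions &= ~(1 << NM_VECTOR);
        if (!(svm->vcpu.arch.cr0 & X86_CR0_TS))
                svm->vmcb->save.cr0 &= ~X86_CR0_TS;
        svm->vcpu.fpu_active = 1;
        return 1;
}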
@@ -1096,6 +1123,24 @@ static int svm_get_msr(struct kvm_vcpu *vcpu, unsigned ecx, u64 *data)
        case MSR_IA32_SYSENTER_ESP:
                *data = svm->vmcb->save.sysenter_esp;
                break;
+       /* Nobody will change the following 5 values in the VMCB, so we
+        * can safely return them on rdmsr. They will always be 0 until
+        * LBRV is implemented. */
+       case MSR_IA32_DEBUGCTLMSR:
+               *data = svm->vmcb->save.dbgctl;
+               break;
+       case MSR_IA32_LASTBRANCHFROMIP:
+               *data = svm->vmcb->save.br_from;
+               break;
+       case MSR_IA32_LASTBRANCHTOIP:
+               *data = svm->vmcb->save.br_to;
+               break;
+       case MSR_IA32_LASTINTFROMIP:
+               *data = svm->vmcb->save.last_excp_from;
+               break;
+       case MSR_IA32_LASTINTTOIP:
+               *data = svm->vmcb->save.last_excp_to;
+               break;
        default:
                return kvm_get_msr_common(vcpu, ecx, data);
        }
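For reference, the architectural indices behind these five rdmsr cases, as carried in asm/msr-index.h of this era:

#define MSR_IA32_DEBUGCTLMSR            0x000001d9
#define MSR_IA32_LASTBRANCHFROMIP       0x000001db
#define MSR_IA32_LASTBRANCHTOIP         0x000001dc
#define MSR_IA32_LASTINTFROMIP          0x000001dd
#define MSR_IA32_LASTINTTOIP            0x000001de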
@@ -1156,6 +1201,10 @@ static int svm_set_msr(struct kvm_vcpu *vcpu, unsigned ecx, u64 data)
        case MSR_IA32_SYSENTER_ESP:
                svm->vmcb->save.sysenter_esp = data;
                break;
+       case MSR_IA32_DEBUGCTLMSR:
+               pr_unimpl(vcpu, "%s: MSR_IA32_DEBUGCTLMSR 0x%llx, nop\n",
                                __func__, data);
+               break;
        case MSR_K7_EVNTSEL0:
        case MSR_K7_EVNTSEL1:
        case MSR_K7_EVNTSEL2:
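The write side accepts MSR_IA32_DEBUGCTLMSR but deliberately drops the value while LBR virtualization is unimplemented. Once LBRV support is added, the natural shape of the enable path is to flip the VMCB's LBR-virtualization control and stop intercepting the LBR MSRs. A rough sketch, reusing the msrpm_va bitmap and set_msr_interception() helper seen earlier in this diff (illustration only, not part of this commit):

static void svm_enable_lbrv(struct vcpu_svm *svm)
{
        /* Have hardware save/restore the LBR stack for the guest ... */
        svm->vmcb->control.lbr_ctl = 1;
        /* ... and let the guest access the LBR MSRs without a VMEXIT. */
        set_msr_interception(msrpm_va, MSR_IA32_LASTBRANCHFROMIP, 1, 1);
        set_msr_interception(msrpm_va, MSR_IA32_LASTBRANCHTOIP, 1, 1);
        set_msr_interception(msrpm_va, MSR_IA32_LASTINTFROMIP, 1, 1);
        set_msr_interception(msrpm_va, MSR_IA32_LASTINTTOIP, 1, 1);
}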