diff --git a/arch/x86/kvm/paging_tmpl.h b/arch/x86/kvm/paging_tmpl.h
index 4a814bff21f27cb9b829df36071ffb40b43818ca..84eee43bbe742b005bd1825b0cdac24ba5efffbb 100644
--- a/arch/x86/kvm/paging_tmpl.h
+++ b/arch/x86/kvm/paging_tmpl.h
 #if PTTYPE == 64
        #define pt_element_t u64
        #define guest_walker guest_walker64
+       #define shadow_walker shadow_walker64
        #define FNAME(name) paging##64_##name
        #define PT_BASE_ADDR_MASK PT64_BASE_ADDR_MASK
        #define PT_DIR_BASE_ADDR_MASK PT64_DIR_BASE_ADDR_MASK
        #define PT_INDEX(addr, level) PT64_INDEX(addr, level)
-       #define SHADOW_PT_INDEX(addr, level) PT64_INDEX(addr, level)
        #define PT_LEVEL_MASK(level) PT64_LEVEL_MASK(level)
        #define PT_LEVEL_BITS PT64_LEVEL_BITS
        #ifdef CONFIG_X86_64
 #elif PTTYPE == 32
        #define pt_element_t u32
        #define guest_walker guest_walker32
+       #define shadow_walker shadow_walker32
        #define FNAME(name) paging##32_##name
        #define PT_BASE_ADDR_MASK PT32_BASE_ADDR_MASK
        #define PT_DIR_BASE_ADDR_MASK PT32_DIR_BASE_ADDR_MASK
        #define PT_INDEX(addr, level) PT32_INDEX(addr, level)
-       #define SHADOW_PT_INDEX(addr, level) PT64_INDEX(addr, level)
        #define PT_LEVEL_MASK(level) PT32_LEVEL_MASK(level)
        #define PT_LEVEL_BITS PT32_LEVEL_BITS
        #define PT_MAX_FULL_LEVELS 2
@@ -73,6 +73,17 @@ struct guest_walker {
        u32 error_code;
 };
 
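+/*
+ * State passed through walk_shadow(): the embedded kvm_shadow_walk holds
+ * the per-level entry callback, the remaining fields carry the page-fault
+ * arguments in and the installed spte (NULL on failure) back out.
+ */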
+struct shadow_walker {
+       struct kvm_shadow_walk walker;
+       struct guest_walker *guest_walker;
+       int user_fault;
+       int write_fault;
+       int largepage;
+       int *ptwrite;
+       pfn_t pfn;
+       u64 *sptep;
+};
+
 static gfn_t gpte_to_gfn(pt_element_t gpte)
 {
        return (gpte & PT_BASE_ADDR_MASK) >> PAGE_SHIFT;
@@ -91,14 +102,10 @@ static bool FNAME(cmpxchg_gpte)(struct kvm *kvm,
        pt_element_t *table;
        struct page *page;
 
-       down_read(&current->mm->mmap_sem);
        page = gfn_to_page(kvm, table_gfn);
-       up_read(&current->mm->mmap_sem);
 
        table = kmap_atomic(page, KM_USER0);
-
        ret = CMPXCHG(&table[index], orig_pte, new_pte);
-
        kunmap_atomic(table, KM_USER0);
 
        kvm_release_page_dirty(page);
@@ -274,86 +281,90 @@ static void FNAME(update_pte)(struct kvm_vcpu *vcpu, struct kvm_mmu_page *page,
 /*
  * Fetch a shadow pte for a specific level in the paging hierarchy.
  */
-static u64 *FNAME(fetch)(struct kvm_vcpu *vcpu, gva_t addr,
-                        struct guest_walker *walker,
-                        int user_fault, int write_fault, int largepage,
-                        int *ptwrite, pfn_t pfn)
+static int FNAME(shadow_walk_entry)(struct kvm_shadow_walk *_sw,
+                                   struct kvm_vcpu *vcpu, u64 addr,
+                                   u64 *sptep, int level)
 {
-       hpa_t shadow_addr;
-       int level;
-       u64 *shadow_ent;
-       unsigned access = walker->pt_access;
-
-       if (!is_present_pte(walker->ptes[walker->level - 1]))
-               return NULL;
-
-       shadow_addr = vcpu->arch.mmu.root_hpa;
-       level = vcpu->arch.mmu.shadow_root_level;
-       if (level == PT32E_ROOT_LEVEL) {
-               shadow_addr = vcpu->arch.mmu.pae_root[(addr >> 30) & 3];
-               shadow_addr &= PT64_BASE_ADDR_MASK;
-               --level;
+       struct shadow_walker *sw =
+               container_of(_sw, struct shadow_walker, walker);
+       struct guest_walker *gw = sw->guest_walker;
+       unsigned access = gw->pt_access;
+       struct kvm_mmu_page *shadow_page;
+       u64 spte;
+       int metaphysical;
+       gfn_t table_gfn;
+       int r;
+       pt_element_t curr_pte;
+
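+       /*
+        * At the leaf (or at the directory level of a large-page mapping),
+        * install the final spte and end the walk: a nonzero return stops
+        * walk_shadow().
+        */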
+       if (level == PT_PAGE_TABLE_LEVEL
+           || (sw->largepage && level == PT_DIRECTORY_LEVEL)) {
+               mmu_set_spte(vcpu, sptep, access, gw->pte_access & access,
+                            sw->user_fault, sw->write_fault,
+                            gw->ptes[gw->level-1] & PT_DIRTY_MASK,
+                            sw->ptwrite, sw->largepage, gw->gfn, sw->pfn,
+                            false);
+               sw->sptep = sptep;
+               return 1;
        }
 
-       for (; ; level--) {
-               u32 index = SHADOW_PT_INDEX(addr, level);
-               struct kvm_mmu_page *shadow_page;
-               u64 shadow_pte;
-               int metaphysical;
-               gfn_t table_gfn;
-
-               shadow_ent = ((u64 *)__va(shadow_addr)) + index;
-               if (level == PT_PAGE_TABLE_LEVEL)
-                       break;
-
-               if (largepage && level == PT_DIRECTORY_LEVEL)
-                       break;
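+       /* A present, non-large spte points to the next level: keep walking. */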
+       if (is_shadow_present_pte(*sptep) && !is_large_pte(*sptep))
+               return 0;
 
-               if (is_shadow_present_pte(*shadow_ent)
-                   && !is_large_pte(*shadow_ent)) {
-                       shadow_addr = *shadow_ent & PT64_BASE_ADDR_MASK;
-                       continue;
-               }
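+       /*
+        * A large spte is in the way: zap it, flushing remote TLBs before
+        * the rmap is updated, so it can be replaced by a page table.
+        */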
+       if (is_large_pte(*sptep)) {
+               set_shadow_pte(sptep, shadow_trap_nonpresent_pte);
+               kvm_flush_remote_tlbs(vcpu->kvm);
+               rmap_remove(vcpu->kvm, sptep);
+       }
 
-               if (is_large_pte(*shadow_ent))
-                       rmap_remove(vcpu->kvm, shadow_ent);
-
-               if (level - 1 == PT_PAGE_TABLE_LEVEL
-                   && walker->level == PT_DIRECTORY_LEVEL) {
-                       metaphysical = 1;
-                       if (!is_dirty_pte(walker->ptes[level - 1]))
-                               access &= ~ACC_WRITE_MASK;
-                       table_gfn = gpte_to_gfn(walker->ptes[level - 1]);
-               } else {
-                       metaphysical = 0;
-                       table_gfn = walker->table_gfn[level - 2];
-               }
-               shadow_page = kvm_mmu_get_page(vcpu, table_gfn, addr, level-1,
-                                              metaphysical, access,
-                                              shadow_ent);
-               if (!metaphysical) {
-                       int r;
-                       pt_element_t curr_pte;
-                       r = kvm_read_guest_atomic(vcpu->kvm,
-                                                 walker->pte_gpa[level - 2],
-                                                 &curr_pte, sizeof(curr_pte));
-                       if (r || curr_pte != walker->ptes[level - 2]) {
-                               kvm_release_pfn_clean(pfn);
-                               return NULL;
-                       }
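+       /*
+        * At the directory level of a guest large-page mapping there is no
+        * guest page table to shadow: allocate a "metaphysical"
+        * (direct-mapped) page, dropping write access if the guest pte is
+        * not dirty.
+        */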
+       if (level == PT_DIRECTORY_LEVEL && gw->level == PT_DIRECTORY_LEVEL) {
+               metaphysical = 1;
+               if (!is_dirty_pte(gw->ptes[level - 1]))
+                       access &= ~ACC_WRITE_MASK;
+               table_gfn = gpte_to_gfn(gw->ptes[level - 1]);
+       } else {
+               metaphysical = 0;
+               table_gfn = gw->table_gfn[level - 2];
+       }
+       shadow_page = kvm_mmu_get_page(vcpu, table_gfn, (gva_t)addr, level-1,
+                                      metaphysical, access, sptep);
+       if (!metaphysical) {
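+               /*
+                * Re-check the guest pte: if it changed under us, drop the
+                * half-installed page and abort the walk.
+                */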
+               r = kvm_read_guest_atomic(vcpu->kvm, gw->pte_gpa[level - 2],
+                                         &curr_pte, sizeof(curr_pte));
+               if (r || curr_pte != gw->ptes[level - 2]) {
+                       kvm_mmu_put_page(shadow_page, sptep);
+                       kvm_release_pfn_clean(sw->pfn);
+                       sw->sptep = NULL;
+                       return 1;
                }
-               shadow_addr = __pa(shadow_page->spt);
-               shadow_pte = shadow_addr | PT_PRESENT_MASK | PT_ACCESSED_MASK
-                       | PT_WRITABLE_MASK | PT_USER_MASK;
-               set_shadow_pte(shadow_ent, shadow_pte);
        }
 
-       mmu_set_spte(vcpu, shadow_ent, access, walker->pte_access & access,
-                    user_fault, write_fault,
-                    walker->ptes[walker->level-1] & PT_DIRTY_MASK,
-                    ptwrite, largepage, walker->gfn, pfn, false);
+       spte = __pa(shadow_page->spt) | PT_PRESENT_MASK | PT_ACCESSED_MASK
+               | PT_WRITABLE_MASK | PT_USER_MASK;
+       *sptep = spte;
+       return 0;
+}
+
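+/*
+ * fetch() is now just a wrapper: it packs the fault parameters into a
+ * shadow_walker and lets walk_shadow() invoke shadow_walk_entry() at
+ * each level of the shadow hierarchy.
+ */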
+static u64 *FNAME(fetch)(struct kvm_vcpu *vcpu, gva_t addr,
+                        struct guest_walker *guest_walker,
+                        int user_fault, int write_fault, int largepage,
+                        int *ptwrite, pfn_t pfn)
+{
+       struct shadow_walker walker = {
+               .walker = { .entry = FNAME(shadow_walk_entry), },
+               .guest_walker = guest_walker,
+               .user_fault = user_fault,
+               .write_fault = write_fault,
+               .largepage = largepage,
+               .ptwrite = ptwrite,
+               .pfn = pfn,
+       };
+
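+       /* Nothing to shadow if the guest translation is not present. */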
+       if (!is_present_pte(guest_walker->ptes[guest_walker->level - 1]))
+               return NULL;
+
+       walk_shadow(&walker.walker, vcpu, addr);
 
-       return shadow_ent;
+       return walker.sptep;
 }
 
 /*
@@ -407,7 +418,6 @@ static int FNAME(page_fault)(struct kvm_vcpu *vcpu, gva_t addr,
                return 0;
        }
 
-       down_read(&current->mm->mmap_sem);
        if (walker.level == PT_DIRECTORY_LEVEL) {
                gfn_t large_gfn;
                large_gfn = walker.gfn & ~(KVM_PAGES_PER_HPAGE-1);
@@ -417,9 +427,8 @@ static int FNAME(page_fault)(struct kvm_vcpu *vcpu, gva_t addr,
                }
        }
        mmu_seq = vcpu->kvm->mmu_notifier_seq;
-       /* implicit mb(), we'll read before PT lock is unlocked */
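+       /*
+        * Order the read of mmu_notifier_seq above against the pfn lookup
+        * below; mmu_notifier_retry() relies on this to detect a concurrent
+        * mmu notifier invalidation.
+        */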
+       smp_rmb();
        pfn = gfn_to_pfn(vcpu->kvm, walker.gfn);
-       up_read(&current->mm->mmap_sem);
 
        /* mmio */
        if (is_error_pfn(pfn)) {
@@ -453,6 +462,31 @@ out_unlock:
        return 0;
 }
 
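+/*
+ * walk_shadow() callback for invlpg: zap the leaf spte for the
+ * invalidated gva; stop early if an intermediate level is not present.
+ */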
+static int FNAME(shadow_invlpg_entry)(struct kvm_shadow_walk *_sw,
+                                     struct kvm_vcpu *vcpu, u64 addr,
+                                     u64 *sptep, int level)
+{
+       if (level == PT_PAGE_TABLE_LEVEL) {
+               if (is_shadow_present_pte(*sptep))
+                       rmap_remove(vcpu->kvm, sptep);
+               set_shadow_pte(sptep, shadow_trap_nonpresent_pte);
+               return 1;
+       }
+       if (!is_shadow_present_pte(*sptep))
+               return 1;
+       return 0;
+}
+
+static void FNAME(invlpg)(struct kvm_vcpu *vcpu, gva_t gva)
+{
+       struct shadow_walker walker = {
+               .walker = { .entry = FNAME(shadow_invlpg_entry), },
+       };
+
+       walk_shadow(&walker.walker, vcpu, gva);
+}
+
 static gpa_t FNAME(gva_to_gpa)(struct kvm_vcpu *vcpu, gva_t vaddr)
 {
        struct guest_walker walker;
@@ -499,12 +533,66 @@ static void FNAME(prefetch_page)(struct kvm_vcpu *vcpu,
        }
 }
 
+/*
+ * Using the cached information from sp->gfns is safe because:
+ * - The spte has a reference to the struct page, so the pfn for a given gfn
+ *   can't change unless all sptes pointing to it are nuked first.
+ * - Alias changes zap the entire shadow cache.
+ */
+static int FNAME(sync_page)(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp)
+{
+       int i, offset, nr_present;
+
+       offset = nr_present = 0;
+
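+       /*
+        * A 32-bit guest page table holds more entries than a shadow page,
+        * so each shadow page maps only a slice of it; role.quadrant
+        * selects which slice.
+        */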
+       if (PTTYPE == 32)
+               offset = sp->role.quadrant << PT64_LEVEL_BITS;
+
+       for (i = 0; i < PT64_ENT_PER_PAGE; i++) {
+               unsigned pte_access;
+               pt_element_t gpte;
+               gpa_t pte_gpa;
+               gfn_t gfn = sp->gfns[i];
+
+               if (!is_shadow_present_pte(sp->spt[i]))
+                       continue;
+
+               pte_gpa = gfn_to_gpa(sp->gfn);
+               pte_gpa += (i+offset) * sizeof(pt_element_t);
+
+               if (kvm_read_guest_atomic(vcpu->kvm, pte_gpa, &gpte,
+                                         sizeof(pt_element_t)))
+                       return -EINVAL;
+
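+               /*
+                * The guest pte changed, was unmapped, or lost its accessed
+                * bit: zap the spte.  A still-present guest pte gets the
+                * "trap" encoding so the next access refaults into kvm; a
+                * gone pte gets "notrap" so the fault can be reflected
+                * straight to the guest.
+                */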
+               if (gpte_to_gfn(gpte) != gfn || !is_present_pte(gpte) ||
+                   !(gpte & PT_ACCESSED_MASK)) {
+                       u64 nonpresent;
+
+                       rmap_remove(vcpu->kvm, &sp->spt[i]);
+                       if (is_present_pte(gpte))
+                               nonpresent = shadow_trap_nonpresent_pte;
+                       else
+                               nonpresent = shadow_notrap_nonpresent_pte;
+                       set_shadow_pte(&sp->spt[i], nonpresent);
+                       continue;
+               }
+
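+               /* Guest pte still valid: resync the spte's access bits in place. */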
+               nr_present++;
+               pte_access = sp->role.access & FNAME(gpte_access)(vcpu, gpte);
+               set_spte(vcpu, &sp->spt[i], pte_access, 0, 0,
+                        is_dirty_pte(gpte), 0, gfn,
+                        spte_to_pfn(sp->spt[i]), true, false);
+       }
+
+       return !nr_present;
+}
+
 #undef pt_element_t
 #undef guest_walker
+#undef shadow_walker
 #undef FNAME
 #undef PT_BASE_ADDR_MASK
 #undef PT_INDEX
-#undef SHADOW_PT_INDEX
 #undef PT_LEVEL_MASK
 #undef PT_DIR_BASE_ADDR_MASK
 #undef PT_LEVEL_BITS