/*
 * Kernel-based Virtual Machine driver for Linux
 *
 * This module enables machines with Intel VT-x extensions to run virtual
 * machines without emulation or binary translation.
 *
 * Copyright (C) 2006 Qumranet, Inc.
 *
 * Authors:
 *   Yaniv Kamay  <yaniv@qumranet.com>
 *   Avi Kivity   <avi@qumranet.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 */

#include <linux/types.h>
#include <linux/string.h>
#include <linux/highmem.h>
#include <linux/module.h>

#include <asm/cmpxchg.h>

#ifdef AUDIT
static void kvm_mmu_audit(struct kvm_vcpu *vcpu, const char *msg);
#else
static void kvm_mmu_audit(struct kvm_vcpu *vcpu, const char *msg) {}
#endif

#ifdef MMU_DEBUG

#define pgprintk(x...) do { if (dbg) printk(x); } while (0)
#define rmap_printk(x...) do { if (dbg) printk(x); } while (0)

#else

#define pgprintk(x...) do { } while (0)
#define rmap_printk(x...) do { } while (0)

#endif

#if defined(MMU_DEBUG) || defined(AUDIT)
static int dbg = 1;
#endif

#ifndef MMU_DEBUG
#define ASSERT(x) do { } while (0)
#else
#define ASSERT(x)							\
	if (!(x)) {							\
		printk(KERN_WARNING "assertion failed %s:%d: %s\n",	\
		       __FILE__, __LINE__, #x);				\
	}
#endif

#define PT64_PT_BITS 9
#define PT64_ENT_PER_PAGE (1 << PT64_PT_BITS)
#define PT32_PT_BITS 10
#define PT32_ENT_PER_PAGE (1 << PT32_PT_BITS)

#define PT_WRITABLE_SHIFT 1

#define PT_PRESENT_MASK (1ULL << 0)
#define PT_WRITABLE_MASK (1ULL << PT_WRITABLE_SHIFT)
#define PT_USER_MASK (1ULL << 2)
#define PT_PWT_MASK (1ULL << 3)
#define PT_PCD_MASK (1ULL << 4)
#define PT_ACCESSED_MASK (1ULL << 5)
#define PT_DIRTY_MASK (1ULL << 6)
#define PT_PAGE_SIZE_MASK (1ULL << 7)
#define PT_PAT_MASK (1ULL << 7)
#define PT_GLOBAL_MASK (1ULL << 8)
#define PT64_NX_MASK (1ULL << 63)

#define PT_PAT_SHIFT 7
#define PT_DIR_PAT_SHIFT 12
#define PT_DIR_PAT_MASK (1ULL << PT_DIR_PAT_SHIFT)

#define PT32_DIR_PSE36_SIZE 4
#define PT32_DIR_PSE36_SHIFT 13
#define PT32_DIR_PSE36_MASK \
	(((1ULL << PT32_DIR_PSE36_SIZE) - 1) << PT32_DIR_PSE36_SHIFT)

#define PT_FIRST_AVAIL_BITS_SHIFT 9
#define PT64_SECOND_AVAIL_BITS_SHIFT 52

#define PT_SHADOW_IO_MARK (1ULL << PT_FIRST_AVAIL_BITS_SHIFT)
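
/*
 * Bits 9-11 (and bits 52-62 on 64-bit ptes) are ignored by hardware, so
 * the shadow MMU is free to use them; the first available bit (bit 9)
 * tags sptes that map mmio, see is_io_pte() below.
 */
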
#define VALID_PAGE(x) ((x) != INVALID_PAGE)

#define PT64_LEVEL_BITS 9

#define PT64_LEVEL_SHIFT(level) \
		(PAGE_SHIFT + (level - 1) * PT64_LEVEL_BITS)

#define PT64_LEVEL_MASK(level) \
		(((1ULL << PT64_LEVEL_BITS) - 1) << PT64_LEVEL_SHIFT(level))

#define PT64_INDEX(address, level)\
	(((address) >> PT64_LEVEL_SHIFT(level)) & ((1 << PT64_LEVEL_BITS) - 1))
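
/*
 * For example, with 4KB pages PT64_LEVEL_SHIFT(level) is 12, 21, 30 and
 * 39 for levels 1-4, so PT64_INDEX() picks out the 9-bit table index the
 * hardware would use at each step of a 4-level walk.
 */
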
#define PT32_LEVEL_BITS 10

#define PT32_LEVEL_SHIFT(level) \
		(PAGE_SHIFT + (level - 1) * PT32_LEVEL_BITS)

#define PT32_LEVEL_MASK(level) \
		(((1ULL << PT32_LEVEL_BITS) - 1) << PT32_LEVEL_SHIFT(level))

#define PT32_INDEX(address, level)\
	(((address) >> PT32_LEVEL_SHIFT(level)) & ((1 << PT32_LEVEL_BITS) - 1))

#define PT64_BASE_ADDR_MASK (((1ULL << 52) - 1) & ~(u64)(PAGE_SIZE-1))
#define PT64_DIR_BASE_ADDR_MASK \
	(PT64_BASE_ADDR_MASK & ~((1ULL << (PAGE_SHIFT + PT64_LEVEL_BITS)) - 1))

#define PT32_BASE_ADDR_MASK PAGE_MASK
#define PT32_DIR_BASE_ADDR_MASK \
	(PAGE_MASK & ~((1ULL << (PAGE_SHIFT + PT32_LEVEL_BITS)) - 1))

#define PT64_PERM_MASK (PT_PRESENT_MASK | PT_WRITABLE_MASK | PT_USER_MASK \
			| PT64_NX_MASK)

#define PFERR_PRESENT_MASK (1U << 0)
#define PFERR_WRITE_MASK (1U << 1)
#define PFERR_USER_MASK (1U << 2)
#define PFERR_FETCH_MASK (1U << 4)

#define PT64_ROOT_LEVEL 4
#define PT32_ROOT_LEVEL 2
#define PT32E_ROOT_LEVEL 3

#define PT_DIRECTORY_LEVEL 2
#define PT_PAGE_TABLE_LEVEL 1

#define RMAP_EXT 4

struct kvm_rmap_desc {
	u64 *shadow_ptes[RMAP_EXT];
	struct kvm_rmap_desc *more;
};

static struct kmem_cache *pte_chain_cache;
static struct kmem_cache *rmap_desc_cache;
static struct kmem_cache *mmu_page_header_cache;

static u64 __read_mostly shadow_trap_nonpresent_pte;
static u64 __read_mostly shadow_notrap_nonpresent_pte;

void kvm_mmu_set_nonpresent_ptes(u64 trap_pte, u64 notrap_pte)
{
	shadow_trap_nonpresent_pte = trap_pte;
	shadow_notrap_nonpresent_pte = notrap_pte;
}
EXPORT_SYMBOL_GPL(kvm_mmu_set_nonpresent_ptes);

static int is_write_protection(struct kvm_vcpu *vcpu)
{
	return vcpu->cr0 & X86_CR0_WP;
}

static int is_cpuid_PSE36(void)
{
	return 1;
}

static int is_nx(struct kvm_vcpu *vcpu)
{
	return vcpu->shadow_efer & EFER_NX;
}

static int is_present_pte(unsigned long pte)
{
	return pte & PT_PRESENT_MASK;
}

static int is_shadow_present_pte(u64 pte)
{
	pte &= ~PT_SHADOW_IO_MARK;
	return pte != shadow_trap_nonpresent_pte
		&& pte != shadow_notrap_nonpresent_pte;
}

static int is_writeble_pte(unsigned long pte)
{
	return pte & PT_WRITABLE_MASK;
}

static int is_dirty_pte(unsigned long pte)
{
	return pte & PT_DIRTY_MASK;
}

static int is_io_pte(unsigned long pte)
{
	return pte & PT_SHADOW_IO_MARK;
}

static int is_rmap_pte(u64 pte)
{
	return pte != shadow_trap_nonpresent_pte
		&& pte != shadow_notrap_nonpresent_pte;
}
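
/*
 * With PSE-36, a 32-bit non-pae pde carries physical address bits 32-35
 * in pde bits 13-16 of a 4MB mapping.  pse36_gfn_delta() recovers that
 * contribution as a gfn delta: shift = 32 - 13 - 12 = 7, so pde bits
 * 13-16 become gfn bits 20-23, i.e. physical address bits 32-35.
 */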
static gfn_t pse36_gfn_delta(u32 gpte)
{
	int shift = 32 - PT32_DIR_PSE36_SHIFT - PAGE_SHIFT;

	return (gpte & PT32_DIR_PSE36_MASK) << shift;
}
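
/*
 * Write an spte with a single atomic 64-bit store, so that a concurrent
 * hardware walk of the shadow page table never sees a torn entry.
 */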
static void set_shadow_pte(u64 *sptep, u64 spte)
{
#ifdef CONFIG_X86_64
	set_64bit((unsigned long *)sptep, spte);
#else
	set_64bit((unsigned long long *)sptep, spte);
#endif
}

static int mmu_topup_memory_cache(struct kvm_mmu_memory_cache *cache,
				  struct kmem_cache *base_cache, int min)
{
	void *obj;

	if (cache->nobjs >= min)
		return 0;
	while (cache->nobjs < ARRAY_SIZE(cache->objects)) {
		obj = kmem_cache_zalloc(base_cache, GFP_KERNEL);
		if (!obj)
			return -ENOMEM;
		cache->objects[cache->nobjs++] = obj;
	}
	return 0;
}

static void mmu_free_memory_cache(struct kvm_mmu_memory_cache *mc)
{
	while (mc->nobjs)
		kfree(mc->objects[--mc->nobjs]);
}

static int mmu_topup_memory_cache_page(struct kvm_mmu_memory_cache *cache,
				       int min)
{
	struct page *page;

	if (cache->nobjs >= min)
		return 0;
	while (cache->nobjs < ARRAY_SIZE(cache->objects)) {
		page = alloc_page(GFP_KERNEL);
		if (!page)
			return -ENOMEM;
		set_page_private(page, 0);
		cache->objects[cache->nobjs++] = page_address(page);
	}
	return 0;
}

static void mmu_free_memory_cache_page(struct kvm_mmu_memory_cache *mc)
{
	while (mc->nobjs)
		free_page((unsigned long)mc->objects[--mc->nobjs]);
}

static int mmu_topup_memory_caches(struct kvm_vcpu *vcpu)
{
	int r;

	kvm_mmu_free_some_pages(vcpu);
	r = mmu_topup_memory_cache(&vcpu->mmu_pte_chain_cache,
				   pte_chain_cache, 4);
	if (r)
		goto out;
	r = mmu_topup_memory_cache(&vcpu->mmu_rmap_desc_cache,
				   rmap_desc_cache, 1);
	if (r)
		goto out;
	r = mmu_topup_memory_cache_page(&vcpu->mmu_page_cache, 8);
	if (r)
		goto out;
	r = mmu_topup_memory_cache(&vcpu->mmu_page_header_cache,
				   mmu_page_header_cache, 4);
out:
	return r;
}

static void mmu_free_memory_caches(struct kvm_vcpu *vcpu)
{
	mmu_free_memory_cache(&vcpu->mmu_pte_chain_cache);
	mmu_free_memory_cache(&vcpu->mmu_rmap_desc_cache);
	mmu_free_memory_cache_page(&vcpu->mmu_page_cache);
	mmu_free_memory_cache(&vcpu->mmu_page_header_cache);
}

static void *mmu_memory_cache_alloc(struct kvm_mmu_memory_cache *mc,
				    size_t size)
{
	void *p;

	BUG_ON(!mc->nobjs);
	p = mc->objects[--mc->nobjs];
	memset(p, 0, size);
	return p;
}

static struct kvm_pte_chain *mmu_alloc_pte_chain(struct kvm_vcpu *vcpu)
{
	return mmu_memory_cache_alloc(&vcpu->mmu_pte_chain_cache,
				      sizeof(struct kvm_pte_chain));
}

static void mmu_free_pte_chain(struct kvm_pte_chain *pc)
{
	kfree(pc);
}

static struct kvm_rmap_desc *mmu_alloc_rmap_desc(struct kvm_vcpu *vcpu)
{
	return mmu_memory_cache_alloc(&vcpu->mmu_rmap_desc_cache,
				      sizeof(struct kvm_rmap_desc));
}

static void mmu_free_rmap_desc(struct kvm_rmap_desc *rd)
{
	kfree(rd);
}

/*
 * Take gfn and return the reverse mapping to it.
 * Note: gfn must be unaliased before this function gets called.
 */
static unsigned long *gfn_to_rmap(struct kvm *kvm, gfn_t gfn)
{
	struct kvm_memory_slot *slot;

	slot = gfn_to_memslot(kvm, gfn);
	return &slot->rmap[gfn - slot->base_gfn];
}

/*
 * Reverse mapping data structures:
 *
 * If rmapp bit zero is zero, then rmapp points to the shadow page table entry
 * that points to page_address(page).
 *
 * If rmapp bit zero is one, then (rmapp & ~1) points to a struct kvm_rmap_desc
 * containing more mappings.
 */
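
/*
 * For example, after the first spte is added for a gfn,
 *
 *	*rmapp == (unsigned long)spte;		(bit zero clear)
 *
 * and once a second spte maps the same gfn,
 *
 *	*rmapp == (unsigned long)desc | 1;	(bit zero set)
 *
 * with desc->shadow_ptes[] holding up to RMAP_EXT sptes and desc->more
 * chaining further descriptors; see rmap_add() below.
 */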
static void rmap_add(struct kvm_vcpu *vcpu, u64 *spte, gfn_t gfn)
{
	struct kvm_mmu_page *page;
	struct kvm_rmap_desc *desc;
	unsigned long *rmapp;
	int i;

	if (!is_rmap_pte(*spte))
		return;
	gfn = unalias_gfn(vcpu->kvm, gfn);
	page = page_header(__pa(spte));
	page->gfns[spte - page->spt] = gfn;
	rmapp = gfn_to_rmap(vcpu->kvm, gfn);
	if (!*rmapp) {
		rmap_printk("rmap_add: %p %llx 0->1\n", spte, *spte);
		*rmapp = (unsigned long)spte;
	} else if (!(*rmapp & 1)) {
		rmap_printk("rmap_add: %p %llx 1->many\n", spte, *spte);
		desc = mmu_alloc_rmap_desc(vcpu);
		desc->shadow_ptes[0] = (u64 *)*rmapp;
		desc->shadow_ptes[1] = spte;
		*rmapp = (unsigned long)desc | 1;
	} else {
		rmap_printk("rmap_add: %p %llx many->many\n", spte, *spte);
		desc = (struct kvm_rmap_desc *)(*rmapp & ~1ul);
		while (desc->shadow_ptes[RMAP_EXT-1] && desc->more)
			desc = desc->more;
		if (desc->shadow_ptes[RMAP_EXT-1]) {
			desc->more = mmu_alloc_rmap_desc(vcpu);
			desc = desc->more;
		}
		for (i = 0; desc->shadow_ptes[i]; ++i)
			;
		desc->shadow_ptes[i] = spte;
	}
}

static void rmap_desc_remove_entry(unsigned long *rmapp,
				   struct kvm_rmap_desc *desc,
				   int i,
				   struct kvm_rmap_desc *prev_desc)
{
	int j;

	for (j = RMAP_EXT - 1; !desc->shadow_ptes[j] && j > i; --j)
		;
	desc->shadow_ptes[i] = desc->shadow_ptes[j];
	desc->shadow_ptes[j] = NULL;
	if (j != 0)
		return;
	if (!prev_desc && !desc->more)
		*rmapp = (unsigned long)desc->shadow_ptes[0];
	else
		if (prev_desc)
			prev_desc->more = desc->more;
		else
			*rmapp = (unsigned long)desc->more | 1;
	mmu_free_rmap_desc(desc);
}

static void rmap_remove(struct kvm *kvm, u64 *spte)
{
	struct kvm_rmap_desc *desc;
	struct kvm_rmap_desc *prev_desc;
	struct kvm_mmu_page *page;
	struct page *release_page;
	unsigned long *rmapp;
	int i;

	if (!is_rmap_pte(*spte))
		return;
	page = page_header(__pa(spte));
	release_page = pfn_to_page((*spte & PT64_BASE_ADDR_MASK) >> PAGE_SHIFT);
	if (is_writeble_pte(*spte))
		kvm_release_page_dirty(release_page);
	else
		kvm_release_page_clean(release_page);
	rmapp = gfn_to_rmap(kvm, page->gfns[spte - page->spt]);
	if (!*rmapp) {
		printk(KERN_ERR "rmap_remove: %p %llx 0->BUG\n", spte, *spte);
		BUG();
	} else if (!(*rmapp & 1)) {
		rmap_printk("rmap_remove: %p %llx 1->0\n", spte, *spte);
		if ((u64 *)*rmapp != spte) {
			printk(KERN_ERR "rmap_remove: %p %llx 1->BUG\n",
			       spte, *spte);
			BUG();
		}
		*rmapp = 0;
	} else {
		rmap_printk("rmap_remove: %p %llx many->many\n", spte, *spte);
		desc = (struct kvm_rmap_desc *)(*rmapp & ~1ul);
		prev_desc = NULL;
		while (desc) {
			for (i = 0; i < RMAP_EXT && desc->shadow_ptes[i]; ++i)
				if (desc->shadow_ptes[i] == spte) {
					rmap_desc_remove_entry(rmapp,
							       desc, i,
							       prev_desc);
					return;
				}
			prev_desc = desc;
			desc = desc->more;
		}
		BUG();
	}
}

static u64 *rmap_next(struct kvm *kvm, unsigned long *rmapp, u64 *spte)
{
	struct kvm_rmap_desc *desc;
	struct kvm_rmap_desc *prev_desc;
	u64 *prev_spte;
	int i;

	if (!*rmapp)
		return NULL;
	else if (!(*rmapp & 1)) {
		if (!spte)
			return (u64 *)*rmapp;
		return NULL;
	}
	desc = (struct kvm_rmap_desc *)(*rmapp & ~1ul);
	prev_desc = NULL;
	prev_spte = NULL;
	while (desc) {
		for (i = 0; i < RMAP_EXT && desc->shadow_ptes[i]; ++i) {
			if (prev_spte == spte)
				return desc->shadow_ptes[i];
			prev_spte = desc->shadow_ptes[i];
		}
		desc = desc->more;
	}
	return NULL;
}
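
/*
 * Typical rmap_next() usage, as in rmap_write_protect() below: pass NULL
 * to get the first spte mapping a gfn, then feed each returned spte back
 * in to get the next one, until NULL.
 */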
static void rmap_write_protect(struct kvm *kvm, u64 gfn)
{
	unsigned long *rmapp;
	u64 *spte;

	gfn = unalias_gfn(kvm, gfn);
	rmapp = gfn_to_rmap(kvm, gfn);

	spte = rmap_next(kvm, rmapp, NULL);
	while (spte) {
		BUG_ON(!spte);
		BUG_ON(!(*spte & PT_PRESENT_MASK));
		rmap_printk("rmap_write_protect: spte %p %llx\n", spte, *spte);
		if (is_writeble_pte(*spte))
			set_shadow_pte(spte, *spte & ~PT_WRITABLE_MASK);
		kvm_flush_remote_tlbs(kvm);
		spte = rmap_next(kvm, rmapp, spte);
	}
}

static int is_empty_shadow_page(u64 *spt)
{
	u64 *pos;
	u64 *end;

	for (pos = spt, end = pos + PAGE_SIZE / sizeof(u64); pos != end; pos++)
		if ((*pos & ~PT_SHADOW_IO_MARK) != shadow_trap_nonpresent_pte) {
			printk(KERN_ERR "%s: %p %llx\n", __FUNCTION__,
			       pos, *pos);
			return 0;
		}
	return 1;
}

static void kvm_mmu_free_page(struct kvm *kvm,
			      struct kvm_mmu_page *page_head)
{
	ASSERT(is_empty_shadow_page(page_head->spt));
	list_del(&page_head->link);
	__free_page(virt_to_page(page_head->spt));
	__free_page(virt_to_page(page_head->gfns));
	kfree(page_head);
	++kvm->n_free_mmu_pages;
}

static unsigned kvm_page_table_hashfn(gfn_t gfn)
{
	return gfn;
}

static struct kvm_mmu_page *kvm_mmu_alloc_page(struct kvm_vcpu *vcpu,
					       u64 *parent_pte)
{
	struct kvm_mmu_page *page;

	if (!vcpu->kvm->n_free_mmu_pages)
		return NULL;

	page = mmu_memory_cache_alloc(&vcpu->mmu_page_header_cache,
				      sizeof *page);
	page->spt = mmu_memory_cache_alloc(&vcpu->mmu_page_cache, PAGE_SIZE);
	page->gfns = mmu_memory_cache_alloc(&vcpu->mmu_page_cache, PAGE_SIZE);
	set_page_private(virt_to_page(page->spt), (unsigned long)page);
	list_add(&page->link, &vcpu->kvm->active_mmu_pages);
	ASSERT(is_empty_shadow_page(page->spt));
	page->slot_bitmap = 0;
	page->multimapped = 0;
	page->parent_pte = parent_pte;
	--vcpu->kvm->n_free_mmu_pages;
	return page;
}

static void mmu_page_add_parent_pte(struct kvm_vcpu *vcpu,
				    struct kvm_mmu_page *page, u64 *parent_pte)
{
	struct kvm_pte_chain *pte_chain;
	struct hlist_node *node;
	int i;

	if (!parent_pte)
		return;
	if (!page->multimapped) {
		u64 *old = page->parent_pte;

		if (!old) {
			page->parent_pte = parent_pte;
			return;
		}
		page->multimapped = 1;
		pte_chain = mmu_alloc_pte_chain(vcpu);
		INIT_HLIST_HEAD(&page->parent_ptes);
		hlist_add_head(&pte_chain->link, &page->parent_ptes);
		pte_chain->parent_ptes[0] = old;
	}
	hlist_for_each_entry(pte_chain, node, &page->parent_ptes, link) {
		if (pte_chain->parent_ptes[NR_PTE_CHAIN_ENTRIES-1])
			continue;
		for (i = 0; i < NR_PTE_CHAIN_ENTRIES; ++i)
			if (!pte_chain->parent_ptes[i]) {
				pte_chain->parent_ptes[i] = parent_pte;
				return;
			}
	}
	pte_chain = mmu_alloc_pte_chain(vcpu);
	BUG_ON(!pte_chain);
	hlist_add_head(&pte_chain->link, &page->parent_ptes);
	pte_chain->parent_ptes[0] = parent_pte;
}

static void mmu_page_remove_parent_pte(struct kvm_mmu_page *page,
				       u64 *parent_pte)
{
	struct kvm_pte_chain *pte_chain;
	struct hlist_node *node;
	int i;

	if (!page->multimapped) {
		BUG_ON(page->parent_pte != parent_pte);
		page->parent_pte = NULL;
		return;
	}
	hlist_for_each_entry(pte_chain, node, &page->parent_ptes, link)
		for (i = 0; i < NR_PTE_CHAIN_ENTRIES; ++i) {
			if (!pte_chain->parent_ptes[i])
				break;
			if (pte_chain->parent_ptes[i] != parent_pte)
				continue;
			while (i + 1 < NR_PTE_CHAIN_ENTRIES
			       && pte_chain->parent_ptes[i + 1]) {
				pte_chain->parent_ptes[i]
					= pte_chain->parent_ptes[i + 1];
				++i;
			}
			pte_chain->parent_ptes[i] = NULL;
			if (i == 0) {
				hlist_del(&pte_chain->link);
				mmu_free_pte_chain(pte_chain);
				if (hlist_empty(&page->parent_ptes)) {
					page->multimapped = 0;
					page->parent_pte = NULL;
				}
			}
			return;
		}
	BUG();
}

static struct kvm_mmu_page *kvm_mmu_lookup_page(struct kvm *kvm,
						gfn_t gfn)
{
	unsigned index;
	struct hlist_head *bucket;
	struct kvm_mmu_page *page;
	struct hlist_node *node;

	pgprintk("%s: looking for gfn %lx\n", __FUNCTION__, gfn);
	index = kvm_page_table_hashfn(gfn) % KVM_NUM_MMU_PAGES;
	bucket = &kvm->mmu_page_hash[index];
	hlist_for_each_entry(page, node, bucket, hash_link)
		if (page->gfn == gfn && !page->role.metaphysical) {
			pgprintk("%s: found role %x\n",
				 __FUNCTION__, page->role.word);
			return page;
		}
	return NULL;
}

static struct kvm_mmu_page *kvm_mmu_get_page(struct kvm_vcpu *vcpu,
					     gfn_t gfn,
					     gva_t gaddr,
					     unsigned level,
					     int metaphysical,
					     unsigned hugepage_access,
					     u64 *parent_pte)
{
	union kvm_mmu_page_role role;
	unsigned index;
	unsigned quadrant;
	struct hlist_head *bucket;
	struct kvm_mmu_page *page;
	struct hlist_node *node;

	role.word = 0;
	role.glevels = vcpu->mmu.root_level;
	role.level = level;
	role.metaphysical = metaphysical;
	role.hugepage_access = hugepage_access;
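	/*
	 * A 32-bit guest table (1024 entries) shadowed by 64-bit format
	 * pages (512 entries) needs two or four shadow pages; the
	 * quadrant computed below records which slice of the guest table
	 * this shadow page maps, so each slice hashes to its own page.
	 */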
	if (vcpu->mmu.root_level <= PT32_ROOT_LEVEL) {
		quadrant = gaddr >> (PAGE_SHIFT + (PT64_PT_BITS * level));
		quadrant &= (1 << ((PT32_PT_BITS - PT64_PT_BITS) * level)) - 1;
		role.quadrant = quadrant;
	}
	pgprintk("%s: looking gfn %lx role %x\n", __FUNCTION__,
		 gfn, role.word);
	index = kvm_page_table_hashfn(gfn) % KVM_NUM_MMU_PAGES;
	bucket = &vcpu->kvm->mmu_page_hash[index];
	hlist_for_each_entry(page, node, bucket, hash_link)
		if (page->gfn == gfn && page->role.word == role.word) {
			mmu_page_add_parent_pte(vcpu, page, parent_pte);
			pgprintk("%s: found\n", __FUNCTION__);
			return page;
		}
	page = kvm_mmu_alloc_page(vcpu, parent_pte);
	if (!page)
		return page;
	pgprintk("%s: adding gfn %lx role %x\n", __FUNCTION__, gfn, role.word);
	page->gfn = gfn;
	page->role = role;
	hlist_add_head(&page->hash_link, bucket);
	vcpu->mmu.prefetch_page(vcpu, page);
	if (!metaphysical)
		rmap_write_protect(vcpu->kvm, gfn);
	return page;
}

static void kvm_mmu_page_unlink_children(struct kvm *kvm,
					 struct kvm_mmu_page *page)
{
	unsigned i;
	u64 *pt;

	pt = page->spt;

	if (page->role.level == PT_PAGE_TABLE_LEVEL) {
		for (i = 0; i < PT64_ENT_PER_PAGE; ++i) {
			if (is_shadow_present_pte(pt[i]))
				rmap_remove(kvm, &pt[i]);
			pt[i] = shadow_trap_nonpresent_pte;
		}
		kvm_flush_remote_tlbs(kvm);
		return;
	}

	for (i = 0; i < PT64_ENT_PER_PAGE; ++i) {
		u64 ent = pt[i];

		pt[i] = shadow_trap_nonpresent_pte;
		if (!is_shadow_present_pte(ent))
			continue;
		ent &= PT64_BASE_ADDR_MASK;
		mmu_page_remove_parent_pte(page_header(ent), &pt[i]);
	}
	kvm_flush_remote_tlbs(kvm);
}

static void kvm_mmu_put_page(struct kvm_mmu_page *page,
			     u64 *parent_pte)
{
	mmu_page_remove_parent_pte(page, parent_pte);
}

static void kvm_mmu_reset_last_pte_updated(struct kvm *kvm)
{
	int i;

	for (i = 0; i < KVM_MAX_VCPUS; ++i)
		if (kvm->vcpus[i])
			kvm->vcpus[i]->last_pte_updated = NULL;
}

static void kvm_mmu_zap_page(struct kvm *kvm,
			     struct kvm_mmu_page *page)
{
	u64 *parent_pte;

	++kvm->stat.mmu_shadow_zapped;
	while (page->multimapped || page->parent_pte) {
		if (!page->multimapped)
			parent_pte = page->parent_pte;
		else {
			struct kvm_pte_chain *chain;

			chain = container_of(page->parent_ptes.first,
					     struct kvm_pte_chain, link);
			parent_pte = chain->parent_ptes[0];
		}
		BUG_ON(!parent_pte);
		kvm_mmu_put_page(page, parent_pte);
		set_shadow_pte(parent_pte, shadow_trap_nonpresent_pte);
	}
	kvm_mmu_page_unlink_children(kvm, page);
	if (!page->root_count) {
		hlist_del(&page->hash_link);
		kvm_mmu_free_page(kvm, page);
	} else
		list_move(&page->link, &kvm->active_mmu_pages);
	kvm_mmu_reset_last_pte_updated(kvm);
}

/*
 * Changing the number of mmu pages allocated to the vm.
 * Note: if kvm_nr_mmu_pages is too small, you will get a deadlock.
 */
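/*
 * This is reached, for example, from the KVM_SET_NR_MMU_PAGES ioctl path
 * and when a memslot change updates the computed limit.
 */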
void kvm_mmu_change_mmu_pages(struct kvm *kvm, unsigned int kvm_nr_mmu_pages)
{
	/*
	 * If we set the number of mmu pages to be smaller than the
	 * number of active pages, we must free some mmu pages before we
	 * change the value.
	 */

	if ((kvm->n_alloc_mmu_pages - kvm->n_free_mmu_pages) >
	    kvm_nr_mmu_pages) {
		int n_used_mmu_pages = kvm->n_alloc_mmu_pages
				       - kvm->n_free_mmu_pages;

		while (n_used_mmu_pages > kvm_nr_mmu_pages) {
			struct kvm_mmu_page *page;

			page = container_of(kvm->active_mmu_pages.prev,
					    struct kvm_mmu_page, link);
			kvm_mmu_zap_page(kvm, page);
			n_used_mmu_pages--;
		}
		kvm->n_free_mmu_pages = 0;
	}
	else
		kvm->n_free_mmu_pages += kvm_nr_mmu_pages
					 - kvm->n_alloc_mmu_pages;

	kvm->n_alloc_mmu_pages = kvm_nr_mmu_pages;
}

static int kvm_mmu_unprotect_page(struct kvm *kvm, gfn_t gfn)
{
	unsigned index;
	struct hlist_head *bucket;
	struct kvm_mmu_page *page;
	struct hlist_node *node, *n;
	int r;

	pgprintk("%s: looking for gfn %lx\n", __FUNCTION__, gfn);
	r = 0;
	index = kvm_page_table_hashfn(gfn) % KVM_NUM_MMU_PAGES;
	bucket = &kvm->mmu_page_hash[index];
	hlist_for_each_entry_safe(page, node, n, bucket, hash_link)
		if (page->gfn == gfn && !page->role.metaphysical) {
			pgprintk("%s: gfn %lx role %x\n", __FUNCTION__, gfn,
				 page->role.word);
			kvm_mmu_zap_page(kvm, page);
			r = 1;
		}
	return r;
}

static void mmu_unshadow(struct kvm *kvm, gfn_t gfn)
{
	struct kvm_mmu_page *page;

	while ((page = kvm_mmu_lookup_page(kvm, gfn)) != NULL) {
		pgprintk("%s: zap %lx %x\n",
			 __FUNCTION__, gfn, page->role.word);
		kvm_mmu_zap_page(kvm, page);
	}
}

static void page_header_update_slot(struct kvm *kvm, void *pte, gfn_t gfn)
{
	int slot = memslot_id(kvm, gfn_to_memslot(kvm, gfn));
	struct kvm_mmu_page *page_head = page_header(__pa(pte));

	__set_bit(slot, &page_head->slot_bitmap);
}

hpa_t gpa_to_hpa(struct kvm *kvm, gpa_t gpa)
{
	struct page *page;
	hpa_t hpa;

	ASSERT((gpa & HPA_ERR_MASK) == 0);
	page = gfn_to_page(kvm, gpa >> PAGE_SHIFT);
	hpa = ((hpa_t)page_to_pfn(page) << PAGE_SHIFT) | (gpa & (PAGE_SIZE-1));
	if (is_error_page(page))
		return hpa | HPA_ERR_MASK;
	return hpa;
}

hpa_t gva_to_hpa(struct kvm_vcpu *vcpu, gva_t gva)
{
	gpa_t gpa = vcpu->mmu.gva_to_gpa(vcpu, gva);

	if (gpa == UNMAPPED_GVA)
		return UNMAPPED_GVA;
	return gpa_to_hpa(vcpu->kvm, gpa);
}

struct page *gva_to_page(struct kvm_vcpu *vcpu, gva_t gva)
{
	gpa_t gpa = vcpu->mmu.gva_to_gpa(vcpu, gva);

	if (gpa == UNMAPPED_GVA)
		return NULL;
	return pfn_to_page(gpa_to_hpa(vcpu->kvm, gpa) >> PAGE_SHIFT);
}

static void nonpaging_new_cr3(struct kvm_vcpu *vcpu)
{
}

static int nonpaging_map(struct kvm_vcpu *vcpu, gva_t v, struct page *page)
{
	int level = PT32E_ROOT_LEVEL;
	hpa_t table_addr = vcpu->mmu.root_hpa;

	for (; ; level--) {
		u32 index = PT64_INDEX(v, level);
		u64 *table;
		u64 pte;

		ASSERT(VALID_PAGE(table_addr));
		table = __va(table_addr);

		if (level == 1) {
			int was_rmapped;

			pte = table[index];
			was_rmapped = is_rmap_pte(pte);
			if (is_shadow_present_pte(pte) && is_writeble_pte(pte)) {
				kvm_release_page_clean(page);
				return 0;
			}
			mark_page_dirty(vcpu->kvm, v >> PAGE_SHIFT);
			page_header_update_slot(vcpu->kvm, table,
						v >> PAGE_SHIFT);
			table[index] = page_to_phys(page)
				| PT_PRESENT_MASK | PT_WRITABLE_MASK
				| PT_USER_MASK;
			if (!was_rmapped)
				rmap_add(vcpu, &table[index], v >> PAGE_SHIFT);
			else
				kvm_release_page_clean(page);

			return 0;
		}

		if (table[index] == shadow_trap_nonpresent_pte) {
			struct kvm_mmu_page *new_table;
			gfn_t pseudo_gfn;

			pseudo_gfn = (v & PT64_DIR_BASE_ADDR_MASK)
				>> PAGE_SHIFT;
			new_table = kvm_mmu_get_page(vcpu, pseudo_gfn,
						     v, level - 1,
						     1, 3, &table[index]);
			if (!new_table) {
				pgprintk("nonpaging_map: ENOMEM\n");
				kvm_release_page_clean(page);
				return -ENOMEM;
			}

			table[index] = __pa(new_table->spt) | PT_PRESENT_MASK
				| PT_WRITABLE_MASK | PT_USER_MASK;
		}
		table_addr = table[index] & PT64_BASE_ADDR_MASK;
	}
}

static void nonpaging_prefetch_page(struct kvm_vcpu *vcpu,
				    struct kvm_mmu_page *sp)
{
	int i;

	for (i = 0; i < PT64_ENT_PER_PAGE; ++i)
		sp->spt[i] = shadow_trap_nonpresent_pte;
}

static void mmu_free_roots(struct kvm_vcpu *vcpu)
{
	int i;
	struct kvm_mmu_page *page;

	if (!VALID_PAGE(vcpu->mmu.root_hpa))
		return;
#ifdef CONFIG_X86_64
	if (vcpu->mmu.shadow_root_level == PT64_ROOT_LEVEL) {
		hpa_t root = vcpu->mmu.root_hpa;

		page = page_header(root);
		--page->root_count;
		vcpu->mmu.root_hpa = INVALID_PAGE;
		return;
	}
#endif
	for (i = 0; i < 4; ++i) {
		hpa_t root = vcpu->mmu.pae_root[i];

		if (root) {
			root &= PT64_BASE_ADDR_MASK;
			page = page_header(root);
			--page->root_count;
		}
		vcpu->mmu.pae_root[i] = INVALID_PAGE;
	}
	vcpu->mmu.root_hpa = INVALID_PAGE;
}

static void mmu_alloc_roots(struct kvm_vcpu *vcpu)
{
	int i;
	gfn_t root_gfn;
	struct kvm_mmu_page *page;

	root_gfn = vcpu->cr3 >> PAGE_SHIFT;

#ifdef CONFIG_X86_64
	if (vcpu->mmu.shadow_root_level == PT64_ROOT_LEVEL) {
		hpa_t root = vcpu->mmu.root_hpa;

		ASSERT(!VALID_PAGE(root));
		page = kvm_mmu_get_page(vcpu, root_gfn, 0,
					PT64_ROOT_LEVEL, 0, 0, NULL);
		root = __pa(page->spt);
		++page->root_count;
		vcpu->mmu.root_hpa = root;
		return;
	}
#endif
	for (i = 0; i < 4; ++i) {
		hpa_t root = vcpu->mmu.pae_root[i];

		ASSERT(!VALID_PAGE(root));
		if (vcpu->mmu.root_level == PT32E_ROOT_LEVEL) {
			if (!is_present_pte(vcpu->pdptrs[i])) {
				vcpu->mmu.pae_root[i] = 0;
				continue;
			}
			root_gfn = vcpu->pdptrs[i] >> PAGE_SHIFT;
		} else if (vcpu->mmu.root_level == 0)
			root_gfn = 0;
		page = kvm_mmu_get_page(vcpu, root_gfn, i << 30,
					PT32_ROOT_LEVEL, !is_paging(vcpu),
					0, NULL);
		root = __pa(page->spt);
		++page->root_count;
		vcpu->mmu.pae_root[i] = root | PT_PRESENT_MASK;
	}
	vcpu->mmu.root_hpa = __pa(vcpu->mmu.pae_root);
}

static gpa_t nonpaging_gva_to_gpa(struct kvm_vcpu *vcpu, gva_t vaddr)
{
	return vaddr;
}

static int nonpaging_page_fault(struct kvm_vcpu *vcpu, gva_t gva,
				u32 error_code)
{
	struct page *page;
	int r;

	r = mmu_topup_memory_caches(vcpu);
	if (r)
		return r;

	ASSERT(VALID_PAGE(vcpu->mmu.root_hpa));

	page = gfn_to_page(vcpu->kvm, gva >> PAGE_SHIFT);

	if (is_error_page(page)) {
		kvm_release_page_clean(page);
		return 1;
	}

	return nonpaging_map(vcpu, gva & PAGE_MASK, page);
}

static void nonpaging_free(struct kvm_vcpu *vcpu)
{
	mmu_free_roots(vcpu);
}

static int nonpaging_init_context(struct kvm_vcpu *vcpu)
{
	struct kvm_mmu *context = &vcpu->mmu;

	context->new_cr3 = nonpaging_new_cr3;
	context->page_fault = nonpaging_page_fault;
	context->gva_to_gpa = nonpaging_gva_to_gpa;
	context->free = nonpaging_free;
	context->prefetch_page = nonpaging_prefetch_page;
	context->root_level = 0;
	context->shadow_root_level = PT32E_ROOT_LEVEL;
	context->root_hpa = INVALID_PAGE;
	return 0;
}

void kvm_mmu_flush_tlb(struct kvm_vcpu *vcpu)
{
	++vcpu->stat.tlb_flush;
	kvm_x86_ops->tlb_flush(vcpu);
}

static void paging_new_cr3(struct kvm_vcpu *vcpu)
{
	pgprintk("%s: cr3 %lx\n", __FUNCTION__, vcpu->cr3);
	mmu_free_roots(vcpu);
}

static void inject_page_fault(struct kvm_vcpu *vcpu,
			      u64 addr,
			      u32 err_code)
{
	kvm_x86_ops->inject_page_fault(vcpu, addr, err_code);
}

static void paging_free(struct kvm_vcpu *vcpu)
{
	nonpaging_free(vcpu);
}
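
/*
 * paging_tmpl.h is a template: each inclusion below instantiates the
 * paging64_* or paging32_* walker and fault handlers according to PTTYPE.
 */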
#define PTTYPE 64
#include "paging_tmpl.h"
#undef PTTYPE

#define PTTYPE 32
#include "paging_tmpl.h"
#undef PTTYPE

static int paging64_init_context_common(struct kvm_vcpu *vcpu, int level)
{
	struct kvm_mmu *context = &vcpu->mmu;

	ASSERT(is_pae(vcpu));
	context->new_cr3 = paging_new_cr3;
	context->page_fault = paging64_page_fault;
	context->gva_to_gpa = paging64_gva_to_gpa;
	context->prefetch_page = paging64_prefetch_page;
	context->free = paging_free;
	context->root_level = level;
	context->shadow_root_level = level;
	context->root_hpa = INVALID_PAGE;
	return 0;
}

static int paging64_init_context(struct kvm_vcpu *vcpu)
{
	return paging64_init_context_common(vcpu, PT64_ROOT_LEVEL);
}

static int paging32_init_context(struct kvm_vcpu *vcpu)
{
	struct kvm_mmu *context = &vcpu->mmu;

	context->new_cr3 = paging_new_cr3;
	context->page_fault = paging32_page_fault;
	context->gva_to_gpa = paging32_gva_to_gpa;
	context->free = paging_free;
	context->prefetch_page = paging32_prefetch_page;
	context->root_level = PT32_ROOT_LEVEL;
	context->shadow_root_level = PT32E_ROOT_LEVEL;
	context->root_hpa = INVALID_PAGE;
	return 0;
}

static int paging32E_init_context(struct kvm_vcpu *vcpu)
{
	return paging64_init_context_common(vcpu, PT32E_ROOT_LEVEL);
}

static int init_kvm_mmu(struct kvm_vcpu *vcpu)
{
	ASSERT(vcpu);
	ASSERT(!VALID_PAGE(vcpu->mmu.root_hpa));

	if (!is_paging(vcpu))
		return nonpaging_init_context(vcpu);
	else if (is_long_mode(vcpu))
		return paging64_init_context(vcpu);
	else if (is_pae(vcpu))
		return paging32E_init_context(vcpu);
	else
		return paging32_init_context(vcpu);
}

static void destroy_kvm_mmu(struct kvm_vcpu *vcpu)
{
	ASSERT(vcpu);
	if (VALID_PAGE(vcpu->mmu.root_hpa)) {
		vcpu->mmu.free(vcpu);
		vcpu->mmu.root_hpa = INVALID_PAGE;
	}
}

int kvm_mmu_reset_context(struct kvm_vcpu *vcpu)
{
	destroy_kvm_mmu(vcpu);
	return init_kvm_mmu(vcpu);
}
EXPORT_SYMBOL_GPL(kvm_mmu_reset_context);

int kvm_mmu_load(struct kvm_vcpu *vcpu)
{
	int r;

	mutex_lock(&vcpu->kvm->lock);
	r = mmu_topup_memory_caches(vcpu);
	if (r)
		goto out;
	mmu_alloc_roots(vcpu);
	kvm_x86_ops->set_cr3(vcpu, vcpu->mmu.root_hpa);
	kvm_mmu_flush_tlb(vcpu);
out:
	mutex_unlock(&vcpu->kvm->lock);
	return r;
}
EXPORT_SYMBOL_GPL(kvm_mmu_load);

void kvm_mmu_unload(struct kvm_vcpu *vcpu)
{
	mmu_free_roots(vcpu);
}

static void mmu_pte_write_zap_pte(struct kvm_vcpu *vcpu,
				  struct kvm_mmu_page *page,
				  u64 *spte)
{
	u64 pte;
	struct kvm_mmu_page *child;

	pte = *spte;
	if (is_shadow_present_pte(pte)) {
		if (page->role.level == PT_PAGE_TABLE_LEVEL)
			rmap_remove(vcpu->kvm, spte);
		else {
			child = page_header(pte & PT64_BASE_ADDR_MASK);
			mmu_page_remove_parent_pte(child, spte);
		}
	}
	set_shadow_pte(spte, shadow_trap_nonpresent_pte);
}

static void mmu_pte_write_new_pte(struct kvm_vcpu *vcpu,
				  struct kvm_mmu_page *page,
				  u64 *spte,
				  const void *new, int bytes,
				  int offset_in_pte)
{
	if (page->role.level != PT_PAGE_TABLE_LEVEL) {
		++vcpu->kvm->stat.mmu_pde_zapped;
		return;
	}

	++vcpu->kvm->stat.mmu_pte_updated;
	if (page->role.glevels == PT32_ROOT_LEVEL)
		paging32_update_pte(vcpu, page, spte, new, bytes,
				    offset_in_pte);
	else
		paging64_update_pte(vcpu, page, spte, new, bytes,
				    offset_in_pte);
}

static bool need_remote_flush(u64 old, u64 new)
{
	if (!is_shadow_present_pte(old))
		return false;
	if (!is_shadow_present_pte(new))
		return true;
	if ((old ^ new) & PT64_BASE_ADDR_MASK)
		return true;
	old ^= PT64_NX_MASK;
	new ^= PT64_NX_MASK;
	return (old & ~new & PT64_PERM_MASK) != 0;
}
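
/*
 * Only changes that can revoke rights another vcpu may have cached
 * (a frame change, or a permission going away - with NX's inverted
 * sense handled by the XORs above) force a remote tlb flush; anything
 * else gets by with a local flush.
 */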
static void mmu_pte_write_flush_tlb(struct kvm_vcpu *vcpu, u64 old, u64 new)
{
	if (need_remote_flush(old, new))
		kvm_flush_remote_tlbs(vcpu->kvm);
	else
		kvm_mmu_flush_tlb(vcpu);
}

static bool last_updated_pte_accessed(struct kvm_vcpu *vcpu)
{
	u64 *spte = vcpu->last_pte_updated;

	return !!(spte && (*spte & PT_ACCESSED_MASK));
}
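
/*
 * Write-flood detection: if the guest keeps rewriting the same gfn
 * (last_pt_write_count below reaches 3) without ever accessing through
 * it (the accessed bit of the last updated spte stays clear), the page
 * has probably been recycled and is no longer a page table, so zapping
 * it beats endlessly emulating writes to it.
 */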
void kvm_mmu_pte_write(struct kvm_vcpu *vcpu, gpa_t gpa,
		       const u8 *new, int bytes)
{
	gfn_t gfn = gpa >> PAGE_SHIFT;
	struct kvm_mmu_page *page;
	struct hlist_node *node, *n;
	struct hlist_head *bucket;
	unsigned index;
	u64 entry;
	u64 *spte;
	unsigned offset = offset_in_page(gpa);
	unsigned pte_size;
	unsigned page_offset;
	unsigned misaligned;
	unsigned quadrant;
	int level;
	int flooded = 0;
	int npte;

	pgprintk("%s: gpa %llx bytes %d\n", __FUNCTION__, gpa, bytes);
	++vcpu->kvm->stat.mmu_pte_write;
	kvm_mmu_audit(vcpu, "pre pte write");
	if (gfn == vcpu->last_pt_write_gfn
	    && !last_updated_pte_accessed(vcpu)) {
		++vcpu->last_pt_write_count;
		if (vcpu->last_pt_write_count >= 3)
			flooded = 1;
	} else {
		vcpu->last_pt_write_gfn = gfn;
		vcpu->last_pt_write_count = 1;
		vcpu->last_pte_updated = NULL;
	}
	index = kvm_page_table_hashfn(gfn) % KVM_NUM_MMU_PAGES;
	bucket = &vcpu->kvm->mmu_page_hash[index];
	hlist_for_each_entry_safe(page, node, n, bucket, hash_link) {
		if (page->gfn != gfn || page->role.metaphysical)
			continue;
		pte_size = page->role.glevels == PT32_ROOT_LEVEL ? 4 : 8;
		misaligned = (offset ^ (offset + bytes - 1)) & ~(pte_size - 1);
		misaligned |= bytes < 4;
		if (misaligned || flooded) {
			/*
			 * Misaligned accesses are too much trouble to fix
			 * up; also, they usually indicate a page is not used
			 * as a page table.
			 *
			 * If we're seeing too many writes to a page,
			 * it may no longer be a page table, or we may be
			 * forking, in which case it is better to unmap the
			 * page.
			 */
			pgprintk("misaligned: gpa %llx bytes %d role %x\n",
				 gpa, bytes, page->role.word);
			kvm_mmu_zap_page(vcpu->kvm, page);
			++vcpu->kvm->stat.mmu_flooded;
			continue;
		}
		page_offset = offset;
		level = page->role.level;
		npte = 1;
		if (page->role.glevels == PT32_ROOT_LEVEL) {
			page_offset <<= 1;	/* 32->64 */
			/*
			 * A 32-bit pde maps 4MB while the shadow pdes map
			 * only 2MB.  So we need to double the offset again
			 * and zap two pdes instead of one.
			 */
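			/*
			 * Worked example: 32-bit guest pde i sits at byte
			 * offset 4*i; doubling twice gives offset 16*i,
			 * i.e. shadow pdes 2*i and 2*i+1, each covering
			 * half of the guest pde's 4MB range.
			 */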
			if (level == PT32_ROOT_LEVEL) {
				page_offset &= ~7; /* kill rounding error */
				page_offset <<= 1;
				npte = 2;
			}
			quadrant = page_offset >> PAGE_SHIFT;
			page_offset &= ~PAGE_MASK;
			if (quadrant != page->role.quadrant)
				continue;
		}
		spte = &page->spt[page_offset / sizeof(*spte)];
		while (npte--) {
			entry = *spte;
			mmu_pte_write_zap_pte(vcpu, page, spte);
			mmu_pte_write_new_pte(vcpu, page, spte, new, bytes,
					      page_offset & (pte_size - 1));
			mmu_pte_write_flush_tlb(vcpu, entry, *spte);
			++spte;
		}
	}
	kvm_mmu_audit(vcpu, "post pte write");
}

int kvm_mmu_unprotect_page_virt(struct kvm_vcpu *vcpu, gva_t gva)
{
	gpa_t gpa = vcpu->mmu.gva_to_gpa(vcpu, gva);

	return kvm_mmu_unprotect_page(vcpu->kvm, gpa >> PAGE_SHIFT);
}

void __kvm_mmu_free_some_pages(struct kvm_vcpu *vcpu)
{
	while (vcpu->kvm->n_free_mmu_pages < KVM_REFILL_PAGES) {
		struct kvm_mmu_page *page;

		page = container_of(vcpu->kvm->active_mmu_pages.prev,
				    struct kvm_mmu_page, link);
		kvm_mmu_zap_page(vcpu->kvm, page);
		++vcpu->kvm->stat.mmu_recycled;
	}
}

int kvm_mmu_page_fault(struct kvm_vcpu *vcpu, gva_t cr2, u32 error_code)
{
	int r;
	enum emulation_result er;

	mutex_lock(&vcpu->kvm->lock);
	r = vcpu->mmu.page_fault(vcpu, cr2, error_code);
	if (r < 0)
		goto out;

	if (!r) {
		r = 1;
		goto out;
	}

	r = mmu_topup_memory_caches(vcpu);
	if (r)
		goto out;

	er = emulate_instruction(vcpu, vcpu->run, cr2, error_code, 0);
	mutex_unlock(&vcpu->kvm->lock);

	switch (er) {
	case EMULATE_DONE:
		return 1;
	case EMULATE_DO_MMIO:
		++vcpu->stat.mmio_exits;
		return 0;
	case EMULATE_FAIL:
		kvm_report_emulation_failure(vcpu, "pagetable");
		return 1;
	default:
		BUG();
	}
out:
	mutex_unlock(&vcpu->kvm->lock);
	return r;
}
EXPORT_SYMBOL_GPL(kvm_mmu_page_fault);

static void free_mmu_pages(struct kvm_vcpu *vcpu)
{
	struct kvm_mmu_page *page;

	while (!list_empty(&vcpu->kvm->active_mmu_pages)) {
		page = container_of(vcpu->kvm->active_mmu_pages.next,
				    struct kvm_mmu_page, link);
		kvm_mmu_zap_page(vcpu->kvm, page);
	}
	free_page((unsigned long)vcpu->mmu.pae_root);
}

static int alloc_mmu_pages(struct kvm_vcpu *vcpu)
{
	struct page *page;
	int i;

	ASSERT(vcpu);

	if (vcpu->kvm->n_requested_mmu_pages)
		vcpu->kvm->n_free_mmu_pages = vcpu->kvm->n_requested_mmu_pages;
	else
		vcpu->kvm->n_free_mmu_pages = vcpu->kvm->n_alloc_mmu_pages;
	/*
	 * When emulating 32-bit mode, cr3 is only 32 bits even on x86_64.
	 * Therefore we need to allocate shadow page tables in the first
	 * 4GB of memory, which happens to fit the DMA32 zone.
	 */
	page = alloc_page(GFP_KERNEL | __GFP_DMA32);
	if (!page)
		goto error_1;
	vcpu->mmu.pae_root = page_address(page);
	for (i = 0; i < 4; ++i)
		vcpu->mmu.pae_root[i] = INVALID_PAGE;

	return 0;

error_1:
	free_mmu_pages(vcpu);
	return -ENOMEM;
}

int kvm_mmu_create(struct kvm_vcpu *vcpu)
{
	ASSERT(vcpu);
	ASSERT(!VALID_PAGE(vcpu->mmu.root_hpa));

	return alloc_mmu_pages(vcpu);
}

int kvm_mmu_setup(struct kvm_vcpu *vcpu)
{
	ASSERT(vcpu);
	ASSERT(!VALID_PAGE(vcpu->mmu.root_hpa));

	return init_kvm_mmu(vcpu);
}

void kvm_mmu_destroy(struct kvm_vcpu *vcpu)
{
	ASSERT(vcpu);

	destroy_kvm_mmu(vcpu);
	free_mmu_pages(vcpu);
	mmu_free_memory_caches(vcpu);
}

void kvm_mmu_slot_remove_write_access(struct kvm *kvm, int slot)
{
	struct kvm_mmu_page *page;

	list_for_each_entry(page, &kvm->active_mmu_pages, link) {
		int i;
		u64 *pt;

		if (!test_bit(slot, &page->slot_bitmap))
			continue;

		pt = page->spt;
		for (i = 0; i < PT64_ENT_PER_PAGE; ++i)
			/* avoid RMW */
			if (pt[i] & PT_WRITABLE_MASK)
				pt[i] &= ~PT_WRITABLE_MASK;
	}
}

void kvm_mmu_zap_all(struct kvm *kvm)
{
	struct kvm_mmu_page *page, *node;

	list_for_each_entry_safe(page, node, &kvm->active_mmu_pages, link)
		kvm_mmu_zap_page(kvm, page);

	kvm_flush_remote_tlbs(kvm);
}

void kvm_mmu_module_exit(void)
{
	if (pte_chain_cache)
		kmem_cache_destroy(pte_chain_cache);
	if (rmap_desc_cache)
		kmem_cache_destroy(rmap_desc_cache);
	if (mmu_page_header_cache)
		kmem_cache_destroy(mmu_page_header_cache);
}

int kvm_mmu_module_init(void)
{
	pte_chain_cache = kmem_cache_create("kvm_pte_chain",
					    sizeof(struct kvm_pte_chain),
					    0, 0, NULL);
	if (!pte_chain_cache)
		goto nomem;
	rmap_desc_cache = kmem_cache_create("kvm_rmap_desc",
					    sizeof(struct kvm_rmap_desc),
					    0, 0, NULL);
	if (!rmap_desc_cache)
		goto nomem;

	mmu_page_header_cache = kmem_cache_create("kvm_mmu_page_header",
						  sizeof(struct kvm_mmu_page),
						  0, 0, NULL);
	if (!mmu_page_header_cache)
		goto nomem;

	return 0;

nomem:
	kvm_mmu_module_exit();
	return -ENOMEM;
}

/*
 * Calculate mmu pages needed for kvm.
 */
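/*
 * For example, a guest with 512MB of memory (131072 4KB pages) gets
 * 131072 * KVM_PERMILLE_MMU_PAGES / 1000 shadow pages (a few thousand
 * with the default permille), but never fewer than
 * KVM_MIN_ALLOC_MMU_PAGES.
 */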
unsigned int kvm_mmu_calculate_mmu_pages(struct kvm *kvm)
{
	int i;
	unsigned int nr_mmu_pages;
	unsigned int nr_pages = 0;

	for (i = 0; i < kvm->nmemslots; i++)
		nr_pages += kvm->memslots[i].npages;

	nr_mmu_pages = nr_pages * KVM_PERMILLE_MMU_PAGES / 1000;
	nr_mmu_pages = max(nr_mmu_pages,
			   (unsigned int) KVM_MIN_ALLOC_MMU_PAGES);

	return nr_mmu_pages;
}

#ifdef AUDIT

static const char *audit_msg;

static gva_t canonicalize(gva_t gva)
{
#ifdef CONFIG_X86_64
	gva = (long long)(gva << 16) >> 16;
#endif
	return gva;
}

static void audit_mappings_page(struct kvm_vcpu *vcpu, u64 page_pte,
				gva_t va, int level)
{
	u64 *pt = __va(page_pte & PT64_BASE_ADDR_MASK);
	int i;
	gva_t va_delta = 1ul << (PAGE_SHIFT + 9 * (level - 1));

	for (i = 0; i < PT64_ENT_PER_PAGE; ++i, va += va_delta) {
		u64 ent = pt[i];

		if (ent == shadow_trap_nonpresent_pte)
			continue;

		va = canonicalize(va);
		if (level > 1) {
			if (ent == shadow_notrap_nonpresent_pte)
				printk(KERN_ERR "audit: (%s) nontrapping pte"
				       " in nonleaf level: levels %d gva %lx"
				       " level %d pte %llx\n", audit_msg,
				       vcpu->mmu.root_level, va, level, ent);

			audit_mappings_page(vcpu, ent, va, level - 1);
		} else {
			gpa_t gpa = vcpu->mmu.gva_to_gpa(vcpu, va);
			hpa_t hpa = gpa_to_hpa(vcpu->kvm, gpa);
			struct page *page;

			if (is_shadow_present_pte(ent)
			    && (ent & PT64_BASE_ADDR_MASK) != hpa)
				printk(KERN_ERR "xx audit error: (%s) levels %d"
				       " gva %lx gpa %llx hpa %llx ent %llx %d\n",
				       audit_msg, vcpu->mmu.root_level,
				       va, gpa, hpa, ent,
				       is_shadow_present_pte(ent));
			else if (ent == shadow_notrap_nonpresent_pte
				 && !is_error_hpa(hpa))
				printk(KERN_ERR "audit: (%s) notrap shadow,"
				       " valid guest gva %lx\n", audit_msg, va);
			page = pfn_to_page((gpa & PT64_BASE_ADDR_MASK)
					   >> PAGE_SHIFT);
			kvm_release_page_clean(page);
		}
	}
}

static void audit_mappings(struct kvm_vcpu *vcpu)
{
	unsigned i;

	if (vcpu->mmu.root_level == 4)
		audit_mappings_page(vcpu, vcpu->mmu.root_hpa, 0, 4);
	else
		for (i = 0; i < 4; ++i)
			if (vcpu->mmu.pae_root[i] & PT_PRESENT_MASK)
				audit_mappings_page(vcpu,
						    vcpu->mmu.pae_root[i],
						    i << 30,
						    2);
}

static int count_rmaps(struct kvm_vcpu *vcpu)
{
	int nmaps = 0;
	int i, j, k;

	for (i = 0; i < KVM_MEMORY_SLOTS; ++i) {
		struct kvm_memory_slot *m = &vcpu->kvm->memslots[i];
		struct kvm_rmap_desc *d;

		for (j = 0; j < m->npages; ++j) {
			unsigned long *rmapp = &m->rmap[j];

			if (!*rmapp)
				continue;
			if (!(*rmapp & 1)) {
				++nmaps;
				continue;
			}
			d = (struct kvm_rmap_desc *)(*rmapp & ~1ul);
			while (d) {
				for (k = 0; k < RMAP_EXT; ++k)
					if (d->shadow_ptes[k])
						++nmaps;
					else
						break;
				d = d->more;
			}
		}
	}
	return nmaps;
}

static int count_writable_mappings(struct kvm_vcpu *vcpu)
{
	int nmaps = 0;
	struct kvm_mmu_page *page;
	int i;

	list_for_each_entry(page, &vcpu->kvm->active_mmu_pages, link) {
		u64 *pt = page->spt;

		if (page->role.level != PT_PAGE_TABLE_LEVEL)
			continue;

		for (i = 0; i < PT64_ENT_PER_PAGE; ++i) {
			u64 ent = pt[i];

			if (!(ent & PT_PRESENT_MASK))
				continue;
			if (!(ent & PT_WRITABLE_MASK))
				continue;
			++nmaps;
		}
	}
	return nmaps;
}

static void audit_rmap(struct kvm_vcpu *vcpu)
{
	int n_rmap = count_rmaps(vcpu);
	int n_actual = count_writable_mappings(vcpu);

	if (n_rmap != n_actual)
		printk(KERN_ERR "%s: (%s) rmap %d actual %d\n",
		       __FUNCTION__, audit_msg, n_rmap, n_actual);
}

static void audit_write_protection(struct kvm_vcpu *vcpu)
{
	struct kvm_mmu_page *page;
	struct kvm_memory_slot *slot;
	unsigned long *rmapp;
	gfn_t gfn;

	list_for_each_entry(page, &vcpu->kvm->active_mmu_pages, link) {
		if (page->role.metaphysical)
			continue;

		slot = gfn_to_memslot(vcpu->kvm, page->gfn);
		gfn = unalias_gfn(vcpu->kvm, page->gfn);
		rmapp = &slot->rmap[gfn - slot->base_gfn];
		if (*rmapp)
			printk(KERN_ERR "%s: (%s) shadow page has writable"
			       " mappings: gfn %lx role %x\n",
			       __FUNCTION__, audit_msg, page->gfn,
			       page->role.word);
	}
}

static void kvm_mmu_audit(struct kvm_vcpu *vcpu, const char *msg)
{
	int olddbg = dbg;

	dbg = 0;
	audit_msg = msg;
	audit_rmap(vcpu);
	audit_write_protection(vcpu);
	audit_mappings(vcpu);
	dbg = olddbg;
}

#endif